author    isaacs <i@izs.me>  2012-09-18 15:20:38 -0700
committer Bert Belder <bertbelder@gmail.com>  2012-09-21 01:52:24 +0200
commit    3411a03dd114d635800cc50749d2351cd734eb2a (patch)
tree      0ba1e52ab2236286894b33400302181ece91b63a /deps
parent    cc1b09d6b7c3cc6b8729804cbf644634ba5d0815 (diff)
download  android-node-v8-3411a03dd114d635800cc50749d2351cd734eb2a.tar.gz
          android-node-v8-3411a03dd114d635800cc50749d2351cd734eb2a.tar.bz2
          android-node-v8-3411a03dd114d635800cc50749d2351cd734eb2a.zip
V8: Upgrade to 3.13.7.1
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.gitignore5
-rw-r--r--deps/v8/AUTHORS3
-rw-r--r--deps/v8/ChangeLog327
-rw-r--r--deps/v8/Makefile89
-rw-r--r--deps/v8/Makefile.android92
-rw-r--r--deps/v8/build/android.gypi84
-rw-r--r--deps/v8/build/common.gypi62
-rw-r--r--deps/v8/build/standalone.gypi11
-rwxr-xr-xdeps/v8/include/v8-debug.h10
-rw-r--r--deps/v8/include/v8-preparser.h7
-rw-r--r--deps/v8/include/v8-profiler.h27
-rw-r--r--deps/v8/include/v8-testing.h7
-rw-r--r--deps/v8/include/v8.h132
-rw-r--r--deps/v8/preparser/preparser-process.cc6
-rw-r--r--deps/v8/samples/lineprocessor.cc38
-rw-r--r--deps/v8/samples/process.cc14
-rw-r--r--deps/v8/samples/shell.cc2
-rwxr-xr-xdeps/v8/src/SConscript29
-rw-r--r--deps/v8/src/accessors.cc69
-rw-r--r--deps/v8/src/accessors.h4
-rw-r--r--deps/v8/src/api.cc243
-rw-r--r--deps/v8/src/api.h139
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h5
-rw-r--r--deps/v8/src/arm/assembler-arm.cc171
-rw-r--r--deps/v8/src/arm/assembler-arm.h48
-rw-r--r--deps/v8/src/arm/builtins-arm.cc65
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc372
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h16
-rw-r--r--deps/v8/src/arm/codegen-arm.cc37
-rw-r--r--deps/v8/src/arm/constants-arm.h5
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc174
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc235
-rw-r--r--deps/v8/src/arm/ic-arm.cc2
-rw-r--r--deps/v8/src/arm/lithium-arm.cc237
-rw-r--r--deps/v8/src/arm/lithium-arm.h136
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc503
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h51
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc207
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h28
-rw-r--r--deps/v8/src/arm/regexp-macro-assembler-arm.h9
-rw-r--r--deps/v8/src/arm/simulator-arm.cc88
-rw-r--r--deps/v8/src/arm/simulator-arm.h36
-rw-r--r--deps/v8/src/arm/stub-cache-arm.cc312
-rw-r--r--deps/v8/src/array.js151
-rw-r--r--deps/v8/src/assembler.cc51
-rw-r--r--deps/v8/src/assembler.h15
-rw-r--r--deps/v8/src/ast.cc60
-rw-r--r--deps/v8/src/ast.h401
-rw-r--r--deps/v8/src/bootstrapper.cc793
-rw-r--r--deps/v8/src/bootstrapper.h4
-rw-r--r--deps/v8/src/builtins.cc43
-rw-r--r--deps/v8/src/builtins.h10
-rw-r--r--deps/v8/src/checks.h8
-rw-r--r--deps/v8/src/code-stubs.cc24
-rw-r--r--deps/v8/src/code-stubs.h45
-rw-r--r--deps/v8/src/collection.js27
-rw-r--r--deps/v8/src/compilation-cache.cc38
-rw-r--r--deps/v8/src/compilation-cache.h21
-rw-r--r--deps/v8/src/compiler.cc557
-rw-r--r--deps/v8/src/compiler.h198
-rw-r--r--deps/v8/src/contexts.cc53
-rw-r--r--deps/v8/src/contexts.h64
-rw-r--r--deps/v8/src/conversions-inl.h36
-rw-r--r--deps/v8/src/conversions.h11
-rw-r--r--deps/v8/src/counters.cc26
-rw-r--r--deps/v8/src/counters.h60
-rw-r--r--deps/v8/src/cpu-profiler.h2
-rw-r--r--deps/v8/src/d8.cc495
-rw-r--r--deps/v8/src/d8.h24
-rw-r--r--deps/v8/src/date.js41
-rw-r--r--deps/v8/src/debug-debugger.js38
-rw-r--r--deps/v8/src/debug.cc335
-rw-r--r--deps/v8/src/debug.h23
-rw-r--r--deps/v8/src/deoptimizer.cc290
-rw-r--r--deps/v8/src/deoptimizer.h33
-rw-r--r--deps/v8/src/disassembler.cc4
-rw-r--r--deps/v8/src/elements.cc2
-rw-r--r--deps/v8/src/execution.cc59
-rw-r--r--deps/v8/src/execution.h5
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc153
-rw-r--r--deps/v8/src/extensions/statistics-extension.h49
-rw-r--r--deps/v8/src/factory.cc164
-rw-r--r--deps/v8/src/factory.h35
-rw-r--r--deps/v8/src/flag-definitions.h51
-rw-r--r--deps/v8/src/flags.cc14
-rw-r--r--deps/v8/src/frames.cc24
-rw-r--r--deps/v8/src/frames.h2
-rw-r--r--deps/v8/src/full-codegen.cc132
-rw-r--r--deps/v8/src/full-codegen.h46
-rw-r--r--deps/v8/src/globals.h15
-rw-r--r--deps/v8/src/handles-inl.h30
-rw-r--r--deps/v8/src/handles.cc156
-rw-r--r--deps/v8/src/handles.h36
-rw-r--r--deps/v8/src/hashmap.h8
-rw-r--r--deps/v8/src/heap-inl.h41
-rw-r--r--deps/v8/src/heap.cc653
-rw-r--r--deps/v8/src/heap.h198
-rw-r--r--deps/v8/src/hydrogen-instructions.cc196
-rw-r--r--deps/v8/src/hydrogen-instructions.h323
-rw-r--r--deps/v8/src/hydrogen.cc1933
-rw-r--r--deps/v8/src/hydrogen.h225
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h11
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc23
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h15
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc45
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc155
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc170
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc12
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc218
-rw-r--r--deps/v8/src/ia32/ic-ia32.cc2
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc495
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h41
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc288
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h171
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc187
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h20
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.cc5
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.h9
-rw-r--r--deps/v8/src/ia32/stub-cache-ia32.cc283
-rw-r--r--deps/v8/src/ic.cc134
-rw-r--r--deps/v8/src/incremental-marking-inl.h2
-rw-r--r--deps/v8/src/incremental-marking.cc189
-rw-r--r--deps/v8/src/incremental-marking.h9
-rw-r--r--deps/v8/src/interface.cc14
-rw-r--r--deps/v8/src/interface.h42
-rw-r--r--deps/v8/src/isolate.cc141
-rw-r--r--deps/v8/src/isolate.h66
-rw-r--r--deps/v8/src/json-parser.h4
-rw-r--r--deps/v8/src/json.js33
-rw-r--r--deps/v8/src/jsregexp.cc342
-rw-r--r--deps/v8/src/jsregexp.h133
-rw-r--r--deps/v8/src/lithium-allocator.cc2
-rw-r--r--deps/v8/src/lithium-allocator.h6
-rw-r--r--deps/v8/src/lithium.cc198
-rw-r--r--deps/v8/src/lithium.h86
-rw-r--r--deps/v8/src/liveedit-debugger.js18
-rw-r--r--deps/v8/src/liveedit.cc223
-rw-r--r--deps/v8/src/liveedit.h6
-rw-r--r--deps/v8/src/liveobjectlist.cc4
-rw-r--r--deps/v8/src/log.cc113
-rw-r--r--deps/v8/src/log.h25
-rw-r--r--deps/v8/src/mark-compact.cc717
-rw-r--r--deps/v8/src/mark-compact.h7
-rw-r--r--deps/v8/src/messages.js54
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h5
-rw-r--r--deps/v8/src/mips/assembler-mips.cc6
-rw-r--r--deps/v8/src/mips/assembler-mips.h15
-rw-r--r--deps/v8/src/mips/builtins-mips.cc63
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc208
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc167
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc210
-rw-r--r--deps/v8/src/mips/ic-mips.cc2
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc535
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h51
-rw-r--r--deps/v8/src/mips/lithium-mips.cc243
-rw-r--r--deps/v8/src/mips/lithium-mips.h136
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc135
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h15
-rw-r--r--deps/v8/src/mips/regexp-macro-assembler-mips.h8
-rw-r--r--deps/v8/src/mips/simulator-mips.cc9
-rw-r--r--deps/v8/src/mips/stub-cache-mips.cc268
-rw-r--r--deps/v8/src/mirror-debugger.js162
-rw-r--r--deps/v8/src/mksnapshot.cc59
-rw-r--r--deps/v8/src/objects-debug.cc98
-rw-r--r--deps/v8/src/objects-inl.h842
-rw-r--r--deps/v8/src/objects-printer.cc113
-rw-r--r--deps/v8/src/objects-visiting-inl.h180
-rw-r--r--deps/v8/src/objects-visiting.h196
-rw-r--r--deps/v8/src/objects.cc2760
-rw-r--r--deps/v8/src/objects.h850
-rw-r--r--deps/v8/src/optimizing-compiler-thread.cc127
-rw-r--r--deps/v8/src/optimizing-compiler-thread.h101
-rw-r--r--deps/v8/src/parser.cc356
-rw-r--r--deps/v8/src/parser.h50
-rw-r--r--deps/v8/src/platform-linux.cc151
-rw-r--r--deps/v8/src/platform-openbsd.cc5
-rw-r--r--deps/v8/src/platform-posix.cc5
-rw-r--r--deps/v8/src/platform-win32.cc5
-rw-r--r--deps/v8/src/platform.h20
-rw-r--r--deps/v8/src/preparser.cc13
-rw-r--r--deps/v8/src/profile-generator-inl.h1
-rw-r--r--deps/v8/src/profile-generator.cc115
-rw-r--r--deps/v8/src/profile-generator.h4
-rw-r--r--deps/v8/src/property-details.h60
-rw-r--r--deps/v8/src/property.cc64
-rw-r--r--deps/v8/src/property.h166
-rw-r--r--deps/v8/src/regexp-macro-assembler-irregexp.cc6
-rw-r--r--deps/v8/src/regexp-macro-assembler-irregexp.h2
-rw-r--r--deps/v8/src/rewriter.cc14
-rw-r--r--deps/v8/src/runtime-profiler.cc29
-rw-r--r--deps/v8/src/runtime.cc1796
-rw-r--r--deps/v8/src/runtime.h25
-rwxr-xr-xdeps/v8/src/scanner.cc1
-rw-r--r--deps/v8/src/scopeinfo.cc9
-rw-r--r--deps/v8/src/scopes.cc215
-rw-r--r--deps/v8/src/scopes.h30
-rw-r--r--deps/v8/src/serialize.cc201
-rw-r--r--deps/v8/src/serialize.h3
-rw-r--r--deps/v8/src/smart-pointers.h (renamed from deps/v8/src/smart-array-pointer.h)67
-rw-r--r--deps/v8/src/snapshot-common.cc5
-rw-r--r--deps/v8/src/snapshot.h2
-rw-r--r--deps/v8/src/spaces.cc9
-rw-r--r--deps/v8/src/spaces.h4
-rw-r--r--deps/v8/src/string-stream.cc2
-rw-r--r--deps/v8/src/stub-cache.cc127
-rw-r--r--deps/v8/src/stub-cache.h47
-rw-r--r--deps/v8/src/transitions-inl.h219
-rw-r--r--deps/v8/src/transitions.cc128
-rw-r--r--deps/v8/src/transitions.h190
-rw-r--r--deps/v8/src/type-info.cc154
-rw-r--r--deps/v8/src/type-info.h26
-rw-r--r--deps/v8/src/unicode-inl.h2
-rw-r--r--deps/v8/src/unicode.h4
-rw-r--r--deps/v8/src/utils.h47
-rw-r--r--deps/v8/src/v8-counters.cc58
-rw-r--r--deps/v8/src/v8-counters.h144
-rw-r--r--deps/v8/src/v8.cc11
-rw-r--r--deps/v8/src/v8globals.h47
-rw-r--r--deps/v8/src/v8threads.cc23
-rw-r--r--deps/v8/src/v8threads.h5
-rw-r--r--deps/v8/src/v8utils.cc9
-rw-r--r--deps/v8/src/v8utils.h3
-rw-r--r--deps/v8/src/variables.cc7
-rw-r--r--deps/v8/src/variables.h15
-rw-r--r--deps/v8/src/version.cc6
-rw-r--r--deps/v8/src/vm-state-inl.h2
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h11
-rw-r--r--deps/v8/src/x64/assembler-x64.cc22
-rw-r--r--deps/v8/src/x64/assembler-x64.h12
-rw-r--r--deps/v8/src/x64/builtins-x64.cc47
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc199
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc170
-rw-r--r--deps/v8/src/x64/disasm-x64.cc3
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc224
-rw-r--r--deps/v8/src/x64/ic-x64.cc9
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc603
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h37
-rw-r--r--deps/v8/src/x64/lithium-x64.cc309
-rw-r--r--deps/v8/src/x64/lithium-x64.h157
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc270
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h29
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.cc8
-rw-r--r--deps/v8/src/x64/stub-cache-x64.cc272
-rw-r--r--deps/v8/src/zone-inl.h17
-rw-r--r--deps/v8/src/zone.cc10
-rw-r--r--deps/v8/src/zone.h9
-rw-r--r--deps/v8/test/cctest/cctest.gyp2
-rw-r--r--deps/v8/test/cctest/cctest.status20
-rw-r--r--deps/v8/test/cctest/test-alloc.cc23
-rw-r--r--deps/v8/test/cctest/test-api.cc501
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc14
-rw-r--r--deps/v8/test/cctest/test-ast.cc6
-rw-r--r--deps/v8/test/cctest/test-compiler.cc64
-rw-r--r--deps/v8/test/cctest/test-dataflow.cc4
-rw-r--r--deps/v8/test/cctest/test-debug.cc56
-rw-r--r--deps/v8/test/cctest/test-decls.cc327
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc28
-rw-r--r--deps/v8/test/cctest/test-flags.cc15
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc11
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc68
-rw-r--r--deps/v8/test/cctest/test-heap.cc465
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc6
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc21
-rwxr-xr-xdeps/v8/test/cctest/test-parsing.cc40
-rw-r--r--deps/v8/test/cctest/test-platform-linux.cc6
-rw-r--r--deps/v8/test/cctest/test-platform-win32.cc7
-rw-r--r--deps/v8/test/cctest/test-random.cc6
-rw-r--r--deps/v8/test/cctest/test-regexp.cc119
-rw-r--r--deps/v8/test/cctest/test-serialize.cc189
-rw-r--r--deps/v8/test/cctest/test-strings.cc36
-rw-r--r--deps/v8/test/cctest/test-utils.cc16
-rw-r--r--deps/v8/test/cctest/testcfg.py3
-rw-r--r--deps/v8/test/message/testcfg.py13
-rw-r--r--deps/v8/test/mjsunit/accessor-map-sharing.js18
-rw-r--r--deps/v8/test/mjsunit/array-bounds-check-removal.js16
-rw-r--r--deps/v8/test/mjsunit/array-iteration.js2
-rw-r--r--deps/v8/test/mjsunit/array-literal-transitions.js2
-rw-r--r--deps/v8/test/mjsunit/assert-opt-and-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/alloc-object-huge.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-accessors.js368
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-arguments.js25
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-construct.js98
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-literals.js39
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-closures.js57
-rw-r--r--deps/v8/test/mjsunit/compiler/uint32.js173
-rw-r--r--deps/v8/test/mjsunit/count-based-osr.js3
-rw-r--r--deps/v8/test/mjsunit/date.js22
-rw-r--r--deps/v8/test/mjsunit/debug-break-inline.js1
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js34
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js31
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-double-call.js142
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-restart-frame.js153
-rw-r--r--deps/v8/test/mjsunit/debug-script-breakpoints-closure.js67
-rw-r--r--deps/v8/test/mjsunit/debug-script-breakpoints-nested.js82
-rw-r--r--deps/v8/test/mjsunit/debug-script.js2
-rw-r--r--deps/v8/test/mjsunit/elements-kind.js2
-rw-r--r--deps/v8/test/mjsunit/elements-transition-hoisting.js1
-rw-r--r--deps/v8/test/mjsunit/eval-stack-trace.js203
-rw-r--r--deps/v8/test/mjsunit/external-array.js293
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/block-conflicts.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-crankshaft.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/collections.js22
-rw-r--r--deps/v8/test/mjsunit/harmony/module-linking.js183
-rw-r--r--deps/v8/test/mjsunit/harmony/module-parsing.js5
-rw-r--r--deps/v8/test/mjsunit/harmony/module-recompile.js87
-rw-r--r--deps/v8/test/mjsunit/harmony/module-resolution.js17
-rw-r--r--deps/v8/test/mjsunit/math-floor-negative.js59
-rw-r--r--deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js40
-rw-r--r--deps/v8/test/mjsunit/mirror-object.js52
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status44
-rw-r--r--deps/v8/test/mjsunit/object-define-property.js109
-rw-r--r--deps/v8/test/mjsunit/packed-elements.js4
-rw-r--r--deps/v8/test/mjsunit/parse-int-float.js8
-rw-r--r--deps/v8/test/mjsunit/pixel-array-rounding.js2
-rw-r--r--deps/v8/test/mjsunit/regexp-global.js113
-rw-r--r--deps/v8/test/mjsunit/regexp-results-cache.js78
-rw-r--r--deps/v8/test/mjsunit/regress-2286.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1118.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-131994.js70
-rw-r--r--deps/v8/test/mjsunit/regress/regress-136048.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-137768.js73
-rw-r--r--deps/v8/test/mjsunit/regress/regress-148378.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1563.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1591.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2119.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2172.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2185-2.js145
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2185.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2186.js49
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2193.js58
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2219.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2225.js65
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2226.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2234.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2249.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2250.js68
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2284.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2285.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2289.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2294.js70
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2296.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-125148.js72
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-134609.js59
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-135008.js45
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-135066.js53
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-137689.js47
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-138887.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-140083.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-142087.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-142218.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-145961.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-147475.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-148376.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-debug-code-recompilation.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-load-elements.js49
-rw-r--r--deps/v8/test/mjsunit/str-to-num.js9
-rw-r--r--deps/v8/test/mjsunit/string-charcodeat.js3
-rw-r--r--deps/v8/test/mjsunit/testcfg.py6
-rw-r--r--deps/v8/test/mjsunit/typed-array-slice.js61
-rw-r--r--deps/v8/test/mjsunit/unbox-double-arrays.js2
-rw-r--r--deps/v8/test/mozilla/mozilla.status95
-rw-r--r--deps/v8/test/preparser/strict-identifiers.pyt2
-rw-r--r--deps/v8/test/sputnik/sputnik.status2
-rw-r--r--deps/v8/test/test262/test262.status28
-rw-r--r--deps/v8/tools/android-build.sh0
-rw-r--r--deps/v8/tools/android-ll-prof.sh69
-rw-r--r--deps/v8/tools/android-run.py109
-rw-r--r--deps/v8/tools/android-sync.sh105
-rwxr-xr-xdeps/v8/tools/grokdump.py96
-rw-r--r--deps/v8/tools/gyp/v8.gyp74
-rwxr-xr-xdeps/v8/tools/linux-tick-processor10
-rwxr-xr-xdeps/v8/tools/ll_prof.py27
-rwxr-xr-xdeps/v8/tools/push-to-trunk.sh17
-rwxr-xr-xdeps/v8/tools/run-valgrind.py56
-rwxr-xr-xdeps/v8/tools/test-wrapper-gypbuild.py21
-rwxr-xr-xdeps/v8/tools/test.py10
-rw-r--r--deps/v8/tools/tickprocessor-driver.js4
-rw-r--r--deps/v8/tools/tickprocessor.js18
380 files changed, 27561 insertions, 11842 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 088daeabf9..77f38dd9c6 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -9,9 +9,11 @@
*.pdb
*.pyc
*.scons*
+*.sln
*.so
*.suo
*.user
+*.vcproj
*.xcodeproj
#*#
*~
@@ -20,13 +22,16 @@ d8
d8_g
shell
shell_g
+/build/Debug
/build/gyp
+/build/Release
/obj/
/out/
/test/es5conform/data
/test/mozilla/data
/test/sputnik/sputniktests
/test/test262/data
+/third_party
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/visual_studio/Debug
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 6e46b3d621..1156d94958 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -24,9 +24,11 @@ Dineel D Sule <dsule@codeaurora.org>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
+Haitao Feng <haitao.feng@intel.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
+James Pike <g00gle@chilon.net>
Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Jonathan Liu <net147@gmail.com>
@@ -46,6 +48,7 @@ Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
+Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index fae15e58ee..7110aa83e3 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,330 @@
+2012-09-11: Version 3.13.7
+
+ Enable/disable LiveEdit using the (C++) debug API.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-09-06: Version 3.13.6
+
+ Added validity checking to API functions and calls.
+
+ Disabled accessor inlining (Chromium issue 134609).
+
+ Fixed bug in Math.min/max in optimized code (Chromium issue 145961).
+
+ Directly use %ObjectKeys in json stringify (Chromium issue 2312).
+
+ Fixed VS2005 build (issue 2313).
+
+ Activated fixed ES5 readonly semantics by default.
+
+ Added hardfp flag to the Makefile.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-08-29: Version 3.13.5
+
+ Release stack trace data after firing Error.stack accessor.
+ (issue 2308)
+
+ Added a new API V8::SetJitCodeEventHandler to push code name and
+ location to users such as profilers.
+
+ Allocate block-scoped global bindings to global context.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-08-28: Version 3.13.4
+
+ Print reason for disabling optimization. Kill --trace-bailout flag.
+
+ Provided option to disable full DEBUG build on Android.
+
+ Introduced global contexts to represent lexical global scope(s).
+
+ Fixed rounding in Uint8ClampedArray setter. (issue 2294)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-08-21: Version 3.13.3
+
+ Performance and stability improvements on all platforms.
+
+
+2012-08-20: Version 3.13.2
+
+ Performance and stability improvements on all platforms.
+
+
+2012-08-16: Version 3.13.1
+
+ Performance and stability improvements on all platforms.
+
+
+2012-08-10: Version 3.13.0
+
+ Added histograms for total allocated/live heap size, as well as
+ allocated size and percentage of total for map and cell space.
+
+ Fixed parseInt's octal parsing behavior (ECMA-262 Annex E 15.1.2.2).
+ (issue 1645)
+
+ Added checks for interceptors to negative lookup code in Crankshaft.
+ (Chromium issue 140473)
+
+ Made incremental marking clear ICs and type feedback cells.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-08-01: Version 3.12.19
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-30: Version 3.12.18
+
+ Forced using bit-pattern for signed zero double. (issue 2239)
+
+ Made sure double to int conversion is correct. (issue 2260)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-27: Version 3.12.17
+
+ Always set the callee's context when calling a function from optimized
+ code.
+ (Chromium issue 138887)
+
+ Fixed building with GCC 3.x
+ (issue 2016, 2017)
+
+ Improved API calls that return empty handles.
+ (issue 2245)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-25: Version 3.12.16
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-24: Version 3.12.15
+
+ Added PRESERVE_ASCII_NULL option to String::WriteAscii.
+ (issue 2252)
+
+ Added dependency to HLoadKeyed* instructions to prevent invalid
+ hoisting. (Chromium issue 137768)
+
+ Enabled building d8 for Android on Mac.
+
+ Interpret negative hexadecimal literals as NaN.
+ (issue 2240)
+
+ Expose counters in javascript when using --track-gc-object-stats.
+
+ Enabled building and testing V8 on Android IA.
+
+ Added --trace-parse flag to parser.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-18: Version 3.12.14
+
+ Deactivated optimization of packed arrays.
+ (Chromium issue 137768)
+
+ Fixed broken accessor transition.
+ (Chromium issue 137689)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-17: Version 3.12.13
+
+ Fixed missing tagging of stack value in finally block.
+ (Chromium issue 137496)
+
+ Added more support for heap analysis.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-16: Version 3.12.12
+
+ Added an option to the tickprocessor to specify the directory for lib
+ lookup.
+
+ Fixed ICs for slow objects with native accessor (Chromium issue 137002).
+
+ Fixed transcendental cache on ARM in optimized code (issue 2234).
+
+ New heap inspection tools: counters for object sizes and counts,
+ histograms for external fragmentation.
+
+ Incorporated constness into inferred interfaces (in preparation for
+ handling imports) (issue 1569).
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-12: Version 3.12.11
+
+ Renamed "mips" arch to "mipsel" in the GYP build.
+
+ Fixed computation of call targets on prototypes in Crankshaft.
+ (Chromium issue 125148)
+
+ Removed use of __lookupGetter__ when generating stack trace.
+ (issue 1591)
+
+ Turned on ES 5.2 globals semantics by default.
+ (issue 1991, Chromium issue 80591)
+
+ Synced preparser and parser wrt syntax error in switch..case.
+ (issue 2210)
+
+ Fixed reporting of octal literals in strict mode when preparsing.
+ (issue 2220)
+
+ Fixed inline constructors for Harmony Proxy prototypes.
+ (issue 2225)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-10: Version 3.12.10
+
+ Re-enabled and fixed issue with array bounds check elimination
+ (Chromium issue 132114).
+
+ Fixed Debug::Break crash. (Chromium issue 131642)
+
+ Added optimizing compiler support for JavaScript getters.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-06: Version 3.12.9
+
+ Correctly advance the scanner when scanning unicode regexp flag.
+ (Chromium issue 136084)
+
+ Fixed unhandlified code calling Harmony Proxy traps.
+ (issue 2219)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-05: Version 3.12.8
+
+ Implemented TypedArray.set and ArrayBuffer.slice in d8.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-07-03: Version 3.12.7
+
+ Fixed lazy compilation for strict eval scopes.
+ (Chromium issue 135066)
+
+ Made MACOSX_DEPLOYMENT_TARGET configurable in GYP.
+ (issue 2151)
+
+ Report "hidden properties" in heap profiler for properties case.
+ (issue 2212)
+
+ Activated optimization of packed arrays by default.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-06-29: Version 3.12.6
+
+ Cleaned up hardfp ABI detection for ARM (V8 issue 2140).
+
+ Extended TypedArray support in d8.
+
+
+2012-06-28: Version 3.12.5
+
+ Fixed lazy parsing heuristics to respect outer scope.
+ (Chromium issue 135008)
+
+ Allow using test-wrapper-gypbuild.py on Windows when no python
+ interpreter is registered.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-06-27: Version 3.12.4
+
+ Removed -fomit-frame-pointer flag from Release builds to make
+ the stack walkable by TCMalloc (Chromium issue 133723).
+
+ Ported r7868 (constant masking) to x64 (issue 1374).
+
+ Expose more detailed memory statistics (issue 2201).
+
+ Fixed Harmony Maps and WeakMaps for undefined values
+ (Chromium issue 132744).
+
+ Correctly throw reference error in strict mode with ICs disabled
+ (issue 2119).
+
+ Performance and stability improvements on all platforms.
+
+
+2012-06-25: Version 3.12.3
+
+ Reverted r11835 'Unify promotion and allocation limit computation' due
+ to V8 Splay performance regression on Mac. (Chromium issue 134183)
+
+ Fixed sharing of literal boilerplates for optimized code. (issue 2193)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-06-22: Version 3.12.2
+
+ Made near-jump check more strict in LoadNamedFieldPolymorphic on
+ ia32/x64. (Chromium issue 134055)
+
+ Fixed lazy sweeping heuristics to prevent old-space expansion.
+ (issue 2194)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-06-21: Version 3.12.1
+
+ Performance and stability improvements on all platforms.
+
+
+2012-06-20: Version 3.12.0
+
+ Fixed Chromium issues:
+ 115100, 129628, 131994, 132727, 132741, 132742, 133211
+
+ Fixed V8 issues:
+ 915, 1914, 2034, 2087, 2094, 2134, 2156, 2166, 2172, 2177, 2179, 2185
+
+ Added --extra-code flag to mksnapshot to load JS code into the VM
+ before creating the snapshot.
+
+ Support 'restart call frame' command in the debugger.
+
+ Performance and stability improvements on all platforms.
+
+
2012-06-13: Version 3.11.10
Implemented heap profiler memory usage reporting.
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 0d825c0795..8db3193860 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -34,7 +34,8 @@ TESTJOBS ?= -j16
GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
-ANDROID_TOOL_PREFIX = $(ANDROID_NDK_ROOT)/toolchain/bin/arm-linux-androideabi
+ANDROID_TOOLCHAIN ?=
+ANDROID_V8 ?= /data/local/v8
# Special build flags. Use them like this: "make library=shared"
@@ -61,6 +62,13 @@ endif
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
endif
+# extrachecks=on/off
+ifeq ($(extrachecks), on)
+ GYPFLAGS += -Dv8_enable_extra_checks=1
+endif
+ifeq ($(extrachecks), off)
+ GYPFLAGS += -Dv8_enable_extra_checks=0
+endif
# gdbjit=on
ifeq ($(gdbjit), on)
GYPFLAGS += -Dv8_enable_gdbjit=1
@@ -95,6 +103,14 @@ endif
ifeq ($(strictaliasing), off)
GYPFLAGS += -Dv8_no_strict_aliasing=1
endif
+# regexp=interpreted
+ifeq ($(regexp), interpreted)
+ GYPFLAGS += -Dv8_interpreted_regexp=1
+endif
+# hardfp=on
+ifeq ($(hardfp), on)
+ GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true
+endif
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
@@ -103,7 +119,7 @@ endif
# - every combination <arch>.<mode>, e.g. "ia32.release"
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
-# - "android": cross-compile for Android/ARM (release mode)
+# - "android": cross-compile for Android/ARM
# - default (no target specified): build all DEFAULT_ARCHES and MODES
# - "check": build all targets and run all tests
# - "<arch>.clean" for any <arch> in ARCHES
@@ -113,9 +129,10 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm mips
+ARCHES = ia32 x64 arm mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug
+ANDROID_ARCHES = android_ia32 android_arm
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
@@ -124,15 +141,19 @@ GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
# Generates all combinations of ARCHES and MODES, e.g. "ia32.release".
BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
+ANDROID_BUILDS = $(foreach mode,$(MODES), \
+ $(addsuffix .$(mode),$(ANDROID_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
+ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
- must-set-ANDROID_NDK_ROOT
+ $(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
+ must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN
# Target definitions. "all" is the default.
all: $(MODES)
@@ -143,6 +164,10 @@ buildbot:
$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
+mips mips.release mips.debug:
+ @echo "V8 does not support big-endian MIPS builds at the moment," \
+ "please use little-endian builds (mipsel)."
+
# Compile targets. MODES and ARCHES are convenience targets.
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@@ -162,17 +187,15 @@ native: $(OUTDIR)/Makefile.native
CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/$@"
-# TODO(jkummerow): add "android.debug" when we need it.
-android android.release: $(OUTDIR)/Makefile.android
- @$(MAKE) -C "$(OUTDIR)" -f Makefile.android \
- CXX="$(ANDROID_TOOL_PREFIX)-g++" \
- AR="$(ANDROID_TOOL_PREFIX)-ar" \
- RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \
- CC="$(ANDROID_TOOL_PREFIX)-gcc" \
- LD="$(ANDROID_TOOL_PREFIX)-ld" \
- LINK="$(ANDROID_TOOL_PREFIX)-g++" \
- BUILDTYPE=Release \
- builddir="$(shell pwd)/$(OUTDIR)/android.release"
+$(ANDROID_ARCHES): $(addprefix $$@.,$(MODES))
+
+$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \
+ must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN Makefile.android
+ @$(MAKE) -f Makefile.android $@ \
+ ARCH="$(basename $@)" \
+ MODE="$(subst .,,$(suffix $@))" \
+ OUTDIR="$(OUTDIR)" \
+ GYPFLAGS="$(GYPFLAGS)"
# Test targets.
check: all
@@ -192,12 +215,25 @@ $(CHECKS): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS)
+$(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
+ @tools/android-sync.sh $(basename $@) $(OUTDIR) \
+ $(shell pwd) $(ANDROID_V8)
+
+$(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
+ @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ --arch-and-mode=$(basename $@) \
+ --timeout=600 \
+ --special-command="tools/android-run.py @"
+
+$(addsuffix .check, $(ANDROID_ARCHES)): \
+ $(addprefix $$(basename $$@).,$(MODES)).check
+
native.check: native
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
# Clean targets. You can clean each architecture individually, or everything.
-$(addsuffix .clean,$(ARCHES)) android.clean:
+$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
@@ -208,11 +244,11 @@ native.clean:
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
-clean: $(addsuffix .clean,$(ARCHES)) native.clean android.clean
+clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean
# GYP file generation targets.
-MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
-$(MAKEFILES): $(GYPFILES) $(ENVFILE)
+OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
+$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
@@ -224,18 +260,11 @@ $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
-$(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
- must-set-ANDROID_NDK_ROOT
- GYP_GENERATORS=make \
- CC="${ANDROID_TOOL_PREFIX}-gcc" \
- CXX="${ANDROID_TOOL_PREFIX}-g++" \
- build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
- -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
- -S.android $(GYPFLAGS)
-
-must-set-ANDROID_NDK_ROOT:
+must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN:
ifndef ANDROID_NDK_ROOT
- $(error ANDROID_NDK_ROOT is not set)
+ifndef ANDROID_TOOLCHAIN
+  $(error ANDROID_NDK_ROOT or ANDROID_TOOLCHAIN must be set)
+endif
endif
# Replaces the old with the new environment file if they're different, which
diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android
new file mode 100644
index 0000000000..a8d7fe148e
--- /dev/null
+++ b/deps/v8/Makefile.android
@@ -0,0 +1,92 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Those definitions should be consistent with the main Makefile
+ANDROID_ARCHES = android_ia32 android_arm
+MODES = release debug
+
+# Generates all combinations of ANDROID ARCHES and MODES,
+# e.g. "android_ia32.release" or "android_arm.release"
+ANDROID_BUILDS = $(foreach mode,$(MODES), \
+ $(addsuffix .$(mode),$(ANDROID_ARCHES)))
+
+HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
+ifeq ($(HOST_OS), linux)
+ TOOLCHAIN_DIR = linux-x86
+else
+ ifeq ($(HOST_OS), mac)
+ TOOLCHAIN_DIR = darwin-x86
+ else
+ $(error Host platform "${HOST_OS}" is not supported)
+ endif
+endif
+
+ifeq ($(ARCH), android_arm)
+ DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
+ DEFINES += arm_neon=0 armv7=1
+ TOOLCHAIN_ARCH = arm-linux-androideabi-4.4.3
+else
+ ifeq ($(ARCH), android_ia32)
+ DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
+ TOOLCHAIN_ARCH = x86-4.4.3
+ else
+ $(error Target architecture "${ARCH}" is not supported)
+ endif
+endif
+
+TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
+ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
+ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
+ $(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}")
+endif
+
+# For mksnapshot host generation.
+DEFINES += host_os=${HOST_OS}
+
+.SECONDEXPANSION:
+$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
+ @$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
+ CXX="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
+ AR="$(ANDROID_TOOLCHAIN)/bin/*-ar" \
+ RANLIB="$(ANDROID_TOOLCHAIN)/bin/*-ranlib" \
+ CC="$(ANDROID_TOOLCHAIN)/bin/*-gcc" \
+ LD="$(ANDROID_TOOLCHAIN)/bin/*-ld" \
+ LINK="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
+ BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
+ python -c "print raw_input().capitalize()") \
+ builddir="$(shell pwd)/$(OUTDIR)/$@"
+
+# Android GYP file generation targets.
+ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_ARCHES))
+$(ANDROID_MAKEFILES):
+ @GYP_GENERATORS=make-android \
+ GYP_DEFINES="${DEFINES}" \
+ CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
+ CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
+ build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
+ -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
+ -S.${ARCH} ${GYPFLAGS}
diff --git a/deps/v8/build/android.gypi b/deps/v8/build/android.gypi
index ffd06484f7..d2d1a35726 100644
--- a/deps/v8/build/android.gypi
+++ b/deps/v8/build/android.gypi
@@ -33,35 +33,40 @@
'variables': {
# Location of Android NDK.
'variables': {
- 'variables': {
- 'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
- 'android_target_arch%': 'arm', # target_arch in android terms.
-
- # Switch between different build types, currently only '0' is
- # supported.
- 'android_build_type%': 0,
- },
- 'android_ndk_root%': '<(android_ndk_root)',
- 'android_ndk_sysroot': '<(android_ndk_root)/platforms/android-9/arch-<(android_target_arch)',
- 'android_build_type%': '<(android_build_type)',
+ 'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
+ 'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)',
+ # Switch between different build types, currently only '0' is
+ # supported.
+ 'android_build_type%': 0,
},
- 'android_ndk_root%': '<(android_ndk_root)',
- 'android_ndk_sysroot': '<(android_ndk_sysroot)',
- 'android_ndk_include': '<(android_ndk_sysroot)/usr/include',
- 'android_ndk_lib': '<(android_ndk_sysroot)/usr/lib',
+ 'conditions': [
+ ['android_ndk_root==""', {
+ 'variables': {
+ 'android_sysroot': '<(android_toolchain)/sysroot/',
+ 'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
+ },
+ 'android_include': '<(android_sysroot)/usr/include',
+ 'android_lib': '<(android_sysroot)/usr/lib',
+ 'android_stlport_include': '<(android_stlport)/stlport',
+ 'android_stlport_libs': '<(android_stlport)/libs',
+ }, {
+ 'variables': {
+ 'android_sysroot': '<(android_ndk_root)/platforms/android-9/arch-<(android_target_arch)',
+ 'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
+ },
+ 'android_include': '<(android_sysroot)/usr/include',
+ 'android_lib': '<(android_sysroot)/usr/lib',
+ 'android_stlport_include': '<(android_stlport)/stlport',
+ 'android_stlport_libs': '<(android_stlport)/libs',
+ }],
+ ],
# Enable to use the system stlport, otherwise statically
# link the NDK one?
'use_system_stlport%': '<(android_build_type)',
'android_stlport_library': 'stlport_static',
# Copy it out one scope.
'android_build_type%': '<(android_build_type)',
-
'OS': 'android',
- 'target_arch': 'arm',
- 'v8_target_arch': 'arm',
- 'armv7': 1,
- 'arm_neon': 0,
- 'arm_fpu': 'vfpv3',
}, # variables
'target_defaults': {
'defines': [
@@ -100,10 +105,7 @@
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
- '-I<(android_ndk_include)',
- '-march=armv7-a',
- '-mtune=cortex-a8',
- '-mfpu=vfp3',
+ '-I<(android_include)',
],
'defines': [
'ANDROID',
@@ -120,7 +122,6 @@
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
- '-Wl,--icf=safe', # Enable identical code folding to reduce size
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
@@ -144,8 +145,21 @@
'conditions': [
['android_build_type==0', {
'ldflags': [
- '-Wl,-rpath-link=<(android_ndk_lib)',
- '-L<(android_ndk_lib)',
+ '-Wl,-rpath-link=<(android_lib)',
+ '-L<(android_lib)',
+ ],
+ }],
+ ['target_arch == "arm"', {
+ 'ldflags': [
+ # Enable identical code folding to reduce size.
+ '-Wl,--icf=safe',
+ ],
+ }],
+ ['target_arch=="arm" and armv7==1', {
+ 'cflags': [
+ '-march=armv7-a',
+ '-mtune=cortex-a8',
+ '-mfpu=vfp3',
],
}],
# NOTE: The stlport header include paths below are specified in
@@ -156,22 +170,22 @@
# The include ordering here is important; change with caution.
['use_system_stlport==0', {
'cflags': [
- '-I<(android_ndk_root)/sources/cxx-stl/stlport/stlport',
+ '-I<(android_stlport_include)',
],
'conditions': [
['target_arch=="arm" and armv7==1', {
'ldflags': [
- '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/armeabi-v7a',
+ '-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and armv7==0', {
'ldflags': [
- '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/armeabi',
+ '-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="ia32"', {
'ldflags': [
- '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/x86',
+ '-L<(android_stlport_libs)/x86',
],
}],
],
@@ -194,12 +208,12 @@
'-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
- '<(android_ndk_lib)/crtbegin_dynamic.o',
+ '<(android_lib)/crtbegin_dynamic.o',
],
'libraries': [
# crtend_android.o needs to be the last item in libraries.
# Do not add any libraries after this!
- '<(android_ndk_lib)/crtend_android.o',
+ '<(android_lib)/crtend_android.o',
],
}],
['_type=="shared_library"', {
@@ -222,4 +236,4 @@
}],
], # target_conditions
}, # target_defaults
-} \ No newline at end of file
+}
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 1609197876..fdf4d9c307 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -48,7 +48,8 @@
# both for the snapshot and for the ARM target. Leaving the default value
# of 'false' will avoid VFP instructions in the snapshot and use CPU feature
# probing when running on the target.
- 'v8_can_use_vfp_instructions%': 'false',
+ 'v8_can_use_vfp2_instructions%': 'false',
+ 'v8_can_use_vfp3_instructions%': 'false',
# Similar to vfp but on MIPS.
'v8_can_use_fpu_instructions%': 'true',
@@ -69,6 +70,9 @@
'v8_enable_disassembler%': 0,
+ # Enable extra checks in API functions and other strategic places.
+ 'v8_enable_extra_checks%': 1,
+
'v8_object_print%': 0,
'v8_enable_gdbjit%': 0,
@@ -95,6 +99,10 @@
# For a shared library build, results in "libv8-<(soname_version).so".
'soname_version%': '',
+
+    # The interpreted regexp engine exists as a platform-independent
+    # alternative in which the regular expression is compiled to bytecode.
+ 'v8_interpreted_regexp%': 0,
},
'target_defaults': {
'conditions': [
@@ -104,12 +112,18 @@
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
+ ['v8_enable_extra_checks==1', {
+ 'defines': ['ENABLE_EXTRA_CHECKS',],
+ }],
['v8_object_print==1', {
'defines': ['OBJECT_PRINT',],
}],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
+ ['v8_interpreted_regexp==1', {
+ 'defines': ['V8_INTERPRETED_REGEXP',],
+ }],
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
@@ -125,9 +139,14 @@
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
- [ 'v8_can_use_vfp_instructions=="true"', {
+ [ 'v8_can_use_vfp2_instructions=="true"', {
'defines': [
- 'CAN_USE_VFP_INSTRUCTIONS',
+ 'CAN_USE_VFP2_INSTRUCTIONS',
+ ],
+ }],
+ [ 'v8_can_use_vfp3_instructions=="true"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'v8_use_arm_eabi_hardfloat=="true"', {
@@ -152,12 +171,12 @@
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
- ['v8_target_arch=="mips"', {
+ ['v8_target_arch=="mipsel"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
],
'variables': {
- 'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
+ 'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
@@ -207,7 +226,7 @@
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
],
- }], # v8_target_arch=="mips"
+ }], # v8_target_arch=="mipsel"
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
@@ -220,6 +239,7 @@
'StackReserveSize': '2097152',
},
},
+ 'msvs_configuration_platform': 'x64',
}], # v8_target_arch=="x64"
['v8_use_liveobjectlist=="true"', {
'defines': [
@@ -239,6 +259,7 @@
'WIN32',
],
'msvs_configuration_attributes': {
+ 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
@@ -264,7 +285,7 @@
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
- v8_target_arch=="mips")', {
+ v8_target_arch=="mipsel")', {
# Check whether the host compiler and target compiler support the
# '-m32' option and set it if so.
'target_conditions': [
@@ -323,19 +344,36 @@
},
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
- 'cflags': [ '-Wno-unused-parameter',
+ 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
+ ['OS=="android"', {
+ 'variables': {
+ 'android_full_debug%': 1,
+ },
+ 'conditions': [
+ ['android_full_debug==0', {
+ # Disable full debug if we want a faster v8 in a debug build.
+ # TODO(2304): pass DISABLE_DEBUG_ASSERT instead of hiding DEBUG.
+ 'defines!': [
+ 'DEBUG',
+ ],
+ }],
+ ],
+ }],
],
}, # Debug
'Release': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
+ 'cflags!': [
+ '-O2',
+ '-Os',
+ ],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
- '-fomit-frame-pointer',
'-O3',
],
'conditions': [
@@ -365,15 +403,17 @@
'InlineFunctionExpansion': '2',
'EnableIntrinsicFunctions': 'true',
'FavorSizeOrSpeed': '0',
- 'OmitFramePointers': 'true',
'StringPooling': 'true',
-
'conditions': [
['OS=="win" and component=="shared_library"', {
'RuntimeLibrary': '2', #/MD
}, {
'RuntimeLibrary': '0', #/MT
}],
+ ['v8_target_arch=="x64"', {
+ # TODO(2207): remove this option once the bug is fixed.
+ 'WholeProgramOptimization': 'true',
+ }],
],
},
'VCLinkerTool': {
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index ebdf557230..7145a16e0c 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -33,6 +33,7 @@
'component%': 'static_library',
'visibility%': 'hidden',
'msvs_multi_core_compile%': '1',
+ 'mac_deployment_target%': '10.5',
'variables': {
'variables': {
'variables': {
@@ -45,7 +46,7 @@
# to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;\
- s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
+ s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
@@ -66,8 +67,9 @@
'werror%': '-Werror',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
- (v8_target_arch=="mips" and host_arch!="mips") or \
- (v8_target_arch=="x64" and host_arch!="x64")', {
+ (v8_target_arch=="mipsel" and host_arch!="mipsel") or \
+ (v8_target_arch=="x64" and host_arch!="x64") or \
+ (OS=="android")', {
'want_separate_host_toolset': 1,
}, {
'want_separate_host_toolset': 0,
@@ -191,7 +193,8 @@
'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES', # -Werror
'GCC_VERSION': '4.2',
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
- 'MACOSX_DEPLOYMENT_TARGET': '10.4', # -mmacosx-version-min=10.4
+ # MACOSX_DEPLOYMENT_TARGET maps to -mmacosx-version-min
+ 'MACOSX_DEPLOYMENT_TARGET': '<(mac_deployment_target)',
'PREBINDING': 'NO', # No -Wl,-prebind
'SYMROOT': '<(DEPTH)/xcodebuild',
'USE_HEADERMAP': 'NO',
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 9e85dc462c..f432de0be8 100755
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -321,7 +321,7 @@ class EXPORT Debug {
* \endcode
*/
static Local<Value> Call(v8::Handle<v8::Function> fun,
- Handle<Value> data = Handle<Value>());
+ Handle<Value> data = Handle<Value>());
/**
* Returns a mirror object for the given object.
@@ -388,6 +388,14 @@ class EXPORT Debug {
* to change.
*/
static Local<Context> GetDebugContext();
+
+
+ /**
+ * Enable/disable LiveEdit functionality for the given Isolate
+ * (default Isolate if not provided). V8 will abort if LiveEdit is
+ * unexpectedly used. LiveEdit is enabled by default.
+ */
+ static void SetLiveEditEnabled(bool enable, Isolate* isolate = NULL);
};
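
A minimal usage sketch for the new LiveEdit switch (illustration only, not part of the patch), assuming an embedder built against this tree's headers; Isolate setup and error handling are elided:

```cpp
// Sketch: disabling LiveEdit for the default Isolate via the API added above.
#include <v8.h>
#include <v8-debug.h>

void DisableLiveEdit() {
  // NULL selects the default Isolate, per the declaration's default argument.
  v8::Debug::SetLiveEditEnabled(false, NULL);
}
```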
diff --git a/deps/v8/include/v8-preparser.h b/deps/v8/include/v8-preparser.h
index f11d05ef79..389949d200 100644
--- a/deps/v8/include/v8-preparser.h
+++ b/deps/v8/include/v8-preparser.h
@@ -55,11 +55,12 @@
// Setup for Linux shared library export. There is no need to distinguish
// between building or using the V8 shared library, but we should not
// export symbols when we are building a static library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
+ (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
+#else
#define V8EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
+#endif
#endif // _WIN32
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index cda2463362..c1e9a9e0b8 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -50,11 +50,12 @@
// Setup for Linux shared library export. See v8.h in this directory for
// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
+ (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
+#else
#define V8EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
+#endif
#endif // _WIN32
@@ -280,32 +281,12 @@ class V8EXPORT HeapGraphNode {
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
- /**
- * Returns node's retained size, in bytes. That is, self + sizes of
- * the objects that are reachable only from this object. In other
- * words, the size of memory that will be reclaimed having this node
- * collected.
- */
- int GetRetainedSize() const;
-
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
/** Retrieves a child by index. */
const HeapGraphEdge* GetChild(int index) const;
- /** Returns retainer nodes count of the node. */
- int GetRetainersCount() const;
-
- /** Returns a retainer by index. */
- const HeapGraphEdge* GetRetainer(int index) const;
-
- /**
- * Returns a dominator node. This is the node that participates in every
- * path from the snapshot root to the current node.
- */
- const HeapGraphNode* GetDominatorNode() const;
-
/**
* Finds and returns a value from the heap corresponding to this node,
* if the value is still reachable.
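
With the retainer and dominator accessors removed, heap-snapshot consumers traverse top-down through child edges only. A hedged sketch of such a walk follows (not part of the patch); the helper name is hypothetical and `GetToNode()` on `HeapGraphEdge` is assumed available in this API version:

```cpp
// Sketch: child-edge traversal replacing the removed retainer/dominator
// queries. A real walker would recurse into edge->GetToNode().
#include <v8-profiler.h>

static int CountChildEdges(const v8::HeapGraphNode* node) {
  int count = node->GetChildrenCount();
  for (int i = 0; i < count; ++i) {
    const v8::HeapGraphEdge* edge = node->GetChild(i);
    (void)edge;  // Visit the edge; recursion omitted for brevity.
  }
  return count;
}
```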
diff --git a/deps/v8/include/v8-testing.h b/deps/v8/include/v8-testing.h
index 245f74d878..59eebf9db4 100644
--- a/deps/v8/include/v8-testing.h
+++ b/deps/v8/include/v8-testing.h
@@ -50,11 +50,12 @@
// Setup for Linux shared library export. See v8.h in this directory for
// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
+ (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
+#else
#define V8EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
+#endif
#endif // _WIN32
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 77ffb385ab..ddde388cd4 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -63,15 +63,16 @@
#else // _WIN32
// Setup for Linux shared library export.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
+ (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
#ifdef BUILDING_V8_SHARED
#define V8EXPORT __attribute__ ((visibility("default")))
#else
#define V8EXPORT
#endif
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
+#else
#define V8EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
+#endif
#endif // _WIN32
@@ -1064,7 +1065,8 @@ class String : public Primitive {
enum WriteOptions {
NO_OPTIONS = 0,
HINT_MANY_WRITES_EXPECTED = 1,
- NO_NULL_TERMINATION = 2
+ NO_NULL_TERMINATION = 2,
+ PRESERVE_ASCII_NULL = 4
};
// 16-bit character codes.
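
A hedged sketch of the new write option in use (illustration, not patch content); `WriteAscii`'s signature (buffer, start, length, options) is assumed from the surrounding API and is not changed by this patch:

```cpp
// Sketch: PRESERVE_ASCII_NULL keeps embedded NUL bytes in the output rather
// than coercing them. The buffer size is an arbitrary illustrative choice.
#include <v8.h>

void CopyPreservingNuls(v8::Handle<v8::String> str) {
  char buffer[256];
  int written = str->WriteAscii(buffer, 0, sizeof(buffer) - 1,
                                v8::String::PRESERVE_ASCII_NULL);
  (void)written;  // Count of characters written, as before.
}
```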
@@ -1551,6 +1553,12 @@ class Object : public Value {
V8EXPORT Local<String> ObjectProtoToString();
/**
+ * Returns the function invoked as a constructor for this object.
+ * May be the null value.
+ */
+ V8EXPORT Local<Value> GetConstructor();
+
+ /**
* Returns the name of the function invoked as a constructor for this object.
*/
V8EXPORT Local<String> GetConstructorName();
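
One way the new accessor can be used, as a sketch (`StrictEquals` is an existing `Value` method assumed unchanged here):

```cpp
// Sketch: identity check against a known constructor using the new
// GetConstructor(), which may return the null value.
#include <v8.h>

bool WasConstructedBy(v8::Handle<v8::Object> obj,
                      v8::Handle<v8::Function> ctor) {
  v8::Local<v8::Value> c = obj->GetConstructor();
  return !c.IsEmpty() && c->StrictEquals(ctor);
}
```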
@@ -2909,16 +2917,85 @@ typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
* resolving the location of a return address on the stack. Profilers that
* change the return address on the stack can use this to resolve the stack
 * location to wherever the profiler stashed the original return address.
- * When invoked, return_addr_location will point to a location on stack where
- * a machine return address resides, this function should return either the
- * same pointer, or a pointer to the profiler's copy of the original return
- * address.
+ *
+ * \param return_addr_location points to a location on stack where a machine
+ * return address resides.
+ * \returns either return_addr_location, or else a pointer to the profiler's
+ * copy of the original return address.
+ *
+ * \note the resolver function must not cause garbage collection.
*/
typedef uintptr_t (*ReturnAddressLocationResolver)(
uintptr_t return_addr_location);
/**
+ * FunctionEntryHook is the type of the profile entry hook called at entry to
+ * any generated function when function-level profiling is enabled.
+ *
+ * \param function the address of the function that's being entered.
+ * \param return_addr_location points to a location on stack where the machine
+ * return address resides. This can be used to identify the caller of
+ * \p function, and/or modified to divert execution when \p function exits.
+ *
+ * \note the entry hook must not cause garbage collection.
+ */
+typedef void (*FunctionEntryHook)(uintptr_t function,
+ uintptr_t return_addr_location);
+
+
+/**
+ * A JIT code event is issued each time code is added, moved or removed.
+ *
+ * \note removal events are not currently issued.
+ */
+struct JitCodeEvent {
+ enum EventType {
+ CODE_ADDED,
+ CODE_MOVED,
+ CODE_REMOVED
+ };
+
+ // Type of event.
+ EventType type;
+ // Start of the instructions.
+ void* code_start;
+ // Size of the instructions.
+ size_t code_len;
+
+ union {
+ // Only valid for CODE_ADDED.
+ struct {
+ // Name of the object associated with the code, note that the string is
+ // not zero-terminated.
+ const char* str;
+ // Number of chars in str.
+ size_t len;
+ } name;
+ // New location of instructions. Only valid for CODE_MOVED.
+ void* new_code_start;
+ };
+};
+
+/**
+ * Option flags passed to the SetJitCodeEventHandler function.
+ */
+enum JitCodeEventOptions {
+ kJitCodeEventDefault = 0,
+  // Generate callbacks for code that already exists.
+ kJitCodeEventEnumExisting = 1
+};
+
+
+/**
+ * Callback function passed to SetJitCodeEventHandler.
+ *
+ * \param event code add, move or removal event.
+ */
+typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
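
A handler sketch that follows the lifetime and forward-compatibility notes attached to SetJitCodeEventHandler below: the name string is copied because it does not outlive the call, and unrecognized event types are ignored; logging to stderr is purely illustrative:

    #include <stdio.h>
    #include <string>

    void JitLogger(const v8::JitCodeEvent* event) {
      switch (event->type) {
        case v8::JitCodeEvent::CODE_ADDED: {
          // event->name.str is not zero-terminated and is only valid for
          // the duration of this call, so take a copy before using it.
          std::string name(event->name.str, event->name.len);
          fprintf(stderr, "code added: %s at %p (%u bytes)\n", name.c_str(),
                  event->code_start, static_cast<unsigned>(event->code_len));
          break;
        }
        case v8::JitCodeEvent::CODE_MOVED:
          fprintf(stderr, "code moved: %p -> %p\n",
                  event->code_start, event->new_code_start);
          break;
        default:
          break;  // Ignore event types added in later versions.
      }
    }
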
+
+
+/**
* Interface for iterating though all external resources in the heap.
*/
class V8EXPORT ExternalResourceVisitor { // NOLINT
@@ -3179,6 +3256,43 @@ class V8EXPORT V8 {
ReturnAddressLocationResolver return_address_resolver);
/**
+ * Allows the host application to provide the address of a function that's
+ * invoked on entry to every V8-generated function.
+ * Note that \p entry_hook is invoked at the very start of each
+ * generated function.
+ *
+ * \param entry_hook a function that will be invoked on entry to every
+ * V8-generated function.
+ * \returns true on success on supported platforms, false on failure.
+ * \note Setting a new entry hook function when one is already active will
+ * fail.
+ */
+ static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
+
+ /**
+ * Allows the host application to provide the address of a function that is
+ * notified each time code is added, moved or removed.
+ *
+ * \param options options for the JIT code event handler.
+ * \param event_handler the JIT code event handler, which will be invoked
+ * each time code is added, moved or removed.
+ * \note by default \p event_handler is not notified of code that already
+ *     exists; pass kJitCodeEventEnumExisting in \p options to have existing
+ *     code reported as CODE_ADDED events.
+ * \note since code removal notifications are not currently issued, the
+ * \p event_handler may get notifications of code that overlaps earlier
+ * code notifications. This happens when code areas are reused, and the
+ * earlier overlapping code areas should therefore be discarded.
+ * \note the events passed to \p event_handler and the strings they point to
+ * are not guaranteed to live past each call. The \p event_handler must
+ * copy strings and other parameters it needs to keep around.
+ * \note the set of events declared in JitCodeEvent::EventType is expected to
+ * grow over time, and the JitCodeEvent structure is expected to accrue
+ * new members. The \p event_handler function must ignore event codes
+ * it does not recognize to maintain future compatibility.
+ */
+ static void SetJitCodeEventHandler(JitCodeEventOptions options,
+ JitCodeEventHandler event_handler);
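
Wiring up both callbacks, reusing the hypothetical CountingEntryHook and JitLogger sketched earlier; per the notes above, installing an entry hook can fail if one is already active, and the JIT handler is best installed early so that little code is missed:

    if (!v8::V8::SetFunctionEntryHook(CountingEntryHook)) {
      // An entry hook is already active, or the platform lacks support.
    }
    v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, JitLogger);
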
+
+ /**
* Adjusts the amount of registered external memory. Used to give
* V8 an indication of the amount of externally allocated memory
* that is kept alive by JavaScript objects. V8 uses this to decide
@@ -3928,7 +4042,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptySymbolRootIndex = 128;
+ static const int kEmptySymbolRootIndex = 116;
static const int kJSObjectType = 0xaa;
static const int kFirstNonstringType = 0x80;
diff --git a/deps/v8/preparser/preparser-process.cc b/deps/v8/preparser/preparser-process.cc
index 368f63f6ce..1bcc804923 100644
--- a/deps/v8/preparser/preparser-process.cc
+++ b/deps/v8/preparser/preparser-process.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -202,7 +202,7 @@ void fail(v8::PreParserData* data, const char* message, ...) {
fflush(stderr);
if (data != NULL) {
// Print preparser data to stdout.
- uint32_t size = data->size();
+ uint32_t size = static_cast<uint32_t>(data->size());
fprintf(stderr, "LOG: data size: %u\n", size);
if (!WriteBuffer(stdout, data->data(), size)) {
perror("ERROR: Writing data");
@@ -232,7 +232,7 @@ struct ExceptionExpectation {
void CheckException(v8::PreParserData* data,
ExceptionExpectation* expects) {
- PreparseDataInterpreter reader(data->data(), data->size());
+ PreparseDataInterpreter reader(data->data(), static_cast<int>(data->size()));
if (expects->throws) {
if (!reader.throws()) {
if (expects->type == NULL) {
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 7a84a2a0ff..26e787f2b7 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -25,19 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This controls whether this sample is compiled with debugger support.
-// You may trace its usages in source text to see what parts of program
-// are responsible for debugging support.
-// Note that V8 itself should be compiled with enabled debugger support
-// to have it all working.
-#define SUPPORT_DEBUGGING
-
#include <v8.h>
-#ifdef SUPPORT_DEBUGGING
+#ifdef ENABLE_DEBUGGER_SUPPORT
#include <v8-debug.h>
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
#include <fcntl.h>
#include <string.h>
@@ -116,7 +108,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool report_exceptions);
-#ifdef SUPPORT_DEBUGGING
+#ifdef ENABLE_DEBUGGER_SUPPORT
v8::Persistent<v8::Context> debug_message_context;
void DispatchDebugMessages() {
@@ -135,7 +127,7 @@ void DispatchDebugMessages() {
v8::Debug::ProcessDebugMessages();
}
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
int RunMain(int argc, char* argv[]) {
@@ -146,11 +138,11 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::Value> script_name(NULL);
int script_param_counter = 0;
-#ifdef SUPPORT_DEBUGGING
+#ifdef ENABLE_DEBUGGER_SUPPORT
int port_number = -1;
bool wait_for_connection = false;
bool support_callback = false;
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
MainCycleType cycle_type = CycleInCpp;
@@ -164,7 +156,7 @@ int RunMain(int argc, char* argv[]) {
cycle_type = CycleInCpp;
} else if (strcmp(str, "--main-cycle-in-js") == 0) {
cycle_type = CycleInJs;
-#ifdef SUPPORT_DEBUGGING
+#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (strcmp(str, "--callback") == 0) {
support_callback = true;
} else if (strcmp(str, "--wait-for-connection") == 0) {
@@ -172,7 +164,7 @@ int RunMain(int argc, char* argv[]) {
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
port_number = atoi(argv[i + 1]); // NOLINT
i++;
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
@@ -219,7 +211,7 @@ int RunMain(int argc, char* argv[]) {
// Enter the newly created execution environment.
v8::Context::Scope context_scope(context);
-#ifdef SUPPORT_DEBUGGING
+#ifdef ENABLE_DEBUGGER_SUPPORT
debug_message_context = v8::Persistent<v8::Context>::New(context);
v8::Locker locker;
@@ -231,7 +223,7 @@ int RunMain(int argc, char* argv[]) {
if (port_number != -1) {
v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection);
}
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
bool report_exceptions = true;
@@ -272,9 +264,9 @@ int RunMain(int argc, char* argv[]) {
bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool report_exceptions) {
-#ifdef SUPPORT_DEBUGGING
+#ifdef ENABLE_DEBUGGER_SUPPORT
v8::Locker lock;
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
v8::Handle<v8::Value> process_val =
@@ -347,7 +339,7 @@ v8::Handle<v8::String> ReadFile(const char* name) {
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
- int read = fread(&chars[i], 1, size - i, file);
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);
@@ -427,9 +419,9 @@ v8::Handle<v8::String> ReadLine() {
char* res;
{
-#ifdef SUPPORT_DEBUGGING
+#ifdef ENABLE_DEBUGGER_SUPPORT
v8::Unlocker unlocker;
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
res = fgets(buffer, kBufferSize, stdin);
}
if (res == NULL) {
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index c0cee4c28e..ae6a5500cd 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -351,7 +351,7 @@ Handle<Value> JsHttpRequestProcessor::MapGet(Local<String> name,
// Otherwise fetch the value and wrap it in a JavaScript string
const string& value = (*iter).second;
- return String::New(value.c_str(), value.length());
+ return String::New(value.c_str(), static_cast<int>(value.length()));
}
@@ -443,7 +443,7 @@ Handle<Value> JsHttpRequestProcessor::GetPath(Local<String> name,
const string& path = request->Path();
// Wrap the result in a JavaScript string and return it.
- return String::New(path.c_str(), path.length());
+ return String::New(path.c_str(), static_cast<int>(path.length()));
}
@@ -451,7 +451,7 @@ Handle<Value> JsHttpRequestProcessor::GetReferrer(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Referrer();
- return String::New(path.c_str(), path.length());
+ return String::New(path.c_str(), static_cast<int>(path.length()));
}
@@ -459,7 +459,7 @@ Handle<Value> JsHttpRequestProcessor::GetHost(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Host();
- return String::New(path.c_str(), path.length());
+ return String::New(path.c_str(), static_cast<int>(path.length()));
}
@@ -467,7 +467,7 @@ Handle<Value> JsHttpRequestProcessor::GetUserAgent(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->UserAgent();
- return String::New(path.c_str(), path.length());
+ return String::New(path.c_str(), static_cast<int>(path.length()));
}
@@ -557,7 +557,7 @@ Handle<String> ReadFile(const string& name) {
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
- int read = fread(&chars[i], 1, size - i, file);
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index db0cc1a930..821ef75a76 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -205,7 +205,7 @@ v8::Handle<v8::String> ReadFile(const char* name) {
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
- int read = fread(&chars[i], 1, size - i, file);
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 2482b379ac..16bfb55b38 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -43,8 +43,8 @@ SOURCES = {
assembler.cc
ast.cc
atomicops_internals_x86_gcc.cc
- bignum.cc
bignum-dtoa.cc
+ bignum.cc
bootstrapper.cc
builtins.cc
cached-powers.cc
@@ -67,27 +67,30 @@ SOURCES = {
disassembler.cc
diy-fp.cc
dtoa.cc
- elements.cc
elements-kind.cc
+ elements.cc
execution.cc
+ extensions/externalize-string-extension.cc
+ extensions/gc-extension.cc
+ extensions/statistics-extension.cc
factory.cc
+ fast-dtoa.cc
+ fixed-dtoa.cc
flags.cc
frames.cc
full-codegen.cc
func-name-inferrer.cc
gdb-jit.cc
global-handles.cc
- fast-dtoa.cc
- fixed-dtoa.cc
handles.cc
heap-profiler.cc
heap.cc
- hydrogen.cc
hydrogen-instructions.cc
+ hydrogen.cc
ic.cc
incremental-marking.cc
- interface.cc
inspector.cc
+ interface.cc
interpreter-irregexp.cc
isolate.cc
jsregexp.cc
@@ -99,34 +102,37 @@ SOURCES = {
log.cc
mark-compact.cc
messages.cc
- objects.cc
objects-printer.cc
objects-visiting.cc
+ objects.cc
once.cc
+ optimizing-compiler-thread.cc
parser.cc
- preparser.cc
preparse-data.cc
+ preparser.cc
profile-generator.cc
property.cc
regexp-macro-assembler-irregexp.cc
regexp-macro-assembler.cc
regexp-stack.cc
rewriter.cc
- runtime.cc
runtime-profiler.cc
+ runtime.cc
safepoint-table.cc
- scanner.cc
scanner-character-streams.cc
+ scanner.cc
scopeinfo.cc
scopes.cc
serialize.cc
snapshot-common.cc
spaces.cc
+ store-buffer.cc
string-search.cc
string-stream.cc
strtod.cc
stub-cache.cc
token.cc
+ transitions.cc
type-info.cc
unicode.cc
utils.cc
@@ -137,10 +143,7 @@ SOURCES = {
v8utils.cc
variables.cc
version.cc
- store-buffer.cc
zone.cc
- extensions/gc-extension.cc
- extensions/externalize-string-extension.cc
"""),
'arch:arm': Split("""
arm/builtins-arm.cc
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 8aabad0d09..9da6141c5b 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -92,9 +92,9 @@ MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
Object* Accessors::FlattenNumber(Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
- ASSERT(Isolate::Current()->context()->global_context()->number_function()->
+ ASSERT(Isolate::Current()->context()->native_context()->number_function()->
has_initial_map());
- Map* number_map = Isolate::Current()->context()->global_context()->
+ Map* number_map = Isolate::Current()->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
@@ -805,4 +805,69 @@ const AccessorDescriptor Accessors::ObjectPrototype = {
0
};
+
+//
+// Accessors::MakeModuleExport
+//
+
+static v8::Handle<v8::Value> ModuleGetExport(
+ v8::Local<v8::String> property,
+ const v8::AccessorInfo& info) {
+ JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
+ Context* context = Context::cast(instance->context());
+ ASSERT(context->IsModuleContext());
+ int slot = info.Data()->Int32Value();
+ Object* value = context->get(slot);
+ if (value->IsTheHole()) {
+ Handle<String> name = v8::Utils::OpenHandle(*property);
+ Isolate* isolate = instance->GetIsolate();
+ isolate->ScheduleThrow(
+ *isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1)));
+ return v8::Handle<v8::Value>();
+ }
+ return v8::Utils::ToLocal(Handle<Object>(value));
+}
+
+
+static void ModuleSetExport(
+ v8::Local<v8::String> property,
+ v8::Local<v8::Value> value,
+ const v8::AccessorInfo& info) {
+ JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
+ Context* context = Context::cast(instance->context());
+ ASSERT(context->IsModuleContext());
+ int slot = info.Data()->Int32Value();
+ Object* old_value = context->get(slot);
+ if (old_value->IsTheHole()) {
+ Handle<String> name = v8::Utils::OpenHandle(*property);
+ Isolate* isolate = instance->GetIsolate();
+ isolate->ScheduleThrow(
+ *isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1)));
+ return;
+ }
+ context->set(slot, *v8::Utils::OpenHandle(*value));
+}
+
+
+Handle<AccessorInfo> Accessors::MakeModuleExport(
+ Handle<String> name,
+ int index,
+ PropertyAttributes attributes) {
+ Factory* factory = name->GetIsolate()->factory();
+ Handle<AccessorInfo> info = factory->NewAccessorInfo();
+ info->set_property_attributes(attributes);
+ info->set_all_can_read(true);
+ info->set_all_can_write(true);
+ info->set_name(*name);
+ info->set_data(Smi::FromInt(index));
+ Handle<Object> getter = v8::FromCData(&ModuleGetExport);
+ Handle<Object> setter = v8::FromCData(&ModuleSetExport);
+ info->set_getter(*getter);
+ if (!(attributes & ReadOnly)) info->set_setter(*setter);
+ return info;
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 36b9a9984a..250f742fa0 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -85,6 +85,10 @@ class Accessors : public AllStatic {
void*);
static MaybeObject* FunctionGetArguments(Object* object, void*);
+ // Accessor infos.
+ static Handle<AccessorInfo> MakeModuleExport(
+ Handle<String> name, int index, PropertyAttributes attributes);
+
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionGetLength(Object* object, void*);
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 0d88047aa2..dcbc894574 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -33,6 +33,7 @@
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "compiler.h"
#include "conversions-inl.h"
#include "counters.h"
@@ -540,7 +541,9 @@ Extension::Extension(const char* name,
source_(source, source_length_),
dep_count_(dep_count),
deps_(deps),
- auto_enable_(false) { }
+ auto_enable_(false) {
+ CHECK(source != NULL || source_length_ == 0);
+}
v8::Handle<Primitive> Undefined() {
@@ -767,8 +770,8 @@ void Context::SetData(v8::Handle<String> data) {
i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
+ ASSERT(env->IsNativeContext());
+ if (env->IsNativeContext()) {
env->set_data(*raw_data);
}
}
@@ -781,8 +784,8 @@ v8::Local<v8::Value> Context::GetData() {
return v8::Local<Value>();
}
i::Object* raw_result = NULL;
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
+ ASSERT(env->IsNativeContext());
+ if (env->IsNativeContext()) {
raw_result = env->data();
} else {
return Local<Value>();
@@ -1066,7 +1069,6 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::PropertyAttribute attributes,
v8::Handle<AccessorSignature> signature) {
i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
- ASSERT(getter != NULL);
SET_FIELD_WRAPPED(obj, set_getter, getter);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
@@ -1537,9 +1539,10 @@ Local<Script> Script::New(v8::Handle<String> source,
name_obj,
line_offset,
column_offset,
+ isolate->global_context(),
NULL,
pre_data_impl,
- Utils::OpenHandle(*script_data),
+ Utils::OpenHandle(*script_data, true),
i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
@@ -3031,6 +3034,17 @@ Local<String> v8::Object::ObjectProtoToString() {
}
+Local<Value> v8::Object::GetConstructor() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetConstructor()",
+ return Local<v8::Function>());
+ ENTER_V8(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> constructor(self->GetConstructor());
+ return Utils::ToLocal(constructor);
+}
+
+
Local<String> v8::Object::GetConstructorName() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
@@ -3223,7 +3237,7 @@ void v8::Object::TurnOnAccessCheck() {
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
i::Handle<i::Map> new_map =
- isolate->factory()->CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
+ isolate->factory()->CopyMap(i::Handle<i::Map>(obj->map()));
new_map->set_is_access_check_needed(true);
obj->set_map(*new_map);
}
@@ -3258,7 +3272,7 @@ static i::Context* GetCreationContext(i::JSObject* object) {
} else {
function = i::JSFunction::cast(constructor);
}
- return function->context()->global_context();
+ return function->context()->native_context();
}
@@ -3287,13 +3301,15 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::SetHiddenValue()", return false);
+ if (value.IsEmpty()) return DeleteHiddenValue(key);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result =
- i::JSObject::SetHiddenProperty(self, key_obj, value_obj);
+ i::JSObject::SetHiddenProperty(self, key_symbol, value_obj);
return *result == *self;
}
@@ -3305,7 +3321,8 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
+ i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
+ i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -3318,7 +3335,8 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- self->DeleteHiddenProperty(*key_obj);
+ i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
+ self->DeleteHiddenProperty(*key_symbol);
return true;
}
@@ -3386,7 +3404,7 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- if (!ApiCheck(length <= i::ExternalPixelArray::kMaxLength,
+ if (!ApiCheck(length >= 0 && length <= i::ExternalPixelArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
return;
@@ -3442,7 +3460,7 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
+ if (!ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToExternalArrayData()",
"length exceeds max acceptable value")) {
return;
@@ -3835,6 +3853,9 @@ int String::WriteUtf8(char* buffer,
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (options & HINT_MANY_WRITES_EXPECTED) {
+ FlattenString(str); // Flatten the string for efficiency.
+ }
int string_length = str->length();
if (str->IsAsciiRepresentation()) {
int len;
@@ -3891,11 +3912,7 @@ int String::WriteUtf8(char* buffer,
// Slow case.
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
isolate->string_tracker()->RecordWrite(str);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- FlattenString(str);
- }
+
write_input_buffer.Reset(0, *str);
int len = str->length();
// Encode the first K - 3 bytes directly into the buffer since we
@@ -3937,8 +3954,9 @@ int String::WriteUtf8(char* buffer,
c,
unibrow::Utf16::kNoPreviousCharacter);
if (pos + written <= capacity) {
- for (int j = 0; j < written; j++)
+ for (int j = 0; j < written; j++) {
buffer[pos + j] = intermediate[j];
+ }
pos += written;
nchars++;
} else {
@@ -3951,8 +3969,9 @@ int String::WriteUtf8(char* buffer,
}
if (nchars_ref != NULL) *nchars_ref = nchars;
if (!(options & NO_NULL_TERMINATION) &&
- (i == len && (capacity == -1 || pos < capacity)))
+ (i == len && (capacity == -1 || pos < capacity))) {
buffer[pos++] = '\0';
+ }
return pos;
}
@@ -3965,28 +3984,45 @@ int String::WriteAscii(char* buffer,
if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
LOG_API(isolate, "String::WriteAscii");
ENTER_V8(isolate);
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
+ FlattenString(str); // Flatten the string for efficiency.
}
+
+ if (str->IsAsciiRepresentation()) {
+ // WriteToFlat is faster than using the StringInputBuffer.
+ if (length == -1) length = str->length() + 1;
+ int len = i::Min(length, str->length() - start);
+ i::String::WriteToFlat(*str, buffer, start, start + len);
+ if (!(options & PRESERVE_ASCII_NULL)) {
+ for (int i = 0; i < len; i++) {
+ if (buffer[i] == '\0') buffer[i] = ' ';
+ }
+ }
+ if (!(options & NO_NULL_TERMINATION) && length > len) {
+ buffer[len] = '\0';
+ }
+ return len;
+ }
+
+ i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
int end = length;
- if ( (length == -1) || (length > str->length() - start) )
+ if ((length == -1) || (length > str->length() - start)) {
end = str->length() - start;
+ }
if (end < 0) return 0;
write_input_buffer.Reset(start, *str);
int i;
for (i = 0; i < end; i++) {
char c = static_cast<char>(write_input_buffer.GetNext());
- if (c == '\0') c = ' ';
+ if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
buffer[i] = c;
}
- if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length))
+ if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
buffer[i] = '\0';
+ }
return i;
}
@@ -4005,7 +4041,7 @@ int String::Write(uint16_t* buffer,
if (options & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
+ FlattenString(str);
}
int end = start + length;
if ((length == -1) || (length > str->length() - start) )
@@ -4191,8 +4227,9 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
i::Handle<i::Foreign> foreign =
isolate->factory()->NewForeign(
reinterpret_cast<i::Address>(value), i::TENURED);
- if (!foreign.is_null())
- Utils::OpenHandle(this)->SetInternalField(index, *foreign);
+ if (!foreign.is_null()) {
+ Utils::OpenHandle(this)->SetInternalField(index, *foreign);
+ }
}
ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@@ -4221,6 +4258,20 @@ void v8::V8::SetReturnAddressLocationResolver(
}
+bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
+ return i::ProfileEntryHookStub::SetFunctionEntryHook(entry_hook);
+}
+
+
+void v8::V8::SetJitCodeEventHandler(
+ JitCodeEventOptions options, JitCodeEventHandler event_handler) {
+ i::Isolate* isolate = i::Isolate::Current();
+ // Ensure that logging is initialized for our isolate.
+ isolate->InitializeLoggingAndCounters();
+ isolate->logger()->SetCodeEventHandler(options, event_handler);
+}
+
+
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
@@ -4355,7 +4406,7 @@ Persistent<Context> v8::Context::New(
// Create the environment.
env = isolate->bootstrapper()->CreateEnvironment(
isolate,
- Utils::OpenHandle(*global_object),
+ Utils::OpenHandle(*global_object, true),
proxy_template,
extensions);
@@ -4399,7 +4450,7 @@ void v8::Context::UseDefaultSecurityToken() {
}
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
- env->set_security_token(env->global());
+ env->set_security_token(env->global_object());
}
@@ -4444,7 +4495,7 @@ v8::Local<v8::Context> Context::GetCurrent() {
if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
return Local<Context>();
}
- i::Handle<i::Object> current = isolate->global_context();
+ i::Handle<i::Object> current = isolate->native_context();
if (current.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
return Utils::ToLocal(context);
@@ -4457,7 +4508,7 @@ v8::Local<v8::Context> Context::GetCalling() {
return Local<Context>();
}
i::Handle<i::Object> calling =
- isolate->GetCallingGlobalContext();
+ isolate->GetCallingNativeContext();
if (calling.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
@@ -4494,9 +4545,9 @@ void Context::ReattachGlobal(Handle<Object> global_object) {
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- isolate->bootstrapper()->ReattachGlobal(
- context,
- Utils::OpenHandle(*global_object));
+ i::Handle<i::JSGlobalProxy> global_proxy =
+ i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
+ isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
}
@@ -4750,6 +4801,7 @@ Local<String> v8::String::NewExternal(
EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
LOG_API(isolate, "String::NewExternal");
ENTER_V8(isolate);
+ CHECK(resource && resource->data());
i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
@@ -4770,6 +4822,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
+ CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddString(*obj);
@@ -4784,6 +4837,7 @@ Local<String> v8::String::NewExternal(
EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
LOG_API(isolate, "String::NewExternal");
ENTER_V8(isolate);
+ CHECK(resource && resource->data());
i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
@@ -4805,6 +4859,7 @@ bool v8::String::MakeExternal(
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
+ CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddString(*obj);
@@ -5184,6 +5239,8 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
isolate->stats_table()->SetCreateHistogramFunction(callback);
+ isolate->InitializeLoggingAndCounters();
+ isolate->counters()->ResetHistograms();
}
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
@@ -5233,8 +5290,9 @@ void V8::AddImplicitReferences(Persistent<Object> parent,
intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() ||
+ IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
return 0;
}
return isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
@@ -5586,7 +5644,8 @@ bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
foreign =
isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
}
- isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
+ isolate->debugger()->SetEventListener(foreign,
+ Utils::OpenHandle(*data, true));
return true;
}
@@ -5601,7 +5660,8 @@ bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
if (that != NULL) {
foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
}
- isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
+ isolate->debugger()->SetEventListener(foreign,
+ Utils::OpenHandle(*data, true));
return true;
}
@@ -5612,7 +5672,7 @@ bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
ENTER_V8(isolate);
isolate->debugger()->SetEventListener(Utils::OpenHandle(*that),
- Utils::OpenHandle(*data));
+ Utils::OpenHandle(*data, true));
return true;
}
@@ -5751,7 +5811,7 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
v8::HandleScope scope;
i::Debug* isolate_debug = isolate->debug();
isolate_debug->Load();
- i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global());
+ i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
i::Handle<i::String> name =
isolate->factory()->LookupAsciiSymbol("MakeMirror");
i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
@@ -5783,6 +5843,7 @@ void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMessages(true);
}
+
Local<Context> Debug::GetDebugContext() {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
@@ -5790,6 +5851,20 @@ Local<Context> Debug::GetDebugContext() {
return Utils::ToLocal(i::Isolate::Current()->debugger()->GetDebugContext());
}
+
+void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ i::Debugger* debugger;
+ if (isolate != NULL) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ debugger = internal_isolate->debugger();
+ } else {
+ debugger = i::Isolate::GetDefaultIsolateDebugger();
+ }
+ debugger->set_live_edit_enabled(enable);
+}
+
+
#endif // ENABLE_DEBUGGER_SUPPORT
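
A sketch of the new LiveEdit toggle from the embedder's side, assuming a build with ENABLE_DEBUGGER_SUPPORT and that the matching declaration lands in v8-debug.h alongside the other Debug methods:

    #ifdef ENABLE_DEBUGGER_SUPPORT
    // NULL selects the default isolate, mirroring the implementation above.
    v8::Debug::SetLiveEditEnabled(false, NULL);
    #endif  // ENABLE_DEBUGGER_SUPPORT
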
@@ -6374,12 +6449,28 @@ char* HandleScopeImplementer::RestoreThread(char* storage) {
void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
+#ifdef DEBUG
+ bool found_block_before_deferred = false;
+#endif
// Iterate over all handles in the blocks except for the last.
for (int i = blocks()->length() - 2; i >= 0; --i) {
Object** block = blocks()->at(i);
- v->VisitPointers(block, &block[kHandleBlockSize]);
+ if (last_handle_before_deferred_block_ != NULL &&
+ (last_handle_before_deferred_block_ < &block[kHandleBlockSize]) &&
+ (last_handle_before_deferred_block_ >= block)) {
+ v->VisitPointers(block, last_handle_before_deferred_block_);
+ ASSERT(!found_block_before_deferred);
+#ifdef DEBUG
+ found_block_before_deferred = true;
+#endif
+ } else {
+ v->VisitPointers(block, &block[kHandleBlockSize]);
+ }
}
+ ASSERT(last_handle_before_deferred_block_ == NULL ||
+ found_block_before_deferred);
+
// Iterate over live handles in the last block (if any).
if (!blocks()->is_empty()) {
v->VisitPointers(blocks()->last(), handle_scope_data_.next);
@@ -6407,4 +6498,66 @@ char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
return storage + ArchiveSpacePerThread();
}
+
+DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
+ DeferredHandles* deferred =
+ new DeferredHandles(isolate()->handle_scope_data()->next, isolate());
+
+ while (!blocks_.is_empty()) {
+ Object** block_start = blocks_.last();
+ Object** block_limit = &block_start[kHandleBlockSize];
+ // We should not need to check for NoHandleAllocation here. Assert
+ // this.
+ ASSERT(prev_limit == block_limit ||
+ !(block_start <= prev_limit && prev_limit <= block_limit));
+ if (prev_limit == block_limit) break;
+ deferred->blocks_.Add(blocks_.last());
+ blocks_.RemoveLast();
+ }
+
+ // deferred->blocks_ now contains the blocks installed on the
+ // HandleScope stack since BeginDeferredScope was called, but in
+ // reverse order.
+
+ ASSERT(prev_limit == NULL || !blocks_.is_empty());
+
+ ASSERT(!blocks_.is_empty() && prev_limit != NULL);
+ ASSERT(last_handle_before_deferred_block_ != NULL);
+ last_handle_before_deferred_block_ = NULL;
+ return deferred;
+}
+
+
+void HandleScopeImplementer::BeginDeferredScope() {
+ ASSERT(last_handle_before_deferred_block_ == NULL);
+ last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
+}
+
+
+DeferredHandles::~DeferredHandles() {
+ isolate_->UnlinkDeferredHandles(this);
+
+ for (int i = 0; i < blocks_.length(); i++) {
+#ifdef DEBUG
+ HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
+#endif
+ isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
+ }
+}
+
+
+void DeferredHandles::Iterate(ObjectVisitor* v) {
+ ASSERT(!blocks_.is_empty());
+
+ ASSERT((first_block_limit_ >= blocks_.first()) &&
+ (first_block_limit_ <= &(blocks_.first())[kHandleBlockSize]));
+
+ v->VisitPointers(blocks_.first(), first_block_limit_);
+
+ for (int i = 1; i < blocks_.length(); i++) {
+ v->VisitPointers(blocks_[i], &blocks_[i][kHandleBlockSize]);
+ }
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 58e6a6e410..7197b6cb54 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -159,6 +159,27 @@ class RegisteredExtension {
};
+#define OPEN_HANDLE_LIST(V) \
+ V(Template, TemplateInfo) \
+ V(FunctionTemplate, FunctionTemplateInfo) \
+ V(ObjectTemplate, ObjectTemplateInfo) \
+ V(Signature, SignatureInfo) \
+ V(AccessorSignature, FunctionTemplateInfo) \
+ V(TypeSwitch, TypeSwitchInfo) \
+ V(Data, Object) \
+ V(RegExp, JSRegExp) \
+ V(Object, JSObject) \
+ V(Array, JSArray) \
+ V(String, String) \
+ V(Script, Object) \
+ V(Function, JSFunction) \
+ V(Message, JSObject) \
+ V(Context, Context) \
+ V(External, Foreign) \
+ V(StackTrace, JSArray) \
+ V(StackFrame, JSObject)
+
+
class Utils {
public:
static bool ReportApiFailure(const char* location, const char* message);
@@ -205,42 +226,13 @@ class Utils {
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
- static inline v8::internal::Handle<v8::internal::TemplateInfo>
- OpenHandle(const Template* that);
- static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
- OpenHandle(const FunctionTemplate* that);
- static inline v8::internal::Handle<v8::internal::ObjectTemplateInfo>
- OpenHandle(const ObjectTemplate* that);
- static inline v8::internal::Handle<v8::internal::Object>
- OpenHandle(const Data* data);
- static inline v8::internal::Handle<v8::internal::JSRegExp>
- OpenHandle(const RegExp* data);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const v8::Object* data);
- static inline v8::internal::Handle<v8::internal::JSArray>
- OpenHandle(const v8::Array* data);
- static inline v8::internal::Handle<v8::internal::String>
- OpenHandle(const String* data);
- static inline v8::internal::Handle<v8::internal::Object>
- OpenHandle(const Script* data);
- static inline v8::internal::Handle<v8::internal::JSFunction>
- OpenHandle(const Function* data);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const Message* message);
- static inline v8::internal::Handle<v8::internal::JSArray>
- OpenHandle(const StackTrace* stack_trace);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const StackFrame* stack_frame);
- static inline v8::internal::Handle<v8::internal::Context>
- OpenHandle(const v8::Context* context);
- static inline v8::internal::Handle<v8::internal::SignatureInfo>
- OpenHandle(const v8::Signature* sig);
- static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
- OpenHandle(const v8::AccessorSignature* sig);
- static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
- OpenHandle(const v8::TypeSwitch* that);
- static inline v8::internal::Handle<v8::internal::Foreign>
- OpenHandle(const v8::External* that);
+#define DECLARE_OPEN_HANDLE(From, To) \
+ static inline v8::internal::Handle<v8::internal::To> \
+ OpenHandle(const From* that, bool allow_empty_handle = false);
+
+OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
+
+#undef DECLARE_OPEN_HANDLE
};
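
The hunk above collapses eighteen hand-written OpenHandle declarations into a single X-macro list, expanded once here for the declarations and again further down for the definitions. A small self-contained illustration of the pattern, independent of V8:

    #define COLOR_LIST(V) \
      V(Red,   0xFF0000)  \
      V(Green, 0x00FF00)  \
      V(Blue,  0x0000FF)

    // Each expansion supplies a different macro as V to stamp out code.
    #define DECLARE_COLOR(name, value) k##name = value,
    enum Color { COLOR_LIST(DECLARE_COLOR) kColorCount };
    #undef DECLARE_COLOR
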
@@ -257,7 +249,7 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
if (!is_null()) {
handle = *this;
}
- return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)));
+ return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)), true);
}
@@ -294,33 +286,18 @@ MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
// Implementations of OpenHandle
-#define MAKE_OPEN_HANDLE(From, To) \
- v8::internal::Handle<v8::internal::To> Utils::OpenHandle(\
- const v8::From* that) { \
- return v8::internal::Handle<v8::internal::To>( \
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
+ const v8::From* that, bool allow_empty_handle) { \
+ EXTRA_CHECK(allow_empty_handle || that != NULL); \
+ return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
}
-MAKE_OPEN_HANDLE(Template, TemplateInfo)
-MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
-MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
-MAKE_OPEN_HANDLE(Signature, SignatureInfo)
-MAKE_OPEN_HANDLE(AccessorSignature, FunctionTemplateInfo)
-MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
-MAKE_OPEN_HANDLE(Data, Object)
-MAKE_OPEN_HANDLE(RegExp, JSRegExp)
-MAKE_OPEN_HANDLE(Object, JSObject)
-MAKE_OPEN_HANDLE(Array, JSArray)
-MAKE_OPEN_HANDLE(String, String)
-MAKE_OPEN_HANDLE(Script, Object)
-MAKE_OPEN_HANDLE(Function, JSFunction)
-MAKE_OPEN_HANDLE(Message, JSObject)
-MAKE_OPEN_HANDLE(Context, Context)
-MAKE_OPEN_HANDLE(External, Foreign)
-MAKE_OPEN_HANDLE(StackTrace, JSArray)
-MAKE_OPEN_HANDLE(StackFrame, JSObject)
+OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
#undef MAKE_OPEN_HANDLE
+#undef OPEN_HANDLE_LIST
namespace internal {
@@ -392,6 +369,32 @@ class StringTracker {
};
+class DeferredHandles {
+ public:
+ ~DeferredHandles();
+
+ private:
+ DeferredHandles(Object** first_block_limit, Isolate* isolate)
+ : next_(NULL),
+ previous_(NULL),
+ first_block_limit_(first_block_limit),
+ isolate_(isolate) {
+ isolate->LinkDeferredHandles(this);
+ }
+
+ void Iterate(ObjectVisitor* v);
+
+ List<Object**> blocks_;
+ DeferredHandles* next_;
+ DeferredHandles* previous_;
+ Object** first_block_limit_;
+ Isolate* isolate_;
+
+ friend class HandleScopeImplementer;
+ friend class Isolate;
+};
+
+
// This class is here in order to be able to declare it a friend of
// HandleScope. Moving these methods to be members of HandleScope would be
// neat in some ways, but it would expose internal implementation details in
@@ -409,7 +412,8 @@ class HandleScopeImplementer {
entered_contexts_(0),
saved_contexts_(0),
spare_(NULL),
- call_depth_(0) { }
+ call_depth_(0),
+ last_handle_before_deferred_block_(NULL) { }
~HandleScopeImplementer() {
DeleteArray(spare_);
@@ -445,6 +449,13 @@ class HandleScopeImplementer {
inline bool HasSavedContexts();
inline List<internal::Object**>* blocks() { return &blocks_; }
+ Isolate* isolate() const { return isolate_; }
+
+ void ReturnBlock(Object** block) {
+ ASSERT(block != NULL);
+ if (spare_ != NULL) DeleteArray(spare_);
+ spare_ = block;
+ }
private:
void ResetAfterArchive() {
@@ -452,6 +463,7 @@ class HandleScopeImplementer {
entered_contexts_.Initialize(0);
saved_contexts_.Initialize(0);
spare_ = NULL;
+ last_handle_before_deferred_block_ = NULL;
call_depth_ = 0;
}
@@ -469,6 +481,9 @@ class HandleScopeImplementer {
ASSERT(call_depth_ == 0);
}
+ void BeginDeferredScope();
+ DeferredHandles* Detach(Object** prev_limit);
+
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
@@ -477,6 +492,7 @@ class HandleScopeImplementer {
List<Context*> saved_contexts_;
Object** spare_;
int call_depth_;
+ Object** last_handle_before_deferred_block_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
@@ -484,6 +500,9 @@ class HandleScopeImplementer {
char* RestoreThreadHelper(char* from);
char* ArchiveThreadHelper(char* to);
+ friend class DeferredHandles;
+ friend class DeferredHandleScope;
+
DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
};
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index d5db686c0e..c47c094756 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -141,10 +141,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index ec28da4002..30a8830c9e 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -32,7 +32,7 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -52,17 +52,20 @@ unsigned CpuFeatures::found_by_runtime_probing_ = 0;
// Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
+// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
-static uint64_t CpuFeaturesImpliedByCompiler() {
- uint64_t answer = 0;
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
answer |= 1u << ARMv7;
-#endif // def CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP_INSTRUCTIONS
- answer |= 1u << VFP3 | 1u << ARMv7;
-#endif // def CAN_USE_VFP_INSTRUCTIONS
+#endif // CAN_USE_ARMV7_INSTRUCTIONS
+#ifdef CAN_USE_VFP3_INSTRUCTIONS
+ answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
+#endif // CAN_USE_VFP3_INSTRUCTIONS
+#ifdef CAN_USE_VFP2_INSTRUCTIONS
+ answer |= 1u << VFP2;
+#endif // CAN_USE_VFP2_INSTRUCTIONS
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
@@ -70,18 +73,18 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
// point support implies VFPv3, see ARM DDI 0406B, page A1-6.
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
&& !defined(__SOFTFP__)
- answer |= 1u << VFP3 | 1u << ARMv7;
+ answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
// && !defined(__SOFTFP__)
-#endif // def __arm__
+#endif  // __arm__
return answer;
}
void CpuFeatures::Probe() {
- unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
- CpuFeaturesImpliedByCompiler());
+ unsigned standard_features = static_cast<unsigned>(
+ OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
@@ -101,27 +104,32 @@ void CpuFeatures::Probe() {
// For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
// enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3 | 1u << ARMv7;
+ supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
}
-#else // def __arm__
+#else // __arm__
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if runtime
- // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
+ // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
// 0406B, page A1-6.
- supported_ |= 1u << VFP3 | 1u << ARMv7;
- found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
+ found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
+ } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
+ found_by_runtime_probing_ |= 1u << VFP2;
}
if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
- supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
+
+ supported_ |= found_by_runtime_probing_;
#endif
+
+ // Assert that VFP3 implies VFP2 and ARMv7.
+ ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
}
@@ -292,8 +300,10 @@ static const int kMinimalBufferSize = 4*KB;
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
+ recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -746,7 +756,7 @@ static bool fits_shifter(uint32_t imm32,
}
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kCmpCmnFlip;
return true;
}
@@ -754,7 +764,7 @@ static bool fits_shifter(uint32_t imm32,
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD ||
alu_insn == SUB) {
- if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
@@ -775,13 +785,14 @@ static bool fits_shifter(uint32_t imm32,
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Operand::must_use_constant_pool() const {
+bool Operand::must_use_constant_pool(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
}
#endif // def DEBUG
+ if (assembler != NULL && assembler->predictable_code_size()) return true;
return Serializer::enabled();
} else if (rmode_ == RelocInfo::NONE) {
return false;
@@ -790,16 +801,17 @@ bool Operand::must_use_constant_pool() const {
}
-bool Operand::is_single_instruction(Instr instr) const {
+bool Operand::is_single_instruction(const Assembler* assembler,
+ Instr instr) const {
if (rm_.is_valid()) return true;
uint32_t dummy1, dummy2;
- if (must_use_constant_pool() ||
+ if (must_use_constant_pool(assembler) ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (must_use_constant_pool() ||
+ if (must_use_constant_pool(assembler) ||
!CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
@@ -833,7 +845,7 @@ void Assembler::addrmod1(Instr instr,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (x.must_use_constant_pool() ||
+ if (x.must_use_constant_pool(this) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
@@ -842,7 +854,7 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (x.must_use_constant_pool() ||
+ if (x.must_use_constant_pool(this) ||
!CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
@@ -854,7 +866,7 @@ void Assembler::addrmod1(Instr instr,
} else {
// If this is not a mov or mvn instruction we may still be able to avoid
// a constant pool entry by using mvn or movw.
- if (!x.must_use_constant_pool() &&
+ if (!x.must_use_constant_pool(this) &&
(instr & kMovMvnMask) != kMovMvnPattern) {
mov(ip, x, LeaveCC, cond);
} else {
@@ -1379,7 +1391,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (src.must_use_constant_pool() ||
+ if (src.must_use_constant_pool(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
@@ -1656,7 +1668,7 @@ void Assembler::vldr(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1698,7 +1710,7 @@ void Assembler::vldr(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1742,7 +1754,7 @@ void Assembler::vstr(const DwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
// Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1783,7 +1795,7 @@ void Assembler::vstr(const SwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1826,7 +1838,7 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1834,6 +1846,7 @@ void Assembler::vldm(BlockAddrMode am,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
+ ASSERT(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -1847,7 +1860,7 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1855,6 +1868,7 @@ void Assembler::vstm(BlockAddrMode am,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
+ ASSERT(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -1867,7 +1881,7 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1888,7 +1902,7 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1911,7 +1925,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsSupported(VFP3));
// VMOV can accept an immediate of the form:
//
@@ -1964,10 +1978,10 @@ void Assembler::vmov(const DwVfpRegister dst,
const Condition cond) {
// Dd = immediate
// Instruction details available in ARM DDI 0406B, A8-640.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
uint32_t enc;
- if (FitsVMOVDoubleImmediate(imm, &enc)) {
+ if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
} else {
@@ -2001,7 +2015,7 @@ void Assembler::vmov(const SwVfpRegister dst,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@@ -2014,7 +2028,7 @@ void Assembler::vmov(const DwVfpRegister dst,
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
@@ -2028,7 +2042,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
src1.code()*B12 | 0xB*B8 | B4 | dst.code());
@@ -2043,7 +2057,7 @@ void Assembler::vmov(const Register dst1,
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
dst1.code()*B12 | 0xB*B8 | B4 | src.code());
@@ -2057,7 +2071,7 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@@ -2072,7 +2086,7 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@@ -2197,7 +2211,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@@ -2206,7 +2220,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@@ -2215,7 +2229,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@@ -2224,7 +2238,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2233,7 +2247,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2242,7 +2256,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@@ -2251,7 +2265,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2259,6 +2273,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
0x5*B9 | B8 | B6 | src.code());
}
@@ -2267,6 +2282,7 @@ void Assembler::vneg(const DwVfpRegister dst,
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
0x5*B9 | B8 | 0x3*B6 | src.code());
}
@@ -2281,7 +2297,7 @@ void Assembler::vadd(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2296,7 +2312,7 @@ void Assembler::vsub(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@@ -2311,7 +2327,7 @@ void Assembler::vmul(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2326,7 +2342,7 @@ void Assembler::vdiv(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2339,7 +2355,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@@ -2352,7 +2368,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(src2 == 0.0);
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
src1.code()*B12 | 0x5*B9 | B8 | B6);
@@ -2363,7 +2379,7 @@ void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2373,7 +2389,7 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2384,7 +2400,7 @@ void Assembler::vsqrt(const DwVfpRegister dst,
const Condition cond) {
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}
@@ -2435,6 +2451,14 @@ void Assembler::RecordComment(const char* msg) {
}
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in the
+ // code.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+#endif
+}
+
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -2511,12 +2535,15 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
+ if (((rmode >= RelocInfo::JS_RETURN) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ (rmode == RelocInfo::CONST_POOL)) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
+ || RelocInfo::IsPosition(rmode)
+ || RelocInfo::IsConstPool(rmode));
// These modes do not need an entry in the constant pool.
} else {
ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
@@ -2542,7 +2569,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+ RelocInfo reloc_info_with_ast_id(pc_,
+ rmode,
+ RecordedAstId().ToInt(),
+ NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -2602,13 +2632,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
- int needed_space = jump_instr + kInstrSize +
- num_pending_reloc_info_ * kInstrSize + kGap;
+ int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize;
+ int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
{
// Block recursive calls to CheckConstPool.
BlockConstPoolScope block_const_pool(this);
+ RecordComment("[ Constant Pool");
+ RecordConstPool(size);
// Emit jump over constant pool if necessary.
Label after_pool;
@@ -2616,8 +2648,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
b(&after_pool);
}
- RecordComment("[ Constant Pool");
-
// Put down constant pool marker "Undefined instruction" as specified by
// A5.6 (ARMv7) Instruction set encoding.
emit(kConstantPoolMarker | num_pending_reloc_info_);
@@ -2627,7 +2657,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+ rinfo.rmode() != RelocInfo::CONST_POOL);
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
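The size recorded for the pool now counts data slots in pointers rather than instructions, and both the comment and the CONST_POOL reloc entry are emitted before the branch so the recorded span covers the whole pool. A back-of-envelope sketch of the computation above, assuming 4-byte ARM instructions and 4-byte pool entries:

// Bytes occupied by a constant pool: an optional branch over the pool,
// the marker word, and one pointer-sized slot per pending RelocInfo
// entry (kGap is extra head-room, not part of the pool itself).
int ConstPoolSize(bool require_jump, int num_pending_reloc_info) {
  const int kInstrSize = 4;
  const int kPointerSize = 4;
  int jump_instr = require_jump ? kInstrSize : 0;
  return jump_instr + kInstrSize + num_pending_reloc_info * kPointerSize;
}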
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index e2d5f598b7..7f2ce30aee 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -424,8 +424,8 @@ class Operand BASE_EMBEDDED {
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
- bool is_single_instruction(Instr instr = 0) const;
- bool must_use_constant_pool() const;
+ bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
+ bool must_use_constant_pool(const Assembler* assembler) const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
@@ -510,6 +510,7 @@ class CpuFeatures : public AllStatic {
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ if (f == VFP2 && !FLAG_enable_vfp2) return false;
return (supported_ & (1u << f)) != 0;
}
@@ -535,6 +536,8 @@ class CpuFeatures : public AllStatic {
public:
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
+ // VFP2 and ARMv7 are implied by VFP3.
+ if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(CpuFeatures::found_by_runtime_probing_ & mask) == 0);
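The widened mask keeps the serializer check honest: a VFP3 scope implicitly relies on VFP2 and ARMv7, so those bits must be included when verifying that no runtime-probed feature leaks into the snapshot. A standalone sketch of the implication (enum values illustrative):

// Feature bit with its implications folded in: every VFP3 device also
// provides VFP2 and ARMv7, so requesting VFP3 accounts for all three.
enum CpuFeature { VFP2, VFP3, ARMv7 };

unsigned FeatureMask(CpuFeature f) {
  unsigned mask = 1u << f;
  if (f == VFP3) mask |= (1u << VFP2) | (1u << ARMv7);
  return mask;
}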
@@ -645,6 +648,11 @@ class Assembler : public AssemblerBase {
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ // Avoids using instructions that vary in size in unpredictable ways between
+ // the snapshot and the running VM. This is needed by the full compiler so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -1164,6 +1172,8 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
+ bool predictable_code_size() const { return predictable_code_size_; }
+
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
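When predictable_code_size() is set, the assembler must not pick shorter encodings opportunistically, so the versions of a function compiled with and without debug support stay aligned instruction-for-instruction. An illustrative toy emitter, not V8 code:

// A short encoding is used only when layout may vary freely; with
// predictable code size requested, the fixed-size form always wins, so
// two compilations of the same function emit identically sized code.
struct ToyEmitter {
  bool predictable_code_size;
  int LoadImmediateSize(int imm) const {
    bool fits_short = imm >= 0 && imm < 256;  // pretend 8-bit immediates
    if (fits_short && !predictable_code_size) return 4;  // one instruction
    return 8;  // constant-pool load: fixed size regardless of the value
  }
};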
@@ -1203,22 +1213,41 @@ class Assembler : public AssemblerBase {
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
- void SetRecordedAstId(unsigned ast_id) {
- ASSERT(recorded_ast_id_ == kNoASTId);
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
- unsigned RecordedAstId() {
- ASSERT(recorded_ast_id_ != kNoASTId);
+ TypeFeedbackId RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
- void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
+ // Record the emission of a constant pool.
+ //
+ // The emission of a constant pool depends on the size of the generated
+ // code and on the number of RelocInfo entries recorded.
+ // The Debug mechanism needs to map code offsets between two versions of a
+ // function, compiled with and without debugger support (see for example
+ // Debug::PrepareForBreakPoints()).
+ // Compiling functions with debugger support generates additional code
+ // (Debug::GenerateSlot()). This may affect the emission of the constant
+ // pools and cause the version of the code with debugger support to have
+ // constant pools generated in different places.
+ // Recording the position and size of emitted constant pools makes it
+ // possible to correctly compute the offset mappings between the different
+ // versions of a function in all situations.
+ //
+ // The parameter indicates the size of the constant pool (in bytes), including
+ // the marker and branch over the data.
+ void RecordConstPool(int size);
+
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables. The constant pool should be
// emitted before any use of db and dd to ensure that constant pools
@@ -1283,7 +1312,7 @@ class Assembler : public AssemblerBase {
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
- unsigned recorded_ast_id_;
+ TypeFeedbackId recorded_ast_id_;
bool emit_debug_code() const { return emit_debug_code_; }
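The recorded AST id changes from a raw unsigned with a kNoASTId sentinel to the TypeFeedbackId wrapper used by SetRecordedAstId()/RecordedAstId() above. A minimal sketch of that wrapper pattern; the sentinel value is an assumption for illustration:

// Strongly typed id with an explicit "none" state, replacing the old
// raw `unsigned` + kNoASTId convention.
class TypeFeedbackId {
 public:
  explicit TypeFeedbackId(int id) : id_(id) {}
  static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
  bool IsNone() const { return id_ == kNoneId; }
  int ToInt() const { return id_; }
 private:
  static const int kNoneId = -1;  // assumed sentinel, for illustration
  int id_;
};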
@@ -1425,7 +1454,10 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
+
bool emit_debug_code_;
+ bool predictable_code_size_;
+
friend class PositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 578bd810d4..2d1d7b1199 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -75,12 +75,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the global context.
+ // Load the native context.
- __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the InternalArray function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
__ ldr(result,
MemOperand(result,
Context::SlotOffset(
@@ -90,12 +91,13 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
+ // Load the native context.
- __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the Array function from the native context.
__ ldr(result,
MemOperand(result,
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
@@ -697,6 +699,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(pc, r2);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
+
+ __ push(r1); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore receiver.
+ __ pop(r1);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -1246,7 +1285,7 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CpuFeatures::IsSupported(VFP3)) {
+ if (!CPU::SupportsCrankshaft()) {
__ Abort("Unreachable code: Cannot optimize without VFP3 support.");
return;
}
@@ -1366,9 +1405,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
@@ -1561,9 +1600,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 761123f639..d9e3a3da9f 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -85,6 +85,8 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
// Pop the function info from the stack.
@@ -98,32 +100,44 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
+
int map_index = (language_mode_ == CLASSIC_MODE)
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
+ __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
+ __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
__ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ ldr(r1,
+ FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ tst(r1, r1);
+ __ b(ne, &check_optimized);
+ }
+ __ bind(&install_unoptimized);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
@@ -131,6 +145,72 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Return result. The argument function info has been popped already.
__ Ret();
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
+
+ // r2 holds the native context, r1 points to a FixedArray of 3-element
+ // entries (native context, optimized code, literals).
+ // The optimized code map must never be empty, so check the first entry.
+ Label install_optimized;
+ // Speculatively move code object into r4.
+ __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
+ __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+ __ cmp(r2, r5);
+ __ b(eq, &install_optimized);
+
+ // Iterate through the rest of the map backwards. r4 holds an index as a Smi.
+ Label loop;
+ __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
+ __ bind(&loop);
+ // Do not double-check the first entry.
+
+ __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ b(eq, &install_unoptimized);
+ __ sub(r4, r4, Operand(
+ Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r5, MemOperand(r5));
+ __ cmp(r2, r5);
+ __ b(ne, &loop);
+ // Hit: fetch the optimized code.
+ __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r5, r5, Operand(kPointerSize));
+ __ ldr(r4, MemOperand(r5));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+ 1, r6, r7);
+
+ // TODO(fschneider): Idea: store proper code pointers in the map and either
+ // unmangle them on marking or do nothing as the whole map is discarded on
+ // major GC anyway.
+ __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+
+ // Now link the function into the list of optimized functions.
+ __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
+ // No need for a write barrier as the JSFunction (r0) is in new space.
+
+ __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Store JSFunction (r0) into r4 before issuing the write barrier, as
+ // it clobbers all the registers passed.
+ __ mov(r4, r0);
+ __ RecordWriteContextSlot(
+ r2,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ r4,
+ r1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ LoadRoot(r4, Heap::kFalseValueRootIndex);
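Before falling back to the generic code, the stub now scans the shared function info's optimized code map for an entry whose native context matches, installing the cached optimized code on a hit and linking the closure into the context's optimized-function list. An illustrative flat-array version of that scan (not the V8 API):

#include <cstddef>

// The map is a flat array of 3-slot entries:
// (native context, optimized code, literals). Scan backwards, as the
// stub does, returning the cached code for a matching context.
const void* LookupOptimizedCode(const void* const* map, size_t length,
                                const void* native_context) {
  const size_t kEntryLength = 3;
  for (size_t i = length; i >= kEntryLength; i -= kEntryLength) {
    if (map[i - kEntryLength] == native_context) {
      return map[i - kEntryLength + 1];  // hit: cached optimized code
    }
  }
  return NULL;  // miss: install the unoptimized shared code instead
}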
@@ -162,12 +242,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
// Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -210,9 +290,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(r3, &after_sentinel);
@@ -222,16 +302,16 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(eq, message);
}
__ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
+ __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -519,8 +599,8 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
__ vmov(d7.high(), scratch1);
__ vcvt_f64_s32(d7, d7.high());
@@ -589,9 +669,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP3) &&
+ if (CpuFeatures::IsSupported(VFP2) &&
destination == kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
// Load the double from tagged HeapNumber to double register.
__ sub(scratch1, object, Operand(kHeapObjectTag));
__ vldr(dst, scratch1, HeapNumber::kValueOffset);
@@ -604,8 +684,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
// Handle loading a double from a smi.
__ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Convert smi to double using VFP instructions.
__ vmov(dst.high(), scratch1);
__ vcvt_f64_s32(dst, dst.high());
@@ -682,8 +762,8 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Label done;
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(single_scratch, int_scratch);
__ vcvt_f64_s32(double_dst, single_scratch);
if (destination == kCoreRegisters) {
@@ -776,8 +856,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
__ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
@@ -847,8 +927,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
SwVfpRegister single_scratch = double_scratch.low();
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
@@ -978,7 +1058,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
@@ -990,7 +1070,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vstr(d0,
FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
} else {
@@ -1209,9 +1289,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
// Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
// Load the double from rhs, tagged HeapNumber r0, to d6.
__ sub(r7, rhs, Operand(kHeapObjectTag));
@@ -1249,8 +1329,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Load the double from lhs, tagged HeapNumber r1, to d7.
__ sub(r7, lhs, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
@@ -1362,7 +1442,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ push(lr);
__ PrepareCallCFunction(0, 2, r5);
if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
@@ -1437,8 +1517,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
__ sub(r7, lhs, Operand(kHeapObjectTag));
@@ -1527,8 +1607,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ CheckMap(object,
scratch1,
Heap::kHeapNumberMapRootIndex,
@@ -1659,9 +1739,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
__ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
__ VFPCompareAndSetFlags(d7, d6);
@@ -1779,11 +1859,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
- // This stub uses VFP3 instructions.
- CpuFeatures::Scope scope(VFP3);
-
Label patch;
const Register map = r9.is(tos_) ? r7 : r9;
+ const Register temp = map;
// undefined -> false.
CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@@ -1836,13 +1914,56 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ VFPCompareAndSetFlags(d1, 0.0);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
+ } else {
+ Label done, not_nan, not_zero;
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+ // -0 maps to false:
+ __ bic(
+ temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
+ __ b(ne, &not_zero);
+ // If exponent word is zero then the answer depends on the mantissa word.
+ __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+ __ jmp(&done);
+
+ // Check for NaN.
+ __ bind(&not_zero);
+ // We already zeroed the sign bit; now shift out the mantissa so we only
+ // have the exponent left.
+ __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
+ unsigned int shifted_exponent_mask =
+ HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
+ __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
+ __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
+
+ // Reload exponent word.
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+ __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
+ // If mantissa is not zero then we have a NaN, so return 0.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ b(ne, &done);
+
+ // Load mantissa word.
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+ __ cmp(temp, Operand(0, RelocInfo::NONE));
+ // If mantissa is not zero then we have a NaN, so return 0.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ b(ne, &done);
+
+ __ bind(&not_nan);
+ __ mov(tos_, Operand(1, RelocInfo::NONE));
+ __ bind(&done);
+ }
__ Ret();
__ bind(&not_heap_number);
}
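The new fallback decides heap-number truthiness from the raw IEEE-754 words: +0, -0, and NaN are the only false values, and all three are recognizable without floating-point instructions. The same logic in portable form (a sketch, not V8 code):

#include <cstdint>
#include <cstring>

// A double is falsy iff it is +0, -0, or NaN; all decidable from the
// bit pattern alone.
bool DoubleIsTruthy(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  bits &= ~(1ULL << 63);                     // drop the sign: -0 -> +0
  if (bits == 0) return false;               // +/-0 -> false
  const uint64_t kExponentMask = 0x7FFULL << 52;
  const uint64_t kMantissaMask = (1ULL << 52) - 1;
  bool is_nan = (bits & kExponentMask) == kExponentMask &&
                (bits & kMantissaMask) != 0;
  return !is_nan;                            // NaN -> false, rest -> true
}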
@@ -1892,7 +2013,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
@@ -1910,7 +2031,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, MemOperand(sp, i * kDoubleSize));
@@ -2140,9 +2261,9 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ mov(r0, r2); // Move newly allocated heap number to r0.
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r1);
__ vcvt_f64_s32(d0, s0);
__ sub(r2, r0, Operand(kHeapObjectTag));
@@ -2442,7 +2563,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) &&
+ CpuFeatures::IsSupported(VFP2) &&
op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@@ -2469,7 +2590,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Using VFP registers:
// d6: Left value
// d7: Right value
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
switch (op_) {
case Token::ADD:
__ vadd(d5, d6, d7);
@@ -2558,7 +2679,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, not_numbers);
@@ -2597,10 +2718,10 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// result.
__ mov(r0, Operand(r5));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r2);
if (op_ == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
@@ -2759,7 +2880,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
+ (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
? FloatingPointHelper::kVFPRegisters
: FloatingPointHelper::kCoreRegisters;
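The destination choice above routes operands: Token::MOD always goes through the C runtime, so its operands stay in core registers, while the other operations can use VFP registers whenever VFP2 hardware is present. The selection in isolation (illustrative types):

enum Destination { kVFPRegisters, kCoreRegisters };

// MOD has no VFP fast path; everything else prefers VFP registers
// when the hardware supports VFP2.
Destination ChooseDestination(bool vfp2_supported, bool is_mod) {
  return (vfp2_supported && !is_mod) ? kVFPRegisters : kCoreRegisters;
}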
@@ -2787,7 +2908,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&transition);
if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
Label return_heap_number;
switch (op_) {
case Token::ADD:
@@ -2954,9 +3075,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// We only get a negative result if the shift value (r2) is 0.
// This result cannot be represented as a signed 32-bit integer, try
// to return a heap number if we can.
- // The non vfp3 code does not support this special case, so jump to
+ // The non-VFP2 code does not support this special case, so jump to
// runtime if we don't support it.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
__ b(mi, (result_type_ <= BinaryOpIC::INT32)
? &transition
: &return_heap_number);
@@ -2991,8 +3112,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
&call_runtime);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
if (op_ != Token::SHR) {
// Convert the result to a floating point value.
__ vmov(double_scratch.low(), r2);
@@ -3221,8 +3342,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
if (tagged) {
// Argument is a number and is on stack and in r0.
// Load argument and check if it is a smi.
@@ -3323,23 +3444,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
- if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
- CpuFeatures::Scope scope(VFP3);
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
Label no_update;
Label skip_cache;
// Call C function to calculate the result and update the cache.
- // Register r0 holds precalculated cache entry address; preserve
- // it on the stack and pop it into register cache_entry after the
- // call.
- __ push(cache_entry);
+ // r0: precalculated cache entry address.
+ // r2 and r3: parts of the double value.
+ // Store r0, r2 and r3 on the stack before calling the C function.
+ __ Push(r3, r2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(d2);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ pop(cache_entry);
+ __ Pop(r3, r2, cache_entry);
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
__ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
@@ -3385,6 +3506,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
Register scratch) {
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
Isolate* isolate = masm->isolate();
__ push(lr);
@@ -3445,7 +3567,7 @@ void InterruptStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope vfp3_scope(VFP3);
+ CpuFeatures::Scope vfp2_scope(VFP2);
const Register base = r1;
const Register exponent = r2;
const Register heapnumbermap = r5;
@@ -3544,7 +3666,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Add +0 to convert -0 to +0.
__ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vmov(double_result, 1);
+ __ vmov(double_result, 1.0);
__ vsqrt(double_scratch, double_scratch);
__ vdiv(double_result, double_result, double_scratch);
__ jmp(&done);
@@ -3901,8 +4023,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
@@ -3917,7 +4039,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Set up argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
}
__ ldr(r4, MemOperand(sp, offset_to_argv));
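The argv offset must skip the callee-saved doubles only when VFP2 is present, since only then were they pushed by the prologue above. The layout arithmetic as a sketch (ARM32 sizes assumed):

// Distance from sp to argv after the JS entry prologue: the saved core
// registers plus lr and, when VFP2 is present, the callee-saved doubles
// pushed on top of them.
int OffsetToArgv(int num_callee_saved, int num_double_callee_saved,
                 bool vfp2_supported) {
  const int kPointerSize = 4;
  const int kDoubleSize = 8;
  int offset = (num_callee_saved + 1) * kPointerSize;  // +1 for lr
  if (vfp2_supported) offset += num_double_callee_saved * kDoubleSize;
  return offset;
}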
@@ -4055,8 +4177,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
#endif
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Restore callee-saved vfp registers.
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
}
@@ -4385,14 +4507,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r0 = address of new object(s) (tagged)
// r2 = argument count (tagged)
- // Get the arguments boilerplate from the current (global) context into r4.
+ // Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
- __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ cmp(r1, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@@ -4565,9 +4687,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT |
SIZE_IN_WORDS));
- // Get the arguments boilerplate from the current (global) context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(r4, Context::SlotOffset(
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
@@ -4696,7 +4818,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r2, r2, Operand(2)); // r2 was a smi.
// Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+ __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
__ b(hi, &runtime);
// r2: Number of capture registers
@@ -5082,10 +5204,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
__ mov(r4, Operand(factory->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@@ -5191,7 +5313,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
- __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r3,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
@@ -6583,8 +6706,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Load left and right operand
__ sub(r2, r1, Operand(kHeapObjectTag));
@@ -7131,6 +7254,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
{ REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
+ // FastNewClosureStub::Generate
+ { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7431,6 +7556,65 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret();
}
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ __ push(lr);
+ __ CallStub(&stub);
+ __ pop(lr);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push lr" instruction, followed by a call.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + Assembler::kInstrSize;
+
+ // Save live volatile registers.
+ __ Push(lr, r5, r1);
+ const int32_t kNumSavedRegs = 3;
+
+ // Compute the function's address for the first argument.
+ __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(r5, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ and_(sp, sp, Operand(-frame_alignment));
+ }
+
+#if defined(V8_HOST_ARCH_ARM)
+ __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+ __ ldr(ip, MemOperand(ip));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ Address trampoline_address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(EntryHookTrampoline));
+ ApiFunction dispatcher(trampoline_address);
+ __ mov(ip, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+#endif
+ __ Call(ip);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, r5);
+ }
+
+ __ Pop(lr, r5, r1);
+ __ Ret();
+}
+
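The hook the stub dispatches to receives the address of the function just entered (recovered by backing lr up by kReturnAddressDistanceFromFunctionStart) and the stack location of the caller's return address. A signature sketch mirroring the shape of v8::FunctionEntryHook:

#include <cstdint>

// Shape of the profiler callback invoked on function entry.
typedef void (*FunctionEntryHook)(uintptr_t function,
                                  uintptr_t return_addr_location);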
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 38ed476cc1..3ddc405715 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -149,7 +149,7 @@ class BinaryOpStub: public CodeStub {
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
- use_vfp3_ = CpuFeatures::IsSupported(VFP3);
+ use_vfp2_ = CpuFeatures::IsSupported(VFP2);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -159,7 +159,7 @@ class BinaryOpStub: public CodeStub {
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
- use_vfp3_(VFP3Bits::decode(key)),
+ use_vfp2_(VFP2Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type) { }
@@ -171,7 +171,7 @@ class BinaryOpStub: public CodeStub {
Token::Value op_;
OverwriteMode mode_;
- bool use_vfp3_;
+ bool use_vfp2_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo operands_type_;
@@ -182,7 +182,7 @@ class BinaryOpStub: public CodeStub {
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
- class VFP3Bits: public BitField<bool, 9, 1> {};
+ class VFP2Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
@@ -190,7 +190,7 @@ class BinaryOpStub: public CodeStub {
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
- | VFP3Bits::encode(use_vfp3_)
+ | VFP2Bits::encode(use_vfp2_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
@@ -571,7 +571,7 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
@@ -586,7 +586,7 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
// Restore all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index e00afb9035..09166c3c01 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -107,7 +107,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- r4 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
+ bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
@@ -121,15 +121,34 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
- __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
- __ add(lr, lr, Operand(r5, LSL, 2));
+ // Use lr as a temporary register.
+ __ mov(lr, Operand(r5, LSL, 2));
+ __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
- // r6: destination FixedDoubleArray, not tagged as heap object
+ // r6: destination FixedDoubleArray, not tagged as heap object.
+
+ // Align the array so its double fields start on an 8-byte boundary.
+ // Store a filler value in the unused memory.
+ Label aligned, aligned_done;
+ __ tst(r6, Operand(kDoubleAlignmentMask));
+ __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
+ __ b(eq, &aligned);
+ // Store at the beginning of the allocated memory and update the base pointer.
+ __ str(ip, MemOperand(r6, kPointerSize, PostIndex));
+ __ b(&aligned_done);
+
+ __ bind(&aligned);
+ // Store the filler at the end of the allocated memory.
+ __ sub(lr, lr, Operand(kPointerSize));
+ __ str(ip, MemOperand(r6, lr));
+
+ __ bind(&aligned_done);
+
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Update receiver's map.
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
@@ -163,7 +182,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged
- if (!vfp3_supported) __ Push(r1, r0);
+ if (!vfp2_supported) __ Push(r1, r0);
__ b(&entry);
@@ -191,8 +210,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
- if (vfp3_supported) {
- CpuFeatures::Scope scope(VFP3);
+ if (vfp2_supported) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
@@ -225,7 +244,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ cmp(r7, r6);
__ b(lt, &loop);
- if (!vfp3_supported) __ Pop(r1, r0);
+ if (!vfp2_supported) __ Pop(r1, r0);
__ pop(lr);
__ bind(&done);
}
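
The new allocation path above over-allocates by one pointer so the double payload can always start on an 8-byte boundary, parking a filler in whichever word ends up unused. A rough C++ rendering of the same decision; the constants and filler value are illustrative, where V8 stores the one_pointer_filler_map:

#include <cstddef>
#include <cstdint>

const uintptr_t kPointerSize = 4;          // 32-bit ARM
const uintptr_t kDoubleAlignmentMask = 7;  // doubles want 8-byte alignment
const uint32_t kFiller = 0xDEADBEEF;       // stands in for the filler map

// 'base' points at a block over-allocated by one pointer; returns the
// aligned payload base and writes the filler into the spare word.
uint32_t* AlignForDoubles(uint32_t* base, size_t size_in_bytes) {
  if (reinterpret_cast<uintptr_t>(base) & kDoubleAlignmentMask) {
    *base = kFiller;  // unaligned: filler first, then bump the base
    return base + 1;
  }
  base[size_in_bytes / kPointerSize - 1] = kFiller;  // aligned: filler last
  return base;
}
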
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index e767001e1d..5aadc3caeb 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -56,8 +56,9 @@
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
+#if defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(__ARM_ARCH_5TEJ__) || \
defined(CAN_USE_ARMV6_INSTRUCTIONS)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
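
GCC defines __ARM_ARCH_5TEJ__ rather than __ARM_ARCH_5TE__ when targeting ARMv5TEJ cores such as the ARM926EJ-S, so before this change such cores were effectively treated as pre-ARMv5. A small compile-time probe, assuming constants-arm.h is on the include path and a compiler that supports the #pragma message extension, shows which tier the chain selects:

#include "constants-arm.h"  // include path is illustrative

#if defined(CAN_USE_ARMV7_INSTRUCTIONS)
# pragma message("ARMv7 instructions enabled")
#elif defined(CAN_USE_ARMV5_INSTRUCTIONS)
# pragma message("ARMv5 and Thumb instructions enabled")
#else
# pragma message("ARMv4 baseline only")
#endif
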
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 699e6aa4b1..5339be1d84 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -50,6 +50,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
+ // The optimized code is going to be patched, so we cannot use it
+ // anymore. To be safe, reset the whole cache.
+ function->shared()->ClearOptimizedCodeMap();
+
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
@@ -69,8 +73,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
- int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
- RelocInfo::NONE);
+ // We need calls to have a predictable size in the unoptimized code, but
+ // this is optimized code, so the call size need not be predictable here.
+ int call_size_in_bytes =
+ MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
+ RelocInfo::NONE);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
@@ -97,8 +104,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
+ // Iterate over all the functions that share the same code object
+ // and make them use the unoptimized version.
+ Context* context = function->context()->native_context();
+ Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+ SharedFunctionInfo* shared = function->shared();
+ while (!element->IsUndefined()) {
+ JSFunction* func = JSFunction::cast(element);
+ // Grab element before code replacement as ReplaceCode alters the list.
+ element = func->next_function_link();
+ if (func->code() == code) {
+ func->ReplaceCode(shared->code());
+ }
+ }
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -196,11 +214,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@@ -219,7 +237,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
- int bailout_id = LookupBailoutId(data, ast_id);
+ int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@@ -239,9 +257,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
+ int closure_id = iterator.Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@@ -352,8 +370,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@@ -577,19 +595,145 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (FLAG_trace_deopt) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 5 stack entries from StackFrame::INTERNAL (lr, fp, cp, frame type,
+ // code object, see MacroAssembler::EnterFrame). For setter stub frames we
+ // need one additional entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context is read from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
+
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
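
As a quick sanity check of the accessor stub frame built above, on 32-bit ARM with kPointerSize == 4: the five fixed slots are the caller's pc, the caller's fp, the context, the INTERNAL frame marker, and the code object, plus one extra slot for a setter's implicit return value.

#include <cstdio>

int main() {
  const unsigned kPointerSize = 4;
  for (int is_setter = 0; is_setter <= 1; ++is_setter) {
    const unsigned height_in_bytes = 0;  // receiver/value stay in registers
    unsigned fixed_frame_entries = 5 + (is_setter ? 1 : 0);
    unsigned output_frame_size =
        height_in_bytes + fixed_frame_entries * kPointerSize;
    std::printf("%s frame: %u bytes\n",
                is_setter ? "setter" : "getter", output_frame_size);
  }
  return 0;  // getter frame: 20 bytes; setter frame: 24 bytes
}
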
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index ff7c3c139e..b2f629b26c 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -134,6 +134,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -182,10 +184,13 @@ void FullCodeGenerator::Generate() {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
+ // Argument to NewContext is the function, which is still in r1.
+ Comment cmnt(masm_, "[ Allocate context");
__ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
@@ -262,7 +267,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@@ -277,7 +282,7 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
@@ -328,7 +333,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
- reset_value = 10;
+ reset_value = FLAG_interrupt_budget >> 4;
}
__ mov(r2, Operand(profiling_counter_));
__ mov(r3, Operand(Smi::FromInt(reset_value)));
@@ -336,10 +341,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -353,7 +354,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
@@ -405,7 +406,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -675,18 +676,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- if (CpuFeatures::IsSupported(VFP3)) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub);
- __ tst(result_register(), result_register());
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- }
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub);
+ __ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -787,7 +779,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
@@ -840,10 +832,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
- PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
- ? READ_ONLY : NONE;
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -1133,26 +1124,34 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
- __ mov(r2, r0);
- __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
+ __ cmp(r2, ip);
__ b(ne, &fixed_array);
// We got a map in register r0. Get the enumeration cache from it.
+ Label no_descriptors;
__ bind(&use_cache);
- __ LoadInstanceDescriptors(r0, r1);
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
- __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ __ EnumLength(r1, r0);
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ __ b(eq, &no_descriptors);
+
+ __ LoadInstanceDescriptors(r0, r2, r4);
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(r0); // Map.
- __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r2, r1, r0);
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ jmp(&exit);
+
// We got a fixed array in register r0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
@@ -1161,7 +1160,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
@@ -1317,9 +1316,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ Move(next, current);
}
__ bind(&loop);
- // Terminate at global context.
+ // Terminate at native context.
__ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
__ b(eq, &fast);
// Check that extension is NULL.
@@ -1607,7 +1606,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1633,7 +1632,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1839,11 +1838,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1900,7 +1899,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1908,7 +1907,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1935,7 +1934,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2018,7 +2018,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -2149,7 +2150,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@@ -2207,7 +2208,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2253,7 +2254,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2277,6 +2278,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(r0);
} else {
VisitForStackValue(expr->obj());
@@ -2290,7 +2292,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ic_total_count_++;
__ Call(code, rmode, ast_id);
}
@@ -2312,7 +2314,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2345,7 +2347,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2365,16 +2367,14 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code, but not in the snapshot.
- if (!Serializer::enabled()) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(r2, Operand(cell));
- }
+ // Record call targets in unoptimized code.
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ mov(r2, Operand(cell));
CallFunctionStub stub(arg_count, flags);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2564,21 +2564,15 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(r2, Operand(cell));
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
- }
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ mov(r2, Operand(cell));
- CallConstructStub stub(flags);
+ CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
@@ -2720,7 +2714,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(r0);
+ if (generate_debug_code_) __ AbortIfSmi(r0);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
@@ -2737,7 +2731,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Look for valueOf symbol in the descriptor array, and indicate false if
// found. The type is not checked, so if it is a transition it is a false
// negative.
- __ LoadInstanceDescriptors(r1, r4);
+ __ LoadInstanceDescriptors(r1, r4, r3);
__ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: descriptor array
// r3: length of descriptor array
@@ -2751,8 +2745,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Calculate location of the first key name.
__ add(r4,
r4,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
+ Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
Label entry, loop;
@@ -2764,7 +2757,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r3, MemOperand(r4, 0));
__ cmp(r3, ip);
__ b(eq, if_false);
- __ add(r4, r4, Operand(kPointerSize));
+ __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmp(r4, Operand(r2));
__ b(ne, &loop);
@@ -2774,8 +2767,8 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
__ JumpIfSmi(r2, if_false);
__ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ cmp(r2, r3);
__ b(ne, if_false);
@@ -3052,13 +3045,14 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
__ PrepareCallCFunction(1, r0);
- __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
@@ -3075,9 +3069,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ mov(r0, r4);
} else {
__ PrepareCallCFunction(2, r0);
- __ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ ldr(r1,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ mov(r0, Operand(r4));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
@@ -3139,20 +3134,19 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = r0;
Register result = r0;
Register scratch0 = r9;
Register scratch1 = r1;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ JumpIfSmi(object, &not_date_object);
__ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
- __ Assert(eq, "Trying to get date field from non-date.");
-#endif
+ __ b(ne, &not_date_object);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3169,8 +3163,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ PrepareCallCFunction(2, scratch1);
__ mov(r1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
+ __ jmp(&done);
}
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(r0);
}
@@ -3181,7 +3179,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
@@ -3433,10 +3431,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
- __ b(eq, &proxy);
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(r0, &runtime);
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ __ b(ne, &runtime);
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
@@ -3446,7 +3445,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(r0);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3474,7 +3473,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3486,8 +3485,8 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = r0;
Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
__ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ ldr(cache,
FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
@@ -3584,9 +3583,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(r0);
- }
+ __ AbortIfNotString(r0);
+
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
@@ -3658,7 +3656,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Accumulated sum of string lengths (smi).
// element: Current array element.
// elements_end: Array end.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmp(array_length, Operand(0));
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
}
@@ -3856,7 +3854,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -4011,7 +4009,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
context()->Plug(r0);
}
@@ -4069,7 +4068,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@@ -4122,7 +4121,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4154,7 +4153,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4171,7 +4170,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4380,7 +4379,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
@@ -4464,7 +4463,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4509,6 +4508,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
__ ldr(r1, MemOperand(ip));
+ __ SmiTag(r1);
__ push(r1);
ExternalReference pending_message_script =
@@ -4529,6 +4529,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
__ str(r1, MemOperand(ip));
__ pop(r1);
+ __ SmiUntag(r1);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
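
The interrupt-budget changes above all feed the same weighting formula: back edges and returns decrement the profiling counter by a weight proportional to the distance of generated code, capped at kMaxBackEdgeWeight. A standalone sketch, reusing the divisor 142 from the kBackEdgeDistanceDivisor constant deleted above; the shared kBackEdgeDistanceUnit may use a different per-platform value:

#include <algorithm>
#include <cstdio>

const int kMaxBackEdgeWeight = 127;
const int kBackEdgeDistanceUnit = 142;  // bytes of code per weight unit

int BackEdgeWeight(int distance_in_bytes) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, distance_in_bytes / kBackEdgeDistanceUnit));
}

int main() {
  // 50 bytes -> 1, 1420 bytes -> 10, a huge loop body saturates at 127.
  std::printf("%d %d %d\n", BackEdgeWeight(50), BackEdgeWeight(1420),
              BackEdgeWeight(1000000));
  return 0;
}
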
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index fd93480986..404f3c6145 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -396,7 +396,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
- NORMAL,
+ Code::NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 283862c787..fc1d64079a 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -407,24 +407,14 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32, graph->zone()),
- pointer_maps_(8, graph->zone()),
- inlined_closures_(1, graph->zone()) {
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(bool is_double) {
  // Skip a slot when allocating a double-width slot.
if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index, zone());
@@ -434,120 +424,9 @@ LOperand* LChunk::GetNextSpillSlot(bool is_double) {
}
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap, zone());
- index = instructions_.length();
- instructions_.Add(instr, zone());
- } else {
- index = instructions_.length();
- instructions_.Add(instr, zone());
- instructions_.Add(gap, zone());
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map(), zone());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id(), zone());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(from, to, zone());
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
+LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -562,17 +441,8 @@ LChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LChunkBuilder::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -741,7 +611,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@@ -836,13 +706,16 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift
// by 0 and the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
}
}
}
@@ -975,8 +848,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result = new(zone()) LEnvironment(
@@ -1001,7 +874,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
- result->AddValue(op, value->representation());
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@@ -1480,6 +1355,25 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ right = UseOrConstantAtStart(instr->MostConstantOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@@ -1645,6 +1539,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@@ -1662,12 +1562,12 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result =
new(zone()) LDateField(object, FixedTemp(r1), instr->index());
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterAtStart(instr->index());
+ LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
}
@@ -1751,7 +1651,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ LNumberTagU* result = new(zone()) LNumberTagU(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ } else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
@@ -1759,8 +1662,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else {
ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(Use(instr->value())));
+ }
}
}
UNREACHABLE();
@@ -1956,9 +1864,10 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
@@ -1968,7 +1877,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
@@ -1987,7 +1897,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
@@ -2015,7 +1926,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
@@ -2032,7 +1944,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
@@ -2053,7 +1966,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
@@ -2309,7 +2223,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
@@ -2335,7 +2249,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
- instr->is_construct());
+ instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
@@ -2381,8 +2295,9 @@ LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
+ LOperand* scratch = TempRegister();
return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
+ new(zone()) LForInCacheArray(map, scratch)));
}
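
GetNextSpillIndex's skip logic, now living on LPlatformChunk, is worth tracing once: a double spill slot consumes two 32-bit stack words, so one index is burned to keep the pair adjacent. A standalone sketch, not V8 code:

#include <cstdio>

struct SpillAllocator {
  int spill_slot_count_;
  int GetNextSpillIndex(bool is_double) {
    if (is_double) spill_slot_count_++;  // reserve the partner word
    return spill_slot_count_++;
  }
};

int main() {
  SpillAllocator a = { 0 };
  int s0 = a.GetNextSpillIndex(false);  // 0: single-word slot
  int d1 = a.GetNextSpillIndex(true);   // 2: index 1 is burned for the pair
  int s2 = a.GetNextSpillIndex(false);  // 3
  std::printf("%d %d %d\n", s0, d1, s2);  // prints: 0 2 3
  return 0;
}
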
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 869a80a280..e6e102f762 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -108,6 +108,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -115,7 +116,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -132,11 +132,14 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
V(MathFloorOfDiv) \
+ V(MathMinMax) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
+ V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@@ -163,6 +166,7 @@ class LCodeGen;
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -257,8 +261,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
@@ -270,6 +272,11 @@ class LInstruction: public ZoneObject {
#endif
private:
+ // Iterator support.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -289,7 +296,6 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- int InputCount() { return I; }
LOperand* InputAt(int i) { return inputs_[i]; }
int TempCount() { return T; }
@@ -299,6 +305,9 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ virtual int InputCount() { return I; }
};
@@ -859,6 +868,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -993,6 +1003,16 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
};
+class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
@@ -1083,6 +1103,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
+class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@@ -1591,6 +1623,16 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
+class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -1601,6 +1643,16 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
+class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberTagU(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -2177,13 +2229,15 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LForInCacheArray(LOperand* map) {
+ explicit LForInCacheArray(LOperand* map, LOperand* scratch) {
inputs_[0] = map;
+ temps_[0] = scratch;
}
LOperand* map() { return inputs_[0]; }
+ LOperand* scratch() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
@@ -2222,65 +2276,13 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LChunk: public ZoneObject {
+class LPlatformChunk: public LChunk {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph);
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure, zone());
- }
-
- Zone* zone() const { return graph_->zone(); }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
};
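
Most of the old per-architecture LChunk body disappears here because it was target-independent; what remains on ARM is the spill-slot policy. A sketch of the resulting shape with stand-in types (the real base class keeps the instruction list, gap moves, labels, and inlined closures shown deleted above):

    struct CompilationInfoStub {};
    struct HGraphStub {};

    class LChunkSketch {  // the shared, target-independent part
     public:
      LChunkSketch(CompilationInfoStub* info, HGraphStub* graph)
          : spill_slot_count_(0), info_(info), graph_(graph) {}
      int spill_slot_count() const { return spill_slot_count_; }
     protected:
      int spill_slot_count_;
     private:
      CompilationInfoStub* info_;
      HGraphStub* graph_;
    };

    class LPlatformChunkSketch : public LChunkSketch {  // per-architecture part
     public:
      LPlatformChunkSketch(CompilationInfoStub* info, HGraphStub* graph)
          : LChunkSketch(info, graph) {}
      // Simplified spill policy: doubles take two 32-bit slots. (The real
      // ARM version also keeps double slots aligned.)
      int GetNextSpillIndex(bool is_double) {
        spill_slot_count_ += is_double ? 2 : 1;
        return spill_slot_count_ - (is_double ? 2 : 1);
      }
    };

    int main() {
      CompilationInfoStub info;
      HGraphStub graph;
      LPlatformChunkSketch chunk(&info, &graph);
      int a = chunk.GetNextSpillIndex(false);  // 0
      int b = chunk.GetNextSpillIndex(true);   // slots 1..2, returns 1
      return (a == 0 && b == 1) ? 0 : 1;
    }
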
@@ -2299,10 +2301,10 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
@@ -2321,7 +2323,7 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@@ -2331,7 +2333,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2421,7 +2423,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@@ -2433,7 +2435,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
+ BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index b03ba8c396..e9ba5eec72 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -91,17 +91,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
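
Abort stops printing under --trace-bailout and instead records the reason on the CompilationInfo, so reporting becomes the caller's job and the reason survives past codegen. The new contract, sketched with stand-in types:

    #include <cstdio>

    struct CompilationInfoSketch {
      const char* bailout_reason = nullptr;
      void set_bailout_reason(const char* reason) { bailout_reason = reason; }
    };

    struct LCodeGenSketch {
      CompilationInfoSketch* info;
      bool aborted = false;
      void Abort(const char* reason) {
        info->set_bailout_reason(reason);  // recorded, not printed
        aborted = true;                    // status_ = ABORTED in the real code
      }
    };

    int main() {
      CompilationInfoSketch info;
      LCodeGenSketch codegen{&info};
      codegen.Abort("ToOperand Unsupported double immediate.");
      std::printf("bailed out: %s\n", info.bailout_reason);
      return 0;
    }
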
@@ -127,6 +118,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -322,7 +315,8 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -360,7 +354,8 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -386,9 +381,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ return constant->handle();
}
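
The LookupLiteral-to-LookupConstant migration in these accessors swaps a raw Handle<Object> for the HConstant itself, so the integer and double paths below can assert intent (HasInteger32Value, HasDoubleValue) and read typed values directly instead of round-tripping every constant through value->Number().
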
@@ -398,33 +393,33 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+ ASSERT(constant->HasInteger32Value());
+ return constant->Integer32Value();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Operand(static_cast<int32_t>(literal->Number()));
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
Abort("ToOperand Unsupported double immediate.");
}
ASSERT(r.IsTagged());
- return Operand(literal);
+ return Operand(constant->handle());
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
@@ -478,7 +473,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ int closure_id = *info()->closure() != *environment->closure()
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -486,11 +484,19 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@@ -502,7 +508,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i));
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -510,18 +517,23 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
+ false,
false);
}
}
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i));
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged) {
+ bool is_tagged,
+ bool is_uint32) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
@@ -530,6 +542,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
@@ -543,6 +557,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@@ -550,8 +566,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
DoubleRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -698,13 +714,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -1528,6 +1544,13 @@ void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
}
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->InputAt(0));
+ __ EnumLength(result, map);
+}
+
+
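
DoMapEnumLength compiles down to a single EnumLength read from the map, evidently a cached count of enumerable own properties; the for-in hunk at the end of this file treats a zero length as "no cache". The exact encoding of the cache is not visible in this diff.
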
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
@@ -1574,11 +1597,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ tst(object, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- __ Assert(eq, "Trying to get date field from non-date.");
-#endif
+ DeoptimizeIf(ne, instr->environment());
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1642,6 +1664,68 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register left_reg = ToRegister(left);
+ Operand right_op = (right->IsRegister() || right->IsConstantOperand())
+ ? ToOperand(right)
+ : Operand(EmitLoadRegister(right, ip));
+ Register result_reg = ToRegister(instr->result());
+ __ cmp(left_reg, right_op);
+ if (!result_reg.is(left_reg)) {
+ __ mov(result_reg, left_reg, LeaveCC, condition);
+ }
+ __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister left_reg = ToDoubleRegister(left);
+ DoubleRegister right_reg = ToDoubleRegister(right);
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Label check_nan_left, check_zero, return_left, return_right, done;
+ __ VFPCompareAndSetFlags(left_reg, right_reg);
+ __ b(vs, &check_nan_left);
+ __ b(eq, &check_zero);
+ __ b(condition, &return_left);
+ __ b(al, &return_right);
+
+ __ bind(&check_zero);
+ __ VFPCompareAndSetFlags(left_reg, 0.0);
+ __ b(ne, &return_left); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ // We could use a single 'vorr' instruction here if we had NEON support.
+ __ vneg(left_reg, left_reg);
+ __ vsub(result_reg, left_reg, right_reg);
+ __ vneg(result_reg, result_reg);
+ } else {
+ // Since we operate on +0 and/or -0, vadd and vand have the same effect;
+ // the decision for vadd is easy because vand is a NEON instruction.
+ __ vadd(result_reg, left_reg, right_reg);
+ }
+ __ b(al, &done);
+
+ __ bind(&check_nan_left);
+ __ VFPCompareAndSetFlags(left_reg, left_reg);
+ __ b(vs, &return_left); // left == NaN.
+ __ bind(&return_right);
+ if (!right_reg.is(result_reg)) {
+ __ vmov(result_reg, right_reg);
+ }
+ __ b(al, &done);
+
+ __ bind(&return_left);
+ if (!left_reg.is(result_reg)) {
+ __ vmov(result_reg, left_reg);
+ }
+ __ bind(&done);
+ }
+}
+
+
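
The double path above is careful about two IEEE-754 corners: NaN (an unordered compare falls through to the NaN checks, returning whichever operand is NaN) and signed zeros, which an ordinary compare cannot distinguish. For min it computes -((-left) - right) and for max left + right, which produce -0 and +0 with exactly the right sign rules. A quick standalone check of those identities, assuming IEEE-754 doubles and the default round-to-nearest mode:

    #include <cassert>
    #include <cmath>

    int main() {
      double pz = 0.0, nz = -0.0;
      // max via left + right: the sum is -0 only when both inputs are -0.
      assert(!std::signbit(pz + nz));
      assert(!std::signbit(nz + pz));
      assert(std::signbit(nz + nz));
      // min via -((-left) - right): -0 whenever either input is -0.
      assert(std::signbit(-((-pz) - nz)));
      assert(std::signbit(-((-nz) - pz)));
      assert(std::signbit(-((-nz) - nz)));
      assert(!std::signbit(-((-pz) - pz)));
      return 0;
    }
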
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
@@ -2152,9 +2236,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AbortIfNotString(input);
__ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -2390,12 +2472,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(r4));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 4;
+ static const int kAdditionalDelta = 5;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
+ // The mov above can generate one or two instructions. The delta was computed
+ // for two instructions, so we need to pad here in case of one instruction.
+ if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
+ ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
+ __ nop();
+ }
__ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
@@ -2574,9 +2662,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
+ type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsFound() && lookup.type() == FIELD) {
+ if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2588,7 +2676,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
+ } else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
} else {
@@ -2772,15 +2860,31 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register elements = ToRegister(instr->elements());
- Register key = EmitLoadRegister(instr->key(), scratch0());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
- // Load the result.
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- uint32_t offset = FixedArray::kHeaderSize +
- (instr->additional_index() << kPointerSizeLog2);
- __ ldr(result, FieldMemOperand(scratch, offset));
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ Register key = EmitLoadRegister(instr->key(), scratch0());
+ // Even though the HLoadKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ add(scratch, elements,
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ ldr(result, FieldMemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2804,8 +2908,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
DwVfpRegister result = ToDoubleRegister(instr->result());
Register scratch = scratch0();
- int shift_size =
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
int constant_key = 0;
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
@@ -2817,14 +2922,15 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) << shift_size) +
+ ? Operand(((constant_key + instr->additional_index()) <<
+ element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << shift_size)));
+ (instr->additional_index() << element_size_shift)));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2837,6 +2943,42 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset) {
+ if (additional_index != 0 && !key_is_constant) {
+ additional_index *= 1 << (element_size - shift_size);
+ __ add(scratch0(), key, Operand(additional_index));
+ }
+
+ if (key_is_constant) {
+ return MemOperand(base,
+ (constant_key << element_size) + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (shift_size >= 0) {
+ return MemOperand(base, key, LSL, shift_size);
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ return MemOperand(base, key, LSR, 1);
+ }
+ }
+
+ if (shift_size >= 0) {
+ return MemOperand(base, scratch0(), LSL, shift_size);
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ return MemOperand(base, scratch0(), LSR, 1);
+ }
+}
+
+
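
PrepareKeyedOperand centralizes addressing that was previously open-coded at each external-array site. The subtle part is the shift bookkeeping: a smi key is the index already shifted left by kSmiTagSize (1 on 32-bit V8), so the effective scale is element_size_shift minus one and can go negative for byte-sized elements, where a single LSR #1 both untags and scales. A small sketch of that arithmetic:

    #include <cassert>

    const int kSmiTagSizeSketch = 1;  // 32-bit V8 smi tag width

    int EffectiveShift(int element_size_shift, bool key_is_tagged) {
      return key_is_tagged ? element_size_shift - kSmiTagSizeSketch
                           : element_size_shift;
    }

    int main() {
      assert(EffectiveShift(2, false) == 2);   // word elements, int key: LSL #2
      assert(EffectiveShift(2, true)  == 1);   // word elements, smi key: LSL #1
      assert(EffectiveShift(0, true)  == -1);  // byte elements, smi key: LSR #1
      return 0;
    }
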
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
@@ -2852,15 +2994,17 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
- ? Operand(constant_key << shift_size)
+ ? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
@@ -2871,15 +3015,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
}
} else {
Register result = ToRegister(instr->result());
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
- MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- (constant_key << shift_size) + additional_offset)
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@@ -2899,11 +3038,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(result, mem_operand);
- __ cmp(result, Operand(0x80000000));
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(cs, instr->environment());
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ cmp(result, Operand(0x80000000));
+ DeoptimizeIf(cs, instr->environment());
+ }
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
@@ -3089,7 +3227,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
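
Loading the function from the frame instead of embedding instr->hydrogen()->closure() removes a baked-in closure constant; together with the kSelfLiteralId change in WriteTranslation above, this looks like groundwork for optimized code that is not tied to one specific closure.
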
@@ -3119,7 +3257,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
}
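
GLOBAL_INDEX becomes GLOBAL_OBJECT_INDEX as part of a rename that runs through the rest of this patch: the context formerly called the "global context" is now the "native context" (see the DoRandom and macro-assembler hunks below), and the new slot name makes explicit that this slot holds the global object itself.
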
@@ -3146,14 +3284,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(r1, function);
}
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- }
+ // Change context.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
@@ -3481,11 +3613,11 @@ void LCodeGen::DoRandom(LRandom* instr) {
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
- // r2: FixedArray of the global context's random seeds
+ // r2: FixedArray of the native context's random seeds
// Load state[0].
__ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
@@ -3775,8 +3907,40 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand) {
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
+ if (operand->IsRegister()) {
+ __ tst(ToRegister(operand), Operand(kSmiTagMask));
+ } else {
+ __ mov(ip, ToOperand(operand));
+ __ tst(ip, Operand(kSmiTagMask));
+ }
+ DeoptimizeIf(ne, environment);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->length(),
+ instr->length());
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->index(),
+ instr->index());
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ mov(ip, Operand(Smi::FromInt(constant_index)));
+ } else {
+ __ mov(ip, Operand(constant_index));
+ }
+ __ cmp(ip, ToRegister(instr->length()));
+ } else {
+ __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
+ }
DeoptimizeIf(hs, instr->environment());
}
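
The bounds check now tolerates tagged inputs: DeoptIfTaggedButNotSmi bails out on any tagged non-smi, and after that the raw cmp is still sound because smi tagging is a left shift by one, which preserves unsigned order on the non-negative indices and lengths involved (the constant-index path tags the immediate to match a tagged length). A brute-force check of that claim:

    #include <cassert>
    #include <cstdint>

    int32_t SmiTag(int32_t v) { return v << 1; }  // kSmiTagSize == 1, tag == 0

    int main() {
      for (int32_t index = 0; index < 100; ++index) {
        for (int32_t length = 0; length < 100; ++length) {
          bool raw = static_cast<uint32_t>(index) >=
                     static_cast<uint32_t>(length);
          bool tagged = static_cast<uint32_t>(SmiTag(index)) >=
                        static_cast<uint32_t>(SmiTag(length));
          assert(raw == tagged);  // the 'hs' deopt condition agrees either way
        }
      }
      return 0;
    }
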
@@ -3786,31 +3950,37 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset =
- (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
- + FixedArray::kHeaderSize;
- __ str(value, FieldMemOperand(elements, offset));
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
} else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- if (instr->additional_index() != 0) {
- __ add(scratch,
- scratch,
- Operand(instr->additional_index() << kPointerSizeLog2));
+ // Even though the HLoadKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ add(scratch, elements,
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
- __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
+ __ str(value, FieldMemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(key, store_base, Operand(offset - kHeapObjectTag));
__ RecordWrite(elements,
key,
value,
@@ -3841,9 +4011,11 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
Operand operand = key_is_constant
- ? Operand((constant_key << shift_size) +
+ ? Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@@ -3861,7 +4033,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
vs);
}
- __ vstr(value, scratch, instr->additional_index() << shift_size);
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
@@ -3881,15 +4053,18 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant ? Operand(constant_key << shift_size)
- : Operand(key, LSL, shift_size));
+ Operand operand(key_is_constant
+ ? Operand(constant_key << element_size_shift)
+ : Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
@@ -3899,16 +4074,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
} else {
Register value(ToRegister(instr->value()));
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
- MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- ((constant_key + instr->additional_index())
- << shift_size))
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -4131,12 +4300,26 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->InputAt(0);
+ LOperand* output = instr->result();
+
+ SwVfpRegister flt_scratch = double_scratch0().low();
+ __ vmov(flt_scratch, ToRegister(input));
+ __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
+}
+
+
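
Uint32ToDouble exists because a uint32 above INT32_MAX has its top bit set, so the signed conversion path would produce a negative double; vcvt_f64_u32 interprets the bits as unsigned. The failure mode in plain C++ (the signed reinterpretation is technically implementation-defined before C++20, but is two's complement on the relevant targets):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t raw = 0x80000000u;  // 2147483648
      int32_t reinterpreted = static_cast<int32_t>(raw);
      assert(static_cast<double>(raw) == 2147483648.0);  // unsigned convert
      assert(static_cast<double>(reinterpreted) < 0.0);  // signed convert: wrong
      return 0;
    }
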
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->InputAt(0),
+ SIGNED_INT32);
+ }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
@@ -4152,9 +4335,38 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->InputAt(0),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ cmp(reg, Operand(Smi::kMaxValue));
+ __ b(hi, deferred->entry());
+ __ SmiTag(reg, reg);
+ __ bind(deferred->exit());
+}
+
+
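
DoNumberTagU inlines only the cheap case: anything at most Smi::kMaxValue is smi-tagged in place, and everything larger branches to the shared deferred path with UNSIGNED_INT32 so the heap number is built via the unsigned conversion. A sketch of the decision, assuming the 32-bit smi payload of 31 bits:

    #include <cassert>
    #include <cstdint>

    const uint32_t kSmiMaxValueSketch = (1u << 30) - 1;  // 1073741823

    bool NeedsDeferredHeapNumber(uint32_t value) {
      return value > kSmiMaxValueSketch;  // mirrors 'cmp reg, kMaxValue; b hi'
    }

    int main() {
      assert(!NeedsDeferredHeapNumber(42));          // smi-tagged inline
      assert(NeedsDeferredHeapNumber(0x80000000u));  // UNSIGNED_INT32 path
      return 0;
    }
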
+void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness) {
Label slow;
- Register src = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
DoubleRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
@@ -4162,16 +4374,22 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
Label done;
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ eor(src, src, Operand(0x80000000));
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ eor(src, src, Operand(0x80000000));
+ }
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ } else {
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_u32(dbl_scratch, flt_scratch);
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+
if (FLAG_inline_new) {
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
@@ -4751,7 +4969,7 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Heap* heap = isolate()->heap();
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
@@ -4771,12 +4989,12 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
DeoptimizeIf(ne, instr->environment());
}
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ // Set up the parameters to the stub/runtime call.
+ __ LoadHeapObject(r3, literals);
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
- __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
+ __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@@ -4870,8 +5088,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
// We only support little endian mode...
- int32_t value_low = value & 0xFFFFFFFF;
- int32_t value_high = value >> 32;
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
+ int32_t value_high = static_cast<int32_t>(value >> 32);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ mov(r2, Operand(value_low));
@@ -4984,15 +5202,13 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
- // r3 = JS function.
// r7 = literals array.
// r1 = regexp literal.
// r0 = regexp literal clone.
// r2 and r4-r6 are used as temporaries.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(r7, instr->hydrogen()->literals());
__ ldr(r1, FieldMemOperand(r7, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
@@ -5356,13 +5572,24 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
- __ LoadInstanceDescriptors(map, result);
+ Register scratch = ToRegister(instr->scratch());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ cmp(result, Operand(Smi::FromInt(0)));
+ __ b(ne, &load_cache);
+ __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ jmp(&done);
+
+ __ bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result, scratch);
__ ldr(result,
- FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand(0));
DeoptimizeIf(eq, instr->environment());
+
+ __ bind(&done);
}
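
The for-in fast path now consults the enum length cached on the map first: a zero length short-circuits to the canonical empty fixed array instead of loading descriptors, and the non-empty path reads the renamed kEnumCacheOffset, with a null cache entry still deoptimizing. This is what the scratch register allocated by the chunk builder earlier in this patch is for.
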
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index f35c69b8a3..fd4a2a5ca7 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -43,26 +43,25 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
- Zone* zone)
- : chunk_(chunk),
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : zone_(info->zone()),
+ chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4, zone),
- deopt_jump_table_(4, zone),
- deoptimization_literals_(8, zone),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- translations_(zone),
- deferred_(8, zone),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
- safepoints_(zone),
- zone_(zone),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -115,7 +114,12 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -133,6 +137,15 @@ class LCodeGen BASE_EMBEDDED {
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
+ MemOperand PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset);
+
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -158,7 +171,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -178,7 +191,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
void Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -244,7 +257,8 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -289,6 +303,10 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
+ void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -350,7 +368,8 @@ class LCodeGen BASE_EMBEDDED {
void EnsureSpaceForLazyDeopt();
- LChunk* const chunk_;
+ Zone* zone_;
+ LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -372,8 +391,6 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
- Zone* zone_;
-
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 933399e00f..2a677be525 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -137,7 +137,19 @@ int MacroAssembler::CallSize(
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
+ if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(
+ Address target, RelocInfo::Mode rmode, Condition cond) {
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ intptr_t immediate = reinterpret_cast<intptr_t>(target);
+ if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
size += kInstrSize;
}
return size;
@@ -179,7 +191,7 @@ void MacroAssembler::Call(Address target,
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond) {
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
@@ -187,12 +199,12 @@ int MacroAssembler::CallSize(Handle<Code> code,
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond) {
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
@@ -265,8 +277,8 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
if (!dst.is(src)) {
vmov(dst, src);
}
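
Several VFP3 gates in this file relax to VFP2, here and in Vmov, ConvertToInt32, EmitVFPTruncate, EmitECMATruncate and StoreNumberToDoubleElements below. The operations involved, register moves and f64/s32 conversions, appear to be VFP2-level features, so requiring VFP3 was stricter than necessary and this widens support to pre-VFPv3 cores.
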
@@ -276,12 +288,12 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
- !src2.must_use_constant_pool() &&
+ !src2.must_use_constant_pool(this) &&
src2.immediate() == 0) {
mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
- } else if (!src2.is_single_instruction() &&
- !src2.must_use_constant_pool() &&
+ } else if (!src2.is_single_instruction(this) &&
+ !src2.must_use_constant_pool(this) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
@@ -296,7 +308,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
@@ -311,7 +323,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
@@ -339,7 +351,7 @@ void MacroAssembler::Bfi(Register dst,
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
@@ -353,7 +365,7 @@ void MacroAssembler::Bfi(Register dst,
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
} else {
@@ -364,7 +376,7 @@ void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
@@ -672,7 +684,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
// Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
@@ -714,7 +726,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
// Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
@@ -778,7 +790,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@@ -930,6 +942,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -1338,31 +1351,32 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(ne, "we should not have an empty lexical context");
#endif
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
+ // Read the first word and compare to the native_context_map.
ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
+ Check(eq, "JSGlobalObject::native_context should be a native context.");
pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
cmp(scratch, Operand(ip));
b(eq, &same_contexts);
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
@@ -1374,13 +1388,13 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(ne, "JSGlobalProxy::context() should not be null.");
ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
+ Check(eq, "JSGlobalObject::native_context should be a native context.");
// Restore ip is not needed. ip is reloaded below.
pop(holder_reg); // Restore holder.
// Restore ip to holder's context.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
}
// Check that the security token in the calling global object is
@@ -1967,7 +1981,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
@@ -1984,7 +1998,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
vstr(d0, scratch1, 0);
} else {
str(mantissa_reg, MemOperand(scratch1, 0));
@@ -2135,7 +2149,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond);
}
@@ -2331,8 +2345,8 @@ void MacroAssembler::ConvertToInt32(Register source,
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
vcvt_s32_f64(double_scratch.low(), double_scratch);
@@ -2427,8 +2441,8 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Register scratch1,
Register scratch2,
CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
Register prev_fpscr = scratch1;
Register scratch = scratch2;
@@ -2546,7 +2560,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
Register scratch,
Register input_high,
Register input_low) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
ASSERT(!input_high.is(result));
ASSERT(!input_low.is(result));
ASSERT(!input_low.is(input_high));
@@ -2585,7 +2599,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
@@ -2703,7 +2717,8 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ldr(target,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
ldr(target, FieldMemOperand(target,
@@ -2869,8 +2884,9 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ ldr(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
ldr(scratch,
@@ -2916,11 +2932,12 @@ void MacroAssembler::LoadInitialArrayMap(
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ ldr(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
ldr(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
ldr(function, MemOperand(function, Context::SlotOffset(index)));
}
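This hunk shows the patch's central rename in miniature: the per-isolate "global context" becomes the "native context", reached from the current context through the global object slot. Restated as plain pointer chasing over hypothetical stand-in structs (all type names and the slot count are our simplifications, not V8's real layouts):

struct Function;
struct NativeContext { Function* slots[64]; };  // assumed slot count
struct GlobalObject { NativeContext* native_context; };
struct Context { GlobalObject* global_object; };

// The three ldr instructions above, one dereference each.
Function* LoadGlobalFunctionSketch(const Context* cp, int index) {
  GlobalObject* global = cp->global_object;        // GLOBAL_OBJECT_INDEX slot
  NativeContext* native = global->native_context;  // kNativeContextOffset
  return native->slots[index];                     // Context::SlotOffset(index)
}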
@@ -3332,6 +3349,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
} else {
@@ -3342,6 +3360,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
ASSERT(!dreg1.is(d1));
@@ -3360,6 +3379,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
Move(r0, reg);
@@ -3664,67 +3684,80 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// In 0-255 range, round and truncate.
bind(&in_bounds);
- Vmov(temp_double_reg, 0.5);
- vadd(temp_double_reg, input_reg, temp_double_reg);
- vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
- vmov(result_reg, temp_double_reg.low());
+ // Save FPSCR.
+ vmrs(ip);
+ // Set rounding mode to round to the nearest integer by clearing bits[23:22].
+ bic(result_reg, ip, Operand(kVFPRoundingModeMask));
+ vmsr(result_reg);
+ vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
+ vmov(result_reg, input_reg.low());
+ // Restore FPSCR.
+ vmsr(ip);
bind(&done);
}
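The rewritten clamp replaces the add-0.5-and-truncate rounding with true round-to-nearest: it saves the FPSCR, clears the RMode bits, lets vcvt round, then restores the register. A portable sketch of the same save/set/convert/restore pattern via the standard <cfenv> API (the function name is ours; the value is assumed to be already within [0, 255] from the preceding bounds checks):

#include <cfenv>
#include <cmath>

int RoundToNearestInRange(double value) {
  const int saved_mode = std::fegetround();  // vmrs: save rounding state
  std::fesetround(FE_TONEAREST);             // the cleared RMode bits
  int result = static_cast<int>(std::nearbyint(value));
  std::fesetround(saved_mode);               // vmsr: restore FPSCR
  return result;
}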
void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- ldr(descriptors,
- FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi);
+ Register descriptors,
+ Register scratch) {
+ Register temp = descriptors;
+ ldr(temp, FieldMemOperand(map, Map::kTransitionsOrBackPointerOffset));
+
+ Label ok, fail;
+ CheckMap(temp,
+ scratch,
+ isolate()->factory()->fixed_array_map(),
+ &fail,
+ DONT_DO_SMI_CHECK);
+ ldr(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset));
+ jmp(&ok);
+ bind(&fail);
mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
- bind(&not_smi);
+ bind(&ok);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
}
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
- // Preload a couple of values used in the loop.
Register empty_fixed_array_value = r6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r7;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- mov(r1, r0);
- bind(&next);
+ Label next, start;
+ mov(r2, r0);
- // Check that there are no elements. Register r1 contains the
- // current JS object we've reached through the prototype chain.
- ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- cmp(r2, empty_fixed_array_value);
- b(ne, call_runtime);
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r2 for the subsequent
- // prototype load.
- ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(r3, call_runtime);
+ EnumLength(r3, r1);
+ cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
+ b(eq, call_runtime);
- // Check that there is an enum cache in the non-empty instance
- // descriptors (r3). This is the case if the next enumeration
- // index field does not contain a smi.
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(r3, call_runtime);
+ jmp(&start);
+
+ bind(&next);
+ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- cmp(r1, r0);
- b(eq, &check_prototype);
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- cmp(r3, empty_fixed_array_value);
+ EnumLength(r3, r1);
+ cmp(r3, Operand(Smi::FromInt(0)));
+ b(ne, call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register r2 contains the current JS
+ // object we've reached through the prototype chain.
+ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
+ cmp(r2, empty_fixed_array_value);
b(ne, call_runtime);
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
- ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
- cmp(r1, null_value);
+ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
+ cmp(r2, null_value);
b(ne, &next);
}
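For orientation, here is the control flow of the rewritten CheckEnumCache as plain C++ over hypothetical stand-in structs (Map, Object, and the sentinel value are our simplifications, not V8's layouts): bail out to the runtime when the receiver's enum length is uninitialized, then walk the prototype chain requiring an empty enum cache on every object except the receiver and empty elements on all of them.

struct Map;
struct Object { const Map* map; bool elements_empty; };
struct Map { int enum_length; const Object* prototype; };

const int kInvalidEnumCache = -1;  // assumed sentinel for the sketch

// true: the for-in enum cache may be used; false: call the runtime.
bool CanUseEnumCache(const Object* receiver, const Object* null_value) {
  if (receiver->map->enum_length == kInvalidEnumCache) return false;
  for (const Object* current = receiver;;) {
    if (current != receiver && current->map->enum_length != 0) return false;
    if (!current->elements_empty) return false;
    current = current->map->prototype;
    if (current == null_value) return true;  // reached the end of the chain
  }
}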
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 6b7d116357..8eb97125ea 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -110,17 +110,18 @@ class MacroAssembler: public Assembler {
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
- static int CallSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
+ int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ static int CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- Condition cond = al);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Ret(Condition cond = al);
@@ -499,8 +500,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -1268,7 +1269,10 @@ class MacroAssembler: public Assembler {
DoubleRegister temp_double_reg);
- void LoadInstanceDescriptors(Register map, Register descriptors);
+ void LoadInstanceDescriptors(Register map,
+ Register descriptors,
+ Register scratch);
+ void EnumLength(Register dst, Register map);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1376,7 +1380,7 @@ inline MemOperand ContextOperand(Register context, int index) {
inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
+ return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 9bebb4d406..f723fa212f 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -35,14 +35,7 @@ namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM();
- virtual ~RegExpMacroAssemblerARM();
-};
-
-#else // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 629c209ea2..a057de58cc 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -276,7 +276,7 @@ void ArmDebugger::Debug() {
// make them invisible to all commands.
UndoBreakpoints();
- while (!done) {
+ while (!done && !sim_->has_bad_pc()) {
if (last_pc != sim_->get_pc()) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -945,73 +945,31 @@ unsigned int Simulator::get_s_register(int sreg) const {
}
-void Simulator::set_s_register_from_float(int sreg, const float flt) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the single precision floating point value
- // into the unsigned integer element of vfp_register[] given by index=sreg.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &flt, sizeof(vfp_register[0]));
- memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
-}
+template<class InputType, int register_size>
+void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
+ ASSERT(reg_index >= 0);
+ if (register_size == 1) ASSERT(reg_index < num_s_registers);
+ if (register_size == 2) ASSERT(reg_index < num_d_registers);
-
-void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the integer value into the unsigned integer element of
- // vfp_register[] given by index=sreg.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &sint, sizeof(vfp_register[0]));
- memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
+ char buffer[register_size * sizeof(vfp_register[0])];
+ memcpy(buffer, &value, register_size * sizeof(vfp_register[0]));
+ memcpy(&vfp_register[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_register[0]));
}
-void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
- // Read the bits from the double precision floating point value into the two
- // consecutive unsigned integer elements of vfp_register[] given by index
- // 2*sreg and 2*sreg+1.
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
- memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-}
+template<class ReturnType, int register_size>
+ReturnType Simulator::GetFromVFPRegister(int reg_index) {
+ ASSERT(reg_index >= 0);
+ if (register_size == 1) ASSERT(reg_index < num_s_registers);
+ if (register_size == 2) ASSERT(reg_index < num_d_registers);
-
-float Simulator::get_float_from_s_register(int sreg) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
-
- float sm_val = 0.0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the single precision floating point value and return it.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
- memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
- return(sm_val);
-}
-
-
-int Simulator::get_sinteger_from_s_register(int sreg) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
-
- int sm_val = 0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the single precision floating point value and return it.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
- memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
- return(sm_val);
-}
-
-
-double Simulator::get_double_from_d_register(int dreg) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
-
- double dm_val = 0.0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the double precision floating point value and return it.
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
- return(dm_val);
+ ReturnType value = 0;
+ char buffer[register_size * sizeof(vfp_register[0])];
+ memcpy(buffer, &vfp_register[register_size * reg_index],
+ register_size * sizeof(vfp_register[0]));
+ memcpy(&value, buffer, register_size * sizeof(vfp_register[0]));
+ return value;
}
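The two templates above fold six hand-written accessors into one read path and one write path. The underlying idiom, as a self-contained sketch (names and the bare register array are stand-ins): move the object representation with memcpy instead of pointer casts, which stays well-defined for trivially copyable types; the word count is 1 for s-registers and 2 for d-registers.

#include <cstdint>
#include <cstring>

uint32_t vfp_register[32];  // stand-in for the simulator's VFP file

template <class T, int kWords>
void SetVFP(int index, const T& value) {
  std::memcpy(&vfp_register[index * kWords], &value, sizeof(T));
}

template <class T, int kWords>
T GetVFP(int index) {
  T value;
  std::memcpy(&value, &vfp_register[index * kWords], sizeof(T));
  return value;
}

// Usage: SetVFP<double, 2>(0, 1.5); double d = GetVFP<double, 2>(0);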
@@ -2408,7 +2366,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "cmn'cond 'rn, 'imm");
alu_out = rn_val + shifter_operand;
SetNZFlags(alu_out);
- SetCFlag(!CarryFrom(rn_val, shifter_operand));
+ SetCFlag(CarryFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
} else {
// Other instructions matching this pattern are handled in the
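The one-line flag change above fixes a semantics bug: CMN sets flags as for rn + shifter_operand, and on ARM the C flag of an addition is the carry out of the unsigned add itself. The old negation is the rule for subtraction (CMP), where C means "no borrow". A minimal predicate showing the corrected behavior:

#include <cstdint>

// Carry out of a 32-bit unsigned addition: the sum wrapped past 2^32.
bool CarryFromAdd(uint32_t left, uint32_t right) {
  return static_cast<uint32_t>(left + right) < left;
}
// CarryFromAdd(0xFFFFFFFFu, 1u) == true; CarryFromAdd(1u, 2u) == false.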
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index d1cad15bd0..abc91bbc42 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -163,12 +163,30 @@ class Simulator {
// Support for VFP.
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
- void set_d_register_from_double(int dreg, const double& dbl);
- double get_double_from_d_register(int dreg);
- void set_s_register_from_float(int sreg, const float dbl);
- float get_float_from_s_register(int sreg);
- void set_s_register_from_sinteger(int reg, const int value);
- int get_sinteger_from_s_register(int reg);
+
+ void set_d_register_from_double(int dreg, const double& dbl) {
+ SetVFPRegister<double, 2>(dreg, dbl);
+ }
+
+ double get_double_from_d_register(int dreg) {
+ return GetFromVFPRegister<double, 2>(dreg);
+ }
+
+ void set_s_register_from_float(int sreg, const float flt) {
+ SetVFPRegister<float, 1>(sreg, flt);
+ }
+
+ float get_float_from_s_register(int sreg) {
+ return GetFromVFPRegister<float, 1>(sreg);
+ }
+
+ void set_s_register_from_sinteger(int sreg, const int sint) {
+ SetVFPRegister<int, 1>(sreg, sint);
+ }
+
+ int get_sinteger_from_s_register(int sreg) {
+ return GetFromVFPRegister<int, 1>(sreg);
+ }
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
@@ -332,6 +350,12 @@ class Simulator {
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
+ template<class ReturnType, int register_size>
+ ReturnType GetFromVFPRegister(int reg_index);
+
+ template<class InputType, int register_size>
+ void SetVFPRegister(int reg_index, const InputType& value);
+
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index dd9de23fa4..66714f8e44 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -283,11 +283,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
__ ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ __ ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
__ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ ldr(prototype,
@@ -304,13 +305,14 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ Move(ip, isolate->global());
+ __ ldr(prototype,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ Move(ip, isolate->global_object());
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(isolate->global_context()->get(index)));
+ JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -986,8 +988,8 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register fval,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
@@ -1230,6 +1232,45 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch1));
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch1;
+ __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3;
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ cmp(scratch2, Operand(callback));
+ __ b(ne, miss);
+}
+
+
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1237,6 +1278,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -1247,6 +1289,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ }
+
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
__ push(receiver);
@@ -1303,7 +1350,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
@@ -1377,7 +1424,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), r0, holder_reg,
@@ -1526,7 +1573,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2071,7 +2118,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2089,11 +2136,11 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(VFP3)) {
+ if (!CpuFeatures::IsSupported(VFP2)) {
return Handle<Code>::null();
}
- CpuFeatures::Scope scope_vfp3(VFP3);
+ CpuFeatures::Scope scope_vfp2(VFP2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2217,7 +2264,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2316,7 +2363,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2533,7 +2580,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2591,7 +2638,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2619,14 +2666,17 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2634,19 +2684,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- lr : return address
// -----------------------------------
Label miss;
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r1, &miss);
+ CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
- // Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(r1); // receiver
__ mov(ip, Operand(callback)); // callback info
@@ -2664,37 +2707,40 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<JSObject> receiver,
- Handle<JSFunction> setter,
- Handle<String> name) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
-
{
- FrameScope scope(masm(), StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(r0);
- // Call the JavaScript getter with the receiver and the value on the stack.
- __ Push(r1, r0);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ Push(r1, r0);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
// We have to return the passed value, not the return value of the setter.
__ pop(r0);
@@ -2703,13 +2749,38 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r1, &miss);
+ CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
+
+ GenerateStoreViaSetter(masm(), setter);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2754,7 +2825,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2800,7 +2871,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2835,7 +2906,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2855,7 +2926,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2870,16 +2941,53 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -- lr : return address
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+ GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, r5, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(r0);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
Handle<String> name,
Handle<JSObject> receiver,
@@ -2896,25 +3004,13 @@ Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
__ JumpIfSmi(r0, &miss);
CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
- {
- FrameScope scope(masm(), StackFrame::INTERNAL);
-
- // Call the JavaScript getter with the receiver on the stack.
- __ push(r0);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
+ GenerateLoadViaGetter(masm(), getter);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2934,7 +3030,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2956,7 +3052,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2998,7 +3094,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -3021,7 +3117,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -3041,12 +3137,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(r0, Operand(name));
__ b(ne, &miss);
- GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+ GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r5, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3071,7 +3167,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -3097,7 +3193,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -3118,7 +3214,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3144,7 +3240,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3169,7 +3265,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
__ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3189,7 +3285,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3217,7 +3313,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3256,7 +3352,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -3280,7 +3378,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3319,7 +3417,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3549,8 +3647,8 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch1,
DwVfpRegister double_scratch0,
Label* fail) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
@@ -3636,8 +3734,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_FLOAT_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
@@ -3645,8 +3743,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case EXTERNAL_DOUBLE_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 2));
__ vldr(d0, r2, 0);
} else {
@@ -3697,8 +3795,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
__ sub(r3, r0, Operand(kHeapObjectTag));
@@ -3725,8 +3823,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
@@ -3789,8 +3887,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
@@ -3857,8 +3955,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret();
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
@@ -3983,7 +4081,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
@@ -3993,7 +4091,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
d0, r6, r7, // These are: double_dst, dst1, dst2.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vstr(d0, r3, 0);
} else {
__ str(r6, MemOperand(r3, 0));
@@ -4028,8 +4126,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index a1cc5b6a7d..1cedd8d476 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -777,78 +777,103 @@ function ArraySort(comparefn) {
}
};
- var QuickSort = function QuickSort(a, from, to) {
- // Insertion sort is faster for short arrays.
- if (to - from <= 10) {
- InsertionSort(a, from, to);
- return;
+ var GetThirdIndex = function(a, from, to) {
+ var t_array = [];
+ // Use both 'from' and 'to' to determine the pivot candidates.
+ var increment = 200 + ((to - from) & 15);
+ for (var i = from + 1; i < to - 1; i += increment) {
+ t_array.push([i, a[i]]);
}
- // Find a pivot as the median of first, last and middle element.
- var v0 = a[from];
- var v1 = a[to - 1];
- var middle_index = from + ((to - from) >> 1);
- var v2 = a[middle_index];
- var c01 = %_CallFunction(receiver, v0, v1, comparefn);
- if (c01 > 0) {
- // v1 < v0, so swap them.
- var tmp = v0;
- v0 = v1;
- v1 = tmp;
- } // v0 <= v1.
- var c02 = %_CallFunction(receiver, v0, v2, comparefn);
- if (c02 >= 0) {
- // v2 <= v0 <= v1.
- var tmp = v0;
- v0 = v2;
- v2 = v1;
- v1 = tmp;
- } else {
- // v0 <= v1 && v0 < v2
- var c12 = %_CallFunction(receiver, v1, v2, comparefn);
- if (c12 > 0) {
- // v0 <= v2 < v1
- var tmp = v1;
- v1 = v2;
- v2 = tmp;
+ t_array.sort(function(a, b) {
+ return %_CallFunction(receiver, a[1], b[1], comparefn) } );
+ var third_index = t_array[t_array.length >> 1][0];
+ return third_index;
+ }
+
+ var QuickSort = function QuickSort(a, from, to) {
+ var third_index = 0;
+ while (true) {
+ // Insertion sort is faster for short arrays.
+ if (to - from <= 10) {
+ InsertionSort(a, from, to);
+ return;
}
- }
- // v0 <= v1 <= v2
- a[from] = v0;
- a[to - 1] = v2;
- var pivot = v1;
- var low_end = from + 1; // Upper bound of elements lower than pivot.
- var high_start = to - 1; // Lower bound of elements greater than pivot.
- a[middle_index] = a[low_end];
- a[low_end] = pivot;
-
- // From low_end to i are elements equal to pivot.
- // From i to high_start are elements that haven't been compared yet.
- partition: for (var i = low_end + 1; i < high_start; i++) {
- var element = a[i];
- var order = %_CallFunction(receiver, element, pivot, comparefn);
- if (order < 0) {
- a[i] = a[low_end];
- a[low_end] = element;
- low_end++;
- } else if (order > 0) {
- do {
- high_start--;
- if (high_start == i) break partition;
- var top_elem = a[high_start];
- order = %_CallFunction(receiver, top_elem, pivot, comparefn);
- } while (order > 0);
- a[i] = a[high_start];
- a[high_start] = element;
+ if (to - from > 1000) {
+ third_index = GetThirdIndex(a, from, to);
+ } else {
+ third_index = from + ((to - from) >> 1);
+ }
+ // Find a pivot as the median of first, last and middle element.
+ var v0 = a[from];
+ var v1 = a[to - 1];
+ var v2 = a[third_index];
+ var c01 = %_CallFunction(receiver, v0, v1, comparefn);
+ if (c01 > 0) {
+ // v1 < v0, so swap them.
+ var tmp = v0;
+ v0 = v1;
+ v1 = tmp;
+ } // v0 <= v1.
+ var c02 = %_CallFunction(receiver, v0, v2, comparefn);
+ if (c02 >= 0) {
+ // v2 <= v0 <= v1.
+ var tmp = v0;
+ v0 = v2;
+ v2 = v1;
+ v1 = tmp;
+ } else {
+ // v0 <= v1 && v0 < v2
+ var c12 = %_CallFunction(receiver, v1, v2, comparefn);
+ if (c12 > 0) {
+ // v0 <= v2 < v1
+ var tmp = v1;
+ v1 = v2;
+ v2 = tmp;
+ }
+ }
+ // v0 <= v1 <= v2
+ a[from] = v0;
+ a[to - 1] = v2;
+ var pivot = v1;
+ var low_end = from + 1; // Upper bound of elements lower than pivot.
+ var high_start = to - 1; // Lower bound of elements greater than pivot.
+ a[third_index] = a[low_end];
+ a[low_end] = pivot;
+
+ // From low_end to i are elements equal to pivot.
+ // From i to high_start are elements that haven't been compared yet.
+ partition: for (var i = low_end + 1; i < high_start; i++) {
+ var element = a[i];
+ var order = %_CallFunction(receiver, element, pivot, comparefn);
if (order < 0) {
- element = a[i];
a[i] = a[low_end];
a[low_end] = element;
low_end++;
+ } else if (order > 0) {
+ do {
+ high_start--;
+ if (high_start == i) break partition;
+ var top_elem = a[high_start];
+ order = %_CallFunction(receiver, top_elem, pivot, comparefn);
+ } while (order > 0);
+ a[i] = a[high_start];
+ a[high_start] = element;
+ if (order < 0) {
+ element = a[i];
+ a[i] = a[low_end];
+ a[low_end] = element;
+ low_end++;
+ }
}
}
+ if (to - high_start < low_end - from) {
+ QuickSort(a, high_start, to);
+ to = low_end;
+ } else {
+ QuickSort(a, from, low_end);
+ from = high_start;
+ }
}
- QuickSort(a, from, low_end);
- QuickSort(a, high_start, to);
};
// Copy elements in the range 0..length from obj's prototype chain
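Two behavioral changes hide in the ArraySort rewrite above: ranges longer than 1000 elements now choose the pivot by sampling roughly every 200th element (GetThirdIndex) instead of taking the plain middle, and one of the two recursive calls becomes iteration, recursing only into the smaller partition so the stack depth stays O(log n) even for adversarial comparators. A compact C++ sketch of that recursion shape (std::partition stands in for the hand-written low_end/high_start loop, and std::sort replaces the insertion-sort cutoff):

#include <algorithm>
#include <vector>

void QuickSortSketch(std::vector<int>& a, int from, int to) {
  while (to - from > 10) {
    int pivot = a[from + (to - from) / 2];
    // Three-way split: [from, low_end) < pivot, equals in the middle,
    // [high_start, to) > pivot. The equal run is already in place.
    auto lo = std::partition(a.begin() + from, a.begin() + to,
                             [pivot](int x) { return x < pivot; });
    auto hi = std::partition(lo, a.begin() + to,
                             [pivot](int x) { return x == pivot; });
    int low_end = static_cast<int>(lo - a.begin());
    int high_start = static_cast<int>(hi - a.begin());
    if (to - high_start < low_end - from) {
      QuickSortSketch(a, high_start, to);  // smaller side: recurse
      to = low_end;                        // larger side: iterate
    } else {
      QuickSortSketch(a, from, low_end);
      from = high_start;
    }
  }
  std::sort(a.begin() + from, a.begin() + to);  // short-range fallback
}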
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index d4c49ddd45..a58f77f74b 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -141,7 +141,7 @@ int Label::pos() const {
// an iteration.
//
// The encoding relies on the fact that there are fewer than 14
-// different non-compactly encoded relocation modes.
+// different relocation modes using standard non-compact encoding.
//
// The first byte of a relocation record has a tag in its low 2 bits:
// Here are the record schemes, depending on the low tag and optional higher
@@ -173,7 +173,9 @@ int Label::pos() const {
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
-// 1101: not used (would allow one more relocation mode to be added)
+// 1101: constant pool. Used on ARM only for now.
+// The format is: 11 1101 11
+// signed int (size of the constant pool).
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
@@ -194,7 +196,7 @@ int Label::pos() const {
// dropped, and last non-zero chunk tagged with 1.)
-const int kMaxRelocModes = 14;
+const int kMaxStandardNonCompactModes = 14;
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
@@ -228,6 +230,9 @@ const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
+const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
+const int kConstPoolTag = 3;
+
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
@@ -285,6 +290,15 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
}
+void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
+ WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
+ for (int i = 0; i < kIntSize; i++) {
+ *--pos_ = static_cast<byte>(data);
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ data = data >> kBitsPerByte;
+ }
+}
+
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
@@ -300,8 +314,8 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
byte* begin_pos = pos_;
#endif
ASSERT(rinfo->pc() - last_pc_ >= 0);
- ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
- kMaxRelocModes);
+ ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
+ <= kMaxStandardNonCompactModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
@@ -347,6 +361,9 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
+ } else if (RelocInfo::IsConstPool(rmode)) {
+ WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+ WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
@@ -397,6 +414,15 @@ void RelocIterator::AdvanceReadId() {
}
+void RelocIterator::AdvanceReadConstPoolData() {
+ int x = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+ }
+ rinfo_.data_ = x;
+}
+
+
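The payload of the new record type is just the constant pool size: a signed int written low byte first with arithmetic right shifts (as the writer's comment notes) and reassembled by the iterator above. A standalone round-trip sketch of that byte-level encoding (buffer order simplified; the real writer emits through a decrementing pos_ pointer):

#include <cassert>
#include <cstdint>

void WriteIntLE(uint8_t* out, int32_t data) {
  for (int i = 0; i < 4; i++) {
    out[i] = static_cast<uint8_t>(data);
    data >>= 8;  // arithmetic shift preserves the sign bits
  }
}

int32_t ReadIntLE(const uint8_t* in) {
  uint32_t x = 0;
  for (int i = 0; i < 4; i++) {
    x |= static_cast<uint32_t>(in[i]) << (i * 8);
  }
  return static_cast<int32_t>(x);  // negative sizes round-trip intact
}

int main() {
  uint8_t buf[4];
  WriteIntLE(buf, -12345);
  assert(ReadIntLE(buf) == -12345);
}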
void RelocIterator::AdvanceReadPosition() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
@@ -500,8 +526,7 @@ void RelocIterator::next() {
ASSERT(tag == kDefaultTag);
int extra_tag = GetExtraTag();
if (extra_tag == kPCJumpExtraTag) {
- int top_tag = GetTopTag();
- if (top_tag == kVariableLengthPCJumpTopTag) {
+ if (GetTopTag() == kVariableLengthPCJumpTopTag) {
AdvanceReadVariableLengthPCJump();
} else {
AdvanceReadPC();
@@ -531,6 +556,13 @@ void RelocIterator::next() {
}
Advance(kIntptrSize);
}
+ } else if ((extra_tag == kConstPoolExtraTag) &&
+ (GetTopTag() == kConstPoolTag)) {
+ if (SetMode(RelocInfo::CONST_POOL)) {
+ AdvanceReadConstPoolData();
+ return;
+ }
+ Advance(kIntSize);
} else {
AdvanceReadPC();
int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
@@ -613,6 +645,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
return "internal reference";
+ case RelocInfo::CONST_POOL:
+ return "constant pool";
case RelocInfo::DEBUG_BREAK_SLOT:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
@@ -698,6 +732,7 @@ void RelocInfo::Verify() {
case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
+ case CONST_POOL:
case DEBUG_BREAK_SLOT:
case NONE:
break;
@@ -1057,7 +1092,7 @@ ExternalReference ExternalReference::re_word_character_map() {
ExternalReference ExternalReference::address_of_static_offsets_vector(
Isolate* isolate) {
return ExternalReference(
- OffsetsVector::static_offsets_vector_address(isolate));
+ reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 619c69c4b2..cb5a72d755 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -51,7 +51,6 @@ class ApiFunction;
namespace internal {
struct StatsCounter;
-const unsigned kNoASTId = -1;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
@@ -204,14 +203,19 @@ class RelocInfo BASE_EMBEDDED {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
+ // Marks a constant pool. Only used on ARM.
+ // It uses a custom noncompact encoding.
+ CONST_POOL,
+
// add more as needed
// Pseudo-types
- NUMBER_OF_MODES, // There are at most 14 modes with noncompact encoding.
+ NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
NONE, // never recorded
LAST_CODE_ENUM = DEBUG_BREAK,
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
- LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
+ LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
+ LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
};
@@ -240,6 +244,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsComment(Mode mode) {
return mode == COMMENT;
}
+ static inline bool IsConstPool(Mode mode) {
+ return mode == CONST_POOL;
+ }
static inline bool IsPosition(Mode mode) {
return mode == POSITION || mode == STATEMENT_POSITION;
}
@@ -416,6 +423,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
+ inline void WriteExtraTaggedConstPoolData(int data);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
@@ -466,6 +474,7 @@ class RelocIterator: public Malloced {
void ReadTaggedPC();
void AdvanceReadPC();
void AdvanceReadId();
+ void AdvanceReadConstPoolData();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 9523a34358..6b68705d27 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -85,8 +85,8 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
VariableProxy::VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
- int position,
- Interface* interface)
+ Interface* interface,
+ int position)
: Expression(isolate),
name_(name),
var_(NULL),
@@ -125,7 +125,6 @@ Assignment::Assignment(Isolate* isolate,
value_(value),
pos_(pos),
binary_operation_(NULL),
- compound_load_id_(kNoNumber),
assignment_id_(GetNextId(isolate)),
block_start_(false),
block_end_(false),
@@ -156,6 +155,11 @@ bool FunctionLiteral::AllowsLazyCompilation() {
}
+bool FunctionLiteral::AllowsLazyCompilationWithoutContext() {
+ return scope()->AllowsLazyCompilationWithoutContext();
+}
+
+
int FunctionLiteral::start_position() const {
return scope()->start_position();
}
@@ -429,7 +433,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
zone);
} else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
+ oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
}
}
@@ -438,7 +442,8 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
+ TypeFeedbackId id = AssignmentFeedbackId();
+ is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
receiver_types_.Clear();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
@@ -447,24 +452,26 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
oracle->StoreReceiverTypes(this, name, &receiver_types_);
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
- } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
+ } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
+ oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
}
void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
+ TypeFeedbackId id = CountStoreFeedbackId();
+ is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
receiver_types_.Clear();
if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
- } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_.Add(
+ oracle->StoreMonomorphicReceiverType(id), zone);
+ } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
+ oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
}
@@ -498,7 +505,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
}
LookupResult lookup(type->GetIsolate());
while (true) {
- type->LookupInDescriptors(NULL, *name, &lookup);
+ type->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
case CONSTANT_FUNCTION:
@@ -513,10 +520,9 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
case INTERCEPTOR:
// We don't know the target.
return false;
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- // Perhaps something interesting is up in the prototype chain...
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
}
@@ -1027,6 +1033,14 @@ CaseClause::CaseClause(Isolate* isolate,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
+#define DONT_CACHE_NODE(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_flag(kDontOptimize); \
+ add_flag(kDontInline); \
+ add_flag(kDontSelfOptimize); \
+ add_flag(kDontCache); \
+ }
REGULAR_NODE(VariableDeclaration)
REGULAR_NODE(FunctionDeclaration)
@@ -1041,6 +1055,7 @@ REGULAR_NODE(SwitchStatement)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ObjectLiteral)
+REGULAR_NODE(RegExpLiteral)
REGULAR_NODE(Assignment)
REGULAR_NODE(Throw)
REGULAR_NODE(Property)
@@ -1057,10 +1072,13 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
+// We currently do not optimize any modules. Note in particular that module
+// instance objects associated with ModuleLiterals are allocated during
+// scope resolution, and references to them are embedded into the code.
+// That code may hence neither be cached nor re-compiled.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
-DONT_OPTIMIZE_NODE(ModuleLiteral)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
@@ -1070,15 +1088,16 @@ DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
-DONT_INLINE_NODE(FunctionLiteral)
-DONT_INLINE_NODE(RegExpLiteral) // TODO(1322): Allow materialized literals.
DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals.
+DONT_INLINE_NODE(FunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_CACHE_NODE(ModuleLiteral)
+
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {
@@ -1099,6 +1118,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
#undef DONT_OPTIMIZE_NODE
#undef DONT_INLINE_NODE
#undef DONT_SELFOPTIMIZE_NODE
+#undef DONT_CACHE_NODE
Handle<String> Literal::ToString() {
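
The visitor methods above are stamped out by tiered X-macros: every tier bumps the node count, and each stricter tier ORs in more pessimization flags, with the new DONT_CACHE_NODE tier (used only for ModuleLiteral) the strictest. A compilable sketch of the same pattern, using a simplified flag set rather than V8's AstPropertiesFlag:

    #include <cstdio>

    enum Flag { kDontOptimize = 1, kDontInline = 2, kDontSelfOptimize = 4, kDontCache = 8 };

    struct AstConstructionVisitor {
      int node_count = 0;
      unsigned flags = 0;
      void increase_node_count() { ++node_count; }
      void add_flag(Flag f) { flags |= f; }

    #define REGULAR_NODE(NodeType) \
      void Visit##NodeType() { increase_node_count(); }
    #define DONT_CACHE_NODE(NodeType) \
      void Visit##NodeType() {        \
        increase_node_count();        \
        add_flag(kDontOptimize);      \
        add_flag(kDontInline);        \
        add_flag(kDontSelfOptimize);  \
        add_flag(kDontCache);         \
      }

      REGULAR_NODE(Literal)
      DONT_CACHE_NODE(ModuleLiteral)

    #undef REGULAR_NODE
    #undef DONT_CACHE_NODE
    };

    int main() {
      AstConstructionVisitor v;
      v.VisitLiteral();
      v.VisitModuleLiteral();
      std::printf("nodes=%d flags=%#x\n", v.node_count, v.flags);  // nodes=2 flags=0xf
    }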
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 02ece7fe61..e72296cff7 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -37,7 +37,7 @@
#include "list-inl.h"
#include "runtime.h"
#include "small-pointer-list.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "token.h"
#include "utils.h"
#include "variables.h"
@@ -158,14 +158,16 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; }
+ virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+ template<class> friend class AstNodeFactory;
enum AstPropertiesFlag {
kDontInline,
kDontOptimize,
kDontSelfOptimize,
- kDontSoftInline
+ kDontSoftInline,
+ kDontCache
};
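
Moving `template<class> friend class AstNodeFactory;` into DECLARE_NODE_TYPE is what allows the dozens of per-class friend declarations to be deleted throughout the rest of this file: any node class that expands the macro automatically grants the factory access to its protected constructor. A minimal sketch of the pattern (names simplified, zone allocation omitted):

    template<class Visitor> class AstNodeFactory;

    #define DECLARE_NODE_TYPE(type)                   \
      const char* node_type() const { return #type; } \
      template<class> friend class AstNodeFactory;

    class Block {
     public:
      DECLARE_NODE_TYPE(Block)
     protected:
      explicit Block(int capacity) : capacity_(capacity) {}  // factory-only
     private:
      int capacity_;
    };

    template<class Visitor>
    class AstNodeFactory {
     public:
      Block* NewBlock(int capacity) { return new Block(capacity); }  // friend access
    };

    int main() {
      AstNodeFactory<void> factory;
      Block* block = factory.NewBlock(8);
      delete block;  // toy cleanup; V8 zone-allocates instead
    }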
@@ -194,13 +196,6 @@ class AstNode: public ZoneObject {
};
#undef DECLARE_TYPE_ENUM
- static const int kNoNumber = -1;
- static const int kFunctionEntryId = 2; // Using 0 could disguise errors.
- // This AST id identifies the point after the declarations have been
- // visited. We need it to capture the environment effects of declarations
- // that emit code (function declarations).
- static const int kDeclarationsId = 3;
-
void* operator new(size_t size, Zone* zone) {
return zone->New(static_cast<int>(size));
}
@@ -210,7 +205,7 @@ class AstNode: public ZoneObject {
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
- virtual Type node_type() const { return kInvalid; }
+ virtual Type node_type() const = 0;
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
@@ -219,9 +214,6 @@ class AstNode: public ZoneObject {
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
- virtual Declaration* AsDeclaration() { return NULL; }
- virtual Statement* AsStatement() { return NULL; }
- virtual Expression* AsExpression() { return NULL; }
virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
@@ -238,6 +230,12 @@ class AstNode: public ZoneObject {
return tmp;
}
+ // Some nodes re-use bailout IDs for type feedback.
+ static TypeFeedbackId reuse(BailoutId id) {
+ return TypeFeedbackId(id.ToInt());
+ }
+
+
private:
// Hidden to prevent accidental usage. It would have to load the
// current zone from the TLS.
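
This is the heart of the refactoring that runs through the whole file: raw int AST ids give way to the distinct wrapper types BailoutId and TypeFeedbackId, so the two id spaces can no longer be mixed silently, and reuse() above is the single sanctioned bridge for nodes whose bailout id doubles as their feedback id. A minimal sketch of such wrappers (simplified; the real classes carry a few more helpers):

    #include <cassert>

    // Two incompatible integer wrappers: cross-assignment is a compile error.
    class BailoutId {
     public:
      explicit BailoutId(int id) : id_(id) {}
      int ToInt() const { return id_; }
     private:
      int id_;
    };

    class TypeFeedbackId {
     public:
      explicit TypeFeedbackId(int id) : id_(id) {}
      int ToInt() const { return id_; }
     private:
      int id_;
    };

    // Mirrors AstNode::reuse above: the only deliberate conversion point.
    TypeFeedbackId reuse(BailoutId id) { return TypeFeedbackId(id.ToInt()); }

    int main() {
      BailoutId bailout(42);
      TypeFeedbackId feedback = reuse(bailout);
      // TypeFeedbackId bad = bailout;  // would not compile: distinct types
      assert(feedback.ToInt() == bailout.ToInt());
    }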
@@ -251,8 +249,6 @@ class Statement: public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
- virtual Statement* AsStatement() { return this; }
-
bool IsEmpty() { return AsEmptyStatement() != NULL; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -313,8 +309,6 @@ class Expression: public AstNode {
return 0;
}
- virtual Expression* AsExpression() { return this; }
-
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
@@ -355,8 +349,8 @@ class Expression: public AstNode {
return types->at(0);
}
- unsigned id() const { return id_; }
- unsigned test_id() const { return test_id_; }
+ BailoutId id() const { return id_; }
+ TypeFeedbackId test_id() const { return test_id_; }
protected:
explicit Expression(Isolate* isolate)
@@ -364,8 +358,8 @@ class Expression: public AstNode {
test_id_(GetNextId(isolate)) {}
private:
- int id_;
- int test_id_;
+ const BailoutId id_;
+ const TypeFeedbackId test_id_;
};
@@ -389,9 +383,8 @@ class BreakableStatement: public Statement {
// Testers.
bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
- // Bailout support.
- int EntryId() const { return entry_id_; }
- int ExitId() const { return exit_id_; }
+ BailoutId EntryId() const { return entry_id_; }
+ BailoutId ExitId() const { return exit_id_; }
protected:
BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
@@ -407,8 +400,8 @@ class BreakableStatement: public Statement {
ZoneStringList* labels_;
Type type_;
Label break_target_;
- int entry_id_;
- int exit_id_;
+ const BailoutId entry_id_;
+ const BailoutId exit_id_;
};
@@ -427,8 +420,6 @@ class Block: public BreakableStatement {
void set_scope(Scope* scope) { scope_ = scope; }
protected:
- template<class> friend class AstNodeFactory;
-
Block(Isolate* isolate,
ZoneStringList* labels,
int capacity,
@@ -455,8 +446,6 @@ class Declaration: public AstNode {
virtual InitializationFlag initialization() const = 0;
virtual bool IsInlineable() const;
- virtual Declaration* AsDeclaration() { return this; }
-
protected:
Declaration(VariableProxy* proxy,
VariableMode mode,
@@ -464,10 +453,7 @@ class Declaration: public AstNode {
: proxy_(proxy),
mode_(mode),
scope_(scope) {
- ASSERT(mode == VAR ||
- mode == CONST ||
- mode == CONST_HARMONY ||
- mode == LET);
+ ASSERT(IsDeclaredVariableMode(mode));
}
private:
@@ -488,8 +474,6 @@ class VariableDeclaration: public Declaration {
}
protected:
- template<class> friend class AstNodeFactory;
-
VariableDeclaration(VariableProxy* proxy,
VariableMode mode,
Scope* scope)
@@ -509,8 +493,6 @@ class FunctionDeclaration: public Declaration {
virtual bool IsInlineable() const;
protected:
- template<class> friend class AstNodeFactory;
-
FunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
@@ -537,8 +519,6 @@ class ModuleDeclaration: public Declaration {
}
protected:
- template<class> friend class AstNodeFactory;
-
ModuleDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
@@ -561,8 +541,6 @@ class ImportDeclaration: public Declaration {
}
protected:
- template<class> friend class AstNodeFactory;
-
ImportDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
@@ -584,25 +562,27 @@ class ExportDeclaration: public Declaration {
}
protected:
- template<class> friend class AstNodeFactory;
-
- ExportDeclaration(VariableProxy* proxy,
- Scope* scope)
- : Declaration(proxy, LET, scope) {
- }
+ ExportDeclaration(VariableProxy* proxy, Scope* scope)
+ : Declaration(proxy, LET, scope) {}
};
class Module: public AstNode {
public:
Interface* interface() const { return interface_; }
+ Block* body() const { return body_; }
protected:
- explicit Module(Zone* zone) : interface_(Interface::NewModule(zone)) {}
- explicit Module(Interface* interface) : interface_(interface) {}
+ explicit Module(Zone* zone)
+ : interface_(Interface::NewModule(zone)),
+ body_(NULL) {}
+ explicit Module(Interface* interface, Block* body = NULL)
+ : interface_(interface),
+ body_(body) {}
private:
Interface* interface_;
+ Block* body_;
};
@@ -610,20 +590,8 @@ class ModuleLiteral: public Module {
public:
DECLARE_NODE_TYPE(ModuleLiteral)
- Block* body() const { return body_; }
- Handle<Context> context() const { return context_; }
-
protected:
- template<class> friend class AstNodeFactory;
-
- ModuleLiteral(Block* body, Interface* interface)
- : Module(interface),
- body_(body) {
- }
-
- private:
- Block* body_;
- Handle<Context> context_;
+ ModuleLiteral(Block* body, Interface* interface) : Module(interface, body) {}
};
@@ -634,8 +602,6 @@ class ModuleVariable: public Module {
VariableProxy* proxy() const { return proxy_; }
protected:
- template<class> friend class AstNodeFactory;
-
inline explicit ModuleVariable(VariableProxy* proxy);
private:
@@ -651,8 +617,6 @@ class ModulePath: public Module {
Handle<String> name() const { return name_; }
protected:
- template<class> friend class AstNodeFactory;
-
ModulePath(Module* module, Handle<String> name, Zone* zone)
: Module(zone),
module_(module),
@@ -672,8 +636,6 @@ class ModuleUrl: public Module {
Handle<String> url() const { return url_; }
protected:
- template<class> friend class AstNodeFactory;
-
ModuleUrl(Handle<String> url, Zone* zone)
: Module(zone), url_(url) {
}
@@ -690,10 +652,9 @@ class IterationStatement: public BreakableStatement {
Statement* body() const { return body_; }
- // Bailout support.
- int OsrEntryId() const { return osr_entry_id_; }
- virtual int ContinueId() const = 0;
- virtual int StackCheckId() const = 0;
+ BailoutId OsrEntryId() const { return osr_entry_id_; }
+ virtual BailoutId ContinueId() const = 0;
+ virtual BailoutId StackCheckId() const = 0;
// Code generation
Label* continue_target() { return &continue_target_; }
@@ -712,7 +673,7 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
Label continue_target_;
- int osr_entry_id_;
+ const BailoutId osr_entry_id_;
};
@@ -732,14 +693,11 @@ class DoWhileStatement: public IterationStatement {
int condition_position() { return condition_position_; }
void set_condition_position(int pos) { condition_position_ = pos; }
- // Bailout support.
- virtual int ContinueId() const { return continue_id_; }
- virtual int StackCheckId() const { return back_edge_id_; }
- int BackEdgeId() const { return back_edge_id_; }
+ virtual BailoutId ContinueId() const { return continue_id_; }
+ virtual BailoutId StackCheckId() const { return back_edge_id_; }
+ BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
cond_(NULL),
@@ -751,8 +709,8 @@ class DoWhileStatement: public IterationStatement {
private:
Expression* cond_;
int condition_position_;
- int continue_id_;
- int back_edge_id_;
+ const BailoutId continue_id_;
+ const BailoutId back_edge_id_;
};
@@ -773,14 +731,11 @@ class WhileStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- // Bailout support.
- virtual int ContinueId() const { return EntryId(); }
- virtual int StackCheckId() const { return body_id_; }
- int BodyId() const { return body_id_; }
+ virtual BailoutId ContinueId() const { return EntryId(); }
+ virtual BailoutId StackCheckId() const { return body_id_; }
+ BailoutId BodyId() const { return body_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
WhileStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
cond_(NULL),
@@ -792,7 +747,7 @@ class WhileStatement: public IterationStatement {
Expression* cond_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
- int body_id_;
+ const BailoutId body_id_;
};
@@ -821,18 +776,15 @@ class ForStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- // Bailout support.
- virtual int ContinueId() const { return continue_id_; }
- virtual int StackCheckId() const { return body_id_; }
- int BodyId() const { return body_id_; }
+ virtual BailoutId ContinueId() const { return continue_id_; }
+ virtual BailoutId StackCheckId() const { return body_id_; }
+ BailoutId BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
void set_loop_variable(Variable* var) { loop_variable_ = var; }
protected:
- template<class> friend class AstNodeFactory;
-
ForStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
init_(NULL),
@@ -851,8 +803,8 @@ class ForStatement: public IterationStatement {
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
- int continue_id_;
- int body_id_;
+ const BailoutId continue_id_;
+ const BailoutId body_id_;
};
@@ -869,14 +821,14 @@ class ForInStatement: public IterationStatement {
Expression* each() const { return each_; }
Expression* enumerable() const { return enumerable_; }
- virtual int ContinueId() const { return EntryId(); }
- virtual int StackCheckId() const { return body_id_; }
- int BodyId() const { return body_id_; }
- int PrepareId() const { return prepare_id_; }
+ virtual BailoutId ContinueId() const { return EntryId(); }
+ virtual BailoutId StackCheckId() const { return body_id_; }
+ BailoutId BodyId() const { return body_id_; }
+ BailoutId PrepareId() const { return prepare_id_; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
+ protected:
ForInStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
each_(NULL),
@@ -888,8 +840,8 @@ class ForInStatement: public IterationStatement {
private:
Expression* each_;
Expression* enumerable_;
- int body_id_;
- int prepare_id_;
+ const BailoutId body_id_;
+ const BailoutId prepare_id_;
};
@@ -901,8 +853,6 @@ class ExpressionStatement: public Statement {
Expression* expression() const { return expression_; }
protected:
- template<class> friend class AstNodeFactory;
-
explicit ExpressionStatement(Expression* expression)
: expression_(expression) { }
@@ -918,8 +868,6 @@ class ContinueStatement: public Statement {
IterationStatement* target() const { return target_; }
protected:
- template<class> friend class AstNodeFactory;
-
explicit ContinueStatement(IterationStatement* target)
: target_(target) { }
@@ -935,8 +883,6 @@ class BreakStatement: public Statement {
BreakableStatement* target() const { return target_; }
protected:
- template<class> friend class AstNodeFactory;
-
explicit BreakStatement(BreakableStatement* target)
: target_(target) { }
@@ -952,8 +898,6 @@ class ReturnStatement: public Statement {
Expression* expression() const { return expression_; }
protected:
- template<class> friend class AstNodeFactory;
-
explicit ReturnStatement(Expression* expression)
: expression_(expression) { }
@@ -970,8 +914,6 @@ class WithStatement: public Statement {
Statement* statement() const { return statement_; }
protected:
- template<class> friend class AstNodeFactory;
-
WithStatement(Expression* expression, Statement* statement)
: expression_(expression),
statement_(statement) { }
@@ -1000,10 +942,10 @@ class CaseClause: public ZoneObject {
int position() const { return position_; }
void set_position(int pos) { position_ = pos; }
- int EntryId() { return entry_id_; }
- int CompareId() { return compare_id_; }
+ BailoutId EntryId() const { return entry_id_; }
// Type feedback information.
+ TypeFeedbackId CompareId() { return compare_id_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
@@ -1023,8 +965,8 @@ class CaseClause: public ZoneObject {
OBJECT_ONLY
};
CompareTypeFeedback compare_type_;
- int compare_id_;
- int entry_id_;
+ const TypeFeedbackId compare_id_;
+ const BailoutId entry_id_;
};
@@ -1041,8 +983,6 @@ class SwitchStatement: public BreakableStatement {
ZoneList<CaseClause*>* cases() const { return cases_; }
protected:
- template<class> friend class AstNodeFactory;
-
SwitchStatement(Isolate* isolate, ZoneStringList* labels)
: BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
tag_(NULL),
@@ -1070,13 +1010,11 @@ class IfStatement: public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
- int IfId() const { return if_id_; }
- int ThenId() const { return then_id_; }
- int ElseId() const { return else_id_; }
+ BailoutId IfId() const { return if_id_; }
+ BailoutId ThenId() const { return then_id_; }
+ BailoutId ElseId() const { return else_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
IfStatement(Isolate* isolate,
Expression* condition,
Statement* then_statement,
@@ -1093,9 +1031,9 @@ class IfStatement: public Statement {
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
- int if_id_;
- int then_id_;
- int else_id_;
+ const BailoutId if_id_;
+ const BailoutId then_id_;
+ const BailoutId else_id_;
};
@@ -1112,6 +1050,7 @@ class TargetCollector: public AstNode {
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
+ virtual Type node_type() const { return kInvalid; }
virtual TargetCollector* AsTargetCollector() { return this; }
ZoneList<Label*>* targets() { return &targets_; }
@@ -1155,8 +1094,6 @@ class TryCatchStatement: public TryStatement {
Block* catch_block() const { return catch_block_; }
protected:
- template<class> friend class AstNodeFactory;
-
TryCatchStatement(int index,
Block* try_block,
Scope* scope,
@@ -1182,8 +1119,6 @@ class TryFinallyStatement: public TryStatement {
Block* finally_block() const { return finally_block_; }
protected:
- template<class> friend class AstNodeFactory;
-
TryFinallyStatement(int index, Block* try_block, Block* finally_block)
: TryStatement(index, try_block),
finally_block_(finally_block) { }
@@ -1198,8 +1133,6 @@ class DebuggerStatement: public Statement {
DECLARE_NODE_TYPE(DebuggerStatement)
protected:
- template<class> friend class AstNodeFactory;
-
DebuggerStatement() {}
};
@@ -1209,8 +1142,6 @@ class EmptyStatement: public Statement {
DECLARE_NODE_TYPE(EmptyStatement)
protected:
- template<class> friend class AstNodeFactory;
-
EmptyStatement() {}
};
@@ -1261,9 +1192,9 @@ class Literal: public Expression {
return s1->Equals(*s2);
}
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
+ protected:
Literal(Isolate* isolate, Handle<Object> handle)
: Expression(isolate),
handle_(handle) { }
@@ -1381,8 +1312,6 @@ class ObjectLiteral: public MaterializedLiteral {
};
protected:
- template<class> friend class AstNodeFactory;
-
ObjectLiteral(Isolate* isolate,
Handle<FixedArray> constant_properties,
ZoneList<Property*>* properties,
@@ -1414,8 +1343,6 @@ class RegExpLiteral: public MaterializedLiteral {
Handle<String> flags() const { return flags_; }
protected:
- template<class> friend class AstNodeFactory;
-
RegExpLiteral(Isolate* isolate,
Handle<String> pattern,
Handle<String> flags,
@@ -1439,11 +1366,11 @@ class ArrayLiteral: public MaterializedLiteral {
ZoneList<Expression*>* values() const { return values_; }
// Return an AST id for an element that is used in simulate instructions.
- int GetIdForElement(int i) { return first_element_id_ + i; }
+ BailoutId GetIdForElement(int i) {
+ return BailoutId(first_element_id_.ToInt() + i);
+ }
protected:
- template<class> friend class AstNodeFactory;
-
ArrayLiteral(Isolate* isolate,
Handle<FixedArray> constant_elements,
ZoneList<Expression*>* values,
@@ -1458,7 +1385,7 @@ class ArrayLiteral: public MaterializedLiteral {
private:
Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
- int first_element_id_;
+ const BailoutId first_element_id_;
};
@@ -1494,15 +1421,13 @@ class VariableProxy: public Expression {
void BindTo(Variable* var);
protected:
- template<class> friend class AstNodeFactory;
-
VariableProxy(Isolate* isolate, Variable* var);
VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
- int position,
- Interface* interface);
+ Interface* interface,
+ int position);
Handle<String> name_;
Variable* var_; // resolved variable, or NULL
@@ -1526,6 +1451,8 @@ class Property: public Expression {
Expression* key() const { return key_; }
virtual int position() const { return pos_; }
+ BailoutId LoadId() const { return load_id_; }
+
bool IsStringLength() const { return is_string_length_; }
bool IsStringAccess() const { return is_string_access_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
@@ -1536,10 +1463,9 @@ class Property: public Expression {
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
bool IsArrayLength() { return is_array_length_; }
bool IsUninitialized() { return is_uninitialized_; }
+ TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
protected:
- template<class> friend class AstNodeFactory;
-
Property(Isolate* isolate,
Expression* obj,
Expression* key,
@@ -1548,6 +1474,7 @@ class Property: public Expression {
obj_(obj),
key_(key),
pos_(pos),
+ load_id_(GetNextId(isolate)),
is_monomorphic_(false),
is_uninitialized_(false),
is_array_length_(false),
@@ -1559,6 +1486,7 @@ class Property: public Expression {
Expression* obj_;
Expression* key_;
int pos_;
+ const BailoutId load_id_;
SmallMapList receiver_types_;
bool is_monomorphic_ : 1;
@@ -1578,20 +1506,25 @@ class Call: public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
virtual int position() const { return pos_; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle,
- CallKind call_kind);
+ // Type feedback information.
+ TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind);
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
virtual bool IsMonomorphic() { return is_monomorphic_; }
CheckType check_type() const { return check_type_; }
Handle<JSFunction> target() { return target_; }
+
+ // A cache for the holder, set as a side effect of computing the target of the
+ // call. Note that it contains the null handle when the receiver is the same
+ // as the holder!
Handle<JSObject> holder() { return holder_; }
+
Handle<JSGlobalPropertyCell> cell() { return cell_; }
bool ComputeTarget(Handle<Map> type, Handle<String> name);
bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
- // Bailout support.
- int ReturnId() const { return return_id_; }
+ BailoutId ReturnId() const { return return_id_; }
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
@@ -1599,8 +1532,6 @@ class Call: public Expression {
#endif
protected:
- template<class> friend class AstNodeFactory;
-
Call(Isolate* isolate,
Expression* expression,
ZoneList<Expression*>* arguments,
@@ -1625,7 +1556,7 @@ class Call: public Expression {
Handle<JSObject> holder_;
Handle<JSGlobalPropertyCell> cell_;
- int return_id_;
+ const BailoutId return_id_;
};
@@ -1637,16 +1568,15 @@ class CallNew: public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
virtual int position() const { return pos_; }
+ // Type feedback information.
+ TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
Handle<JSFunction> target() { return target_; }
- // Bailout support.
- int ReturnId() const { return return_id_; }
+ BailoutId ReturnId() const { return return_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
CallNew(Isolate* isolate,
Expression* expression,
ZoneList<Expression*>* arguments,
@@ -1666,7 +1596,7 @@ class CallNew: public Expression {
bool is_monomorphic_;
Handle<JSFunction> target_;
- int return_id_;
+ const BailoutId return_id_;
};
@@ -1683,9 +1613,9 @@ class CallRuntime: public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
bool is_jsruntime() const { return function_ == NULL; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId CallRuntimeFeedbackId() const { return reuse(id()); }
+ protected:
CallRuntime(Isolate* isolate,
Handle<String> name,
const Runtime::Function* function,
@@ -1712,12 +1642,12 @@ class UnaryOperation: public Expression {
Expression* expression() const { return expression_; }
virtual int position() const { return pos_; }
- int MaterializeTrueId() { return materialize_true_id_; }
- int MaterializeFalseId() { return materialize_false_id_; }
+ BailoutId MaterializeTrueId() { return materialize_true_id_; }
+ BailoutId MaterializeFalseId() { return materialize_false_id_; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
+ protected:
UnaryOperation(Isolate* isolate,
Token::Value op,
Expression* expression,
@@ -1726,13 +1656,9 @@ class UnaryOperation: public Expression {
op_(op),
expression_(expression),
pos_(pos),
- materialize_true_id_(AstNode::kNoNumber),
- materialize_false_id_(AstNode::kNoNumber) {
+ materialize_true_id_(GetNextId(isolate)),
+ materialize_false_id_(GetNextId(isolate)) {
ASSERT(Token::IsUnaryOp(op));
- if (op == Token::NOT) {
- materialize_true_id_ = GetNextId(isolate);
- materialize_false_id_ = GetNextId(isolate);
- }
}
private:
@@ -1742,8 +1668,8 @@ class UnaryOperation: public Expression {
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
- int materialize_true_id_;
- int materialize_false_id_;
+ const BailoutId materialize_true_id_;
+ const BailoutId materialize_false_id_;
};
@@ -1758,22 +1684,23 @@ class BinaryOperation: public Expression {
Expression* right() const { return right_; }
virtual int position() const { return pos_; }
- // Bailout support.
- int RightId() const { return right_id_; }
+ BailoutId RightId() const { return right_id_; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
+ protected:
BinaryOperation(Isolate* isolate,
Token::Value op,
Expression* left,
Expression* right,
int pos)
- : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
+ : Expression(isolate),
+ op_(op),
+ left_(left),
+ right_(right),
+ pos_(pos),
+ right_id_(GetNextId(isolate)) {
ASSERT(Token::IsBinaryOp(op));
- right_id_ = (op == Token::AND || op == Token::OR)
- ? GetNextId(isolate)
- : AstNode::kNoNumber;
}
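
Because right_id_ becomes a const BailoutId (next hunk), it has to be initialized in the constructor's initializer list, which rules out the old pattern of conditionally assigning it in the body for AND/OR only; hence the unconditional GetNextId above. A small illustration of the constraint, with a toy id source in place of V8's:

    struct IdSource {
      int next = 1;
      int GetNextId() { return next++; }
    };

    class BinaryOperation {
     public:
      explicit BinaryOperation(IdSource* ids)
          : right_id_(ids->GetNextId()) {}  // must happen here: member is const
      int RightId() const { return right_id_; }
     private:
      const int right_id_;  // now allocated for every operator, not just && and ||
    };

    int main() {
      IdSource ids;
      BinaryOperation op(&ids);
      return op.RightId() == 1 ? 0 : 1;
    }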
private:
@@ -1781,9 +1708,9 @@ class BinaryOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
- // The short-circuit logical operations have an AST ID for their
+ // The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
- int right_id_;
+ const BailoutId right_id_;
};
@@ -1808,13 +1735,12 @@ class CountOperation: public Expression {
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- // Bailout support.
- int AssignmentId() const { return assignment_id_; }
- int CountId() const { return count_id_; }
+ BailoutId AssignmentId() const { return assignment_id_; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId CountBinOpFeedbackId() const { return count_id_; }
+ TypeFeedbackId CountStoreFeedbackId() const { return reuse(id()); }
+ protected:
CountOperation(Isolate* isolate,
Token::Value op,
bool is_prefix,
@@ -1834,8 +1760,8 @@ class CountOperation: public Expression {
bool is_monomorphic_;
Expression* expression_;
int pos_;
- int assignment_id_;
- int count_id_;
+ const BailoutId assignment_id_;
+ const TypeFeedbackId count_id_;
SmallMapList receiver_types_;
};
@@ -1850,6 +1776,7 @@ class CompareOperation: public Expression {
virtual int position() const { return pos_; }
// Type feedback information.
+ TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
@@ -1860,8 +1787,6 @@ class CompareOperation: public Expression {
bool IsLiteralCompareNull(Expression** expr);
protected:
- template<class> friend class AstNodeFactory;
-
CompareOperation(Isolate* isolate,
Token::Value op,
Expression* left,
@@ -1898,12 +1823,10 @@ class Conditional: public Expression {
int then_expression_position() const { return then_expression_position_; }
int else_expression_position() const { return else_expression_position_; }
- int ThenId() const { return then_id_; }
- int ElseId() const { return else_id_; }
+ BailoutId ThenId() const { return then_id_; }
+ BailoutId ElseId() const { return else_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
Conditional(Isolate* isolate,
Expression* condition,
Expression* then_expression,
@@ -1925,8 +1848,8 @@ class Conditional: public Expression {
Expression* else_expression_;
int then_expression_position_;
int else_expression_position_;
- int then_id_;
- int else_id_;
+ const BailoutId then_id_;
+ const BailoutId else_id_;
};
@@ -1956,18 +1879,15 @@ class Assignment: public Expression {
void mark_block_start() { block_start_ = true; }
void mark_block_end() { block_end_ = true; }
+ BailoutId AssignmentId() const { return assignment_id_; }
+
// Type feedback information.
+ TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- // Bailout support.
- int CompoundLoadId() const { return compound_load_id_; }
- int AssignmentId() const { return assignment_id_; }
-
protected:
- template<class> friend class AstNodeFactory;
-
Assignment(Isolate* isolate,
Token::Value op,
Expression* target,
@@ -1980,7 +1900,6 @@ class Assignment: public Expression {
if (is_compound()) {
binary_operation_ =
factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
- compound_load_id_ = GetNextId(isolate);
}
}
@@ -1990,8 +1909,7 @@ class Assignment: public Expression {
Expression* value_;
int pos_;
BinaryOperation* binary_operation_;
- int compound_load_id_;
- int assignment_id_;
+ const BailoutId assignment_id_;
bool block_start_;
bool block_end_;
@@ -2009,8 +1927,6 @@ class Throw: public Expression {
virtual int position() const { return pos_; }
protected:
- template<class> friend class AstNodeFactory;
-
Throw(Isolate* isolate, Expression* exception, int pos)
: Expression(isolate), exception_(exception), pos_(pos) {}
@@ -2038,6 +1954,11 @@ class FunctionLiteral: public Expression {
kIsFunction
};
+ enum IsParenthesizedFlag {
+ kIsParenthesized,
+ kNotParenthesized
+ };
+
DECLARE_NODE_TYPE(FunctionLiteral)
Handle<String> name() const { return name_; }
@@ -2065,6 +1986,7 @@ class FunctionLiteral: public Expression {
int parameter_count() { return parameter_count_; }
bool AllowsLazyCompilation();
+ bool AllowsLazyCompilationWithoutContext();
Handle<String> debug_name() const {
if (name_->length() > 0) return name_;
@@ -2085,6 +2007,18 @@ class FunctionLiteral: public Expression {
bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; }
+ // This is used as a heuristic for when to eagerly compile a function
+ // literal. We consider the following constructs as hints that the
+ // function will be called immediately:
+ // - (function() { ... })();
+ // - var x = function() { ... }();
+ bool is_parenthesized() {
+ return IsParenthesized::decode(bitfield_) == kIsParenthesized;
+ }
+ void set_parenthesized() {
+ bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized);
+ }
+
int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags* flags() { return ast_properties_.flags(); }
void set_ast_properties(AstProperties* ast_properties) {
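
is_parenthesized() decodes one bit of the same packed bitfield_ that carries the other FunctionLiteral flags; each flag is a BitField<type, shift, size> view (the IsParenthesized field at bit 6 appears near the end of this file's diff). A compilable sketch of the encode/update/decode pattern, with a simplified BitField helper:

    #include <cassert>
    #include <cstdint>

    // Simplified BitField: a typed window onto a few bits of one integer.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
      static T decode(uint32_t bits) { return static_cast<T>((bits & kMask) >> shift); }
      static uint32_t update(uint32_t bits, T value) {
        return (bits & ~kMask) | encode(value);
      }
    };

    enum IsParenthesizedFlag { kIsParenthesized, kNotParenthesized };
    typedef BitField<IsParenthesizedFlag, 6, 1> IsParenthesized;

    int main() {
      uint32_t bitfield = IsParenthesized::encode(kNotParenthesized);
      assert(IsParenthesized::decode(bitfield) == kNotParenthesized);
      bitfield = IsParenthesized::update(bitfield, kIsParenthesized);  // set_parenthesized()
      assert(IsParenthesized::decode(bitfield) == kIsParenthesized);
    }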
@@ -2092,8 +2026,6 @@ class FunctionLiteral: public Expression {
}
protected:
- template<class> friend class AstNodeFactory;
-
FunctionLiteral(Isolate* isolate,
Handle<String> name,
Scope* scope,
@@ -2106,7 +2038,8 @@ class FunctionLiteral: public Expression {
int parameter_count,
Type type,
ParameterFlag has_duplicate_parameters,
- IsFunctionFlag is_function)
+ IsFunctionFlag is_function,
+ IsParenthesizedFlag is_parenthesized)
: Expression(isolate),
name_(name),
scope_(scope),
@@ -2125,7 +2058,8 @@ class FunctionLiteral: public Expression {
IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters) |
- IsFunction::encode(is_function);
+ IsFunction::encode(is_function) |
+ IsParenthesized::encode(is_parenthesized);
}
private:
@@ -2149,6 +2083,7 @@ class FunctionLiteral: public Expression {
class Pretenure: public BitField<bool, 3, 1> {};
class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
+ class IsParenthesized: public BitField<IsParenthesizedFlag, 6, 1> {};
};
@@ -2161,8 +2096,6 @@ class SharedFunctionInfoLiteral: public Expression {
}
protected:
- template<class> friend class AstNodeFactory;
-
SharedFunctionInfoLiteral(
Isolate* isolate,
Handle<SharedFunctionInfo> shared_function_info)
@@ -2179,8 +2112,6 @@ class ThisFunction: public Expression {
DECLARE_NODE_TYPE(ThisFunction)
protected:
- template<class> friend class AstNodeFactory;
-
explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
};
@@ -2638,9 +2569,9 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor>
class AstNodeFactory BASE_EMBEDDED {
public:
- explicit AstNodeFactory(Isolate* isolate)
+ AstNodeFactory(Isolate* isolate, Zone* zone)
: isolate_(isolate),
- zone_(isolate_->zone()) { }
+ zone_(zone) { }
Visitor* visitor() { return &visitor_; }
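
The constructor change decouples the factory's arena from the isolate: callers now choose which Zone the AST nodes are allocated in (for instance a shorter-lived compilation zone) instead of the factory reaching into the isolate's own zone, and it is also what lets NewBlock below shed its trailing Zone parameter. A toy sketch of the injection (types illustrative only):

    struct Zone {};     // arena the AST nodes live in
    struct Isolate {};  // no longer the source of the zone

    class AstNodeFactory {
     public:
      AstNodeFactory(Isolate* isolate, Zone* zone) : isolate_(isolate), zone_(zone) {}
      Zone* zone() const { return zone_; }  // New* methods allocate here
     private:
      Isolate* isolate_;
      Zone* zone_;
    };

    int main() {
      Isolate isolate;
      Zone compile_zone;  // e.g. discarded when compilation finishes
      AstNodeFactory factory(&isolate, &compile_zone);
      return factory.zone() == &compile_zone ? 0 : 1;
    }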
@@ -2710,10 +2641,9 @@ class AstNodeFactory BASE_EMBEDDED {
Block* NewBlock(ZoneStringList* labels,
int capacity,
- bool is_initializer_block,
- Zone* zone) {
+ bool is_initializer_block) {
Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block, zone);
+ isolate_, labels, capacity, is_initializer_block, zone_);
VISIT_AND_RETURN(Block, block)
}
@@ -2846,11 +2776,10 @@ class AstNodeFactory BASE_EMBEDDED {
VariableProxy* NewVariableProxy(Handle<String> name,
bool is_this,
- int position = RelocInfo::kNoPosition,
- Interface* interface =
- Interface::NewValue()) {
+ Interface* interface = Interface::NewValue(),
+ int position = RelocInfo::kNoPosition) {
VariableProxy* proxy =
- new(zone_) VariableProxy(isolate_, name, is_this, position, interface);
+ new(zone_) VariableProxy(isolate_, name, is_this, interface, position);
VISIT_AND_RETURN(VariableProxy, proxy)
}
@@ -2954,12 +2883,14 @@ class AstNodeFactory BASE_EMBEDDED {
int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::Type type,
- FunctionLiteral::IsFunctionFlag is_function) {
+ FunctionLiteral::IsFunctionFlag is_function,
+ FunctionLiteral::IsParenthesizedFlag is_parenthesized) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
has_only_simple_this_property_assignments, this_property_assignments,
- parameter_count, type, has_duplicate_parameters, is_function);
+ parameter_count, type, has_duplicate_parameters, is_function,
+ is_parenthesized);
// Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {
visitor_.VisitFunctionLiteral(lit);
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 33cbb8149f..992659edce 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -42,6 +42,7 @@
#include "snapshot.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
+#include "extensions/statistics-extension.h"
namespace v8 {
namespace internal {
@@ -95,6 +96,7 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
extensions_cache_.Initialize(create_heap_objects);
GCExtension::Register();
ExternalizeStringExtension::Register();
+ StatisticsExtension::Register();
}
@@ -154,7 +156,7 @@ class Genesis BASE_EMBEDDED {
Heap* heap() const { return isolate_->heap(); }
private:
- Handle<Context> global_context_;
+ Handle<Context> native_context_;
Isolate* isolate_;
// There may be more than one active genesis object: When GC is
@@ -162,7 +164,7 @@ class Genesis BASE_EMBEDDED {
// processing callbacks which may create new environments.
Genesis* previous_;
- Handle<Context> global_context() { return global_context_; }
+ Handle<Context> native_context() { return native_context_; }
// Creates some basic objects. Used for creating a context from scratch.
void CreateRoots();
@@ -226,13 +228,13 @@ class Genesis BASE_EMBEDDED {
// Used both for deserialized and from-scratch contexts to add the extensions
// provided.
- static bool InstallExtensions(Handle<Context> global_context,
+ static bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
static bool InstallExtension(const char* name,
ExtensionStates* extension_states);
static bool InstallExtension(v8::RegisteredExtension* current,
ExtensionStates* extension_states);
- static void InstallSpecialObjects(Handle<Context> global_context);
+ static void InstallSpecialObjects(Handle<Context> native_context);
bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
@@ -253,16 +255,16 @@ class Genesis BASE_EMBEDDED {
Handle<Map> CreateFunctionMap(PrototypePropertyMode prototype_mode);
- Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode);
+ void SetFunctionInstanceDescriptor(Handle<Map> map,
+ PrototypePropertyMode prototypeMode);
void MakeFunctionInstancePrototypeWritable();
Handle<Map> CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function);
- Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
- PrototypePropertyMode propertyMode);
+ void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
+ PrototypePropertyMode propertyMode);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -317,7 +319,7 @@ static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
Factory* factory = object->GetIsolate()->factory();
Handle<Map> old_to_map = Handle<Map>(object->map());
- Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
+ Handle<Map> new_to_map = factory->CopyMap(old_to_map);
new_to_map->set_prototype(*proto);
object->set_map(*new_to_map);
}
@@ -325,22 +327,20 @@ static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
void Bootstrapper::DetachGlobal(Handle<Context> env) {
Factory* factory = env->GetIsolate()->factory();
- JSGlobalProxy::cast(env->global_proxy())->set_context(*factory->null_value());
- SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
- factory->null_value());
- env->set_global_proxy(env->global());
- env->global()->set_global_receiver(env->global());
+ Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
+ global_proxy->set_native_context(*factory->null_value());
+ SetObjectPrototype(global_proxy, factory->null_value());
+ env->set_global_proxy(env->global_object());
+ env->global_object()->set_global_receiver(env->global_object());
}
void Bootstrapper::ReattachGlobal(Handle<Context> env,
- Handle<Object> global_object) {
- ASSERT(global_object->IsJSGlobalProxy());
- Handle<JSGlobalProxy> global = Handle<JSGlobalProxy>::cast(global_object);
- env->global()->set_global_receiver(*global);
- env->set_global_proxy(*global);
- SetObjectPrototype(global, Handle<JSObject>(env->global()));
- global->set_context(*env);
+ Handle<JSGlobalProxy> global_proxy) {
+ env->global_object()->set_global_receiver(*global_proxy);
+ env->set_global_proxy(*global_proxy);
+ SetObjectPrototype(global_proxy, Handle<JSObject>(env->global_object()));
+ global_proxy->set_native_context(*env);
}
@@ -381,54 +381,54 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
}
-Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode) {
+void Genesis::SetFunctionInstanceDescriptor(
+ Handle<Map> map, PrototypePropertyMode prototypeMode) {
int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+
+ Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
+ Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
+ Handle<Foreign> args(factory()->NewForeign(&Accessors::FunctionArguments));
+ Handle<Foreign> caller(factory()->NewForeign(&Accessors::FunctionCaller));
+ Handle<Foreign> prototype;
+ if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
+ }
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- DescriptorArray::WhitenessWitness witness(*descriptors);
+ Map::SetDescriptors(map, descriptors);
{ // Add length.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
- CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
- descriptors->Set(0, &d, witness);
+ CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add name.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
- CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
- descriptors->Set(1, &d, witness);
+ CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionArguments));
- CallbacksDescriptor d(*factory()->arguments_symbol(), *f, attribs);
- descriptors->Set(2, &d, witness);
+ CallbacksDescriptor d(*factory()->arguments_symbol(), *args, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionCaller));
- CallbacksDescriptor d(*factory()->caller_symbol(), *f, attribs);
- descriptors->Set(3, &d, witness);
+ CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
+ map->AppendDescriptor(&d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
}
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
- CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
- descriptors->Set(4, &d, witness);
+ CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ map->AppendDescriptor(&d, witness);
}
- descriptors->Sort(witness);
- return descriptors;
}
Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- Handle<DescriptorArray> descriptors =
- ComputeFunctionInstanceDescriptor(prototype_mode);
- map->set_instance_descriptors(*descriptors);
+ SetFunctionInstanceDescriptor(map, prototype_mode);
map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
return map;
}
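
Both Set*FunctionInstanceDescriptor rewrites follow one discipline: perform every allocation (the Foreign and AccessorPair handles, the descriptor array itself) up front, attach the still-empty array via Map::SetDescriptors, then append descriptors in a single uninterrupted run, replacing the old flow of filling slots by index and calling Sort at the end. A toy sketch of that shape (Descriptor and Map here are stand-ins, not V8's types):

    #include <string>
    #include <utility>
    #include <vector>

    struct Descriptor { std::string key; std::string callback; };

    struct Map {
      std::vector<Descriptor> descriptors;
      void AppendDescriptor(Descriptor d) { descriptors.push_back(std::move(d)); }
    };

    // All values are created before the map is mutated, so nothing can observe
    // a half-built descriptor set partway through.
    void SetFunctionInstanceDescriptor(Map* map, bool add_prototype) {
      std::string length = "Accessors::FunctionLength";  // hoisted, like NewForeign
      std::string name = "Accessors::FunctionName";
      std::string prototype;
      if (add_prototype) prototype = "Accessors::FunctionPrototype";

      map->AppendDescriptor({"length", length});
      map->AppendDescriptor({"name", name});
      if (add_prototype) map->AppendDescriptor({"prototype", prototype});
    }

    int main() {
      Map map;
      SetFunctionInstanceDescriptor(&map, true);
      return map.descriptors.size() == 3 ? 0 : 1;
    }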
@@ -442,20 +442,20 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// writable.
Handle<Map> function_instance_map =
CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
- global_context()->set_function_instance_map(*function_instance_map);
+ native_context()->set_function_instance_map(*function_instance_map);
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
CreateFunctionMap(DONT_ADD_PROTOTYPE);
- global_context()->set_function_without_prototype_map(
+ native_context()->set_function_without_prototype_map(
*function_without_prototype_map);
// Allocate the function map. This map is temporary, used only for processing
// of builtins.
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
- global_context()->set_function_map(*function_map);
+ native_context()->set_function_map(*function_map);
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
@@ -475,17 +475,15 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
object_fun->set_initial_map(*object_function_map);
object_function_map->set_constructor(*object_fun);
- global_context()->set_object_function(*object_fun);
+ native_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
Handle<JSObject> prototype = factory->NewJSObject(
isolate->object_function(),
TENURED);
- global_context()->set_initial_object_prototype(*prototype);
+ native_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
- object_function_map->set_instance_descriptors(
- heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -509,63 +507,63 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
empty_function->shared()->DontAdaptArguments();
// Set prototypes for the function maps.
- global_context()->function_map()->set_prototype(*empty_function);
- global_context()->function_instance_map()->set_prototype(*empty_function);
- global_context()->function_without_prototype_map()->
+ native_context()->function_map()->set_prototype(*empty_function);
+ native_context()->function_instance_map()->set_prototype(*empty_function);
+ native_context()->function_without_prototype_map()->
set_prototype(*empty_function);
function_instance_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later.
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
empty_function_map->set_prototype(
- global_context()->object_function()->prototype());
+ native_context()->object_function()->prototype());
empty_function->set_map(*empty_function_map);
return empty_function;
}
-Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode) {
+void Genesis::SetStrictFunctionInstanceDescriptor(
+ Handle<Map> map, PrototypePropertyMode prototypeMode) {
int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+
+ Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
+ Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
+ Handle<AccessorPair> arguments(factory()->NewAccessorPair());
+ Handle<AccessorPair> caller(factory()->NewAccessorPair());
+ Handle<Foreign> prototype;
+ if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
+ }
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
-
- DescriptorArray::WhitenessWitness witness(*descriptors);
+ Map::SetDescriptors(map, descriptors);
{ // Add length.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
- CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
- descriptors->Set(0, &d, witness);
+ CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add name.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
- CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
- descriptors->Set(1, &d, witness);
+ CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- Handle<AccessorPair> arguments(factory()->NewAccessorPair());
CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs);
- descriptors->Set(2, &d, witness);
+ map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- Handle<AccessorPair> caller(factory()->NewAccessorPair());
CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
- descriptors->Set(3, &d, witness);
+ map->AppendDescriptor(&d, witness);
}
-
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
}
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
- CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
- descriptors->Set(4, &d, witness);
+ CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ map->AppendDescriptor(&d, witness);
}
-
- descriptors->Sort(witness);
- return descriptors;
}
@@ -578,7 +576,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
Handle<Code> code(isolate()->builtins()->builtin(
Builtins::kStrictModePoisonPill));
throw_type_error_function->set_map(
- global_context()->function_map());
+ native_context()->function_map());
throw_type_error_function->set_code(*code);
throw_type_error_function->shared()->set_code(*code);
throw_type_error_function->shared()->DontAdaptArguments();
@@ -593,9 +591,7 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- Handle<DescriptorArray> descriptors =
- ComputeStrictFunctionInstanceDescriptor(prototype_mode);
- map->set_instance_descriptors(*descriptors);
+ SetStrictFunctionInstanceDescriptor(map, prototype_mode);
map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
map->set_prototype(*empty_function);
return map;
@@ -606,13 +602,13 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the strict mode function instances.
Handle<Map> strict_mode_function_instance_map =
CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
- global_context()->set_strict_mode_function_instance_map(
+ native_context()->set_strict_mode_function_instance_map(
*strict_mode_function_instance_map);
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_mode_function_without_prototype_map =
CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
- global_context()->set_strict_mode_function_without_prototype_map(
+ native_context()->set_strict_mode_function_without_prototype_map(
*strict_mode_function_without_prototype_map);
// Allocate map for the strict mode functions. This map is temporary, used
@@ -620,7 +616,7 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> strict_mode_function_map =
CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
- global_context()->set_strict_mode_function_map(
+ native_context()->set_strict_mode_function_map(
*strict_mode_function_map);
// The final map for the strict mode functions. Writeable prototype.
@@ -654,39 +650,39 @@ void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
}
-static void AddToWeakGlobalContextList(Context* context) {
- ASSERT(context->IsGlobalContext());
+static void AddToWeakNativeContextList(Context* context) {
+ ASSERT(context->IsNativeContext());
Heap* heap = context->GetIsolate()->heap();
#ifdef DEBUG
{ // NOLINT
ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
// Check that context is not in the list yet.
- for (Object* current = heap->global_contexts_list();
+ for (Object* current = heap->native_contexts_list();
!current->IsUndefined();
current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
ASSERT(current != context);
}
}
#endif
- context->set(Context::NEXT_CONTEXT_LINK, heap->global_contexts_list());
- heap->set_global_contexts_list(context);
+ context->set(Context::NEXT_CONTEXT_LINK, heap->native_contexts_list());
+ heap->set_native_contexts_list(context);
}
void Genesis::CreateRoots() {
- // Allocate the global context FixedArray first and then patch the
+ // Allocate the native context FixedArray first and then patch the
// closure and extension object later (we need the empty function
// and the global object, but in order to create those, we need the
- // global context).
- global_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
- *factory()->NewGlobalContext()));
- AddToWeakGlobalContextList(*global_context_);
- isolate()->set_context(*global_context());
+ // native context).
+ native_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
+ *factory()->NewNativeContext()));
+ AddToWeakNativeContextList(*native_context_);
+ isolate()->set_context(*native_context());
// Allocate the message listeners object.
{
v8::NeanderArray listeners;
- global_context()->set_message_listeners(*listeners.value());
+ native_context()->set_message_listeners(*listeners.value());
}
}
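
AddToWeakNativeContextList threads every native context onto an intrusive singly linked list: the link is stored in a fixed slot of the context itself (NEXT_CONTEXT_LINK) and the heap holds the head, so insertion is a two-store prepend. A minimal sketch with plain pointers standing in for heap slots:

    #include <cassert>

    struct Context {
      Context* next_context_link = nullptr;  // stands in for the context slot
    };

    struct Heap {
      Context* native_contexts_list = nullptr;  // list head held by the heap
    };

    void AddToWeakNativeContextList(Heap* heap, Context* context) {
      assert(context->next_context_link == nullptr);  // not linked yet
      context->next_context_link = heap->native_contexts_list;
      heap->native_contexts_list = context;
    }

    int main() {
      Heap heap;
      Context first, second;
      AddToWeakNativeContextList(&heap, &first);
      AddToWeakNativeContextList(&heap, &second);
      assert(heap.native_contexts_list == &second);
      assert(second.next_context_link == &first);
    }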
@@ -749,6 +745,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
}
js_global_function->initial_map()->set_is_hidden_prototype();
+ js_global_function->initial_map()->set_dictionary_map(true);
Handle<GlobalObject> inner_global =
factory()->NewGlobalObject(js_global_function);
if (inner_global_out != NULL) {
@@ -795,21 +792,22 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
Handle<JSGlobalProxy> global_proxy) {
- // Set the global context for the global object.
- inner_global->set_global_context(*global_context());
+ // Set the native context for the global object.
+ inner_global->set_native_context(*native_context());
+ inner_global->set_global_context(*native_context());
inner_global->set_global_receiver(*global_proxy);
- global_proxy->set_context(*global_context());
- global_context()->set_global_proxy(*global_proxy);
+ global_proxy->set_native_context(*native_context());
+ native_context()->set_global_proxy(*global_proxy);
}
void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
Handle<GlobalObject> inner_global_from_snapshot(
- GlobalObject::cast(global_context_->extension()));
- Handle<JSBuiltinsObject> builtins_global(global_context_->builtins());
- global_context_->set_extension(*inner_global);
- global_context_->set_global(*inner_global);
- global_context_->set_security_token(*inner_global);
+ GlobalObject::cast(native_context_->extension()));
+ Handle<JSBuiltinsObject> builtins_global(native_context_->builtins());
+ native_context_->set_extension(*inner_global);
+ native_context_->set_global_object(*inner_global);
+ native_context_->set_security_token(*inner_global);
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
ForceSetProperty(builtins_global,
@@ -829,16 +827,16 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
// --- G l o b a l C o n t e x t ---
// Use the empty function as closure (no scope info).
- global_context()->set_closure(*empty_function);
- global_context()->set_previous(NULL);
+ native_context()->set_closure(*empty_function);
+ native_context()->set_previous(NULL);
// Set extension and global object.
- global_context()->set_extension(*inner_global);
- global_context()->set_global(*inner_global);
+ native_context()->set_extension(*inner_global);
+ native_context()->set_global_object(*inner_global);
// Security setup: Set the security token of the global object to
// the inner global. This makes the security check between two
// different contexts fail by default even in case of global
// object reinitialization.
- global_context()->set_security_token(*inner_global);
+ native_context()->set_security_token(*inner_global);
Isolate* isolate = inner_global->GetIsolate();
Factory* factory = isolate->factory();
@@ -850,7 +848,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
inner_global, object_name,
isolate->object_function(), DONT_ENUM));
- Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+ Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
// Install global Function object
InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
@@ -868,19 +866,26 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
array_function->shared()->set_length(1);
- Handle<DescriptorArray> array_descriptors =
- factory->CopyAppendForeignDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewForeign(&Accessors::ArrayLength),
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
+
+ Handle<Map> initial_map(array_function->initial_map());
+ Handle<DescriptorArray> array_descriptors(factory->NewDescriptorArray(1));
+ DescriptorArray::WhitenessWitness witness(*array_descriptors);
+
+ Handle<Foreign> array_length(factory->NewForeign(&Accessors::ArrayLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ Map::SetDescriptors(initial_map, array_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d, witness);
+ }
// array_function is used internally. JS code creating an array object should
// search for the 'Array' property on the global object and use that one
// as the constructor. 'Array' property on a global object can be
// overwritten by JS code.
- global_context()->set_array_function(*array_function);
- array_function->initial_map()->set_instance_descriptors(*array_descriptors);
+ native_context()->set_array_function(*array_function);
}
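The hunk above is the first instance of the append-style descriptor protocol this patch introduces: allocate a DescriptorArray of the final size, create a WhitenessWitness for it, bind it to the map via Map::SetDescriptors, then append descriptors in enumeration order. A condensed sketch of the pattern, assuming a function and accessor like the ones above (all calls appear in this file):

    // Install a single callback accessor on a function's initial map.
    Handle<Map> map(function->initial_map());
    Handle<DescriptorArray> descriptors(factory->NewDescriptorArray(1));
    DescriptorArray::WhitenessWitness witness(*descriptors);
    Map::SetDescriptors(map, descriptors);  // bind the array before appending

    Handle<Foreign> getter(factory->NewForeign(&Accessors::ArrayLength));
    PropertyAttributes attribs =
        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
    {  // AppendDescriptor replaces the old explicit Set(index)/Sort() steps.
      CallbacksDescriptor d(*factory->length_symbol(), *getter, attribs);
      map->AppendDescriptor(&d, witness);
    }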
{ // --- N u m b e r ---
@@ -888,7 +893,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);
- global_context()->set_number_function(*number_fun);
+ native_context()->set_number_function(*number_fun);
}
{ // --- B o o l e a n ---
@@ -896,7 +901,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);
- global_context()->set_boolean_function(*boolean_fun);
+ native_context()->set_boolean_function(*boolean_fun);
}
{ // --- S t r i n g ---
@@ -906,20 +911,23 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Builtins::kIllegal, true);
string_fun->shared()->set_construct_stub(
isolate->builtins()->builtin(Builtins::kStringConstructCode));
- global_context()->set_string_function(*string_fun);
- // Add 'length' property to strings.
- Handle<DescriptorArray> string_descriptors =
- factory->CopyAppendForeignDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewForeign(&Accessors::StringLength),
- static_cast<PropertyAttributes>(DONT_ENUM |
- DONT_DELETE |
- READ_ONLY));
+ native_context()->set_string_function(*string_fun);
Handle<Map> string_map =
- Handle<Map>(global_context()->string_function()->initial_map());
- string_map->set_instance_descriptors(*string_descriptors);
+ Handle<Map>(native_context()->string_function()->initial_map());
+ Handle<DescriptorArray> string_descriptors(factory->NewDescriptorArray(1));
+ DescriptorArray::WhitenessWitness witness(*string_descriptors);
+
+ Handle<Foreign> string_length(
+ factory->NewForeign(&Accessors::StringLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
+ Map::SetDescriptors(string_map, string_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs);
+ string_map->AppendDescriptor(&d, witness);
+ }
}
{ // --- D a t e ---
@@ -929,7 +937,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);
- global_context()->set_date_function(*date_fun);
+ native_context()->set_date_function(*date_fun);
}
@@ -939,49 +947,46 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);
- global_context()->set_regexp_function(*regexp_fun);
+ native_context()->set_regexp_function(*regexp_fun);
ASSERT(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
ASSERT_EQ(0, initial_map->inobject_properties());
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
- DescriptorArray::WhitenessWitness witness(*descriptors);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- int enum_index = 0;
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+ Map::SetDescriptors(initial_map, descriptors);
+
{
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(heap->source_symbol(),
JSRegExp::kSourceFieldIndex,
- final,
- enum_index++);
- descriptors->Set(0, &field, witness);
+ final);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
FieldDescriptor field(heap->global_symbol(),
JSRegExp::kGlobalFieldIndex,
- final,
- enum_index++);
- descriptors->Set(1, &field, witness);
+ final);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
FieldDescriptor field(heap->ignore_case_symbol(),
JSRegExp::kIgnoreCaseFieldIndex,
- final,
- enum_index++);
- descriptors->Set(2, &field, witness);
+ final);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
FieldDescriptor field(heap->multiline_symbol(),
JSRegExp::kMultilineFieldIndex,
- final,
- enum_index++);
- descriptors->Set(3, &field, witness);
+ final);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.5.
@@ -989,24 +994,20 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
FieldDescriptor field(heap->last_index_symbol(),
JSRegExp::kLastIndexFieldIndex,
- writable,
- enum_index++);
- descriptors->Set(4, &field, witness);
+ writable);
+ initial_map->AppendDescriptor(&field, witness);
}
- descriptors->SetNextEnumerationIndex(enum_index);
- descriptors->Sort(witness);
initial_map->set_inobject_properties(5);
initial_map->set_pre_allocated_property_fields(5);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize);
- initial_map->set_instance_descriptors(*descriptors);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
// RegExp prototype object is itself a RegExp.
- Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
- proto_map->set_prototype(global_context()->initial_object_prototype());
+ Handle<Map> proto_map = factory->CopyMap(initial_map);
+ proto_map->set_prototype(native_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
heap->query_colon_symbol());
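The same protocol applies to in-object data fields: FieldDescriptor no longer takes an explicit enumeration index, and the trailing SetNextEnumerationIndex/Sort calls disappear because AppendDescriptor records descriptors in insertion order. A minimal before/after sketch, mirroring the source field above:

    // Before: an explicit slot, enum index, and a final Sort(witness):
    //   FieldDescriptor field(name, field_index, attribs, enum_index++);
    //   descriptors->Set(slot, &field, witness);
    // After: append in the intended enumeration order.
    FieldDescriptor field(heap->source_symbol(),
                          JSRegExp::kSourceFieldIndex,
                          final);                    // attributes only
    initial_map->AppendDescriptor(&field, witness);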
@@ -1030,7 +1031,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
{ MaybeObject* result = cons->SetInstancePrototype(
- global_context()->initial_object_prototype());
+ native_context()->initial_object_prototype());
if (result->IsFailure()) return false;
}
cons->SetInstanceClassName(*name);
@@ -1039,7 +1040,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, name, json_object, DONT_ENUM));
- global_context()->set_json_object(*json_object);
+ native_context()->set_json_object(*json_object);
}
{ // --- arguments_boilerplate_
@@ -1051,7 +1052,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSObject> prototype =
Handle<JSObject>(
- JSObject::cast(global_context()->object_function()->prototype()));
+ JSObject::cast(native_context()->object_function()->prototype()));
Handle<JSFunction> function =
factory->NewFunctionWithPrototype(symbol,
@@ -1065,7 +1066,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
function->shared()->set_expected_nof_properties(2);
Handle<JSObject> result = factory->NewJSObject(function);
- global_context()->set_arguments_boilerplate(*result);
+ native_context()->set_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
CHECK_NOT_EMPTY_HANDLE(isolate,
@@ -1080,11 +1081,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
#ifdef DEBUG
LookupResult lookup(isolate);
result->LocalLookup(heap->callee_symbol(), &lookup);
- ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
result->LocalLookup(heap->length_symbol(), &lookup);
- ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
@@ -1106,8 +1107,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
array = factory->NewFixedArray(0);
elements->set(1, *array);
- Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
- Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+ Handle<Map> old_map(native_context()->arguments_boilerplate()->map());
+ Handle<Map> new_map = factory->CopyMap(old_map);
new_map->set_pre_allocated_property_fields(2);
Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
// Set elements kind after allocating the object because
@@ -1115,7 +1116,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_elements(*elements);
ASSERT(result->HasNonStrictArgumentsElements());
- global_context()->set_aliased_arguments_boilerplate(*result);
+ native_context()->set_aliased_arguments_boilerplate(*result);
}
{ // --- strict mode arguments boilerplate
@@ -1135,39 +1136,43 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
caller->set_getter(*throw_function);
caller->set_setter(*throw_function);
+ // Create the map. Allocate one in-object field for length.
+ Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
+ Heap::kArgumentsObjectSizeStrict);
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
DescriptorArray::WhitenessWitness witness(*descriptors);
+ Map::SetDescriptors(map, descriptors);
+
{ // length
FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
- descriptors->Set(0, &d, witness);
+ map->AppendDescriptor(&d, witness);
}
{ // callee
- CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
- descriptors->Set(1, &d, witness);
+ CallbacksDescriptor d(*factory->callee_symbol(),
+ *callee,
+ attributes);
+ map->AppendDescriptor(&d, witness);
}
{ // caller
- CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
- descriptors->Set(2, &d, witness);
+ CallbacksDescriptor d(*factory->caller_symbol(),
+ *caller,
+ attributes);
+ map->AppendDescriptor(&d, witness);
}
- descriptors->Sort(witness);
- // Create the map. Allocate one in-object field for length.
- Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kArgumentsObjectSizeStrict);
- map->set_instance_descriptors(*descriptors);
map->set_function_with_prototype(true);
- map->set_prototype(global_context()->object_function()->prototype());
+ map->set_prototype(native_context()->object_function()->prototype());
map->set_pre_allocated_property_fields(1);
map->set_inobject_properties(1);
// Copy constructor from the non-strict arguments boilerplate.
map->set_constructor(
- global_context()->arguments_boilerplate()->map()->constructor());
+ native_context()->arguments_boilerplate()->map()->constructor());
// Allocate the arguments boilerplate object.
Handle<JSObject> result = factory->NewJSObjectFromMap(map);
- global_context()->set_strict_mode_arguments_boilerplate(*result);
+ native_context()->set_strict_mode_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
CHECK_NOT_EMPTY_HANDLE(isolate,
@@ -1178,7 +1183,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
#ifdef DEBUG
LookupResult lookup(isolate);
result->LocalLookup(heap->length_symbol(), &lookup);
- ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1202,7 +1207,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<String> name = factory->LookupAsciiSymbol("context_extension");
context_extension_fun->shared()->set_instance_class_name(*name);
- global_context()->set_context_extension_function(*context_extension_fun);
+ native_context()->set_context_extension_function(*context_extension_fun);
}
@@ -1214,7 +1219,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> delegate =
factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
- global_context()->set_call_as_function_delegate(*delegate);
+ native_context()->set_call_as_function_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
@@ -1226,21 +1231,21 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> delegate =
factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
- global_context()->set_call_as_constructor_delegate(*delegate);
+ native_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
// Initialize the out of memory slot.
- global_context()->set_out_of_memory(heap->false_value());
+ native_context()->set_out_of_memory(heap->false_value());
// Initialize the data slot.
- global_context()->set_data(heap->undefined_value());
+ native_context()->set_data(heap->undefined_value());
{
// Initialize the random seed slot.
Handle<ByteArray> zeroed_byte_array(
factory->NewByteArray(kRandomStateSize));
- global_context()->set_random_seed(*zeroed_byte_array);
+ native_context()->set_random_seed(*zeroed_byte_array);
memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
}
return true;
@@ -1248,7 +1253,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
void Genesis::InitializeExperimentalGlobal() {
- Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+ Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
// TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
// longer need to live behind a flag, so functions get added to the snapshot.
@@ -1340,6 +1345,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
script_name,
0,
0,
+ top_context,
extension,
NULL,
Handle<String>::null(),
@@ -1351,7 +1357,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
// Set up the function context. Conceptually, we should clone the
// function before overwriting the context but since we're in a
// single-threaded environment it is not strictly necessary.
- ASSERT(top_context->IsGlobalContext());
+ ASSERT(top_context->IsNativeContext());
Handle<Context> context =
Handle<Context>(use_runtime_context
? Handle<Context>(top_context->runtime_context())
@@ -1364,7 +1370,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
Handle<Object> receiver =
Handle<Object>(use_runtime_context
? top_context->builtins()
- : top_context->global());
+ : top_context->global_object());
bool has_pending_exception;
Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
@@ -1375,9 +1381,9 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
#define INSTALL_NATIVE(Type, name, var) \
Handle<String> var##_name = factory()->LookupAsciiSymbol(name); \
Object* var##_native = \
- global_context()->builtins()->GetPropertyNoExceptionThrown( \
+ native_context()->builtins()->GetPropertyNoExceptionThrown( \
*var##_name); \
- global_context()->set_##var(Type::cast(var##_native));
+ native_context()->set_##var(Type::cast(var##_native));
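For reference, a hypothetical expansion of INSTALL_NATIVE (the "ToNumber"/to_number_fun pair is illustrative): INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun) expands to roughly

    Handle<String> to_number_fun_name = factory()->LookupAsciiSymbol("ToNumber");
    Object* to_number_fun_native =
        native_context()->builtins()->GetPropertyNoExceptionThrown(
            *to_number_fun_name);
    native_context()->set_to_number_fun(JSFunction::cast(to_number_fun_native));

so each native function is looked up by name on the builtins object and cached in its dedicated native-context slot.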
void Genesis::InstallNativeFunctions() {
@@ -1417,7 +1423,7 @@ bool Genesis::InstallNatives() {
// Create a function for the builtins object. Allocate space for the
// JavaScript builtins, a reference to the builtins object
- // (itself) and a reference to the global_context directly in the object.
+ // (itself) and a reference to the native_context directly in the object.
Handle<Code> code = Handle<Code>(
isolate()->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> builtins_fun =
@@ -1427,12 +1433,15 @@ bool Genesis::InstallNatives() {
Handle<String> name = factory()->LookupAsciiSymbol("builtins");
builtins_fun->shared()->set_instance_class_name(*name);
+ builtins_fun->initial_map()->set_dictionary_map(true);
+ builtins_fun->initial_map()->set_prototype(heap()->null_value());
// Allocate the builtins object.
Handle<JSBuiltinsObject> builtins =
Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
builtins->set_builtins(*builtins);
- builtins->set_global_context(*global_context());
+ builtins->set_native_context(*native_context());
+ builtins->set_global_context(*native_context());
builtins->set_global_receiver(*builtins);
// Set up the 'global' properties of the builtins object. The
@@ -1442,26 +1451,27 @@ bool Genesis::InstallNatives() {
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
- Handle<Object> global_obj(global_context()->global());
+ Handle<Object> global_obj(native_context()->global_object());
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
builtins, global_symbol, global_obj, attributes));
// Set up the reference from the global object to the builtins object.
- JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
+ JSGlobalObject::cast(native_context()->global_object())->
+ set_builtins(*builtins);
- // Create a bridge function that has context in the global context.
+ // Create a bridge function that has context in the native context.
Handle<JSFunction> bridge =
factory()->NewFunction(factory()->empty_symbol(),
factory()->undefined_value());
- ASSERT(bridge->context() == *isolate()->global_context());
+ ASSERT(bridge->context() == *isolate()->native_context());
// Allocate the builtins context.
Handle<Context> context =
factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
- context->set_global(*builtins); // override builtins global object
+ context->set_global_object(*builtins); // override builtins global object
- global_context()->set_runtime_context(*context);
+ native_context()->set_runtime_context(*context);
{ // -- S c r i p t
// Builtin functions for Script.
@@ -1472,117 +1482,133 @@ bool Genesis::InstallNatives() {
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(script_fun, prototype);
- global_context()->set_script_function(*script_fun);
+ native_context()->set_script_function(*script_fun);
- // Add 'source' and 'data' property to scripts.
- PropertyAttributes common_attributes =
+ Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
+
+ Handle<DescriptorArray> script_descriptors(
+ factory()->NewDescriptorArray(13));
+ DescriptorArray::WhitenessWitness witness(*script_descriptors);
+
+ Handle<Foreign> script_source(
+ factory()->NewForeign(&Accessors::ScriptSource));
+ Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName));
+ Handle<String> id_symbol(factory()->LookupAsciiSymbol("id"));
+ Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId));
+ Handle<String> line_offset_symbol(
+ factory()->LookupAsciiSymbol("line_offset"));
+ Handle<Foreign> script_line_offset(
+ factory()->NewForeign(&Accessors::ScriptLineOffset));
+ Handle<String> column_offset_symbol(
+ factory()->LookupAsciiSymbol("column_offset"));
+ Handle<Foreign> script_column_offset(
+ factory()->NewForeign(&Accessors::ScriptColumnOffset));
+ Handle<String> data_symbol(factory()->LookupAsciiSymbol("data"));
+ Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
+ Handle<String> type_symbol(factory()->LookupAsciiSymbol("type"));
+ Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
+ Handle<String> compilation_type_symbol(
+ factory()->LookupAsciiSymbol("compilation_type"));
+ Handle<Foreign> script_compilation_type(
+ factory()->NewForeign(&Accessors::ScriptCompilationType));
+ Handle<String> line_ends_symbol(factory()->LookupAsciiSymbol("line_ends"));
+ Handle<Foreign> script_line_ends(
+ factory()->NewForeign(&Accessors::ScriptLineEnds));
+ Handle<String> context_data_symbol(
+ factory()->LookupAsciiSymbol("context_data"));
+ Handle<Foreign> script_context_data(
+ factory()->NewForeign(&Accessors::ScriptContextData));
+ Handle<String> eval_from_script_symbol(
+ factory()->LookupAsciiSymbol("eval_from_script"));
+ Handle<Foreign> script_eval_from_script(
+ factory()->NewForeign(&Accessors::ScriptEvalFromScript));
+ Handle<String> eval_from_script_position_symbol(
+ factory()->LookupAsciiSymbol("eval_from_script_position"));
+ Handle<Foreign> script_eval_from_script_position(
+ factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition));
+ Handle<String> eval_from_function_name_symbol(
+ factory()->LookupAsciiSymbol("eval_from_function_name"));
+ Handle<Foreign> script_eval_from_function_name(
+ factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
+ PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<Foreign> foreign_source =
- factory()->NewForeign(&Accessors::ScriptSource);
- Handle<DescriptorArray> script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- factory()->empty_descriptor_array(),
- factory()->LookupAsciiSymbol("source"),
- foreign_source,
- common_attributes);
- Handle<Foreign> foreign_name =
- factory()->NewForeign(&Accessors::ScriptName);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("name"),
- foreign_name,
- common_attributes);
- Handle<Foreign> foreign_id = factory()->NewForeign(&Accessors::ScriptId);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("id"),
- foreign_id,
- common_attributes);
- Handle<Foreign> foreign_line_offset =
- factory()->NewForeign(&Accessors::ScriptLineOffset);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("line_offset"),
- foreign_line_offset,
- common_attributes);
- Handle<Foreign> foreign_column_offset =
- factory()->NewForeign(&Accessors::ScriptColumnOffset);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("column_offset"),
- foreign_column_offset,
- common_attributes);
- Handle<Foreign> foreign_data =
- factory()->NewForeign(&Accessors::ScriptData);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("data"),
- foreign_data,
- common_attributes);
- Handle<Foreign> foreign_type =
- factory()->NewForeign(&Accessors::ScriptType);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("type"),
- foreign_type,
- common_attributes);
- Handle<Foreign> foreign_compilation_type =
- factory()->NewForeign(&Accessors::ScriptCompilationType);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("compilation_type"),
- foreign_compilation_type,
- common_attributes);
- Handle<Foreign> foreign_line_ends =
- factory()->NewForeign(&Accessors::ScriptLineEnds);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("line_ends"),
- foreign_line_ends,
- common_attributes);
- Handle<Foreign> foreign_context_data =
- factory()->NewForeign(&Accessors::ScriptContextData);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("context_data"),
- foreign_context_data,
- common_attributes);
- Handle<Foreign> foreign_eval_from_script =
- factory()->NewForeign(&Accessors::ScriptEvalFromScript);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_script"),
- foreign_eval_from_script,
- common_attributes);
- Handle<Foreign> foreign_eval_from_script_position =
- factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_script_position"),
- foreign_eval_from_script_position,
- common_attributes);
- Handle<Foreign> foreign_eval_from_function_name =
- factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_function_name"),
- foreign_eval_from_function_name,
- common_attributes);
+ Map::SetDescriptors(script_map, script_descriptors);
- Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
- script_map->set_instance_descriptors(*script_descriptors);
+ {
+ CallbacksDescriptor d(
+ *factory()->source_symbol(), *script_source, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*factory()->name_symbol(), *script_name, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*id_symbol, *script_id, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*line_offset_symbol, *script_line_offset, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *column_offset_symbol, *script_column_offset, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*data_symbol, *script_data, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*type_symbol, *script_type, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *compilation_type_symbol, *script_compilation_type, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*line_ends_symbol, *script_line_ends, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *context_data_symbol, *script_context_data, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_script_symbol, *script_eval_from_script, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_script_position_symbol,
+ *script_eval_from_script_position,
+ attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_function_name_symbol,
+ *script_eval_from_function_name,
+ attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
// Allocate the empty script.
Handle<Script> script = factory()->NewScript(factory()->empty_string());
@@ -1601,7 +1627,7 @@ bool Genesis::InstallNatives() {
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(opaque_reference_fun, prototype);
- global_context()->set_opaque_reference_function(*opaque_reference_fun);
+ native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
{ // --- I n t e r n a l A r r a y ---
@@ -1631,26 +1657,30 @@ bool Genesis::InstallNatives() {
// elements in InternalArrays can be set to non-Smi values without going
// through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
// transition easy to trap. Moreover, they rarely are smi-only.
- MaybeObject* maybe_map =
- array_function->initial_map()->CopyDropTransitions(
- DescriptorArray::MAY_BE_SHARED);
+ MaybeObject* maybe_map = array_function->initial_map()->Copy();
Map* new_map;
if (!maybe_map->To(&new_map)) return false;
new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances.
- Handle<DescriptorArray> array_descriptors =
- factory()->CopyAppendForeignDescriptor(
- factory()->empty_descriptor_array(),
- factory()->length_symbol(),
- factory()->NewForeign(&Accessors::ArrayLength),
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
-
- array_function->initial_map()->set_instance_descriptors(
- *array_descriptors);
+ Handle<Map> initial_map(array_function->initial_map());
+ Handle<DescriptorArray> array_descriptors(factory()->NewDescriptorArray(1));
+ DescriptorArray::WhitenessWitness witness(*array_descriptors);
+
+ Handle<Foreign> array_length(factory()->NewForeign(
+ &Accessors::ArrayLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ Map::SetDescriptors(initial_map, array_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(
+ *factory()->length_symbol(), *array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d, witness);
+ }
- global_context()->set_internal_array_function(*array_function);
+ native_context()->set_internal_array_function(*array_function);
}
if (FLAG_disable_native_files) {
@@ -1673,16 +1703,16 @@ bool Genesis::InstallNatives() {
// Store the map for the string prototype after the natives have been compiled
// and the String function has been set up.
- Handle<JSFunction> string_function(global_context()->string_function());
+ Handle<JSFunction> string_function(native_context()->string_function());
ASSERT(JSObject::cast(
string_function->initial_map()->prototype())->HasFastProperties());
- global_context()->set_string_function_prototype_map(
+ native_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
// Install Function.prototype.call and apply.
{ Handle<String> key = factory()->function_class_symbol();
Handle<JSFunction> function =
- Handle<JSFunction>::cast(GetProperty(isolate()->global(), key));
+ Handle<JSFunction>::cast(GetProperty(isolate()->global_object(), key));
Handle<JSObject> proto =
Handle<JSObject>(JSObject::cast(function->instance_prototype()));
@@ -1720,7 +1750,7 @@ bool Genesis::InstallNatives() {
// RegExpResult initial map.
// Find global.Array.prototype to inherit from.
- Handle<JSFunction> array_constructor(global_context()->array_function());
+ Handle<JSFunction> array_constructor(native_context()->array_function());
Handle<JSObject> array_prototype(
JSObject::cast(array_constructor->instance_prototype()));
@@ -1737,39 +1767,39 @@ bool Genesis::InstallNatives() {
Handle<DescriptorArray> reresult_descriptors =
factory()->NewDescriptorArray(3);
DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
+ Map::SetDescriptors(initial_map, reresult_descriptors);
- JSFunction* array_function = global_context()->array_function();
- Handle<DescriptorArray> array_descriptors(
- array_function->initial_map()->instance_descriptors());
- int index = array_descriptors->SearchWithCache(heap()->length_symbol());
- MaybeObject* copy_result =
- reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
- if (copy_result->IsFailure()) return false;
-
- int enum_index = 0;
+ {
+ JSFunction* array_function = native_context()->array_function();
+ Handle<DescriptorArray> array_descriptors(
+ array_function->initial_map()->instance_descriptors());
+ String* length = heap()->length_symbol();
+ int old = array_descriptors->SearchWithCache(length);
+ ASSERT(old != DescriptorArray::kNotFound);
+ CallbacksDescriptor desc(length,
+ array_descriptors->GetValue(old),
+ array_descriptors->GetDetails(old).attributes());
+ initial_map->AppendDescriptor(&desc, witness);
+ }
{
FieldDescriptor index_field(heap()->index_symbol(),
JSRegExpResult::kIndexIndex,
- NONE,
- enum_index++);
- reresult_descriptors->Set(1, &index_field, witness);
+ NONE);
+ initial_map->AppendDescriptor(&index_field, witness);
}
{
FieldDescriptor input_field(heap()->input_symbol(),
JSRegExpResult::kInputIndex,
- NONE,
- enum_index++);
- reresult_descriptors->Set(2, &input_field, witness);
+ NONE);
+ initial_map->AppendDescriptor(&input_field, witness);
}
- reresult_descriptors->Sort(witness);
initial_map->set_inobject_properties(2);
initial_map->set_pre_allocated_property_fields(2);
initial_map->set_unused_property_fields(0);
- initial_map->set_instance_descriptors(*reresult_descriptors);
- global_context()->set_regexp_result_map(*initial_map);
+ native_context()->set_regexp_result_map(*initial_map);
}
#ifdef DEBUG
@@ -1803,10 +1833,10 @@ bool Genesis::InstallExperimentalNatives() {
static Handle<JSObject> ResolveBuiltinIdHolder(
- Handle<Context> global_context,
+ Handle<Context> native_context,
const char* holder_expr) {
- Factory* factory = global_context->GetIsolate()->factory();
- Handle<GlobalObject> global(global_context->global());
+ Factory* factory = native_context->GetIsolate()->factory();
+ Handle<GlobalObject> global(native_context->global_object());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
return Handle<JSObject>::cast(
@@ -1837,7 +1867,7 @@ void Genesis::InstallBuiltinFunctionIds() {
#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
{ \
Handle<JSObject> holder = ResolveBuiltinIdHolder( \
- global_context(), #holder_expr); \
+ native_context(), #holder_expr); \
BuiltinFunctionId id = k##name; \
InstallBuiltinFunctionId(holder, #fun_name, id); \
}
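For illustration, a hypothetical expansion of INSTALL_BUILTIN_ID(Math, floor, MathFloor):

    {
      Handle<JSObject> holder = ResolveBuiltinIdHolder(
          native_context(), "Math");
      BuiltinFunctionId id = kMathFloor;
      InstallBuiltinFunctionId(holder, "floor", id);
    }

ResolveBuiltinIdHolder handles both a plain global property ("Math") and a dotted holder expression containing a period, resolved against the native context's global object.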
@@ -1849,7 +1879,7 @@ void Genesis::InstallBuiltinFunctionIds() {
// Do not forget to update macros.py with the named constant
// of the cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
- F(16, global_context()->regexp_function())
+ F(16, native_context()->regexp_function())
static FixedArray* CreateCache(int size, Handle<JSFunction> factory_function) {
@@ -1885,34 +1915,35 @@ void Genesis::InstallJSFunctionResultCaches() {
#undef F
- global_context()->set_jsfunction_result_caches(*caches);
+ native_context()->set_jsfunction_result_caches(*caches);
}
void Genesis::InitializeNormalizedMapCaches() {
Handle<FixedArray> array(
FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
- global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
+ native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
}
-bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
+bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
- Isolate* isolate = global_context->GetIsolate();
+ Isolate* isolate = native_context->GetIsolate();
BootstrapperActive active;
SaveContext saved_context(isolate);
- isolate->set_context(*global_context);
- if (!Genesis::InstallExtensions(global_context, extensions)) return false;
- Genesis::InstallSpecialObjects(global_context);
+ isolate->set_context(*native_context);
+ if (!Genesis::InstallExtensions(native_context, extensions)) return false;
+ Genesis::InstallSpecialObjects(native_context);
return true;
}
-void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
- Isolate* isolate = global_context->GetIsolate();
+void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
+ Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
HandleScope scope;
- Handle<JSGlobalObject> global(JSGlobalObject::cast(global_context->global()));
+ Handle<JSGlobalObject> global(JSGlobalObject::cast(
+ native_context->global_object()));
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
@@ -1941,10 +1972,10 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
// debugger but without tanking the whole context.
if (!debug->Load()) return;
// Set the security token for the debugger context to the same as
- // the shell global context to allow calling between these (otherwise
+ // the shell native context to allow calling between these (otherwise
// exposing debug global object doesn't make much sense).
debug->debug_context()->set_security_token(
- global_context->security_token());
+ native_context->security_token());
Handle<String> debug_string =
factory->LookupAsciiSymbol(FLAG_expose_debug_as);
@@ -1983,7 +2014,7 @@ void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
reinterpret_cast<void*>(static_cast<intptr_t>(state));
}
-bool Genesis::InstallExtensions(Handle<Context> global_context,
+bool Genesis::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
// TODO(isolates): Extensions on multiple isolates may take a little more
// effort. (The external API reads 'ignore'-- does that mean
@@ -2003,6 +2034,9 @@ bool Genesis::InstallExtensions(Handle<Context> global_context,
if (FLAG_expose_externalize_string) {
InstallExtension("v8/externalize", &extension_states);
}
+ if (FLAG_track_gc_object_stats) {
+ InstallExtension("v8/statistics", &extension_states);
+ }
if (extensions == NULL) return true;
// Install required extensions
@@ -2105,8 +2139,9 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
bool Genesis::ConfigureGlobalObjects(
v8::Handle<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(
- JSObject::cast(global_context()->global_proxy()));
- Handle<JSObject> inner_global(JSObject::cast(global_context()->global()));
+ JSObject::cast(native_context()->global_proxy()));
+ Handle<JSObject> inner_global(
+ JSObject::cast(native_context()->global_object()));
if (!global_proxy_template.IsEmpty()) {
// Configure the global proxy object.
@@ -2180,26 +2215,24 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
LookupResult result(isolate());
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
- if (result.IsProperty()) continue;
+ if (result.IsFound()) continue;
HandleScope inner;
ASSERT(!to->HasFastProperties());
// Add to dictionary.
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<Object> callbacks(descs->GetCallbacksObject(i));
- PropertyDetails d =
- PropertyDetails(details.attributes(), CALLBACKS, details.index());
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ CALLBACKS,
+ details.descriptor_index());
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- // Ignore non-properties.
- break;
case NORMAL:
// Does not occur since the from object has fast properties.
case HANDLER:
case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
// No element in instance descriptors has proxy or interceptor type.
UNREACHABLE();
break;
@@ -2216,7 +2249,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// If the property is already there we skip it.
LookupResult result(isolate());
to->LocalLookup(String::cast(raw_key), &result);
- if (result.IsProperty()) continue;
+ if (result.IsFound()) continue;
// Set the property.
Handle<String> key = Handle<String>(String::cast(raw_key));
Handle<Object> value = Handle<Object>(properties->ValueAt(i));
@@ -2255,7 +2288,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
+ Handle<Map> new_to_map = factory->CopyMap(old_to_map);
new_to_map->set_prototype(from->map()->prototype());
to->set_map(*new_to_map);
}
@@ -2269,9 +2302,9 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
ASSERT(!strict_mode_function_instance_map_writable_prototype_.is_null());
// Replace function instance maps to make prototype writable.
- global_context()->set_function_map(
+ native_context()->set_function_map(
*function_instance_map_writable_prototype_);
- global_context()->set_strict_mode_function_map(
+ native_context()->set_strict_mode_function_map(
*strict_mode_function_instance_map_writable_prototype_);
}
@@ -2297,10 +2330,10 @@ Genesis::Genesis(Isolate* isolate,
Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
if (!new_context.is_null()) {
- global_context_ =
+ native_context_ =
Handle<Context>::cast(isolate->global_handles()->Create(*new_context));
- AddToWeakGlobalContextList(*global_context_);
- isolate->set_context(*global_context_);
+ AddToWeakNativeContextList(*native_context_);
+ isolate->set_context(*native_context_);
isolate->counters()->contexts_created_by_snapshot()->Increment();
Handle<GlobalObject> inner_global;
Handle<JSGlobalProxy> global_proxy =
@@ -2336,7 +2369,7 @@ Genesis::Genesis(Isolate* isolate,
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
- result_ = global_context_;
+ result_ = native_context_;
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 101c2e1b1f..179e65c354 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -104,7 +104,7 @@ class Bootstrapper {
void DetachGlobal(Handle<Context> env);
// Reattach an outer global object to an environment.
- void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
+ void ReattachGlobal(Handle<Context> env, Handle<JSGlobalProxy> global_proxy);
// Traverses the pointers for memory management.
void Iterate(ObjectVisitor* v);
@@ -126,7 +126,7 @@ class Bootstrapper {
char* AllocateAutoDeletedArray(int bytes);
// Used for new context creation.
- bool InstallExtensions(Handle<Context> global_context,
+ bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 64ec3d9fcc..ffaaf8b1ea 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -35,6 +35,7 @@
#include "ic-inl.h"
#include "heap-profiler.h"
#include "mark-compact.h"
+#include "stub-cache.h"
#include "vm-state-inl.h"
namespace v8 {
@@ -199,11 +200,11 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_length(Smi::FromInt(0));
array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) {
- Context* global_context = isolate->context()->global_context();
+ Context* native_context = isolate->context()->native_context();
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
- !global_context->js_array_maps()->IsUndefined()) {
+ !native_context->js_array_maps()->IsUndefined()) {
FixedArray* map_array =
- FixedArray::cast(global_context->js_array_maps());
+ FixedArray::cast(native_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
}
@@ -312,7 +313,7 @@ BUILTIN(InternalArrayCodeGeneric) {
return ArrayCodeGenericCommon(
&args,
isolate,
- isolate->context()->global_context()->internal_array_function());
+ isolate->context()->native_context()->internal_array_function());
}
@@ -320,7 +321,7 @@ BUILTIN(ArrayCodeGeneric) {
return ArrayCodeGenericCommon(
&args,
isolate,
- isolate->context()->global_context()->array_function());
+ isolate->context()->native_context()->array_function());
}
@@ -402,7 +403,7 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
static bool ArrayPrototypeHasNoElements(Heap* heap,
- Context* global_context,
+ Context* native_context,
JSObject* array_proto) {
// This method depends on the non-writability of Object and Array prototype
// fields.
@@ -411,7 +412,7 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
Object* proto = array_proto->GetPrototype();
if (proto == heap->null_value()) return false;
array_proto = JSObject::cast(proto);
- if (array_proto != global_context->initial_object_prototype()) return false;
+ if (array_proto != native_context->initial_object_prototype()) return false;
if (array_proto->elements() != heap->empty_fixed_array()) return false;
return array_proto->GetPrototype()->IsNull();
}
@@ -461,11 +462,11 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
- Context* global_context = heap->isolate()->context()->global_context();
+ Context* native_context = heap->isolate()->context()->native_context();
JSObject* array_proto =
- JSObject::cast(global_context->array_function()->prototype());
+ JSObject::cast(native_context->array_function()->prototype());
return receiver->GetPrototype() == array_proto &&
- ArrayPrototypeHasNoElements(heap, global_context, array_proto);
+ ArrayPrototypeHasNoElements(heap, native_context, array_proto);
}
@@ -476,7 +477,7 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
HandleScope handleScope(isolate);
Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
+ GetProperty(Handle<JSObject>(isolate->native_context()->builtins()),
name);
Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
int argc = args.length() - 1;
@@ -706,7 +707,7 @@ BUILTIN(ArraySlice) {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
Map* arguments_map =
- isolate->context()->global_context()->arguments_boilerplate()->map();
+ isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
@@ -943,10 +944,10 @@ BUILTIN(ArraySplice) {
BUILTIN(ArrayConcat) {
Heap* heap = isolate->heap();
- Context* global_context = isolate->context()->global_context();
+ Context* native_context = isolate->context()->native_context();
JSObject* array_proto =
- JSObject::cast(global_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(heap, global_context, array_proto)) {
+ JSObject::cast(native_context->array_function()->prototype());
+ if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -1148,6 +1149,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
+ result->VerifyApiCallResultType();
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
@@ -1224,6 +1226,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
+ result->VerifyApiCallResultType();
}
}
// Check for exceptions and return result.
@@ -1291,6 +1294,11 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
}
+static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
+ LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
+}
+
+
static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
KeyedLoadIC::GenerateInitialize(masm);
}
@@ -1388,6 +1396,11 @@ static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
}
+static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
+ StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
+}
+
+
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 3ea33938eb..ca70ae5403 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -66,6 +66,8 @@ enum BuiltinExtraArguments {
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@@ -80,6 +82,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
@@ -119,6 +123,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
+ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -153,6 +159,8 @@ enum BuiltinExtraArguments {
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
+ V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
+ kStrictMode) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -347,6 +355,8 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
+ static void Generate_InRecompileQueue(MacroAssembler* masm);
+ static void Generate_ParallelRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 608aa14806..d0a0c2b5ac 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -284,4 +284,12 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
+// "Extra checks" are lightweight checks that are enabled in some release
+// builds.
+#ifdef ENABLE_EXTRA_CHECKS
+#define EXTRA_CHECK(condition) CHECK(condition)
+#else
+#define EXTRA_CHECK(condition) ((void) 0)
+#endif
+
#endif // V8_CHECKS_H_
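A short usage sketch (the function and condition are stand-ins): EXTRA_CHECK expands to a full CHECK when ENABLE_EXTRA_CHECKS is defined and to a statement-shaped no-op otherwise, so it can guard invariants cheap enough to keep in some release builds:

    static void ExampleApiResultCheck(Object* result) {
      // Verified in extra-checks builds; compiles to ((void) 0) elsewhere.
      EXTRA_CHECK(result->IsHeapObject() || result->IsSmi());
    }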
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 7d4f23ce94..59a4cdf823 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -194,7 +194,7 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
- ASSERT(op_ == (*code_out)->compare_operation());
+ ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
return true;
}
return false;
@@ -478,4 +478,26 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
}
+
+FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
+
+
+void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
+ intptr_t stack_pointer) {
+ if (entry_hook_ != NULL)
+ entry_hook_(function, stack_pointer);
+}
+
+
+bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
+ // We don't allow setting a new entry hook over one that's
+ // already active, as the hooks won't stack.
+ if (entry_hook != 0 && entry_hook_ != 0)
+ return false;
+
+ entry_hook_ = entry_hook;
+ return true;
+}
+
+
} } // namespace v8::internal
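A usage sketch for the entry-hook API (MyEntryHook is hypothetical, and its signature is assumed to match the EntryHookTrampoline above). Hooks do not stack: a second non-NULL hook is refused until the current one is cleared with NULL:

    static void MyEntryHook(intptr_t function, intptr_t stack_pointer) {
      // Profiling callback; must not cause a GC (see the header comment).
    }

    static void InstallHookExample() {
      CHECK(ProfileEntryHookStub::SetFunctionEntryHook(&MyEntryHook));   // set
      CHECK(!ProfileEntryHookStub::SetFunctionEntryHook(&MyEntryHook));  // refused
      CHECK(ProfileEntryHookStub::SetFunctionEntryHook(NULL));           // cleared
    }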
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 5c8717838f..f19063230a 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -73,7 +73,8 @@ namespace internal {
V(DebuggerStatement) \
V(StringDictionaryLookup) \
V(ElementsTransitionAndStore) \
- V(StoreArrayLiteralElement)
+ V(StoreArrayLiteralElement) \
+ V(ProfileEntryHook)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -161,10 +162,6 @@ class CodeStub BASE_EMBEDDED {
// Lookup the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out);
- protected:
- static const int kMajorBits = 6;
- static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
-
private:
// Nonvirtual wrapper around the stub-specific Generate function. Call
// this function to set up the macro assembler and generate the code.
@@ -222,8 +219,9 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
- class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
- class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
+ class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
+ class MinorKeyBits: public BitField<uint32_t,
+ kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT
friend class BreakPointIterator;
};
@@ -498,7 +496,7 @@ class ICCompareStub: public CodeStub {
virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(state_);
- code->set_compare_operation(op_);
+ code->set_compare_operation(op_ - Token::EQ);
}
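The stored value is biased by Token::EQ (the first comparison token), keeping compare_operation a small zero-based index; FindCodeInSpecialCache in code-stubs.cc adds the bias back, as its updated ASSERT shows. For example, assuming op_ is Token::LT:

    code->set_compare_operation(Token::LT - Token::EQ);  // small biased index
    // Later, in FindCodeInSpecialCache:
    //   op_ == code->compare_operation() + Token::EQ    // recovers Token::LT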
virtual CodeStub::Major MajorKey() { return CompareIC; }
@@ -1145,6 +1143,37 @@ class StoreArrayLiteralElementStub : public CodeStub {
DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
};
+
+class ProfileEntryHookStub : public CodeStub {
+ public:
+ explicit ProfileEntryHookStub() {}
+
+ // The profile entry hook function is not allowed to cause a GC.
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ // Generates a call to the entry hook if it's enabled.
+ static void MaybeCallEntryHook(MacroAssembler* masm);
+
+ // Sets or unsets the entry hook function. Returns true on success,
+ // false on an attempt to replace a non-NULL entry hook with another
+ // non-NULL hook.
+ static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
+
+ private:
+ static void EntryHookTrampoline(intptr_t function,
+ intptr_t stack_pointer);
+
+ Major MajorKey() { return ProfileEntryHook; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ // The current function entry hook.
+ static FunctionEntryHook entry_hook_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index 75fe3d541d..d36fe18fa0 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -79,7 +79,12 @@ function SetDelete(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
- return %SetDelete(this, key);
+ if (%SetHas(this, key)) {
+ %SetDelete(this, key);
+ return true;
+ } else {
+ return false;
+ }
}
@@ -124,7 +129,7 @@ function MapHas(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
- return !IS_UNDEFINED(%MapGet(this, key));
+ return %MapHas(this, key);
}
@@ -136,12 +141,7 @@ function MapDelete(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
- if (!IS_UNDEFINED(%MapGet(this, key))) {
- %MapSet(this, key, void 0);
- return true;
- } else {
- return false;
- }
+ return %MapDelete(this, key);
}
@@ -186,7 +186,7 @@ function WeakMapHas(key) {
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return !IS_UNDEFINED(%WeakMapGet(this, key));
+ return %WeakMapHas(this, key);
}
@@ -198,12 +198,7 @@ function WeakMapDelete(key) {
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
- %WeakMapSet(this, key, void 0);
- return true;
- } else {
- return false;
- }
+ return %WeakMapDelete(this, key);
}
// -------------------------------------------------------------------
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 82cc2231a3..c0645760b3 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -165,10 +165,12 @@ bool CompilationCacheScript::HasOrigin(
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
-Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
+Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
+ Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset,
+ Handle<Context> context) {
Object* result = NULL;
int generation;
@@ -177,7 +179,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe(table->Lookup(*source), isolate());
+ Handle<Object> probe(table->Lookup(*source, *context), isolate());
if (probe->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(probe);
@@ -214,7 +216,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
ASSERT(HasOrigin(shared, name, line_offset, column_offset));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
- if (generation != 0) Put(source, shared);
+ if (generation != 0) Put(source, context, shared);
isolate()->counters()->compilation_cache_hits()->Increment();
return shared;
} else {
@@ -226,25 +228,28 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
MaybeObject* CompilationCacheScript::TryTablePut(
Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
Handle<CompilationCacheTable> table = GetFirstTable();
- return table->Put(*source, *function_info);
+ return table->Put(*source, *context, *function_info);
}
Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, function_info),
+ TryTablePut(source, context, function_info),
CompilationCacheTable);
}
void CompilationCacheScript::Put(Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
HandleScope scope(isolate());
- SetFirstTable(TablePut(source, function_info));
+ SetFirstTable(TablePut(source, context, function_info));
}
@@ -380,15 +385,17 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
-Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
+Handle<SharedFunctionInfo> CompilationCache::LookupScript(
+ Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset,
+ Handle<Context> context) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
}
- return script_.Lookup(source, name, line_offset, column_offset);
+ return script_.Lookup(source, name, line_offset, column_offset, context);
}
@@ -426,12 +433,13 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) {
return;
}
- script_.Put(source, function_info);
+ script_.Put(source, context, function_info);
}
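Together with the header change below, this makes the script cache key two-dimensional: a hit now requires both the same source string and the same context, so identical source compiled in different contexts no longer shares a cache entry. A rough sketch of the idea, with hypothetical placeholder types in place of CompilationCacheTable:

    #include <map>
    #include <string>
    #include <utility>

    struct Context;              // hypothetical stand-ins for V8's
    struct SharedFunctionInfo;   // heap-allocated types

    class ScriptCacheSketch {
     public:
      // The context is part of the key: same source, different context
      // means a separate entry (and a miss on first lookup).
      using Key = std::pair<std::string, const Context*>;

      SharedFunctionInfo* Lookup(const std::string& source,
                                 const Context* context) const {
        auto it = table_.find(Key(source, context));
        return it == table_.end() ? nullptr : it->second;
      }

      void Put(const std::string& source, const Context* context,
               SharedFunctionInfo* info) {
        table_[Key(source, context)] = info;
      }

     private:
      std::map<Key, SharedFunctionInfo*> table_;
    };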
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 2f2fbadb2e..7a236e8fbf 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -98,16 +98,23 @@ class CompilationCacheScript : public CompilationSubCache {
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
- int column_offset);
- void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
+ int column_offset,
+ Handle<Context> context);
+ void Put(Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<Object> name,
@@ -122,7 +129,7 @@ class CompilationCacheScript : public CompilationSubCache {
// Sub-cache for eval scripts. Two caches for eval are used. One for eval calls
-// in global contexts and one for eval calls in other contexts. The cache
+// in native contexts and one for eval calls in other contexts. The cache
// considers the following pieces of information when checking for matching
// entries:
// 1. The source string.
@@ -204,7 +211,8 @@ class CompilationCache {
Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
- int column_offset);
+ int column_offset,
+ Handle<Context> context);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
@@ -223,6 +231,7 @@ class CompilationCache {
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index d44718bc0f..e4a30dbbce 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -51,7 +51,7 @@ namespace v8 {
namespace internal {
-CompilationInfo::CompilationInfo(Handle<Script> script)
+CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
: isolate_(script->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE)),
function_(NULL),
@@ -60,12 +60,15 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
script_(script),
extension_(NULL),
pre_parse_data_(NULL),
- osr_ast_id_(AstNode::kNoNumber) {
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
Initialize(BASE);
}
-CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
+CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
+ Zone* zone)
: isolate_(shared_info->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
@@ -76,12 +79,14 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
script_(Handle<Script>(Script::cast(shared_info->script()))),
extension_(NULL),
pre_parse_data_(NULL),
- osr_ast_id_(AstNode::kNoNumber) {
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
Initialize(BASE);
}
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
+CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
: isolate_(closure->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
@@ -93,11 +98,19 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
script_(Handle<Script>(Script::cast(shared_info_->script()))),
extension_(NULL),
pre_parse_data_(NULL),
- osr_ast_id_(AstNode::kNoNumber) {
+ context_(closure->context()),
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
Initialize(BASE);
}
+CompilationInfo::~CompilationInfo() {
+ delete deferred_handles_;
+}
+
+
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
bool is_optimizable_closure =
@@ -118,7 +131,7 @@ bool CompilationInfo::ShouldSelfOptimize() {
FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->flags()->Contains(kDontOptimize) &&
- function()->scope()->AllowsLazyRecompilation() &&
+ function()->scope()->AllowsLazyCompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}
@@ -137,9 +150,8 @@ void CompilationInfo::AbortOptimization() {
 // all. However, crankshaft supports recompilation of functions, so in this case
 // the full compiler need not be used if a debugger is attached, but only if
 // break points have actually been set.
-static bool is_debugging_active() {
+static bool IsDebuggerActive(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- Isolate* isolate = Isolate::Current();
return V8::UseCrankshaft() ?
isolate->debug()->has_break_points() :
isolate->debugger()->IsDebuggerActive();
@@ -149,27 +161,32 @@ static bool is_debugging_active() {
}
-static bool AlwaysFullCompiler() {
- return FLAG_always_full_compiler || is_debugging_active();
+static bool AlwaysFullCompiler(Isolate* isolate) {
+ return FLAG_always_full_compiler || IsDebuggerActive(isolate);
}
-static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
+void OptimizingCompiler::RecordOptimizationStats() {
+ Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms_creategraph =
+ static_cast<double>(time_taken_to_create_graph_) / 1000;
+ double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
+ double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
if (FLAG_trace_opt) {
PrintF("[optimizing: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
- PrintF(" - took %0.3f ms]\n", ms);
+ PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
+ ms_codegen);
}
if (FLAG_trace_opt_stats) {
static double compilation_time = 0.0;
static int compiled_functions = 0;
static int code_size = 0;
- compilation_time += ms;
+ compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
compiled_functions++;
code_size += function->shared()->SourceSize();
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
@@ -180,46 +197,54 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
}
+// A return value of true indicates the compilation pipeline is still
+// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
- // Test if we can optimize this function when asked to. We can only
- // do this after the scopes are computed.
- if (!V8::UseCrankshaft()) {
- info->DisableOptimization();
- }
+ OptimizingCompiler compiler(info);
+ OptimizingCompiler::Status status = compiler.CreateGraph();
- // In case we are not optimizing simply return the code from
- // the full code generator.
- if (!info->IsOptimizing()) {
- return FullCodeGenerator::MakeCode(info);
+ if (status != OptimizingCompiler::SUCCEEDED) {
+ return status != OptimizingCompiler::FAILED;
+ }
+ status = compiler.OptimizeGraph();
+ if (status != OptimizingCompiler::SUCCEEDED) {
+ status = compiler.AbortOptimization();
+ return status != OptimizingCompiler::FAILED;
}
+ status = compiler.GenerateAndInstallCode();
+ return status != OptimizingCompiler::FAILED;
+}
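MakeCrankshaftCode is now just a driver over the three OptimizingCompiler phases, folding the tri-state Status back into the old boolean: only FAILED stops the wider pipeline, while BAILED_OUT falls back to the full-codegen output and still counts as progress. A compressed sketch of that control flow, with the phase bodies elided:

    // Minimal sketch of the status-driven driver; the real phases live on
    // OptimizingCompiler and are far more involved.
    enum class Status { kFailed, kBailedOut, kSucceeded };

    struct PhasedCompiler {
      Status CreateGraph() { return Status::kSucceeded; }             // elided
      Status OptimizeGraph() { return Status::kSucceeded; }           // elided
      Status GenerateAndInstallCode() { return Status::kSucceeded; }  // elided
      Status AbortOptimization() { return Status::kBailedOut; }       // elided
    };

    // True while compilation can continue (optimized or not); false only
    // on a hard failure, matching MakeCrankshaftCode above.
    bool Drive(PhasedCompiler* c) {
      Status status = c->CreateGraph();
      if (status != Status::kSucceeded) return status != Status::kFailed;
      status = c->OptimizeGraph();
      if (status != Status::kSucceeded) {
        return c->AbortOptimization() != Status::kFailed;
      }
      return c->GenerateAndInstallCode() != Status::kFailed;
    }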
- // We should never arrive here if there is not code object on the
+
+OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
+ ASSERT(V8::UseCrankshaft());
+ ASSERT(info()->IsOptimizing());
+ ASSERT(!info()->IsCompilingForDebugging());
+
+ // We should never arrive here if there is no code object on the
// shared function object.
- Handle<Code> code(info->shared_info()->code());
+ Handle<Code> code(info()->shared_info()->code());
ASSERT(code->kind() == Code::FUNCTION);
// We should never arrive here if optimization has been disabled on the
// shared function info.
- ASSERT(!info->shared_info()->optimization_disabled());
+ ASSERT(!info()->shared_info()->optimization_disabled());
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
- if (AlwaysFullCompiler()) {
- info->SetCode(code);
- return true;
+ if (AlwaysFullCompiler(info()->isolate())) {
+ info()->SetCode(code);
+ return SetLastStatus(BAILED_OUT);
}
 // Limit the number of times we re-compile a function with
// the optimizing compiler.
const int kMaxOptCount =
- FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
- if (info->shared_info()->opt_count() > kMaxOptCount) {
- info->AbortOptimization();
- info->shared_info()->DisableOptimization();
- // True indicates the compilation pipeline is still going, not
- // necessarily that we optimized the code.
- return true;
+ FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
+ if (info()->shared_info()->opt_count() > kMaxOptCount) {
+ info()->set_bailout_reason("optimized too many times");
+ return AbortOptimization();
}
// Due to an encoding limit on LUnallocated operands in the Lithium
@@ -230,27 +255,28 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// The encoding is as a signed value, with parameters and receiver using
// the negative indices and locals the non-negative ones.
const int parameter_limit = -LUnallocated::kMinFixedIndex;
+ Scope* scope = info()->scope();
+ if ((scope->num_parameters() + 1) > parameter_limit) {
+ info()->set_bailout_reason("too many parameters");
+ return AbortOptimization();
+ }
+
const int locals_limit = LUnallocated::kMaxFixedIndex;
- Scope* scope = info->scope();
- if ((scope->num_parameters() + 1) > parameter_limit ||
- (info->osr_ast_id() != AstNode::kNoNumber &&
- scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
- info->AbortOptimization();
- info->shared_info()->DisableOptimization();
- // True indicates the compilation pipeline is still going, not
- // necessarily that we optimized the code.
- return true;
+ if (!info()->osr_ast_id().IsNone() &&
+ scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
+ info()->set_bailout_reason("too many parameters/locals");
+ return AbortOptimization();
}
// Take --hydrogen-filter into account.
- Handle<String> name = info->function()->debug_name();
+ Handle<String> name = info()->function()->debug_name();
if (*FLAG_hydrogen_filter != '\0') {
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if ((filter[0] == '-'
&& name->IsEqualTo(filter.SubVector(1, filter.length())))
|| (filter[0] != '-' && !name->IsEqualTo(filter))) {
- info->SetCode(code);
- return true;
+ info()->SetCode(code);
+ return SetLastStatus(BAILED_OUT);
}
}
@@ -258,20 +284,21 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
- int64_t start = OS::Ticks();
- bool should_recompile = !info->shared_info()->has_deoptimization_support();
+ Timer t(this, &time_taken_to_create_graph_);
+ bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
HPhase phase(HPhase::kFullCodeGen);
- CompilationInfo unoptimized(info->shared_info());
+ CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
// optimized code.
- unoptimized.SetFunction(info->function());
- unoptimized.SetScope(info->scope());
+ unoptimized.SetFunction(info()->function());
+ unoptimized.SetScope(info()->scope());
+ unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
if (should_recompile) {
- if (!succeeded) return false;
- Handle<SharedFunctionInfo> shared = info->shared_info();
+ if (!succeeded) return SetLastStatus(FAILED);
+ Handle<SharedFunctionInfo> shared = info()->shared_info();
shared->EnableDeoptimizationSupport(*unoptimized.code());
// The existing unoptimized code was replaced with the new one.
Compiler::RecordFunctionCompilation(
@@ -285,51 +312,93 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// is safe as long as the unoptimized code has deoptimization
// support.
ASSERT(FLAG_always_opt || code->optimizable());
- ASSERT(info->shared_info()->has_deoptimization_support());
+ ASSERT(info()->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
- HTracer::Instance()->TraceCompilation(info->function());
+ HTracer::Instance()->TraceCompilation(info()->function());
}
-
- Handle<Context> global_context(info->closure()->context()->global_context());
- TypeFeedbackOracle oracle(code, global_context, info->isolate(),
- info->isolate()->zone());
- HGraphBuilder builder(info, &oracle, info->isolate()->zone());
+ Handle<Context> native_context(
+ info()->closure()->context()->native_context());
+ oracle_ = new(info()->zone()) TypeFeedbackOracle(
+ code, native_context, info()->isolate(), info()->zone());
+ graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
HPhase phase(HPhase::kTotal);
- HGraph* graph = builder.CreateGraph();
- if (info->isolate()->has_pending_exception()) {
- info->SetCode(Handle<Code>::null());
- return false;
+ graph_ = graph_builder_->CreateGraph();
+
+ if (info()->isolate()->has_pending_exception()) {
+ info()->SetCode(Handle<Code>::null());
+ return SetLastStatus(FAILED);
}
- if (graph != NULL) {
- Handle<Code> optimized_code = graph->Compile(info, graph->zone());
- if (!optimized_code.is_null()) {
- info->SetCode(optimized_code);
- FinishOptimization(info->closure(), start);
- return true;
+ // The function being compiled may have bailed out due to an inline
+ // candidate bailing out. In such a case, we don't disable
+ // optimization on the shared_info.
+ ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL);
+ if (graph_ == NULL) {
+ if (graph_builder_->inline_bailout()) {
+ info_->AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
+ } else {
+ return AbortOptimization();
+ }
+ }
+
+ return SetLastStatus(SUCCEEDED);
+}
+
+OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
+ AssertNoAllocation no_gc;
+ NoHandleAllocation no_handles;
+
+ ASSERT(last_status() == SUCCEEDED);
+ Timer t(this, &time_taken_to_optimize_);
+ ASSERT(graph_ != NULL);
+ SmartArrayPointer<char> bailout_reason;
+ if (!graph_->Optimize(&bailout_reason)) {
+ if (!bailout_reason.is_empty()) graph_builder_->Bailout(*bailout_reason);
+ return SetLastStatus(BAILED_OUT);
+ } else {
+ chunk_ = LChunk::NewChunk(graph_);
+ if (chunk_ == NULL) {
+ return SetLastStatus(BAILED_OUT);
}
}
+ return SetLastStatus(SUCCEEDED);
+}
+
- // Keep using the shared code.
- info->AbortOptimization();
- if (!builder.inline_bailout()) {
- // Mark the shared code as unoptimizable unless it was an inlined
- // function that bailed out.
- info->shared_info()->DisableOptimization();
+OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
+ ASSERT(last_status() == SUCCEEDED);
+ Timer timer(this, &time_taken_to_codegen_);
+ ASSERT(chunk_ != NULL);
+ ASSERT(graph_ != NULL);
+ Handle<Code> optimized_code = chunk_->Codegen();
+ if (optimized_code.is_null()) {
+ info()->set_bailout_reason("code generation failed");
+ return AbortOptimization();
}
- // True indicates the compilation pipeline is still going, not necessarily
- // that we optimized the code.
- return true;
+ info()->SetCode(optimized_code);
+ RecordOptimizationStats();
+ return SetLastStatus(SUCCEEDED);
}
static bool GenerateCode(CompilationInfo* info) {
- return info->IsCompilingForDebugging() || !V8::UseCrankshaft() ?
- FullCodeGenerator::MakeCode(info) :
- MakeCrankshaftCode(info);
+ bool is_optimizing = V8::UseCrankshaft() &&
+ !info->IsCompilingForDebugging() &&
+ info->IsOptimizing();
+ if (is_optimizing) {
+ return MakeCrankshaftCode(info);
+ } else {
+ if (info->IsOptimizing()) {
+ // Have the CompilationInfo decide if the compilation should be
+ // BASE or NONOPT.
+ info->DisableOptimization();
+ }
+ return FullCodeGenerator::MakeCode(info);
+ }
}
@@ -348,7 +417,7 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
bool succeeded = MakeCode(info);
if (!info->shared_info().is_null()) {
Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
- info->isolate()->zone());
+ info->zone());
info->shared_info()->set_scope_info(*scope_info);
}
return succeeded;
@@ -358,12 +427,12 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
- ASSERT(!isolate->global_context().is_null());
+ ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
- script->set_context_data((*isolate->global_context())->data());
+ script->set_context_data((*isolate->native_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@@ -422,7 +491,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
lit->name(),
lit->materialized_literal_count(),
info->code(),
- ScopeInfo::Create(info->scope(), info->isolate()->zone()));
+ ScopeInfo::Create(info->scope(), info->zone()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
@@ -464,7 +533,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
- live_edit_tracker.RecordFunctionInfo(result, lit, isolate->zone());
+ live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
return result;
}
@@ -474,6 +543,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<Object> script_name,
int line_offset,
int column_offset,
+ Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data,
@@ -494,7 +564,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
result = compilation_cache->LookupScript(source,
script_name,
line_offset,
- column_offset);
+ column_offset,
+ context);
}
if (result.is_null()) {
@@ -522,16 +593,17 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
: *script_data);
// Compile the function and add it to the cache.
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
+ info.SetContext(context);
if (FLAG_use_strict) {
info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
}
result = MakeFunctionInfo(&info);
- if (extension == NULL && !result.is_null()) {
- compilation_cache->PutScript(source, result);
+ if (extension == NULL && !result.is_null() && !result->dont_cache()) {
+ compilation_cache->PutScript(source, context, result);
}
} else {
if (result->ic_age() != HEAP->global_ic_age()) {
@@ -570,16 +642,16 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
if (result.is_null()) {
// Create a script object describing the script to be compiled.
Handle<Script> script = isolate->factory()->NewScript(source);
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
info.SetLanguageMode(language_mode);
- info.SetCallingContext(context);
+ info.SetContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
// Explicitly disable optimization for eval code. We're not yet prepared
// to handle eval-code in the optimizing compiler.
- result->DisableOptimization();
+ result->DisableOptimization("eval");
// If caller is strict mode, the result must be in strict mode or
// extended mode as well, but not the other way around. Consider:
@@ -589,8 +661,10 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
// extended mode.
ASSERT(language_mode != EXTENDED_MODE ||
result->is_extended_mode());
- compilation_cache->PutEval(
- source, context, is_global, result, scope_position);
+ if (!result->dont_cache()) {
+ compilation_cache->PutEval(
+ source, context, is_global, result, scope_position);
+ }
}
} else {
if (result->ic_age() != HEAP->global_ic_age()) {
@@ -602,10 +676,113 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
}
+static bool InstallFullCode(CompilationInfo* info) {
+ // Update the shared function info with the compiled code and the
+  // scope info. Note that the order of the shared function
+ // info initialization is important since set_scope_info might
+ // trigger a GC, causing the ASSERT below to be invalid if the code
+ // was flushed. By setting the code object last we avoid this.
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<Code> code = info->code();
+ Handle<JSFunction> function = info->closure();
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info->scope(), info->zone());
+ shared->set_scope_info(*scope_info);
+ shared->set_code(*code);
+ if (!function.is_null()) {
+ function->ReplaceCode(*code);
+ ASSERT(!function->IsOptimized());
+ }
+
+ // Set the expected number of properties for instances.
+ FunctionLiteral* lit = info->function();
+ int expected = lit->expected_property_count();
+ SetExpectedNofPropertiesFromEstimate(shared, expected);
+
+ // Set the optimization hints after performing lazy compilation, as
+ // these are not set when the function is set up as a lazily
+ // compiled function.
+ shared->SetThisPropertyAssignmentsInfo(
+ lit->has_only_simple_this_property_assignments(),
+ *lit->this_property_assignments());
+
+ // Check the function has compiled code.
+ ASSERT(shared->is_compiled());
+ shared->set_code_age(0);
+ shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ shared->set_dont_inline(lit->flags()->Contains(kDontInline));
+ shared->set_ast_node_count(lit->ast_node_count());
+
+ if (V8::UseCrankshaft() &&
+ !function.is_null() &&
+ !shared->optimization_disabled()) {
+ // If we're asked to always optimize, we compile the optimized
+ // version of the function right away - unless the debugger is
+ // active as it makes no sense to compile optimized code then.
+ if (FLAG_always_opt &&
+ !Isolate::Current()->DebuggerHasBreakPoints()) {
+ CompilationInfoWithZone optimized(function);
+ optimized.SetOptimizing(BailoutId::None());
+ return Compiler::CompileLazy(&optimized);
+ }
+ }
+ return true;
+}
+
+
+static void InstallCodeCommon(CompilationInfo* info) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<Code> code = info->code();
+ ASSERT(!code.is_null());
+
+ // Set optimizable to false if this is disallowed by the shared
+ // function info, e.g., we might have flushed the code and must
+ // reset this bit when lazy compiling the code again.
+ if (shared->optimization_disabled()) code->set_optimizable(false);
+
+ Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+}
+
+
+static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+ Handle<Code> code = info->code();
+ Handle<JSFunction> function = info->closure();
+ if (FLAG_cache_optimized_code && code->kind() == Code::OPTIMIZED_FUNCTION) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<FixedArray> literals(function->literals());
+ Handle<Context> native_context(function->context()->native_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(
+ shared, native_context, code, literals);
+ }
+}
+
+
+static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
+ if (FLAG_cache_optimized_code && info->IsOptimizing()) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<JSFunction> function = info->closure();
+ ASSERT(!function.is_null());
+ Handle<Context> native_context(function->context()->native_context());
+ int index = shared->SearchOptimizedCodeMap(*native_context);
+ if (index > 0) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(*function));
+ }
+ // Caching of optimized code enabled and optimized code found.
+ shared->InstallFromOptimizedCodeMap(*function, index);
+ return true;
+ }
+ }
+ return false;
+}
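InsertCodeIntoOptimizedCodeMap and InstallCodeFromOptimizedCodeMap together memoize optimized code per native context: compile once for a given (shared function, native context) pair, then reinstall from the map on later lazy compiles. A simplified per-function sketch, again with hypothetical stand-in types:

    #include <map>

    struct Context;  // hypothetical stand-ins for V8's heap objects
    struct Code;

    // Sketch of the cache hanging off one SharedFunctionInfo: at most one
    // optimized Code object per native context, consulted before recompiling.
    class OptimizedCodeMapSketch {
     public:
      Code* Search(const Context* native_context) const {
        auto it = map_.find(native_context);
        return it == map_.end() ? nullptr : it->second;
      }

      void Add(const Context* native_context, Code* code) {
        map_[native_context] = code;  // a later insert overwrites, which is safe
      }

     private:
      std::map<const Context*, Code*> map_;
    };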
+
+
bool Compiler::CompileLazy(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
VMState state(isolate, COMPILER);
@@ -616,6 +793,8 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
+ if (InstallCodeFromOptimizedCodeMap(info)) return true;
+
// Generate the AST for the lazily compiled function.
if (ParserApi::Parse(info, kNoParsingFlags)) {
// Measure how long it takes to do the lazy compilation; only take the
@@ -634,69 +813,17 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
isolate->StackOverflow();
}
} else {
- ASSERT(!info->code().is_null());
- Handle<Code> code = info->code();
- // Set optimizable to false if this is disallowed by the shared
- // function info, e.g., we might have flushed the code and must
- // reset this bit when lazy compiling the code again.
- if (shared->optimization_disabled()) code->set_optimizable(false);
-
- Handle<JSFunction> function = info->closure();
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+ InstallCodeCommon(info);
if (info->IsOptimizing()) {
+ Handle<Code> code = info->code();
ASSERT(shared->scope_info() != ScopeInfo::Empty());
- function->ReplaceCode(*code);
+ info->closure()->ReplaceCode(*code);
+ InsertCodeIntoOptimizedCodeMap(info);
+ return true;
} else {
- // Update the shared function info with the compiled code and the
- // scope info. Please note, that the order of the shared function
- // info initialization is important since set_scope_info might
- // trigger a GC, causing the ASSERT below to be invalid if the code
- // was flushed. By setting the code object last we avoid this.
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info->scope(), info->isolate()->zone());
- shared->set_scope_info(*scope_info);
- shared->set_code(*code);
- if (!function.is_null()) {
- function->ReplaceCode(*code);
- ASSERT(!function->IsOptimized());
- }
-
- // Set the expected number of properties for instances.
- FunctionLiteral* lit = info->function();
- int expected = lit->expected_property_count();
- SetExpectedNofPropertiesFromEstimate(shared, expected);
-
- // Set the optimization hints after performing lazy compilation, as
- // these are not set when the function is set up as a lazily
- // compiled function.
- shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-
- // Check the function has compiled code.
- ASSERT(shared->is_compiled());
- shared->set_code_age(0);
- shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
- shared->set_dont_inline(lit->flags()->Contains(kDontInline));
- shared->set_ast_node_count(lit->ast_node_count());
-
- if (V8::UseCrankshaft()&&
- !function.is_null() &&
- !shared->optimization_disabled()) {
- // If we're asked to always optimize, we compile the optimized
- // version of the function right away - unless the debugger is
- // active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt &&
- !Isolate::Current()->DebuggerHasBreakPoints()) {
- CompilationInfo optimized(function);
- optimized.SetOptimizing(AstNode::kNoNumber);
- return CompileLazy(&optimized);
- }
- }
+ return InstallFullCode(info);
}
-
- return true;
}
}
@@ -705,10 +832,97 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
+void Compiler::RecompileParallel(Handle<JSFunction> closure) {
+ if (closure->IsInRecompileQueue()) return;
+ ASSERT(closure->IsMarkedForParallelRecompilation());
+
+ Isolate* isolate = closure->GetIsolate();
+ if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** Compilation queue, will retry opting on next run.\n");
+ }
+ return;
+ }
+
+ SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
+ VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
+ PostponeInterruptsScope postpone(isolate);
+
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ int compiled_size = shared->end_position() - shared->start_position();
+ isolate->counters()->total_compile_size()->Increment(compiled_size);
+ info->SetOptimizing(BailoutId::None());
+
+ {
+ CompilationHandleScope handle_scope(*info);
+
+ if (InstallCodeFromOptimizedCodeMap(*info)) return;
+
+ if (ParserApi::Parse(*info, kNoParsingFlags)) {
+ LanguageMode language_mode = info->function()->language_mode();
+ info->SetLanguageMode(language_mode);
+ shared->set_language_mode(language_mode);
+ info->SaveHandles();
+
+ if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
+ OptimizingCompiler* compiler =
+ new(info->zone()) OptimizingCompiler(*info);
+ OptimizingCompiler::Status status = compiler->CreateGraph();
+ if (status == OptimizingCompiler::SUCCEEDED) {
+ isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ shared->code()->set_profiler_ticks(0);
+ closure->ReplaceCode(isolate->builtins()->builtin(
+ Builtins::kInRecompileQueue));
+ info.Detach();
+ } else if (status == OptimizingCompiler::BAILED_OUT) {
+ isolate->clear_pending_exception();
+ InstallFullCode(*info);
+ }
+ }
+ }
+ }
+
+ if (isolate->has_pending_exception()) {
+ isolate->clear_pending_exception();
+ }
+}
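RecompileParallel splits the work across threads: the foreground thread parses and builds the graph, hands the OptimizingCompiler to a background thread through a queue, and installs the InRecompileQueue builtin as a placeholder; if the queue is unavailable it gives up and retries on a later tick. A generic sketch of that hand-off using the standard library instead of V8's OptimizingCompilerThread:

    #include <cstddef>
    #include <mutex>
    #include <queue>

    struct CompileJob;  // hypothetical stand-in for OptimizingCompiler*

    // Bounded queue where Enqueue fails instead of blocking, mirroring the
    // IsQueueAvailable() early-out above; the caller just retries later.
    class RecompileQueueSketch {
     public:
      explicit RecompileQueueSketch(std::size_t capacity)
          : capacity_(capacity) {}

      bool Enqueue(CompileJob* job) {  // foreground thread
        std::lock_guard<std::mutex> lock(mutex_);
        if (jobs_.size() >= capacity_) return false;  // queue unavailable
        jobs_.push(job);
        return true;
      }

      CompileJob* TryDequeue() {  // background compiler thread
        std::lock_guard<std::mutex> lock(mutex_);
        if (jobs_.empty()) return nullptr;
        CompileJob* job = jobs_.front();
        jobs_.pop();
        return job;
      }

     private:
      std::mutex mutex_;
      std::queue<CompileJob*> jobs_;
      const std::size_t capacity_;
    };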
+
+
+void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
+ SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+  // If crankshaft succeeded, install the optimized code; otherwise install
+ // the unoptimized code.
+ OptimizingCompiler::Status status = optimizing_compiler->last_status();
+ if (status != OptimizingCompiler::SUCCEEDED) {
+ optimizing_compiler->info()->set_bailout_reason(
+ "failed/bailed out last time");
+ status = optimizing_compiler->AbortOptimization();
+ } else {
+ status = optimizing_compiler->GenerateAndInstallCode();
+ ASSERT(status == OptimizingCompiler::SUCCEEDED ||
+ status == OptimizingCompiler::BAILED_OUT);
+ }
+
+ InstallCodeCommon(*info);
+ if (status == OptimizingCompiler::SUCCEEDED) {
+ Handle<Code> code = info->code();
+ ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty());
+ info->closure()->ReplaceCode(*code);
+ if (info->shared_info()->SearchOptimizedCodeMap(
+ info->closure()->context()->native_context()) == -1) {
+ InsertCodeIntoOptimizedCodeMap(*info);
+ }
+ } else {
+ info->SetCode(Handle<Code>(info->shared_info()->code()));
+ InstallFullCode(*info);
+ }
+}
+
+
Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
Handle<Script> script) {
// Precondition: code has been parsed and scopes have been analyzed.
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
info.SetFunction(literal);
info.SetScope(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode());
@@ -719,19 +933,24 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// builtins cannot be handled lazily by the parser, since we have to know
// if a function uses the special natives syntax, which is something the
// parser records.
+ // If the debugger requests compilation for break points, we cannot be
+ // aggressive about lazy compilation, because it might trigger compilation
+ // of functions without an outer context when setting a breakpoint through
+ // Debug::FindSharedFunctionInfoInScript.
+ bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
bool allow_lazy = literal->AllowsLazyCompilation() &&
- !LiveEditFunctionTracker::IsActive(info.isolate());
+ !LiveEditFunctionTracker::IsActive(info.isolate()) &&
+ (!info.isolate()->DebuggerHasBreakPoints() || allow_lazy_without_ctx);
Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
// Generate code
- if (FLAG_lazy && allow_lazy) {
+ if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
Handle<Code> code = info.isolate()->builtins()->LazyCompile();
info.SetCode(code);
- } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
- (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
+ } else if (GenerateCode(&info)) {
ASSERT(!info.code().is_null());
- scope_info = ScopeInfo::Create(info.scope(), info.isolate()->zone());
+ scope_info = ScopeInfo::Create(info.scope(), info.zone());
} else {
return Handle<SharedFunctionInfo>::null();
}
@@ -745,12 +964,13 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
SetFunctionInfo(result, literal, false, script);
RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
result->set_allows_lazy_compilation(allow_lazy);
+ result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
// Set the expected number of properties for instances and return
// the resulting function.
SetExpectedNofPropertiesFromEstimate(result,
literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal, info.isolate()->zone());
+ live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
return result;
}
@@ -777,6 +997,8 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ function_info->set_allows_lazy_compilation_without_context(
+ lit->AllowsLazyCompilationWithoutContext());
function_info->set_language_mode(lit->language_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
@@ -784,6 +1006,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_is_function(lit->is_function());
function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
+ function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
}
@@ -796,7 +1019,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
- if (info->isolate()->logger()->is_logging() ||
+ if (info->isolate()->logger()->is_logging_code_events() ||
CpuProfiler::is_profiling(info->isolate())) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 44df9e090f..af9459566d 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -39,16 +39,21 @@ class ScriptDataImpl;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
-class CompilationInfo BASE_EMBEDDED {
+class CompilationInfo {
public:
- explicit CompilationInfo(Handle<Script> script);
- explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
- explicit CompilationInfo(Handle<JSFunction> closure);
+ CompilationInfo(Handle<Script> script, Zone* zone);
+ CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
+ CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+
+ virtual ~CompilationInfo();
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
return isolate_;
}
+ Zone* zone() {
+ return zone_;
+ }
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
@@ -67,8 +72,8 @@ class CompilationInfo BASE_EMBEDDED {
Handle<Script> script() const { return script_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
- Handle<Context> calling_context() const { return calling_context_; }
- int osr_ast_id() const { return osr_ast_id_; }
+ Handle<Context> context() const { return context_; }
+ BailoutId osr_ast_id() const { return osr_ast_id_; }
void MarkAsEval() {
ASSERT(!is_lazy());
@@ -115,13 +120,8 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(!is_lazy());
pre_parse_data_ = pre_parse_data;
}
- void SetCallingContext(Handle<Context> context) {
- ASSERT(is_eval());
- calling_context_ = context;
- }
- void SetOsrAstId(int osr_ast_id) {
- ASSERT(IsOptimizing());
- osr_ast_id_ = osr_ast_id;
+ void SetContext(Handle<Context> context) {
+ context_ = context;
}
void MarkCompilingForDebugging(Handle<Code> current_code) {
ASSERT(mode_ != OPTIMIZE);
@@ -138,17 +138,18 @@ class CompilationInfo BASE_EMBEDDED {
}
bool has_global_object() const {
- return !closure().is_null() && (closure()->context()->global() != NULL);
+ return !closure().is_null() &&
+ (closure()->context()->global_object() != NULL);
}
GlobalObject* global_object() const {
- return has_global_object() ? closure()->context()->global() : NULL;
+ return has_global_object() ? closure()->context()->global_object() : NULL;
}
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
- void SetOptimizing(int osr_ast_id) {
+ void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
}
@@ -170,6 +171,21 @@ class CompilationInfo BASE_EMBEDDED {
// current compilation pipeline.
void AbortOptimization();
+ void set_deferred_handles(DeferredHandles* deferred_handles) {
+ ASSERT(deferred_handles_ == NULL);
+ deferred_handles_ = deferred_handles;
+ }
+
+ void SaveHandles() {
+ SaveHandle(&closure_);
+ SaveHandle(&shared_info_);
+ SaveHandle(&context_);
+ SaveHandle(&script_);
+ }
+
+ const char* bailout_reason() const { return bailout_reason_; }
+ void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
+
private:
Isolate* isolate_;
@@ -184,8 +200,6 @@ class CompilationInfo BASE_EMBEDDED {
NONOPT
};
- CompilationInfo() : function_(NULL) {}
-
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
ASSERT(!script_.is_null());
@@ -196,6 +210,7 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(language_mode() == CLASSIC_MODE);
SetLanguageMode(shared_info_->language_mode());
}
+ set_bailout_reason("unknown");
}
void SetMode(Mode mode) {
@@ -246,18 +261,148 @@ class CompilationInfo BASE_EMBEDDED {
v8::Extension* extension_;
ScriptDataImpl* pre_parse_data_;
- // The context of the caller is needed for eval code, and will be a null
- // handle otherwise.
- Handle<Context> calling_context_;
+ // The context of the caller for eval code, and the global context for a
+ // global script. Will be a null handle otherwise.
+ Handle<Context> context_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
- int osr_ast_id_;
+ BailoutId osr_ast_id_;
+
+ // The zone from which the compilation pipeline working on this
+ // CompilationInfo allocates.
+ Zone* zone_;
+
+ DeferredHandles* deferred_handles_;
+
+ template<typename T>
+ void SaveHandle(Handle<T> *object) {
+ if (!object->is_null()) {
+ Handle<T> handle(*(*object));
+ *object = handle;
+ }
+ }
+
+ const char* bailout_reason_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
+// Exactly like a CompilationInfo, except also creates and enters a
+// Zone on construction and deallocates it on exit.
+class CompilationInfoWithZone: public CompilationInfo {
+ public:
+ explicit CompilationInfoWithZone(Handle<Script> script)
+ : CompilationInfo(script, &zone_),
+ zone_(script->GetIsolate()),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
+ : CompilationInfo(shared_info, &zone_),
+ zone_(shared_info->GetIsolate()),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ explicit CompilationInfoWithZone(Handle<JSFunction> closure)
+ : CompilationInfo(closure, &zone_),
+ zone_(closure->GetIsolate()),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
+
+ private:
+ Zone zone_;
+ ZoneScope zone_scope_;
+};
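CompilationInfoWithZone keeps the old one-argument construction ergonomics while giving the base class its Zone pointer: the subclass owns the Zone and its ZoneScope, so the arena dies with the info object. A reduced sketch of the owns-its-allocator pattern (passing &zone_ to the base before zone_ is constructed is legal, since only the member's address is taken):

    // Reduced sketch of the pattern; Zone stands in for V8's arena.
    struct Zone {
      // ... allocation machinery elided ...
    };

    class Info {
     public:
      explicit Info(Zone* zone) : zone_(zone) {}
      Zone* zone() const { return zone_; }
     private:
      Zone* zone_;  // not owned
    };

    class InfoWithZone : public Info {
     public:
      // &zone_ is a valid address even though zone_ is constructed after
      // the base class; the base only stores the pointer.
      InfoWithZone() : Info(&zone_) {}
     private:
      Zone zone_;  // owned; destroyed together with the wrapper
    };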
+
+
+// A wrapper around a CompilationInfo that detaches the Handles from
+// the underlying DeferredHandleScope and stores them in info_ on
+// destruction.
+class CompilationHandleScope BASE_EMBEDDED {
+ public:
+ explicit CompilationHandleScope(CompilationInfo* info)
+ : deferred_(info->isolate()), info_(info) {}
+ ~CompilationHandleScope() {
+ info_->set_deferred_handles(deferred_.Detach());
+ }
+
+ private:
+ DeferredHandleScope deferred_;
+ CompilationInfo* info_;
+};
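CompilationHandleScope is the transfer half of the deferred-handles machinery: handles created inside the scope survive it by being detached into the CompilationInfo on destruction, which is what lets RecompileParallel ship its handles to another thread. Stripped to the RAII essentials, the shape is roughly:

    #include <utility>
    #include <vector>

    using Handles = std::vector<void*>;  // hypothetical DeferredHandles stand-in

    struct Sink {  // stands in for CompilationInfo
      Handles saved;
    };

    // RAII scope that hands everything it collected to the sink on exit,
    // mirroring ~CompilationHandleScope() calling set_deferred_handles().
    class TransferScopeSketch {
     public:
      explicit TransferScopeSketch(Sink* sink) : sink_(sink) {}
      ~TransferScopeSketch() { sink_->saved = std::move(collected_); }
      void Collect(void* handle) { collected_.push_back(handle); }

     private:
      Sink* sink_;
      Handles collected_;
    };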
+
+
+class HGraph;
+class HGraphBuilder;
+class LChunk;
+
+// A helper class that calls the three compilation phases in
+// Crankshaft and keeps track of its state. The three phases
+// CreateGraph, OptimizeGraph and GenerateAndInstallCode can either
+// fail, bail-out to the full code generator or succeed. Apart from
+// their return value, the status of the phase last run can be checked
+// using last_status().
+class OptimizingCompiler: public ZoneObject {
+ public:
+ explicit OptimizingCompiler(CompilationInfo* info)
+ : info_(info),
+ oracle_(NULL),
+ graph_builder_(NULL),
+ graph_(NULL),
+ chunk_(NULL),
+ time_taken_to_create_graph_(0),
+ time_taken_to_optimize_(0),
+ time_taken_to_codegen_(0),
+ last_status_(FAILED) { }
+
+ enum Status {
+ FAILED, BAILED_OUT, SUCCEEDED
+ };
+
+ MUST_USE_RESULT Status CreateGraph();
+ MUST_USE_RESULT Status OptimizeGraph();
+ MUST_USE_RESULT Status GenerateAndInstallCode();
+
+ Status last_status() const { return last_status_; }
+ CompilationInfo* info() const { return info_; }
+
+ MUST_USE_RESULT Status AbortOptimization() {
+ info_->AbortOptimization();
+ info_->shared_info()->DisableOptimization(info_->bailout_reason());
+ return SetLastStatus(BAILED_OUT);
+ }
+
+ private:
+ CompilationInfo* info_;
+ TypeFeedbackOracle* oracle_;
+ HGraphBuilder* graph_builder_;
+ HGraph* graph_;
+ LChunk* chunk_;
+ int64_t time_taken_to_create_graph_;
+ int64_t time_taken_to_optimize_;
+ int64_t time_taken_to_codegen_;
+ Status last_status_;
+
+ MUST_USE_RESULT Status SetLastStatus(Status status) {
+ last_status_ = status;
+ return last_status_;
+ }
+ void RecordOptimizationStats();
+
+ struct Timer {
+ Timer(OptimizingCompiler* compiler, int64_t* location)
+ : compiler_(compiler),
+ start_(OS::Ticks()),
+ location_(location) { }
+
+ ~Timer() {
+ *location_ += (OS::Ticks() - start_);
+ }
+
+ OptimizingCompiler* compiler_;
+ int64_t start_;
+ int64_t* location_;
+ };
+};
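The nested Timer is a scope-based accumulator: each phase wraps its body in a Timer pointed at its own counter, and RecordOptimizationStats later prints the three totals. A standalone sketch with std::chrono in place of OS::Ticks():

    #include <chrono>
    #include <cstdint>

    // Scope timer adding elapsed microseconds to *location on destruction,
    // analogous to OptimizingCompiler::Timer above.
    class ScopedTimerSketch {
     public:
      explicit ScopedTimerSketch(int64_t* location)
          : location_(location), start_(std::chrono::steady_clock::now()) {}

      ~ScopedTimerSketch() {
        *location_ += std::chrono::duration_cast<std::chrono::microseconds>(
                          std::chrono::steady_clock::now() - start_).count();
      }

     private:
      int64_t* location_;
      std::chrono::steady_clock::time_point start_;
    };

    int64_t time_taken_to_optimize = 0;

    void OptimizePhase() {
      ScopedTimerSketch t(&time_taken_to_optimize);
      // ... phase body; the elapsed time is added when t goes out of scope ...
    }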
+
+
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function w/o
@@ -271,10 +416,6 @@ class CompilationInfo BASE_EMBEDDED {
class Compiler : public AllStatic {
public:
- // Default maximum number of function optimization attempts before we
- // give up.
- static const int kDefaultMaxOptCount = 10;
-
static const int kMaxInliningLevels = 3;
// Call count before primitive functions trigger their own optimization.
@@ -289,6 +430,7 @@ class Compiler : public AllStatic {
Handle<Object> script_name,
int line_offset,
int column_offset,
+ Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data,
@@ -305,6 +447,8 @@ class Compiler : public AllStatic {
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
+ static void RecompileParallel(Handle<JSFunction> function);
+
// Compile a shared function info object (the function is possibly lazily
// compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
@@ -316,6 +460,8 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
+ static void InstallOptimizedCode(OptimizingCompiler* info);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
#endif
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 76784bd704..169d9a1223 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -36,7 +36,7 @@ namespace internal {
Context* Context::declaration_context() {
Context* current = this;
- while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
+ while (!current->IsFunctionContext() && !current->IsNativeContext()) {
current = current->previous();
ASSERT(current->closure() == closure());
}
@@ -45,7 +45,7 @@ Context* Context::declaration_context() {
JSBuiltinsObject* Context::builtins() {
- GlobalObject* object = global();
+ GlobalObject* object = global_object();
if (object->IsJSGlobalObject()) {
return JSGlobalObject::cast(object)->builtins();
} else {
@@ -55,19 +55,19 @@ JSBuiltinsObject* Context::builtins() {
}
-Context* Context::global_context() {
+Context* Context::native_context() {
// Fast case: the global object for this context has been set. In
// that case, the global object has a direct pointer to the global
// context.
- if (global()->IsGlobalObject()) {
- return global()->global_context();
+ if (global_object()->IsGlobalObject()) {
+ return global_object()->native_context();
}
// During bootstrapping, the global object might not be set and we
- // have to search the context chain to find the global context.
+ // have to search the context chain to find the native context.
ASSERT(Isolate::Current()->bootstrapper()->IsActive());
Context* current = this;
- while (!current->IsGlobalContext()) {
+ while (!current->IsNativeContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
current = Context::cast(closure->context());
}
@@ -76,11 +76,11 @@ Context* Context::global_context() {
JSObject* Context::global_proxy() {
- return global_context()->global_proxy_object();
+ return native_context()->global_proxy_object();
}
void Context::set_global_proxy(JSObject* object) {
- global_context()->set_global_proxy_object(object);
+ native_context()->set_global_proxy_object(object);
}
@@ -106,12 +106,12 @@ Handle<Object> Context::Lookup(Handle<String> name,
do {
if (FLAG_trace_contexts) {
PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
- if (context->IsGlobalContext()) PrintF(" (global context)");
+ if (context->IsNativeContext()) PrintF(" (native context)");
PrintF("\n");
}
// 1. Check global objects, subjects of with, and extension objects.
- if (context->IsGlobalContext() ||
+ if (context->IsNativeContext() ||
context->IsWithContext() ||
(context->IsFunctionContext() && context->has_extension())) {
Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
@@ -226,7 +226,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
// 3. Prepare to continue with the previous (next outermost) context.
- if (context->IsGlobalContext()) {
+ if (context->IsNativeContext()) {
follow_context_chain = false;
} else {
context = Handle<Context>(context->previous(), isolate);
@@ -241,19 +241,21 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
- ASSERT(IsGlobalContext());
+ ASSERT(IsNativeContext());
#ifdef DEBUG
- Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- while (!element->IsUndefined()) {
- CHECK(element != function);
- element = JSFunction::cast(element)->next_function_link();
+ if (FLAG_enable_slow_asserts) {
+ Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
+ while (!element->IsUndefined()) {
+ CHECK(element != function);
+ element = JSFunction::cast(element)->next_function_link();
+ }
}
CHECK(function->next_function_link()->IsUndefined());
- // Check that the context belongs to the weak global contexts list.
+ // Check that the context belongs to the weak native contexts list.
bool found = false;
- Object* context = GetHeap()->global_contexts_list();
+ Object* context = GetHeap()->native_contexts_list();
while (!context->IsUndefined()) {
if (context == this) {
found = true;
@@ -269,7 +271,7 @@ void Context::AddOptimizedFunction(JSFunction* function) {
void Context::RemoveOptimizedFunction(JSFunction* function) {
- ASSERT(IsGlobalContext());
+ ASSERT(IsNativeContext());
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
JSFunction* prev = NULL;
while (!element->IsUndefined()) {
@@ -293,7 +295,7 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
Object* Context::OptimizedFunctionsListHead() {
- ASSERT(IsGlobalContext());
+ ASSERT(IsNativeContext());
return get(OPTIMIZED_FUNCTIONS_LIST);
}
@@ -304,10 +306,15 @@ void Context::ClearOptimizedFunctions() {
#ifdef DEBUG
-bool Context::IsBootstrappingOrContext(Object* object) {
+bool Context::IsBootstrappingOrValidParentContext(
+ Object* object, Context* child) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
- return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
+ if (Isolate::Current()->bootstrapper()->IsActive()) return true;
+ if (!object->IsContext()) return false;
+ Context* context = Context::cast(object);
+ return context->IsNativeContext() || context->IsGlobalContext() ||
+ context->IsModuleContext() || !child->IsModuleContext();
}
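The renamed native_context() keeps its old shape: on the bootstrapping slow path it hops from closure to closure until a context answers IsNativeContext(). Stripped of the heap types, the walk is a plain linked-list traversal:

    // Bare-bones sketch of the slow-path chain walk in native_context().
    struct Ctx {
      Ctx* outer;      // reached via closure->context() in the real code
      bool is_native;
    };

    Ctx* FindNativeContext(Ctx* current) {
      while (!current->is_native) {
        current = current->outer;  // the chain always ends in a native context
      }
      return current;
    }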
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index d154b82ca0..cfc576cecb 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -96,7 +96,7 @@ enum BindingFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-#define GLOBAL_CONTEXT_FIELDS(V) \
+#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
@@ -190,16 +190,19 @@ enum BindingFlags {
// Dynamically declared variables/functions are also added
// to lazily allocated extension object. Context::Lookup
// searches the extension object for properties.
+// For global and block contexts, contains the respective
+// ScopeInfo.
+// For module contexts, points back to the respective JSModule.
//
-// [ global ] A pointer to the global object. Provided for quick
+// [ global_object ] A pointer to the global object. Provided for quick
// access to the global object from inside the code (since
// we always have a context pointer).
//
// In addition, function contexts may have statically allocated context slots
// to store local variables/functions that are accessed from inner functions
// (via static context addresses) or through 'eval' (dynamic context lookups).
-// Finally, the global context contains additional slots for fast access to
-// global properties.
+// Finally, the native context contains additional slots for fast access to
+// native properties.
class Context: public FixedArray {
public:
@@ -217,15 +220,15 @@ class Context: public FixedArray {
// The extension slot is used for either the global object (in global
// contexts), eval extension object (function contexts), subject of with
 // (with contexts), the variable name (catch contexts), the serialized
- // scope info (block contexts).
+ // scope info (block contexts), or the module instance (module contexts).
EXTENSION_INDEX,
- GLOBAL_INDEX,
+ GLOBAL_OBJECT_INDEX,
MIN_CONTEXT_SLOTS,
// This slot holds the thrown value in catch contexts.
THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
- // These slots are only in global contexts.
+ // These slots are only in native contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
ARGUMENTS_BOILERPLATE_INDEX,
@@ -292,7 +295,7 @@ class Context: public FixedArray {
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
- GLOBAL_CONTEXT_SLOTS,
+ NATIVE_CONTEXT_SLOTS,
FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
};
@@ -303,7 +306,7 @@ class Context: public FixedArray {
Context* previous() {
Object* result = unchecked_previous();
- ASSERT(IsBootstrappingOrContext(result));
+ ASSERT(IsBootstrappingOrValidParentContext(result, this));
return reinterpret_cast<Context*>(result);
}
void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
@@ -312,16 +315,21 @@ class Context: public FixedArray {
Object* extension() { return get(EXTENSION_INDEX); }
void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+ JSModule* module() { return JSModule::cast(get(EXTENSION_INDEX)); }
+ void set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
+
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
Context* declaration_context();
- GlobalObject* global() {
- Object* result = get(GLOBAL_INDEX);
+ GlobalObject* global_object() {
+ Object* result = get(GLOBAL_OBJECT_INDEX);
ASSERT(IsBootstrappingOrGlobalObject(result));
return reinterpret_cast<GlobalObject*>(result);
}
- void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
+ void set_global_object(GlobalObject* object) {
+ set(GLOBAL_OBJECT_INDEX, object);
+ }
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
@@ -330,11 +338,11 @@ class Context: public FixedArray {
// The builtins object.
JSBuiltinsObject* builtins();
- // Compute the global context by traversing the context chain.
- Context* global_context();
+ // Compute the native context by traversing the context chain.
+ Context* native_context();
- // Predicates for context types. IsGlobalContext is defined on Object
- // because we frequently have to know if arbitrary objects are global
+ // Predicates for context types. IsNativeContext is defined on Object
+  // because we frequently have to know if arbitrary objects are native
// contexts.
bool IsFunctionContext() {
Map* map = this->map();
@@ -356,30 +364,34 @@ class Context: public FixedArray {
Map* map = this->map();
return map == map->GetHeap()->module_context_map();
}
+ bool IsGlobalContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->global_context_map();
+ }
- // Tells whether the global context is marked with out of memory.
+ // Tells whether the native context is marked with out of memory.
inline bool has_out_of_memory();
- // Mark the global context with out of memory.
+ // Mark the native context with out of memory.
inline void mark_out_of_memory();
- // A global context hold a list of all functions which have been optimized.
+ // A native context holds a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
-#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
+#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
- ASSERT(IsGlobalContext()); \
+ ASSERT(IsNativeContext()); \
set(index, value); \
} \
type* name() { \
- ASSERT(IsGlobalContext()); \
+ ASSERT(IsNativeContext()); \
return type::cast(get(index)); \
}
- GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
-#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
+#undef NATIVE_CONTEXT_FIELD_ACCESSORS
// Lookup the slot called name, starting with the current context.
// There are three possibilities:
@@ -409,7 +421,7 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
- static const int kSize = kHeaderSize + GLOBAL_CONTEXT_SLOTS * kPointerSize;
+ static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
// GC support.
typedef FixedBodyDescriptor<
@@ -426,7 +438,7 @@ class Context: public FixedArray {
#ifdef DEBUG
// Bootstrapping-aware type checks.
- static bool IsBootstrappingOrContext(Object* object);
+ static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
static bool IsBootstrappingOrGlobalObject(Object* object);
#endif
};
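The SlotOffset() formula above bakes the heap-object tag into the returned
offset. A minimal sketch of the arithmetic, with illustrative constants for a
32-bit build (8-byte FixedArray header, 4-byte pointers, tag bit 1); the
values are assumptions for the example, not taken from this diff:

    static int SlotOffsetSketch(int index) {
      const int kHeaderSize = 8;     // FixedArray map + length (assumed)
      const int kPointerSize = 4;    // 32-bit build (assumed)
      const int kHeapObjectTag = 1;  // tagged pointers have bit 0 set
      return kHeaderSize + index * kPointerSize - kHeapObjectTag;
    }
    // SlotOffsetSketch(0) == 7: adding 7 to the tagged pointer lands on
    // slot 0, so the tag never needs to be stripped on the access path.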
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 77b260f036..e272fe6c08 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -51,6 +51,11 @@ inline double JunkStringValue() {
}
+inline double SignedZero(bool negative) {
+ return negative ? uint64_to_double(Double::kSignMask) : 0.0;
+}
+
+
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero, or any reasonable value if the argument is larger
// than what fits in an unsigned 32-bit integer.
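Note that the relocated SignedZero() now builds -0.0 from the raw sign bit
rather than from the literal -0.0. A self-contained sketch of the idea,
assuming IEEE-754 doubles and stubbing uint64_to_double() with memcpy:

    #include <cstdint>
    #include <cstring>

    static double SignedZeroSketch(bool negative) {
      const uint64_t kSignMask = static_cast<uint64_t>(1) << 63;
      uint64_t bits = negative ? kSignMask : 0;
      double result;
      std::memcpy(&result, &bits, sizeof(result));  // uint64_to_double stand-in
      return result;
    }
    // 1.0 / SignedZeroSketch(true) is -infinity: the portable way to tell
    // -0.0 from +0.0, since the two compare equal.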
@@ -263,6 +268,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
if (radix == 0) {
// Radix detection.
+ radix = 10;
if (*current == '0') {
++current;
if (current == end) return SignedZero(negative);
@@ -271,11 +277,8 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
++current;
if (current == end) return JunkStringValue();
} else {
- radix = 8;
leading_zero = true;
}
- } else {
- radix = 10;
}
} else if (radix == 16) {
if (*current == '0') {
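With this change, radix detection defaults to 10 up front and only a
"0x"/"0X" prefix selects 16; a bare leading zero merely sets leading_zero
instead of implying octal. The resulting rule, as a hypothetical helper:

    // Mirrors the radix == 0 detection above (sketch, not V8 code).
    static int DetectRadix(const char* s) {
      if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X')) return 16;
      return 10;  // "012" now parses as 12, no longer as octal 10
    }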
@@ -459,16 +462,23 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
- bool negative = false;
+ enum Sign {
+ NONE,
+ NEGATIVE,
+ POSITIVE
+ };
+
+ Sign sign = NONE;
if (*current == '+') {
// Ignore leading sign.
++current;
if (current == end) return JunkStringValue();
+ sign = POSITIVE;
} else if (*current == '-') {
++current;
if (current == end) return JunkStringValue();
- negative = true;
+ sign = NEGATIVE;
}
static const char kInfinitySymbol[] = "Infinity";
@@ -483,34 +493,34 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
}
ASSERT(buffer_pos == 0);
- return negative ? -V8_INFINITY : V8_INFINITY;
+ return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
}
bool leading_zero = false;
if (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
leading_zero = true;
// It could be hexadecimal value.
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
- if (current == end || !isDigit(*current, 16)) {
+ if (current == end || !isDigit(*current, 16) || sign != NONE) {
return JunkStringValue(); // "0x".
}
return InternalStringToIntDouble<4>(unicode_cache,
current,
end,
- negative,
+ false,
allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
}
}
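Tracking a three-valued Sign instead of a bool lets the hex branch reject an
explicit sign: with sign != NONE, "0x..." is junk, matching JS semantics
where Number("-0x10") is NaN while parseInt("-0x10", 16) is -16. The shape
being rejected, as a hypothetical predicate:

    // True for strings of the form [+-]0x..., now treated as junk above.
    static bool HasSignedHexPrefix(const char* s) {
      bool has_sign = (s[0] == '+' || s[0] == '-');
      return has_sign && s[1] == '0' && (s[2] == 'x' || s[2] == 'X');
    }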
@@ -555,7 +565,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
// leading zeros (if any).
while (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
exponent--; // Move this 0 into the exponent.
}
}
@@ -647,7 +657,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
return InternalStringToIntDouble<3>(unicode_cache,
buffer,
buffer + buffer_pos,
- negative,
+ sign == NEGATIVE,
allow_trailing_junk);
}
@@ -660,7 +670,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return negative ? -converted : converted;
+ return (sign == NEGATIVE) ? -converted : converted;
}
} } // namespace v8::internal
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 70559c9e9d..1fbb5f1182 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -52,8 +52,13 @@ inline bool isDigit(int x, int radix) {
}
-inline double SignedZero(bool negative) {
- return negative ? -0.0 : 0.0;
+// The fast double-to-(unsigned-)int conversion routine does not guarantee
+// rounding towards zero.
+// For NaN and values outside the int range, return INT_MIN or INT_MAX.
+inline int FastD2IChecked(double x) {
+ if (!(x >= INT_MIN)) return INT_MIN; // Negation to catch NaNs.
+ if (x > INT_MAX) return INT_MAX;
+ return static_cast<int>(x);
}
@@ -62,8 +67,6 @@ inline double SignedZero(bool negative) {
// The result is unspecified if x is infinite or NaN, or if the rounded
// integer value is outside the range of type int.
inline int FastD2I(double x) {
- // The static_cast convertion from double to int used to be slow, but
- // as new benchmarks show, now it is much faster than lrint().
return static_cast<int>(x);
}
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index faad6d409a..811c0aa2e6 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -64,9 +64,20 @@ void StatsCounterTimer::Stop() {
counter_.Increment(milliseconds);
}
+void Histogram::AddSample(int sample) {
+ if (Enabled()) {
+ Isolate::Current()->stats_table()->AddHistogramSample(histogram_, sample);
+ }
+}
+
+void* Histogram::CreateHistogram() const {
+ return Isolate::Current()->stats_table()->
+ CreateHistogram(name_, min_, max_, num_buckets_);
+}
+
// Start the timer.
void HistogramTimer::Start() {
- if (GetHistogram() != NULL) {
+ if (histogram_.Enabled()) {
stop_time_ = 0;
start_time_ = OS::Ticks();
}
@@ -74,20 +85,13 @@ void HistogramTimer::Start() {
// Stop the timer and record the results.
void HistogramTimer::Stop() {
- if (histogram_ != NULL) {
+ if (histogram_.Enabled()) {
stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- Isolate::Current()->stats_table()->
- AddHistogramSample(histogram_, milliseconds);
+ histogram_.AddSample(milliseconds);
}
}
-
-void* HistogramTimer::CreateHistogram() const {
- return Isolate::Current()->stats_table()->
- CreateHistogram(name_, 0, 10000, 50);
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 6498a0242f..577280f444 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -169,8 +169,7 @@ struct StatsCounter {
protected:
// Returns the cached address of this counter location.
int* GetPtr() {
- if (lookup_done_)
- return ptr_;
+ if (lookup_done_) return ptr_;
lookup_done_ = true;
ptr_ = FindLocationInStatsTable();
return ptr_;
@@ -199,25 +198,30 @@ struct StatsCounterTimer {
}
};
-// A HistogramTimer allows distributions of results to be created
-// HistogramTimer t = { L"foo", NULL, false, 0, 0 };
-struct HistogramTimer {
+// A Histogram represents a dynamically created histogram in the StatsTable.
+//
+// This class is designed to be POD initialized. It will be registered with
+// the histogram system on first use. For example:
+// Histogram h = { "myhist", 0, 10000, 50, NULL, false };
+struct Histogram {
const char* name_;
+ int min_;
+ int max_;
+ int num_buckets_;
void* histogram_;
bool lookup_done_;
- int64_t start_time_;
- int64_t stop_time_;
-
- // Start the timer.
- void Start();
+ // Add a single sample to this histogram.
+ void AddSample(int sample);
- // Stop the timer and record the results.
- void Stop();
+ // Returns true if this histogram is enabled.
+ bool Enabled() {
+ return GetHistogram() != NULL;
+ }
- // Returns true if the timer is running.
- bool Running() {
- return (histogram_ != NULL) && (start_time_ != 0) && (stop_time_ == 0);
+ // Reset the cached internal pointer.
+ void Reset() {
+ lookup_done_ = false;
}
protected:
@@ -234,6 +238,30 @@ struct HistogramTimer {
void* CreateHistogram() const;
};
+// A HistogramTimer allows distributions of timing results to be created.
+// HistogramTimer t = { {"foo", 0, 10000, 50, NULL, false}, 0, 0 };
+struct HistogramTimer {
+ Histogram histogram_;
+
+ int64_t start_time_;
+ int64_t stop_time_;
+
+ // Start the timer.
+ void Start();
+
+ // Stop the timer and record the results.
+ void Stop();
+
+ // Returns true if the timer is running.
+ bool Running() {
+ return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0);
+ }
+
+ void Reset() {
+ histogram_.Reset();
+ }
+};
+
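OS::Ticks() counts microseconds, hence the divide-by-1000 in
HistogramTimer::Stop(). The same arithmetic as a self-contained sketch, with
the platform layer stubbed via <chrono> (illustrative only; the real struct
reports through the stats table):

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    static int64_t TicksMicros() {  // stand-in for OS::Ticks()
      using namespace std::chrono;
      return duration_cast<microseconds>(
          steady_clock::now().time_since_epoch()).count();
    }

    int main() {
      int64_t start_time = TicksMicros();
      // ... timed work runs here ...
      int64_t stop_time = TicksMicros();
      // Same computation as HistogramTimer::Stop().
      int milliseconds = static_cast<int>(stop_time - start_time) / 1000;
      std::printf("sample: %d ms\n", milliseconds);
      return 0;
    }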
// Helper class for scoping a HistogramTimer.
class HistogramTimerScope BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 6e2e771a41..9cd4484209 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -188,7 +188,7 @@ class ProfilerEventsProcessor : public Thread {
#define PROFILE(isolate, Call) \
- LOG(isolate, Call); \
+ LOG_CODE_EVENT(isolate, Call); \
do { \
if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
v8::internal::CpuProfiler::Call; \
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 7a01d55148..f20a41aa5f 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -284,9 +284,9 @@ Handle<Value> Shell::Load(const Arguments& args) {
return Undefined();
}
-static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
- if (value_in->IsUint32()) {
- return value_in->Uint32Value();
+static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) {
+ if (value_in->IsInt32()) {
+ return value_in->Int32Value();
}
Local<Value> number = value_in->ToNumber();
@@ -296,7 +296,15 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
Local<Int32> int32 = number->ToInt32();
if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
- int32_t raw_value = int32->Int32Value();
+ int32_t value = int32->Int32Value();
+ if (try_catch->HasCaught()) return 0;
+
+ return value;
+}
+
+
+static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
+ int32_t raw_value = convertToInt(value_in, try_catch);
if (try_catch->HasCaught()) return 0;
if (raw_value < 0) {
@@ -312,14 +320,18 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
ThrowException(
String::New("Array length exceeds maximum length."));
}
- return static_cast<size_t>(raw_value);
+ return raw_value;
}
+// TODO(rossberg): should replace these with proper uses of HasInstance,
+// once we figure out a good way to make the templates global.
const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
+const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-Handle<Value> Shell::CreateExternalArrayBuffer(int32_t length) {
+Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
+ int32_t length) {
static const int32_t kMaxSize = 0x7fffffff;
// Make sure the total size fits into a (signed) int.
if (length < 0 || length > kMaxSize) {
@@ -327,11 +339,10 @@ Handle<Value> Shell::CreateExternalArrayBuffer(int32_t length) {
}
uint8_t* data = new uint8_t[length];
if (data == NULL) {
- return ThrowException(String::New("Memory allocation failed."));
+ return ThrowException(String::New("Memory allocation failed"));
}
memset(data, 0, length);
- Handle<Object> buffer = Object::New();
buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
@@ -346,28 +357,73 @@ Handle<Value> Shell::CreateExternalArrayBuffer(int32_t length) {
}
-Handle<Value> Shell::CreateExternalArrayBuffer(const Arguments& args) {
+Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
+ if (!args.IsConstructCall()) {
+ Handle<Value>* rec_args = new Handle<Value>[args.Length()];
+ for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
+ Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
+ delete[] rec_args;
+ return result;
+ }
+
if (args.Length() == 0) {
return ThrowException(
- String::New("ArrayBuffer constructor must have one parameter."));
+ String::New("ArrayBuffer constructor must have one argument"));
}
TryCatch try_catch;
int32_t length = convertToUint(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.Exception();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
- return CreateExternalArrayBuffer(length);
+ return CreateExternalArrayBuffer(args.This(), length);
+}
+
+
+Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
+ Handle<Object> buffer,
+ ExternalArrayType type,
+ int32_t length,
+ int32_t byteLength,
+ int32_t byteOffset,
+ int32_t element_size) {
+ ASSERT(element_size == 1 || element_size == 2 ||
+ element_size == 4 || element_size == 8);
+ ASSERT(byteLength == length * element_size);
+
+ void* data = buffer->GetIndexedPropertiesExternalArrayData();
+ ASSERT(data != NULL);
+
+ array->SetIndexedPropertiesToExternalArrayData(
+ static_cast<uint8_t*>(data) + byteOffset, type, length);
+ array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
+ array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
+ array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
+ array->Set(String::New("length"), Int32::New(length), ReadOnly);
+ array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
+ array->Set(String::New("buffer"), buffer, ReadOnly);
+
+ return array;
}
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size) {
+ if (!args.IsConstructCall()) {
+ Handle<Value>* rec_args = new Handle<Value>[args.Length()];
+ for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
+ Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
+ delete[] rec_args;
+ return result;
+ }
+
TryCatch try_catch;
ASSERT(element_size == 1 || element_size == 2 ||
element_size == 4 || element_size == 8);
- // Currently, only the following constructors are supported:
+ // All of the following constructors are supported:
// TypedArray(unsigned long length)
+ // TypedArray(type[] array)
+ // TypedArray(TypedArray array)
// TypedArray(ArrayBuffer buffer,
// optional unsigned long byteOffset,
// optional unsigned long length)
@@ -375,29 +431,31 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
int32_t length;
int32_t byteLength;
int32_t byteOffset;
+ bool init_from_array = false;
if (args.Length() == 0) {
return ThrowException(
- String::New("Array constructor must have at least one parameter."));
+ String::New("Array constructor must have at least one argument"));
}
if (args[0]->IsObject() &&
- !args[0]->ToObject()->GetHiddenValue(
- String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
+ !args[0]->ToObject()->GetHiddenValue(
+ String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
+ // Construct from ArrayBuffer.
buffer = args[0]->ToObject();
int32_t bufferLength =
convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
- if (try_catch.HasCaught()) return try_catch.Exception();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() < 2 || args[1]->IsUndefined()) {
byteOffset = 0;
} else {
byteOffset = convertToUint(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.Exception();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
if (byteOffset > bufferLength) {
return ThrowException(String::New("byteOffset out of bounds"));
}
if (byteOffset % element_size != 0) {
return ThrowException(
- String::New("byteOffset must be multiple of element_size"));
+ String::New("byteOffset must be multiple of element size"));
}
}
@@ -406,41 +464,312 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
length = byteLength / element_size;
if (byteLength % element_size != 0) {
return ThrowException(
- String::New("buffer size must be multiple of element_size"));
+ String::New("buffer size must be multiple of element size"));
}
} else {
length = convertToUint(args[2], &try_catch);
- if (try_catch.HasCaught()) return try_catch.Exception();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
byteLength = length * element_size;
if (byteOffset + byteLength > bufferLength) {
return ThrowException(String::New("length out of bounds"));
}
}
} else {
- length = convertToUint(args[0], &try_catch);
+ if (args[0]->IsObject() &&
+ args[0]->ToObject()->Has(String::New("length"))) {
+ // Construct from array.
+ length = convertToUint(
+ args[0]->ToObject()->Get(String::New("length")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ init_from_array = true;
+ } else {
+ // Construct from size.
+ length = convertToUint(args[0], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ }
byteLength = length * element_size;
byteOffset = 0;
- Handle<Value> result = CreateExternalArrayBuffer(byteLength);
- if (!result->IsObject()) return result;
+
+ Handle<Object> global = Context::GetCurrent()->Global();
+ Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
+ ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
+ Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
+ Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
+ 1, buffer_args);
+ if (try_catch.HasCaught()) return result;
buffer = result->ToObject();
}
- void* data = buffer->GetIndexedPropertiesExternalArrayData();
- ASSERT(data != NULL);
+ Handle<Object> array = CreateExternalArray(
+ args.This(), buffer, type, length, byteLength, byteOffset, element_size);
- Handle<Object> array = Object::New();
- array->SetIndexedPropertiesToExternalArrayData(
- static_cast<uint8_t*>(data) + byteOffset, type, length);
- array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
- array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
- array->Set(String::New("length"), Int32::New(length), ReadOnly);
- array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
- array->Set(String::New("buffer"), buffer, ReadOnly);
+ if (init_from_array) {
+ Handle<Object> init = args[0]->ToObject();
+ for (int i = 0; i < length; ++i) array->Set(i, init->Get(i));
+ }
return array;
}
+Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
+ TryCatch try_catch;
+
+ if (!args.This()->IsObject()) {
+ return ThrowException(
+ String::New("'slice' invoked on non-object receiver"));
+ }
+
+ Local<Object> self = args.This();
+ Local<Value> marker =
+ self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
+ if (marker.IsEmpty()) {
+ return ThrowException(
+ String::New("'slice' invoked on wrong receiver type"));
+ }
+
+ int32_t length =
+ convertToUint(self->Get(String::New("byteLength")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (args.Length() == 0) {
+ return ThrowException(
+ String::New("'slice' must have at least one argument"));
+ }
+ int32_t begin = convertToInt(args[0], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (begin < 0) begin += length;
+ if (begin < 0) begin = 0;
+ if (begin > length) begin = length;
+
+ int32_t end;
+ if (args.Length() < 2 || args[1]->IsUndefined()) {
+ end = length;
+ } else {
+ end = convertToInt(args[1], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (end < 0) end += length;
+ if (end < 0) end = 0;
+ if (end > length) end = length;
+ if (end < begin) end = begin;
+ }
+
+ Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
+ Handle<Value> new_args[] = { Uint32::New(end - begin) };
+ Handle<Value> result = constructor->NewInstance(1, new_args);
+ if (try_catch.HasCaught()) return result;
+ Handle<Object> buffer = result->ToObject();
+ uint8_t* dest =
+ static_cast<uint8_t*>(buffer->GetIndexedPropertiesExternalArrayData());
+ uint8_t* src = begin + static_cast<uint8_t*>(
+ self->GetIndexedPropertiesExternalArrayData());
+ memcpy(dest, src, end - begin);
+
+ return buffer;
+}
+
+
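Both 'slice' above and 'subarray' below clamp begin/end with the same
Array.prototype.slice rules: negative indices count back from the end,
results are confined to [0, length], and end never precedes begin. The
logic, factored into a hypothetical helper:

    #include <cstdint>

    static void ClampRange(int32_t length, int32_t* begin, int32_t* end) {
      if (*begin < 0) *begin += length;
      if (*begin < 0) *begin = 0;
      if (*begin > length) *begin = length;
      if (*end < 0) *end += length;
      if (*end < 0) *end = 0;
      if (*end > length) *end = length;
      if (*end < *begin) *end = *begin;
    }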
+Handle<Value> Shell::ArraySubArray(const Arguments& args) {
+ TryCatch try_catch;
+
+ if (!args.This()->IsObject()) {
+ return ThrowException(
+ String::New("'subarray' invoked on non-object receiver"));
+ }
+
+ Local<Object> self = args.This();
+ Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+ if (marker.IsEmpty()) {
+ return ThrowException(
+ String::New("'subarray' invoked on wrong receiver type"));
+ }
+
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t length =
+ convertToUint(self->Get(String::New("length")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t byteOffset =
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t element_size =
+ convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (args.Length() == 0) {
+ return ThrowException(
+ String::New("'subarray' must have at least one argument"));
+ }
+ int32_t begin = convertToInt(args[0], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (begin < 0) begin += length;
+ if (begin < 0) begin = 0;
+ if (begin > length) begin = length;
+
+ int32_t end;
+ if (args.Length() < 2 || args[1]->IsUndefined()) {
+ end = length;
+ } else {
+ end = convertToInt(args[1], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (end < 0) end += length;
+ if (end < 0) end = 0;
+ if (end > length) end = length;
+ if (end < begin) end = begin;
+ }
+
+ length = end - begin;
+ byteOffset += begin * element_size;
+
+ Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
+ Handle<Value> construct_args[] = {
+ buffer, Uint32::New(byteOffset), Uint32::New(length)
+ };
+ return constructor->NewInstance(3, construct_args);
+}
+
+
+Handle<Value> Shell::ArraySet(const Arguments& args) {
+ TryCatch try_catch;
+
+ if (!args.This()->IsObject()) {
+ return ThrowException(
+ String::New("'set' invoked on non-object receiver"));
+ }
+
+ Local<Object> self = args.This();
+ Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+ if (marker.IsEmpty()) {
+ return ThrowException(
+ String::New("'set' invoked on wrong receiver type"));
+ }
+ int32_t length =
+ convertToUint(self->Get(String::New("length")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t element_size =
+ convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (args.Length() == 0) {
+ return ThrowException(
+ String::New("'set' must have at least one argument"));
+ }
+ if (!args[0]->IsObject() ||
+ !args[0]->ToObject()->Has(String::New("length"))) {
+ return ThrowException(
+ String::New("'set' invoked with non-array argument"));
+ }
+ Handle<Object> source = args[0]->ToObject();
+ int32_t source_length =
+ convertToUint(source->Get(String::New("length")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ int32_t offset;
+ if (args.Length() < 2 || args[1]->IsUndefined()) {
+ offset = 0;
+ } else {
+ offset = convertToUint(args[1], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ }
+ if (offset + source_length > length) {
+ return ThrowException(String::New("offset or source length out of bounds"));
+ }
+
+ int32_t source_element_size;
+ if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
+ source_element_size = 0;
+ } else {
+ source_element_size =
+ convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ }
+
+ if (element_size == source_element_size &&
+ self->GetConstructor()->StrictEquals(source->GetConstructor())) {
+ // Use memmove on the array buffers.
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ Handle<Object> source_buffer =
+ source->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t byteOffset =
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t source_byteOffset =
+ convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
+ buffer->GetIndexedPropertiesExternalArrayData());
+ uint8_t* src = source_byteOffset + static_cast<uint8_t*>(
+ source_buffer->GetIndexedPropertiesExternalArrayData());
+ memmove(dest, src, source_length * element_size);
+ } else if (source_element_size == 0) {
+ // Source is not a typed array, copy element-wise sequentially.
+ for (int i = 0; i < source_length; ++i) {
+ self->Set(offset + i, source->Get(i));
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ }
+ } else {
+ // Need to copy element-wise to make the right conversions.
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ Handle<Object> source_buffer =
+ source->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (buffer->StrictEquals(source_buffer)) {
+ // Same backing store, need to handle overlap correctly.
+ // This gets a bit tricky in the case of different element sizes
+ // (which, of course, is extremely unlikely to ever occur in practice).
+ int32_t byteOffset =
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t source_byteOffset =
+ convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ // Copy as much as we can from left to right.
+ int i = 0;
+ int32_t next_dest_offset = byteOffset + (offset + 1) * element_size;
+ int32_t next_src_offset = source_byteOffset + source_element_size;
+ while (i < length && next_dest_offset <= next_src_offset) {
+ self->Set(offset + i, source->Get(i));
+ ++i;
+ next_dest_offset += element_size;
+ next_src_offset += source_element_size;
+ }
+ // Of what's left, copy as much as we can from right to left.
+ int j = length - 1;
+ int32_t dest_offset = byteOffset + (offset + j) * element_size;
+ int32_t src_offset = source_byteOffset + j * source_element_size;
+ while (j >= i && dest_offset >= src_offset) {
+ self->Set(offset + j, source->Get(j));
+ --j;
+ dest_offset -= element_size;
+ src_offset -= source_element_size;
+ }
+ // There can be at most 8 entries left in the middle that need buffering
+ // (because the largest element_size is 8 times the smallest).
+ ASSERT(j + 1 - i <= 8);
+ Handle<Value> temp[8];
+ for (int k = i; k <= j; ++k) {
+ temp[k - i] = source->Get(k);
+ }
+ for (int k = i; k <= j; ++k) {
+ self->Set(offset + k, temp[k - i]);
+ }
+ } else {
+ // Different backing stores, safe to copy element-wise sequentially.
+ for (int i = 0; i < source_length; ++i)
+ self->Set(offset + i, source->Get(i));
+ }
+ }
+
+ return Undefined();
+}
+
+
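The three-phase copy above is the subtle part of 'set'. A standalone sketch
of the same algorithm on raw bytes (assumed simplifications: absolute byte
offsets into one shared backing store, and little-endian loads/stores
through int64_t standing in for the JS element conversions):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static void OverlapSafeSet(uint8_t* base,
                               int32_t dest_off, int32_t dest_size,
                               int32_t src_off, int32_t src_size,
                               int32_t length) {
      auto get = [&](int32_t i) {
        int64_t v = 0;
        std::memcpy(&v, base + src_off + i * src_size, src_size);
        return v;
      };
      auto put = [&](int32_t i, int64_t v) {
        std::memcpy(base + dest_off + i * dest_size, &v, dest_size);
      };
      // Phase 1: left to right while each destination element ends no
      // later than the source element it reads.
      int32_t i = 0;
      while (i < length &&
             dest_off + (i + 1) * dest_size <= src_off + (i + 1) * src_size) {
        put(i, get(i));
        ++i;
      }
      // Phase 2: of the rest, right to left while each destination element
      // starts no earlier than its source element.
      int32_t j = length - 1;
      while (j >= i && dest_off + j * dest_size >= src_off + j * src_size) {
        put(j, get(j));
        --j;
      }
      // Phase 3: the leftover middle is at most 8 elements (element sizes
      // span 1..8 bytes), so it can be buffered wholesale.
      assert(j + 1 - i <= 8);
      int64_t temp[8];
      for (int32_t k = i; k <= j; ++k) temp[k - i] = get(k);
      for (int32_t k = i; k <= j; ++k) put(k, temp[k - i]);
    }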
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
HandleScope scope;
int32_t length =
@@ -451,11 +780,6 @@ void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
}
-Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
- return CreateExternalArrayBuffer(args);
-}
-
-
Handle<Value> Shell::Int8Array(const Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
}
@@ -472,8 +796,8 @@ Handle<Value> Shell::Int16Array(const Arguments& args) {
Handle<Value> Shell::Uint16Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalUnsignedShortArray,
- sizeof(uint16_t));
+ return CreateExternalArray(
+ args, kExternalUnsignedShortArray, sizeof(uint16_t));
}
@@ -488,18 +812,18 @@ Handle<Value> Shell::Uint32Array(const Arguments& args) {
Handle<Value> Shell::Float32Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalFloatArray,
- sizeof(float)); // NOLINT
+ return CreateExternalArray(
+ args, kExternalFloatArray, sizeof(float)); // NOLINT
}
Handle<Value> Shell::Float64Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalDoubleArray,
- sizeof(double)); // NOLINT
+ return CreateExternalArray(
+ args, kExternalDoubleArray, sizeof(double)); // NOLINT
}
-Handle<Value> Shell::PixelArray(const Arguments& args) {
+Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
}
@@ -729,7 +1053,7 @@ void Shell::InstallUtilityScript() {
i::Debug* debug = i::Isolate::Current()->debug();
debug->Load();
i::Handle<i::JSObject> js_debug
- = i::Handle<i::JSObject>(debug->debug_context()->global());
+ = i::Handle<i::JSObject>(debug->debug_context()->global_object());
utility_context_->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(HEAP->undefined_value());
@@ -794,6 +1118,27 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
};
#endif
+
+Handle<FunctionTemplate> Shell::CreateArrayBufferTemplate(
+ InvocationCallback fun) {
+ Handle<FunctionTemplate> buffer_template = FunctionTemplate::New(fun);
+ Local<Template> proto_template = buffer_template->PrototypeTemplate();
+ proto_template->Set(String::New("slice"),
+ FunctionTemplate::New(ArrayBufferSlice));
+ return buffer_template;
+}
+
+
+Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
+ Handle<FunctionTemplate> array_template = FunctionTemplate::New(fun);
+ Local<Template> proto_template = array_template->PrototypeTemplate();
+ proto_template->Set(String::New("set"), FunctionTemplate::New(ArraySet));
+ proto_template->Set(String::New("subarray"),
+ FunctionTemplate::New(ArraySubArray));
+ return array_template;
+}
+
+
Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
@@ -812,26 +1157,28 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
FunctionTemplate::New(DisableProfiler));
// Bind the handlers for external arrays.
+ PropertyAttribute attr =
+ static_cast<PropertyAttribute>(ReadOnly | DontDelete);
global_template->Set(String::New("ArrayBuffer"),
- FunctionTemplate::New(ArrayBuffer));
+ CreateArrayBufferTemplate(ArrayBuffer), attr);
global_template->Set(String::New("Int8Array"),
- FunctionTemplate::New(Int8Array));
+ CreateArrayTemplate(Int8Array), attr);
global_template->Set(String::New("Uint8Array"),
- FunctionTemplate::New(Uint8Array));
+ CreateArrayTemplate(Uint8Array), attr);
global_template->Set(String::New("Int16Array"),
- FunctionTemplate::New(Int16Array));
+ CreateArrayTemplate(Int16Array), attr);
global_template->Set(String::New("Uint16Array"),
- FunctionTemplate::New(Uint16Array));
+ CreateArrayTemplate(Uint16Array), attr);
global_template->Set(String::New("Int32Array"),
- FunctionTemplate::New(Int32Array));
+ CreateArrayTemplate(Int32Array), attr);
global_template->Set(String::New("Uint32Array"),
- FunctionTemplate::New(Uint32Array));
+ CreateArrayTemplate(Uint32Array), attr);
global_template->Set(String::New("Float32Array"),
- FunctionTemplate::New(Float32Array));
+ CreateArrayTemplate(Float32Array), attr);
global_template->Set(String::New("Float64Array"),
- FunctionTemplate::New(Float64Array));
- global_template->Set(String::New("PixelArray"),
- FunctionTemplate::New(PixelArray));
+ CreateArrayTemplate(Float64Array), attr);
+ global_template->Set(String::New("Uint8ClampedArray"),
+ CreateArrayTemplate(Uint8ClampedArray), attr);
#ifdef LIVE_OBJECT_LIST
global_template->Set(String::New("lol_is_enabled"), True());
@@ -864,7 +1211,7 @@ void Shell::Initialize() {
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(i::FLAG_map_counters);
- if (i::FLAG_dump_counters) {
+ if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
V8::SetCounterFunction(LookupCounter);
V8::SetCreateHistogramFunction(CreateHistogram);
V8::SetAddHistogramSampleFunction(AddHistogramSample);
@@ -954,20 +1301,24 @@ void Shell::OnExit() {
counters[j].key = i.CurrentKey();
}
qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
- printf("+--------------------------------------------+-------------+\n");
- printf("| Name | Value |\n");
- printf("+--------------------------------------------+-------------+\n");
+ printf("+----------------------------------------------------------------+"
+ "-------------+\n");
+ printf("| Name |"
+ " Value |\n");
+ printf("+----------------------------------------------------------------+"
+ "-------------+\n");
for (j = 0; j < number_of_counters; j++) {
Counter* counter = counters[j].counter;
const char* key = counters[j].key;
if (counter->is_histogram()) {
- printf("| c:%-40s | %11i |\n", key, counter->count());
- printf("| t:%-40s | %11i |\n", key, counter->sample_total());
+ printf("| c:%-60s | %11i |\n", key, counter->count());
+ printf("| t:%-60s | %11i |\n", key, counter->sample_total());
} else {
- printf("| %-42s | %11i |\n", key, counter->count());
+ printf("| %-62s | %11i |\n", key, counter->count());
}
}
- printf("+--------------------------------------------+-------------+\n");
+ printf("+----------------------------------------------------------------+"
+ "-------------+\n");
delete [] counters;
}
delete counters_file_;
@@ -1238,6 +1589,11 @@ void SourceGroup::ExecuteInThread() {
Execute();
}
context.Dispose();
+ if (Shell::options.send_idle_notification) {
+ const int kLongIdlePauseInMs = 1000;
+ V8::ContextDisposedNotification();
+ V8::IdleNotification(kLongIdlePauseInMs);
+ }
}
if (done_semaphore_ != NULL) done_semaphore_->Signal();
} while (!Shell::options.last_run);
@@ -1283,6 +1639,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--test") == 0) {
options.test_shell = true;
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--send-idle-notification") == 0) {
+ options.send_idle_notification = true;
+ argv[i] = NULL;
} else if (strcmp(argv[i], "--preemption") == 0) {
#ifdef V8_SHARED
printf("D8 with shared library does not support multi-threading\n");
@@ -1439,13 +1798,11 @@ int Shell::RunMain(int argc, char* argv[]) {
}
if (!options.last_run) {
context.Dispose();
-#if !defined(V8_SHARED)
- if (i::FLAG_send_idle_notification) {
+ if (options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
V8::IdleNotification(kLongIdlePauseInMs);
}
-#endif // !V8_SHARED
}
#ifndef V8_SHARED
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 2789c6db3e..a62a81fd9c 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -31,7 +31,7 @@
#ifndef V8_SHARED
#include "allocation.h"
#include "hashmap.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "v8.h"
#else
#include "../include/v8.h"
@@ -67,7 +67,7 @@ class CounterCollection {
CounterCollection();
Counter* GetNextCounter();
private:
- static const unsigned kMaxCounters = 256;
+ static const unsigned kMaxCounters = 512;
uint32_t magic_number_;
uint32_t max_counters_;
uint32_t max_name_size_;
@@ -227,6 +227,7 @@ class ShellOptions {
#endif // V8_SHARED
script_executed(false),
last_run(true),
+ send_idle_notification(false),
stress_opt(false),
stress_deopt(false),
interactive_shell(false),
@@ -249,6 +250,7 @@ class ShellOptions {
#endif // V8_SHARED
bool script_executed;
bool last_run;
+ bool send_idle_notification;
bool stress_opt;
bool stress_deopt;
bool interactive_shell;
@@ -322,7 +324,10 @@ class Shell : public i::AllStatic {
static Handle<Value> Uint32Array(const Arguments& args);
static Handle<Value> Float32Array(const Arguments& args);
static Handle<Value> Float64Array(const Arguments& args);
- static Handle<Value> PixelArray(const Arguments& args);
+ static Handle<Value> Uint8ClampedArray(const Arguments& args);
+ static Handle<Value> ArrayBufferSlice(const Arguments& args);
+ static Handle<Value> ArraySubArray(const Arguments& args);
+ static Handle<Value> ArraySet(const Arguments& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@@ -383,8 +388,17 @@ class Shell : public i::AllStatic {
static void RunShell();
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate();
- static Handle<Value> CreateExternalArrayBuffer(int32_t size);
- static Handle<Value> CreateExternalArrayBuffer(const Arguments& args);
+ static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
+ static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
+ static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
+ int32_t size);
+ static Handle<Object> CreateExternalArray(Handle<Object> array,
+ Handle<Object> buffer,
+ ExternalArrayType type,
+ int32_t length,
+ int32_t byteLength,
+ int32_t byteOffset,
+ int32_t element_size);
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size);
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index d0e24abc50..a54cb238c5 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -318,7 +318,6 @@ function DateNow() {
// ECMA 262 - 15.9.5.2
function DateToString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this)
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var time_zone_string = LocalTimezoneString(this)
@@ -328,7 +327,6 @@ function DateToString() {
// ECMA 262 - 15.9.5.3
function DateToDateString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DateString(this);
@@ -337,7 +335,6 @@ function DateToDateString() {
// ECMA 262 - 15.9.5.4
function DateToTimeString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var time_zone_string = LocalTimezoneString(this);
@@ -353,7 +350,6 @@ function DateToLocaleString() {
// ECMA 262 - 15.9.5.6
function DateToLocaleDateString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return LongDateString(this);
@@ -362,7 +358,6 @@ function DateToLocaleDateString() {
// ECMA 262 - 15.9.5.7
function DateToLocaleTimeString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return TimeString(this);
@@ -371,133 +366,114 @@ function DateToLocaleTimeString() {
// ECMA 262 - 15.9.5.8
function DateValueOf() {
- CHECK_DATE(this);
return UTC_DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.9
function DateGetTime() {
- CHECK_DATE(this);
return UTC_DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.10
function DateGetFullYear() {
- CHECK_DATE(this);
return LOCAL_YEAR(this);
}
// ECMA 262 - 15.9.5.11
function DateGetUTCFullYear() {
- CHECK_DATE(this);
return UTC_YEAR(this);
}
// ECMA 262 - 15.9.5.12
function DateGetMonth() {
- CHECK_DATE(this);
return LOCAL_MONTH(this);
}
// ECMA 262 - 15.9.5.13
function DateGetUTCMonth() {
- CHECK_DATE(this);
return UTC_MONTH(this);
}
// ECMA 262 - 15.9.5.14
function DateGetDate() {
- CHECK_DATE(this);
return LOCAL_DAY(this);
}
// ECMA 262 - 15.9.5.15
function DateGetUTCDate() {
- CHECK_DATE(this);
return UTC_DAY(this);
}
// ECMA 262 - 15.9.5.16
function DateGetDay() {
- CHECK_DATE(this);
return LOCAL_WEEKDAY(this);
}
// ECMA 262 - 15.9.5.17
function DateGetUTCDay() {
- CHECK_DATE(this);
return UTC_WEEKDAY(this);
}
// ECMA 262 - 15.9.5.18
function DateGetHours() {
- CHECK_DATE(this);
return LOCAL_HOUR(this);
}
// ECMA 262 - 15.9.5.19
function DateGetUTCHours() {
- CHECK_DATE(this);
return UTC_HOUR(this);
}
// ECMA 262 - 15.9.5.20
function DateGetMinutes() {
- CHECK_DATE(this);
return LOCAL_MIN(this);
}
// ECMA 262 - 15.9.5.21
function DateGetUTCMinutes() {
- CHECK_DATE(this);
return UTC_MIN(this);
}
// ECMA 262 - 15.9.5.22
function DateGetSeconds() {
- CHECK_DATE(this);
return LOCAL_SEC(this);
}
// ECMA 262 - 15.9.5.23
function DateGetUTCSeconds() {
- CHECK_DATE(this);
return UTC_SEC(this)
}
// ECMA 262 - 15.9.5.24
function DateGetMilliseconds() {
- CHECK_DATE(this);
return LOCAL_MS(this);
}
// ECMA 262 - 15.9.5.25
function DateGetUTCMilliseconds() {
- CHECK_DATE(this);
return UTC_MS(this);
}
// ECMA 262 - 15.9.5.26
function DateGetTimezoneOffset() {
- CHECK_DATE(this);
return TIMEZONE_OFFSET(this);
}
@@ -512,7 +488,6 @@ function DateSetTime(ms) {
// ECMA 262 - 15.9.5.28
function DateSetMilliseconds(ms) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
@@ -522,7 +497,6 @@ function DateSetMilliseconds(ms) {
// ECMA 262 - 15.9.5.29
function DateSetUTCMilliseconds(ms) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(UTC_HOUR(this),
@@ -535,7 +509,6 @@ function DateSetUTCMilliseconds(ms) {
// ECMA 262 - 15.9.5.30
function DateSetSeconds(sec, ms) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
@@ -546,7 +519,6 @@ function DateSetSeconds(sec, ms) {
// ECMA 262 - 15.9.5.31
function DateSetUTCSeconds(sec, ms) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
@@ -557,7 +529,6 @@ function DateSetUTCSeconds(sec, ms) {
// ECMA 262 - 15.9.5.33
function DateSetMinutes(min, sec, ms) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
@@ -570,7 +541,6 @@ function DateSetMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCMinutes(min, sec, ms) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
@@ -583,7 +553,6 @@ function DateSetUTCMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.35
function DateSetHours(hour, min, sec, ms) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
@@ -597,7 +566,6 @@ function DateSetHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCHours(hour, min, sec, ms) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
@@ -611,7 +579,6 @@ function DateSetUTCHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.36
function DateSetDate(date) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
@@ -621,7 +588,6 @@ function DateSetDate(date) {
// ECMA 262 - 15.9.5.37
function DateSetUTCDate(date) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
@@ -631,7 +597,6 @@ function DateSetUTCDate(date) {
// ECMA 262 - 15.9.5.38
function DateSetMonth(month, date) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
@@ -642,7 +607,6 @@ function DateSetMonth(month, date) {
// ECMA 262 - 15.9.5.39
function DateSetUTCMonth(month, date) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
@@ -653,7 +617,6 @@ function DateSetUTCMonth(month, date) {
// ECMA 262 - 15.9.5.40
function DateSetFullYear(year, month, date) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
year = ToNumber(year);
var argc = %_ArgumentsLength();
@@ -674,7 +637,6 @@ function DateSetFullYear(year, month, date) {
// ECMA 262 - 15.9.5.41
function DateSetUTCFullYear(year, month, date) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
year = ToNumber(year);
var argc = %_ArgumentsLength();
@@ -695,7 +657,6 @@ function DateSetUTCFullYear(year, month, date) {
// ECMA 262 - 15.9.5.42
function DateToUTCString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
@@ -709,7 +670,6 @@ function DateToUTCString() {
// ECMA 262 - B.2.4
function DateGetYear() {
- CHECK_DATE(this);
return LOCAL_YEAR(this) - 1900;
}
@@ -757,7 +717,6 @@ function PadInt(n, digits) {
// ECMA 262 - 15.9.5.43
function DateToISOString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
var year = this.getUTCFullYear();
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index 91838e8ad0..163a0bd829 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -1449,6 +1449,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.profileRequest_(request, response);
} else if (request.command == 'changelive') {
this.changeLiveRequest_(request, response);
+ } else if (request.command == 'restartframe') {
+ this.restartFrameRequest_(request, response);
} else if (request.command == 'flags') {
this.debuggerFlagsRequest_(request, response);
} else if (request.command == 'v8flags') {
@@ -2076,7 +2078,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// Global evaluate.
if (global) {
- // Evaluate in the global context.
+ // Evaluate in the native context.
response.body = this.exec_state_.evaluateGlobal(
expression, Boolean(disable_break), additional_context_object);
return;
@@ -2358,9 +2360,6 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
DebugCommandProcessor.prototype.changeLiveRequest_ = function(
request, response) {
- if (!Debug.LiveEdit) {
- return response.failed('LiveEdit feature is not supported');
- }
if (!request.arguments) {
return response.failed('Missing arguments');
}
@@ -2398,6 +2397,37 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
};
+DebugCommandProcessor.prototype.restartFrameRequest_ = function(
+ request, response) {
+ if (!request.arguments) {
+ return response.failed('Missing arguments');
+ }
+ var frame = request.arguments.frame;
+
+ // No frames to restart.
+ if (this.exec_state_.frameCount() == 0) {
+ return response.failed('No frames');
+ }
+
+ var frame_mirror;
+ // Check whether a frame was specified.
+ if (!IS_UNDEFINED(frame)) {
+ var frame_number = %ToNumber(frame);
+ if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
+ return response.failed('Invalid frame "' + frame + '"');
+ }
+ // Restart specified frame.
+ frame_mirror = this.exec_state_.frame(frame_number);
+ } else {
+ // Restart selected frame.
+ frame_mirror = this.exec_state_.frame();
+ }
+
+ var result_description = Debug.LiveEdit.RestartFrame(frame_mirror);
+ response.body = {result: result_description};
+};
+
+
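The new handler uses the same JSON framing as the other commands handled by
DebugCommandProcessor; a request along the lines of
{"seq": 117, "type": "request", "command": "restartframe",
"arguments": {"frame": 0}} (seq and frame number illustrative) restarts
stack frame 0 via Debug.LiveEdit.RestartFrame and returns the LiveEdit
result description in the response body, while omitting arguments.frame
restarts the currently selected frame instead.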
DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
response) {
// Check for legal request.
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index ed70c6a8d9..6358d35317 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -97,8 +97,8 @@ static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
// Isolate::context() may have been NULL when "script collected" event
// occured.
if (context.is_null()) return v8::Local<v8::Context>();
- Handle<Context> global_context(context->global_context());
- return v8::Utils::ToLocal(global_context);
+ Handle<Context> native_context(context->native_context());
+ return v8::Utils::ToLocal(native_context);
}
@@ -698,7 +698,7 @@ void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
// the function will live in the heap until next gc, and can be found by
- // Runtime::FindSharedFunctionInfoInScript.
+ // Debug::FindSharedFunctionInfoInScript.
BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
it.ClearAllDebugBreak();
debug->RemoveDebugInfo(node->debug_info());
@@ -745,12 +745,15 @@ bool Debug::CompileDebuggerScript(int index) {
isolate->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> script_name = factory->NewStringFromAscii(name);
+ Handle<Context> context = isolate->native_context();
// Compile the script.
Handle<SharedFunctionInfo> function_info;
function_info = Compiler::Compile(source_code,
script_name,
- 0, 0, NULL, NULL,
+ 0, 0,
+ context,
+ NULL, NULL,
Handle<String>::null(),
NATIVES_CODE);
@@ -762,13 +765,12 @@ bool Debug::CompileDebuggerScript(int index) {
}
// Execute the shared function in the debugger context.
- Handle<Context> context = isolate->global_context();
bool caught_exception;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> exception =
- Execution::TryCall(function, Handle<Object>(context->global()),
+ Execution::TryCall(function, Handle<Object>(context->global_object()),
0, NULL, &caught_exception);
// Check for caught exceptions.
@@ -829,7 +831,7 @@ bool Debug::Load() {
// Expose the builtins object in the debugger context.
Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
- Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
+ Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate_,
JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
@@ -992,12 +994,11 @@ Object* Debug::Break(Arguments args) {
// Check that we indeed found the frame we are looking for.
CHECK(!it.done() && (it.frame()->fp() == thread_local_.last_fp_));
if (step_count > 1) {
- // Save old count and action to continue stepping after
- // StepOut
+ // Save old count and action to continue stepping after StepOut.
thread_local_.queued_step_count_ = step_count - 1;
}
- // Set up for StepOut to reach target frame
+ // Set up for StepOut to reach target frame.
step_action = StepOut;
step_count = count;
}
@@ -1096,7 +1097,7 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
factory->LookupAsciiSymbol("IsBreakPointTriggered");
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
- debug_context()->global()->GetPropertyNoExceptionThrown(
+ debug_context()->global_object()->GetPropertyNoExceptionThrown(
*is_break_point_triggered_symbol)));
// Get the break id as an object.
@@ -1136,14 +1137,16 @@ Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
}
-void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
+void Debug::SetBreakPoint(Handle<JSFunction> function,
Handle<Object> break_point_object,
int* source_position) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
- if (!EnsureDebugInfo(shared)) {
+ // Make sure the function is compiled and has set up the debug info.
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if retrieving debug info failed.
return;
}
@@ -1164,6 +1167,50 @@ void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
}
+bool Debug::SetBreakPointForScript(Handle<Script> script,
+ Handle<Object> break_point_object,
+ int* source_position) {
+ HandleScope scope(isolate_);
+
+ PrepareForBreakPoints();
+
+ // Obtain shared function info for the function.
+ Object* result = FindSharedFunctionInfoInScript(script, *source_position);
+ if (result->IsUndefined()) return false;
+
+ // Make sure the function has set up the debug info.
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+ if (!EnsureDebugInfo(shared, Handle<JSFunction>::null())) {
+ // Return if retrieving debug info failed.
+ return false;
+ }
+
+ // Find position within function. The script position might be before the
+ // source position of the first function.
+ int position;
+ if (shared->start_position() > *source_position) {
+ position = 0;
+ } else {
+ position = *source_position - shared->start_position();
+ }
+
+ Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+ // Source positions starts with zero.
+ ASSERT(position >= 0);
+
+ // Find the break point and change it.
+ BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
+ it.FindBreakLocationFromPosition(position);
+ it.SetBreakPoint(break_point_object);
+
+ *source_position = it.position() + shared->start_position();
+
+ // At least one active break point now.
+ ASSERT(debug_info->GetBreakPointCount() > 0);
+ return true;
+}
+
+
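The script-to-function position mapping above clamps to the function entry;
restated as a hypothetical helper:

    // Break positions are relative to the function; requests landing before
    // its start snap to position 0, the function entry.
    static int ToFunctionRelativePosition(int script_pos, int function_start) {
      return (script_pos < function_start) ? 0 : script_pos - function_start;
    }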
void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
HandleScope scope(isolate_);
@@ -1215,10 +1262,12 @@ void Debug::ClearAllBreakPoints() {
}
-void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
+void Debug::FloodWithOneShot(Handle<JSFunction> function) {
PrepareForBreakPoints();
- // Make sure the function has set up the debug info.
- if (!EnsureDebugInfo(shared)) {
+
+ // Make sure the function is compiled and has set up the debug info.
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return;
}
@@ -1238,8 +1287,8 @@ void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
if (!bindee.is_null() && bindee->IsJSFunction() &&
!JSFunction::cast(*bindee)->IsBuiltin()) {
- Handle<SharedFunctionInfo> shared_info(JSFunction::cast(*bindee)->shared());
- Debug::FloodWithOneShot(shared_info);
+ Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
+ Debug::FloodWithOneShot(bindee_function);
}
}
@@ -1254,11 +1303,9 @@ void Debug::FloodHandlerWithOneShot() {
for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->HasHandler()) {
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(
- JSFunction::cast(frame->function())->shared());
// Flood the function with the catch block with break points
- FloodWithOneShot(shared);
+ JSFunction* function = JSFunction::cast(frame->function());
+ FloodWithOneShot(Handle<JSFunction>(function));
return;
}
}
@@ -1325,14 +1372,14 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
frames_it.Advance();
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
- FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+ FloodWithOneShot(Handle<JSFunction>(function));
return;
}
// Get the debug info (create it if it does not exist).
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if ensuring debug info failed.
return;
}
@@ -1402,7 +1449,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
if (!frames_it.done()) {
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
- FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+ FloodWithOneShot(Handle<JSFunction>(function));
// Set target frame pointer.
ActivateStepOut(frames_it.frame());
}
@@ -1412,7 +1459,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Step next or step min.
// Fill the current function with one-shot break points.
- FloodWithOneShot(shared);
+ FloodWithOneShot(function);
// Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
@@ -1424,9 +1471,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
if (is_at_restarted_function) {
Handle<JSFunction> restarted_function(
JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
- Handle<SharedFunctionInfo> restarted_shared(
- restarted_function->shared());
- FloodWithOneShot(restarted_shared);
+ FloodWithOneShot(restarted_function);
} else if (!call_function_stub.is_null()) {
// If it's CallFunction stub ensure target function is compiled and flood
// it with one shot breakpoints.
@@ -1468,7 +1513,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
} else if (!js_function->IsBuiltin()) {
// Don't step into builtins.
// It will also compile target function if it's not compiled yet.
- FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
+ FloodWithOneShot(js_function);
}
}
}
@@ -1477,7 +1522,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// a call target as the function called might be a native function for
// which step in will not stop. It also prepares for stepping in
// getters/setters.
- FloodWithOneShot(shared);
+ FloodWithOneShot(function);
if (is_load_or_store) {
// Remember source position and frame to handle step in getter/setter. If
@@ -1677,12 +1722,11 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// function.
if (!holder.is_null() && holder->IsJSFunction() &&
!JSFunction::cast(*holder)->IsBuiltin()) {
- Handle<SharedFunctionInfo> shared_info(
- JSFunction::cast(*holder)->shared());
- Debug::FloodWithOneShot(shared_info);
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
+ Debug::FloodWithOneShot(js_function);
}
} else {
- Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+ Debug::FloodWithOneShot(function);
}
}
}
@@ -1762,7 +1806,7 @@ static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
Handle<Code> current_code) {
ASSERT(!current_code->has_debug_break_slots());
- CompilationInfo info(function);
+ CompilationInfoWithZone info(function);
info.MarkCompilingForDebugging(current_code);
ASSERT(!info.shared_info()->is_compiled());
ASSERT(!info.isolate()->has_pending_exception());
@@ -1834,29 +1878,48 @@ static void RedirectActivationsToRecompiledCodeOnThread(
continue;
}
- intptr_t delta = frame->pc() - frame_code->instruction_start();
- int debug_break_slot_count = 0;
- int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
+ // Iterate over the RelocInfo in the original code to compute the sum of the
+ // constant pool sizes. (See Assembler::CheckConstPool())
+ // Note that this is only useful for architectures using constant pools.
+ int constpool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL);
+ int frame_const_pool_size = 0;
+ for (RelocIterator it(*frame_code, constpool_mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->pc() >= frame->pc()) break;
+ frame_const_pool_size += static_cast<int>(info->data());
+ }
+ intptr_t frame_offset =
+ frame->pc() - frame_code->instruction_start() - frame_const_pool_size;
+
+ // Iterate over the RelocInfo for new code to find the number of bytes
+ // generated for debug slots and constant pools.
+ int debug_break_slot_bytes = 0;
+ int new_code_const_pool_size = 0;
+ int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::CONST_POOL);
for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
// Check if the pc in the new code with debug break
// slots is before this slot.
RelocInfo* info = it.rinfo();
- int debug_break_slot_bytes =
- debug_break_slot_count * Assembler::kDebugBreakSlotLength;
- intptr_t new_delta =
- info->pc() -
- new_code->instruction_start() -
- debug_break_slot_bytes;
- if (new_delta > delta) {
+ intptr_t new_offset = info->pc() - new_code->instruction_start() -
+ new_code_const_pool_size - debug_break_slot_bytes;
+ if (new_offset >= frame_offset) {
break;
}
- // Passed a debug break slot in the full code with debug
- // break slots.
- debug_break_slot_count++;
+ if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
+ debug_break_slot_bytes += Assembler::kDebugBreakSlotLength;
+ } else {
+ ASSERT(RelocInfo::IsConstPool(info->rmode()));
+ // The size of the constant pool is encoded in the data.
+ new_code_const_pool_size += static_cast<int>(info->data());
+ }
}
- int debug_break_slot_bytes =
- debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+
+ // Compute the equivalent pc in the new code.
+ byte* new_pc = new_code->instruction_start() + frame_offset +
+ debug_break_slot_bytes + new_code_const_pool_size;
+
if (FLAG_trace_deopt) {
PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
"with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
@@ -1873,14 +1936,12 @@ static void RedirectActivationsToRecompiledCodeOnThread(
new_code->instruction_size(),
new_code->instruction_size(),
reinterpret_cast<intptr_t>(frame->pc()),
- reinterpret_cast<intptr_t>(new_code->instruction_start()) +
- delta + debug_break_slot_bytes);
+ reinterpret_cast<intptr_t>(new_pc));
}
// Patch the return address to return into the code with
// debug break slots.
- frame->set_pc(
- new_code->instruction_start() + delta + debug_break_slot_bytes);
+ frame->set_pc(new_pc);
}
}
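The remapping above works in two passes: first strip the constant-pool bytes that precede the old pc to get a logical instruction offset, then add back whatever debug-break-slot and constant-pool bytes the new code emits before that same logical point. A hedged standalone model of the arithmetic (sizes and offsets are made up):

    #include <cstdio>
    #include <vector>

    // Each entry models extra bytes (a constant pool or a debug break slot)
    // emitted before a given raw offset in a code object.
    struct Extra { int raw_offset; int size; };

    // Strip extras preceding pc_offset to obtain the logical offset.
    static int ToLogical(int pc_offset, const std::vector<Extra>& extras) {
      int stripped = 0;
      for (const Extra& e : extras)
        if (e.raw_offset < pc_offset) stripped += e.size;
      return pc_offset - stripped;
    }

    int main() {
      // Old code: an 8-byte constant pool precedes the frame pc at raw offset 40.
      int logical = ToLogical(40, {{16, 8}});  // logical == 32
      // New code: a 4-byte debug break slot precedes the same logical point,
      // so the equivalent raw offset in the new code re-adds those bytes.
      int new_pc_offset = logical + 4;         // == 36
      std::printf("logical=%d new_pc=%d\n", logical, new_pc_offset);
      return 0;
    }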
@@ -1922,6 +1983,9 @@ void Debug::PrepareForBreakPoints() {
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
+ // There will be at least one break point when we are done.
+ has_break_points_ = true;
+
// Keep the list of activated functions in a handlified list as it
// is used both in GC and non-GC code.
List<Handle<JSFunction> > active_functions(100);
@@ -1999,7 +2063,6 @@ void Debug::PrepareForBreakPoints() {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
Handle<Code> current_code(function->shared()->code());
- ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
@@ -2028,16 +2091,130 @@ void Debug::PrepareForBreakPoints() {
}
+Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
+ int position) {
+ // Iterate the heap looking for SharedFunctionInfo generated from the
+ // script. The innermost SharedFunctionInfo containing the source position
+ // for the requested break point is found.
+ // NOTE: This might require several heap iterations. If the SharedFunctionInfo
+ // found is not compiled, it is compiled and the heap is iterated again,
+ // because compilation might create inner functions from the newly compiled
+ // function and the requested break point might be in one of them.
+ // NOTE: The fix-point iteration below depends on functions that cannot be
+ // compiled lazily without a context not being compiled at all. Compilation
+ // is only triggered at points where no context is needed.
+ bool done = false;
+ // The current candidate for the source position:
+ int target_start_position = RelocInfo::kNoPosition;
+ Handle<JSFunction> target_function;
+ Handle<SharedFunctionInfo> target;
+ while (!done) {
+ { // Extra scope for iterator and no-allocation.
+ isolate_->heap()->EnsureHeapIsIterable();
+ AssertNoAllocation no_alloc_during_heap_iteration;
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL; obj = iterator.next()) {
+ bool found_next_candidate = false;
+ Handle<JSFunction> function;
+ Handle<SharedFunctionInfo> shared;
+ if (obj->IsJSFunction()) {
+ function = Handle<JSFunction>(JSFunction::cast(obj));
+ shared = Handle<SharedFunctionInfo>(function->shared());
+ ASSERT(shared->allows_lazy_compilation() || shared->is_compiled());
+ found_next_candidate = true;
+ } else if (obj->IsSharedFunctionInfo()) {
+ shared = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
+ // Skip functions that we cannot compile lazily without a context,
+ // which is not available here, because there is no closure.
+ found_next_candidate = shared->is_compiled() ||
+ shared->allows_lazy_compilation_without_context();
+ }
+ if (!found_next_candidate) continue;
+ if (shared->script() == *script) {
+ // If the SharedFunctionInfo found has the requested script data and
+ // contains the source position it is a candidate.
+ int start_position = shared->function_token_position();
+ if (start_position == RelocInfo::kNoPosition) {
+ start_position = shared->start_position();
+ }
+ if (start_position <= position &&
+ position <= shared->end_position()) {
+ // If there is no candidate or this function is within the current
+ // candidate this is the new candidate.
+ if (target.is_null()) {
+ target_start_position = start_position;
+ target_function = function;
+ target = shared;
+ } else {
+ if (target_start_position == start_position &&
+ shared->end_position() == target->end_position()) {
+ // If a top-level function contains only one function
+ // declaration, the source for the top-level function and the
+ // declared function is the same. In that case prefer the
+ // non-top-level function.
+ if (!shared->is_toplevel()) {
+ target_start_position = start_position;
+ target_function = function;
+ target = shared;
+ }
+ } else if (target_start_position <= start_position &&
+ shared->end_position() <= target->end_position()) {
+ // This containment check includes equality as a function
+ // inside a top-level function can share either start or end
+ // position with the top-level function.
+ target_start_position = start_position;
+ target_function = function;
+ target = shared;
+ }
+ }
+ }
+ }
+ } // End for loop.
+ } // End no-allocation scope.
+
+ if (target.is_null()) {
+ return isolate_->heap()->undefined_value();
+ }
+
+ // There will be at least one break point when we are done.
+ has_break_points_ = true;
+
+ // If the candidate found is compiled we are done.
+ done = target->is_compiled();
+ if (!done) {
+ // If the candidate is not compiled, compile it to reveal any inner
+ // functions which might contain the requested source position. This
+ // will compile all inner functions that cannot be compiled without a
+ // context, because Compiler::BuildFunctionInfo checks whether the
+ // debugger is active.
+ if (target_function.is_null()) {
+ SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
+ } else {
+ JSFunction::CompileLazy(target_function, KEEP_EXCEPTION);
+ }
+ }
+ } // End while loop.
+
+ return *target;
+}
+
+
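The loop above is a fix-point search: pick the innermost function whose source range contains the position; if it is not yet compiled, compiling it can reveal inner functions, so the scan restarts until the winner is already compiled. A self-contained sketch of that shape (plain ranges stand in for SharedFunctionInfo):

    #include <cstdio>
    #include <vector>

    struct Fn {
      int start, end;
      bool compiled;
      std::vector<Fn> inner;  // revealed only once the function is compiled
    };

    static const Fn* FindInnermost(std::vector<Fn>& fns, int pos) {
      for (;;) {
        Fn* best = nullptr;
        for (Fn& f : fns) {
          if (f.start <= pos && pos <= f.end &&
              (best == nullptr ||
               (best->start <= f.start && f.end <= best->end))) {
            best = &f;  // no candidate yet, or f nests inside the candidate
          }
        }
        if (best == nullptr || best->compiled) return best;
        best->compiled = true;                   // "compile" the candidate...
        std::vector<Fn> revealed = best->inner;  // ...copy before push_back
        for (const Fn& f : revealed) fns.push_back(f);  // ...and rescan
      }
    }

    int main() {
      std::vector<Fn> fns = {{0, 100, false, {{10, 40, false, {}}}}};
      const Fn* f = FindInnermost(fns, 20);
      std::printf("[%d, %d]\n", f->start, f->end);  // prints [10, 40]
      return 0;
    }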
// Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
+bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> function) {
// Return if we already have the debug info for shared.
if (HasDebugInfo(shared)) {
ASSERT(shared->is_compiled());
return true;
}
- // Ensure shared in compiled. Return false if this failed.
- if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ // There will be at least one break point when we are done.
+ has_break_points_ = true;
+
+ // Ensure function is compiled. Return false if this failed.
+ if (!function.is_null() &&
+ !JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
@@ -2049,9 +2226,6 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
node->set_next(debug_info_list_);
debug_info_list_ = node;
- // Now there is at least one break point.
- has_break_points_ = true;
-
return true;
}
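The two-argument form introduces a contract worth spelling out: compilation can only be triggered when a closure is supplied; with a null handle, the shared function info must already be compiled or the call fails. A compact model of that guard (hypothetical types, not V8's):

    #include <cstdio>

    struct Shared { bool compiled; };
    struct Closure { Shared* shared; };

    // With a closure we may compile on demand; without one we cannot.
    static bool EnsureDebugInfo(Shared* shared, Closure* closure) {
      if (!shared->compiled && closure != nullptr) shared->compiled = true;
      return shared->compiled;
    }

    int main() {
      Shared lazy = { false };
      std::printf("%d\n", EnsureDebugInfo(&lazy, nullptr));  // 0: cannot compile
      Closure c = { &lazy };
      std::printf("%d\n", EnsureDebugInfo(&lazy, &c));       // 1: compiled now
      return 0;
    }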
@@ -2093,9 +2267,9 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
PrepareForBreakPoints();
// Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return;
}
@@ -2185,9 +2359,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
PrepareForBreakPoints();
// Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return false;
}
@@ -2218,7 +2392,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
FrameDropMode mode,
Object** restarter_frame_function_pointer) {
- thread_local_.frame_drop_mode_ = mode;
+ if (mode != CURRENTLY_SET_MODE) {
+ thread_local_.frame_drop_mode_ = mode;
+ }
thread_local_.break_frame_id_ = new_break_frame_id;
thread_local_.restarter_frame_function_pointer_ =
restarter_frame_function_pointer;
@@ -2233,7 +2409,7 @@ const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
bool Debug::IsDebugGlobal(GlobalObject* global) {
- return IsLoaded() && global == debug_context()->global();
+ return IsLoaded() && global == debug_context()->global_object();
}
@@ -2245,12 +2421,13 @@ void Debug::ClearMirrorCache() {
// Clear the mirror cache.
Handle<String> function_name =
isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
- Handle<Object> fun(Isolate::Current()->global()->GetPropertyNoExceptionThrown(
+ Handle<Object> fun(
+ Isolate::Current()->global_object()->GetPropertyNoExceptionThrown(
*function_name));
ASSERT(fun->IsJSFunction());
bool caught_exception;
Execution::TryCall(Handle<JSFunction>::cast(fun),
- Handle<JSObject>(Debug::debug_context()->global()),
+ Handle<JSObject>(Debug::debug_context()->global_object()),
0, NULL, &caught_exception);
}
@@ -2337,6 +2514,7 @@ Debugger::Debugger(Isolate* isolate)
event_listener_data_(Handle<Object>()),
compiling_natives_(false),
is_loading_debugger_(false),
+ live_edit_enabled_(true),
never_unload_debugger_(false),
force_debugger_active_(false),
message_handler_(NULL),
@@ -2372,7 +2550,8 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
Handle<String> constructor_str =
isolate_->factory()->LookupSymbol(constructor_name);
Handle<Object> constructor(
- isolate_->global()->GetPropertyNoExceptionThrown(*constructor_str));
+ isolate_->global_object()->GetPropertyNoExceptionThrown(
+ *constructor_str));
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
@@ -2380,7 +2559,7 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
}
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
- Handle<JSObject>(isolate_->debug()->debug_context()->global()),
+ Handle<JSObject>(isolate_->debug()->debug_context()->global_object()),
argc,
argv,
caught_exception);
@@ -2602,7 +2781,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<String> update_script_break_points_symbol =
isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
Handle<Object> update_script_break_points =
- Handle<Object>(debug->debug_context()->global()->
+ Handle<Object>(debug->debug_context()->global_object()->
GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
if (!update_script_break_points->IsJSFunction()) {
return;
@@ -2758,7 +2937,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
event_listener_data_ };
bool caught_exception;
Execution::TryCall(fun,
- isolate_->global(),
+ isolate_->global_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 2adbd24338..9e33f4b184 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -233,12 +233,15 @@ class Debug {
void Iterate(ObjectVisitor* v);
Object* Break(Arguments args);
- void SetBreakPoint(Handle<SharedFunctionInfo> shared,
+ void SetBreakPoint(Handle<JSFunction> function,
Handle<Object> break_point_object,
int* source_position);
+ bool SetBreakPointForScript(Handle<Script> script,
+ Handle<Object> break_point_object,
+ int* source_position);
void ClearBreakPoint(Handle<Object> break_point_object);
void ClearAllBreakPoints();
- void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+ void FloodWithOneShot(Handle<JSFunction> function);
void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
@@ -254,8 +257,14 @@ class Debug {
void PrepareForBreakPoints();
- // Returns whether the operation succeeded.
- bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
+ // This function is used in FunctionNameUsing* tests.
+ Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position);
+
+ // Returns whether the operation succeeded. Compilation can only be triggered
+ // if a valid closure is passed as the second argument, otherwise the shared
+ // function needs to be compiled already.
+ bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> function);
// Returns true if the current stub call is patched to call the debugger.
static bool IsDebugBreak(Address addr);
@@ -434,7 +443,8 @@ class Debug {
// The top JS frame had been calling some C++ function. The return address
// gets patched automatically.
FRAME_DROPPED_IN_DIRECT_CALL,
- FRAME_DROPPED_IN_RETURN_CALL
+ FRAME_DROPPED_IN_RETURN_CALL,
+ CURRENTLY_SET_MODE
};
void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
@@ -865,6 +875,8 @@ class Debugger {
bool compiling_natives() const { return compiling_natives_; }
void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
bool is_loading_debugger() const { return is_loading_debugger_; }
+ void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
+ bool live_edit_enabled() const { return live_edit_enabled_; }
void set_force_debugger_active(bool force_debugger_active) {
force_debugger_active_ = force_debugger_active;
}
@@ -893,6 +905,7 @@ class Debugger {
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
bool is_loading_debugger_; // Are we loading the debugger?
+ bool live_edit_enabled_; // Enable LiveEdit.
bool never_unload_debugger_; // Can we unload the debugger?
bool force_debugger_active_; // Activate debugger without event listeners.
v8::Debug::MessageHandler2 message_handler_;
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 3debf55cd6..d1b00f8bde 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -268,20 +268,29 @@ void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
+ Isolate* isolate = context->GetIsolate();
+ ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
AssertNoAllocation no_allocation;
- ASSERT(context->IsGlobalContext());
+ ASSERT(context->IsNativeContext());
visitor->EnterContext(context);
- // Run through the list of optimized functions and deoptimize them.
+
+ // Create a snapshot of the optimized functions list. This is needed because
+ // visitors might remove more than one link from the list at once.
+ ZoneList<JSFunction*> snapshot(1, isolate->runtime_zone());
Object* element = context->OptimizedFunctionsListHead();
while (!element->IsUndefined()) {
JSFunction* element_function = JSFunction::cast(element);
- // Get the next link before deoptimizing as deoptimizing will clear the
- // next link.
+ snapshot.Add(element_function, isolate->runtime_zone());
element = element_function->next_function_link();
- visitor->VisitFunction(element_function);
}
+
+ // Run through the snapshot of optimized functions and visit them.
+ for (int i = 0; i < snapshot.length(); ++i) {
+ visitor->VisitFunction(snapshot.at(i));
+ }
+
visitor->LeaveContext(context);
}
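Snapshotting first matters because a visitor (deoptimization) may unlink several functions from the context's list at once, which would derail an in-place traversal. A standalone illustration with an ordinary singly linked list:

    #include <cstdio>
    #include <vector>

    struct Fn { int id; Fn* next; };

    // Copy the list first; visitors may unlink arbitrary nodes.
    static void VisitAll(Fn* head, void (*visit)(Fn*)) {
      std::vector<Fn*> snapshot;
      for (Fn* f = head; f != nullptr; f = f->next) snapshot.push_back(f);
      for (Fn* f : snapshot) visit(f);
    }

    static void Deoptimize(Fn* f) {
      std::printf("deopt %d\n", f->id);
      f->next = nullptr;  // clears links; a live traversal would stop early
    }

    int main() {
      Fn c = {3, nullptr}, b = {2, &c}, a = {1, &b};
      VisitAll(&a, Deoptimize);  // visits 1, 2, 3 despite the cleared links
      return 0;
    }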
@@ -294,10 +303,10 @@ void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
Object* proto = object->GetPrototype();
ASSERT(proto->IsJSGlobalObject());
VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(proto)->global_context(), visitor);
+ GlobalObject::cast(proto)->native_context(), visitor);
} else if (object->IsGlobalObject()) {
VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(object)->global_context(), visitor);
+ GlobalObject::cast(object)->native_context(), visitor);
}
}
@@ -306,12 +315,12 @@ void Deoptimizer::VisitAllOptimizedFunctions(
OptimizedFunctionVisitor* visitor) {
AssertNoAllocation no_allocation;
- // Run through the list of all global contexts and deoptimize.
- Object* context = Isolate::Current()->heap()->global_contexts_list();
+ // Run through the list of all native contexts and deoptimize.
+ Object* context = Isolate::Current()->heap()->native_contexts_list();
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the global field of the context can be undefined.
- Object* global = Context::cast(context)->get(Context::GLOBAL_INDEX);
+ Object* global = Context::cast(context)->get(Context::GLOBAL_OBJECT_INDEX);
if (!global->IsUndefined()) {
VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
visitor);
@@ -484,19 +493,18 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
- unsigned id,
+ BailoutId id,
SharedFunctionInfo* shared) {
// TODO(kasperl): For now, we do a simple linear search for the PC
// offset associated with the given node id. This should probably be
// changed to a binary search.
int length = data->DeoptPoints();
- Smi* smi_id = Smi::FromInt(id);
for (int i = 0; i < length; i++) {
- if (data->AstId(i) == smi_id) {
+ if (data->AstId(i) == id) {
return data->PcAndState(i)->value();
}
}
- PrintF("[couldn't find pc offset for node=%u]\n", id);
+ PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
// Print the source code if available.
HeapStringAllocator string_allocator;
@@ -543,7 +551,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// described by the input data.
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
- unsigned node_id = input_data->AstId(bailout_id_)->value();
+ BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
input_data->TranslationIndex(bailout_id_)->value();
@@ -581,7 +589,24 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::CONSTRUCT_STUB_FRAME:
DoComputeConstructStubFrame(&iterator, i);
break;
- default:
+ case Translation::GETTER_STUB_FRAME:
+ DoComputeAccessorStubFrame(&iterator, i, false);
+ break;
+ case Translation::SETTER_STUB_FRAME:
+ DoComputeAccessorStubFrame(&iterator, i, true);
+ break;
+ case Translation::BEGIN:
+ case Translation::REGISTER:
+ case Translation::INT32_REGISTER:
+ case Translation::UINT32_REGISTER:
+ case Translation::DOUBLE_REGISTER:
+ case Translation::STACK_SLOT:
+ case Translation::INT32_STACK_SLOT:
+ case Translation::UINT32_STACK_SLOT:
+ case Translation::DOUBLE_STACK_SLOT:
+ case Translation::LITERAL:
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::DUPLICATE:
UNREACHABLE();
break;
}
@@ -595,9 +620,9 @@ void Deoptimizer::DoComputeOutputFrames() {
PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function));
function->PrintName();
- PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+ PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
" took %0.3f ms]\n",
- node_id,
+ node_id.ToInt(),
output_[index]->GetPc(),
FullCodeGenerator::State2String(
static_cast<FullCodeGenerator::State>(
@@ -700,6 +725,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@@ -748,6 +775,34 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
+ case Translation::UINT32_REGISTER: {
+ int input_reg = iterator->Next();
+ uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
+ bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (FLAG_trace_deopt) {
+ PrintF(
+ " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
+ " ; uint %s (%s)\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ value,
+ converter.NameOfCPURegister(input_reg),
+ is_smi ? "smi" : "heap number");
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+ } else {
+ // We save the untagged value on the side and store a GC-safe
+ // temporary placeholder in the frame.
+ AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+ static_cast<double>(static_cast<uint32_t>(value)));
+ output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+ }
+ return;
+ }
+
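Whether a uint32 value can be stored directly as a smi depends only on Smi::kMaxValue; anything larger gets the GC-safe placeholder and is boxed as a heap number during materialization. A hedged sketch of the classification, assuming the 32-bit smi range of 2^30 - 1:

    #include <cstdint>
    #include <cstdio>

    // Illustrative 32-bit smi range: 31 payload bits, so max is 2^30 - 1.
    static const uint32_t kSmiMaxValue = (1u << 30) - 1;

    static const char* ClassifyUint32(uint32_t value) {
      return value <= kSmiMaxValue ? "smi" : "heap number";
    }

    int main() {
      std::printf("%u -> %s\n", 42u, ClassifyUint32(42u));
      std::printf("%u -> %s\n", 0x7fffffffu, ClassifyUint32(0x7fffffffu));
      return 0;
    }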
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
@@ -813,6 +868,36 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
+ case Translation::UINT32_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset =
+ input_->GetOffsetFromSlotIndex(input_slot_index);
+ uintptr_t value =
+ static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
+ bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": ",
+ output_[frame_index]->GetTop() + output_offset);
+ PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
+ output_offset,
+ value,
+ input_offset,
+ is_smi ? "smi" : "heap number");
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+ } else {
+ // We save the untagged value on the side and store a GC-safe
+ // temporary placeholder in the frame.
+ AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+ static_cast<double>(static_cast<uint32_t>(value)));
+ output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+ }
+ return;
+ }
+
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
@@ -865,6 +950,56 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
+static bool ObjectToInt32(Object* obj, int32_t* value) {
+ if (obj->IsSmi()) {
+ *value = Smi::cast(obj)->value();
+ return true;
+ }
+
+ if (obj->IsHeapNumber()) {
+ double num = HeapNumber::cast(obj)->value();
+ if (FastI2D(FastD2I(num)) != num) {
+ if (FLAG_trace_osr) {
+ PrintF("**** %g could not be converted to int32 ****\n",
+ HeapNumber::cast(obj)->value());
+ }
+ return false;
+ }
+
+ *value = FastD2I(num);
+ return true;
+ }
+
+ return false;
+}
+
+
+static bool ObjectToUint32(Object* obj, uint32_t* value) {
+ if (obj->IsSmi()) {
+ if (Smi::cast(obj)->value() < 0) return false;
+
+ *value = static_cast<uint32_t>(Smi::cast(obj)->value());
+ return true;
+ }
+
+ if (obj->IsHeapNumber()) {
+ double num = HeapNumber::cast(obj)->value();
+ if ((num < 0) || (FastUI2D(FastD2UI(num)) != num)) {
+ if (FLAG_trace_osr) {
+ PrintF("**** %g could not be converted to uint32 ****\n",
+ HeapNumber::cast(obj)->value());
+ }
+ return false;
+ }
+
+ *value = FastD2UI(num);
+ return true;
+ }
+
+ return false;
+}
+
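Both helpers above rely on the same round-trip test: convert the double, convert back, and reject the value if anything was lost. A self-contained illustration of the uint32 variant (plain casts stand in for FastD2UI/FastUI2D):

    #include <cstdint>
    #include <cstdio>

    static bool DoubleToUint32(double num, uint32_t* value) {
      if (num < 0) return false;                 // uint32 cannot be negative
      uint32_t candidate = static_cast<uint32_t>(num);
      // Reject if the round trip loses information (fraction or overflow).
      if (static_cast<double>(candidate) != num) return false;
      *value = candidate;
      return true;
    }

    int main() {
      uint32_t v;
      std::printf("%d\n", DoubleToUint32(4294967295.0, &v));  // 1: fits exactly
      std::printf("%d\n", DoubleToUint32(1.5, &v));           // 0: fractional
      std::printf("%d\n", DoubleToUint32(-1.0, &v));          // 0: negative
      return 0;
    }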
+
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int* input_offset) {
disasm::NameConverter converter;
@@ -887,6 +1022,8 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@@ -904,22 +1041,10 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
case Translation::INT32_REGISTER: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
+ int32_t int32_value = 0;
+ if (!ObjectToInt32(input_object, &int32_value)) return false;
int output_reg = iterator->Next();
- int int32_value = input_object->IsSmi()
- ? Smi::cast(input_object)->value()
- : FastD2I(input_object->Number());
- // Abort the translation if the conversion lost information.
- if (!input_object->IsSmi() &&
- FastI2D(int32_value) != input_object->Number()) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- input_object->Number());
- }
- return false;
- }
if (FLAG_trace_osr) {
PrintF(" %s <- %d (int32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
@@ -930,6 +1055,21 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
break;
}
+ case Translation::UINT32_REGISTER: {
+ uint32_t uint32_value = 0;
+ if (!ObjectToUint32(input_object, &uint32_value)) return false;
+
+ int output_reg = iterator->Next();
+ if (FLAG_trace_osr) {
+ PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
+ converter.NameOfCPURegister(output_reg),
+ uint32_value,
+ *input_offset);
+ }
+ output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
+ break;
+ }
+
case Translation::DOUBLE_REGISTER: {
// Abort OSR if we don't have a number.
if (!input_object->IsNumber()) return false;
@@ -963,24 +1103,12 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
case Translation::INT32_STACK_SLOT: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
+ int32_t int32_value = 0;
+ if (!ObjectToInt32(input_object, &int32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(output_index);
- int int32_value = input_object->IsSmi()
- ? Smi::cast(input_object)->value()
- : DoubleToInt32(input_object->Number());
- // Abort the translation if the conversion lost information.
- if (!input_object->IsSmi() &&
- FastI2D(int32_value) != input_object->Number()) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- input_object->Number());
- }
- return false;
- }
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
output_offset,
@@ -991,6 +1119,23 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
break;
}
+ case Translation::UINT32_STACK_SLOT: {
+ uint32_t uint32_value = 0;
+ if (!ObjectToUint32(input_object, &uint32_value)) return false;
+
+ int output_index = iterator->Next();
+ unsigned output_offset =
+ output->GetOffsetFromSlotIndex(output_index);
+ if (FLAG_trace_osr) {
+ PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
+ output_offset,
+ uint32_value,
+ *input_offset);
+ }
+ output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
+ break;
+ }
+
case Translation::DOUBLE_STACK_SLOT: {
static const int kLowerOffset = 0 * kPointerSize;
static const int kUpperOffset = 1 * kPointerSize;
@@ -1342,6 +1487,18 @@ void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
}
+void Translation::BeginGetterStubFrame(int literal_id) {
+ buffer_->Add(GETTER_STUB_FRAME, zone());
+ buffer_->Add(literal_id, zone());
+}
+
+
+void Translation::BeginSetterStubFrame(int literal_id) {
+ buffer_->Add(SETTER_STUB_FRAME, zone());
+ buffer_->Add(literal_id, zone());
+}
+
+
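Each Begin*/Store* method above appends an opcode followed by its operands to a flat buffer, and the two new accessor-stub frames carry a single literal id. A minimal sketch of that encoding (opcode values illustrative):

    #include <cstdio>
    #include <vector>

    enum Opcode { JS_FRAME, GETTER_STUB_FRAME, SETTER_STUB_FRAME,
                  UINT32_REGISTER };

    // Translations are a flat stream: opcode, then that opcode's operands.
    struct Translation {
      std::vector<int> buffer;
      void BeginGetterStubFrame(int literal_id) {
        buffer.push_back(GETTER_STUB_FRAME);
        buffer.push_back(literal_id);
      }
      void StoreUint32Register(int reg_code) {
        buffer.push_back(UINT32_REGISTER);
        buffer.push_back(reg_code);
      }
    };

    int main() {
      Translation t;
      t.BeginGetterStubFrame(3);  // frame for the getter, literal id 3
      t.StoreUint32Register(1);   // its one live value sits in register 1
      for (int x : t.buffer) std::printf("%d ", x);  // prints: 1 3 3 1
      std::printf("\n");
      return 0;
    }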
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
buffer_->Add(literal_id, zone());
@@ -1349,9 +1506,11 @@ void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
}
-void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
+void Translation::BeginJSFrame(BailoutId node_id,
+ int literal_id,
+ unsigned height) {
buffer_->Add(JS_FRAME, zone());
- buffer_->Add(node_id, zone());
+ buffer_->Add(node_id.ToInt(), zone());
buffer_->Add(literal_id, zone());
buffer_->Add(height, zone());
}
@@ -1369,6 +1528,12 @@ void Translation::StoreInt32Register(Register reg) {
}
+void Translation::StoreUint32Register(Register reg) {
+ buffer_->Add(UINT32_REGISTER, zone());
+ buffer_->Add(reg.code(), zone());
+}
+
+
void Translation::StoreDoubleRegister(DoubleRegister reg) {
buffer_->Add(DOUBLE_REGISTER, zone());
buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
@@ -1387,6 +1552,12 @@ void Translation::StoreInt32StackSlot(int index) {
}
+void Translation::StoreUint32StackSlot(int index) {
+ buffer_->Add(UINT32_STACK_SLOT, zone());
+ buffer_->Add(index, zone());
+}
+
+
void Translation::StoreDoubleStackSlot(int index) {
buffer_->Add(DOUBLE_STACK_SLOT, zone());
buffer_->Add(index, zone());
@@ -1414,11 +1585,15 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case ARGUMENTS_OBJECT:
case DUPLICATE:
return 0;
+ case GETTER_STUB_FRAME:
+ case SETTER_STUB_FRAME:
case REGISTER:
case INT32_REGISTER:
+ case UINT32_REGISTER:
case DOUBLE_REGISTER:
case STACK_SLOT:
case INT32_STACK_SLOT:
+ case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
return 1;
@@ -1446,16 +1621,24 @@ const char* Translation::StringFor(Opcode opcode) {
return "ARGUMENTS_ADAPTOR_FRAME";
case CONSTRUCT_STUB_FRAME:
return "CONSTRUCT_STUB_FRAME";
+ case GETTER_STUB_FRAME:
+ return "GETTER_STUB_FRAME";
+ case SETTER_STUB_FRAME:
+ return "SETTER_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
return "INT32_REGISTER";
+ case UINT32_REGISTER:
+ return "UINT32_REGISTER";
case DOUBLE_REGISTER:
return "DOUBLE_REGISTER";
case STACK_SLOT:
return "STACK_SLOT";
case INT32_STACK_SLOT:
return "INT32_STACK_SLOT";
+ case UINT32_STACK_SLOT:
+ return "UINT32_STACK_SLOT";
case DOUBLE_STACK_SLOT:
return "DOUBLE_STACK_SLOT";
case LITERAL:
@@ -1502,6 +1685,8 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
// Peeled off before getting here.
break;
@@ -1511,6 +1696,7 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::REGISTER:
case Translation::INT32_REGISTER:
+ case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
case Translation::DUPLICATE:
// We are at safepoint which corresponds to call. All registers are
@@ -1530,6 +1716,12 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
return SlotRef(slot_addr, SlotRef::INT32);
}
+ case Translation::UINT32_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::UINT32);
+ }
+
case Translation::DOUBLE_STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
@@ -1569,7 +1761,7 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
int inlined_jsframe_index,
int formal_parameter_count) {
AssertNoAllocation no_gc;
- int deopt_index = AstNode::kNoNumber;
+ int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 9e8a5491a2..cd33477e26 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -62,13 +62,13 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
virtual ~OptimizedFunctionVisitor() {}
// Function which is called before iteration of any optimized functions
- // from given global context.
+ // from given native context.
virtual void EnterContext(Context* context) = 0;
virtual void VisitFunction(JSFunction* function) = 0;
// Function which is called after iteration of all optimized functions
- // from given global context.
+ // from given native context.
virtual void LeaveContext(Context* context) = 0;
};
@@ -211,7 +211,7 @@ class Deoptimizer : public Malloced {
static Address GetDeoptimizationEntry(int id, BailoutType type);
static int GetDeoptimizationId(Address addr, BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data,
- unsigned node_id,
+ BailoutId node_id,
SharedFunctionInfo* shared);
// Code generation support.
@@ -284,6 +284,9 @@ class Deoptimizer : public Malloced {
int frame_index);
void DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index);
+ void DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@@ -559,12 +562,16 @@ class Translation BASE_EMBEDDED {
BEGIN,
JS_FRAME,
CONSTRUCT_STUB_FRAME,
+ GETTER_STUB_FRAME,
+ SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
REGISTER,
INT32_REGISTER,
+ UINT32_REGISTER,
DOUBLE_REGISTER,
STACK_SLOT,
INT32_STACK_SLOT,
+ UINT32_STACK_SLOT,
DOUBLE_STACK_SLOT,
LITERAL,
ARGUMENTS_OBJECT,
@@ -587,14 +594,18 @@ class Translation BASE_EMBEDDED {
int index() const { return index_; }
// Commands.
- void BeginJSFrame(int node_id, int literal_id, unsigned height);
+ void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
+ void BeginGetterStubFrame(int literal_id);
+ void BeginSetterStubFrame(int literal_id);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
+ void StoreUint32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
+ void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
void StoreArgumentsObject();
@@ -608,6 +619,9 @@ class Translation BASE_EMBEDDED {
static const char* StringFor(Opcode opcode);
#endif
+ // A literal id which refers to the JSFunction itself.
+ static const int kSelfLiteralId = -239;
+
private:
TranslationBuffer* buffer_;
int index_;
@@ -641,6 +655,7 @@ class SlotRef BASE_EMBEDDED {
UNKNOWN,
TAGGED,
INT32,
+ UINT32,
DOUBLE,
LITERAL
};
@@ -668,6 +683,16 @@ class SlotRef BASE_EMBEDDED {
}
}
+ case UINT32: {
+ uint32_t value = Memory::uint32_at(addr_);
+ if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+ return Handle<Object>(Smi::FromInt(static_cast<int>(value)));
+ } else {
+ return Isolate::Current()->factory()->NewNumber(
+ static_cast<double>(value));
+ }
+ }
+
case DOUBLE: {
double value = Memory::double_at(addr_);
return Isolate::Current()->factory()->NewNumber(value);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index e3b40ab93f..9f8b9a820b 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -244,8 +244,8 @@ static int DecodeIt(FILE* f,
out.AddFormatted(" %s, %s", Code::Kind2String(kind),
Code::ICState2String(ic_state));
if (ic_state == MONOMORPHIC) {
- PropertyType type = code->type();
- out.AddFormatted(", %s", Code::PropertyType2String(type));
+ Code::StubType type = code->type();
+ out.AddFormatted(", %s", Code::StubType2String(type));
}
if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index f0e1414de4..4cb50a461d 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -800,7 +800,7 @@ class FastElementsAccessor
}
} else {
// Otherwise, fill the unused tail with holes.
- int old_length = FastD2I(array->length()->Number());
+ int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
backing_store->set_the_hole(i);
}
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 40ed7de414..330e41fbc9 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -100,7 +100,7 @@ static Handle<Object> Invoke(bool is_construct,
// Make sure that the global object of the context we're about to
// make the current one is indeed a global object.
- ASSERT(function->context()->global()->IsGlobalObject());
+ ASSERT(function->context()->global_object()->IsGlobalObject());
{
// Save and restore context around invocation and block the
@@ -165,10 +165,10 @@ Handle<Object> Execution::Call(Handle<Object> callable,
if (convert_receiver && !receiver->IsJSReceiver() &&
!func->shared()->native() && func->shared()->is_classic_mode()) {
if (receiver->IsUndefined() || receiver->IsNull()) {
- Object* global = func->context()->global()->global_receiver();
+ Object* global = func->context()->global_object()->global_receiver();
// Under some circumstances, 'global' can be the JSBuiltinsObject
- // In that case, don't rewrite.
- // (FWIW, the same holds for GetIsolate()->global()->global_receiver().)
+ // In that case, don't rewrite. (FWIW, the same holds for
+ // GetIsolate()->global_object()->global_receiver().)
if (!global->IsJSBuiltinsObject()) receiver = Handle<Object>(global);
} else {
receiver = ToObject(receiver, pending_exception);
@@ -184,7 +184,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
bool* pending_exception) {
- return Invoke(true, func, Isolate::Current()->global(), argc, argv,
+ return Invoke(true, func, Isolate::Current()->global_object(), argc, argv,
pending_exception);
}
@@ -246,7 +246,7 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- isolate->global_context()->call_as_function_delegate());
+ isolate->native_context()->call_as_function_delegate());
}
return factory->undefined_value();
@@ -270,7 +270,7 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- isolate->global_context()->call_as_function_delegate());
+ isolate->native_context()->call_as_function_delegate());
}
// If the Object doesn't have an instance-call handler we should
@@ -303,7 +303,7 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- isolate->global_context()->call_as_constructor_delegate());
+ isolate->native_context()->call_as_constructor_delegate());
}
return isolate->factory()->undefined_value();
@@ -331,7 +331,7 @@ Handle<Object> Execution::TryGetConstructorDelegate(
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- isolate->global_context()->call_as_constructor_delegate());
+ isolate->native_context()->call_as_constructor_delegate());
}
// If the Object doesn't have an instance-call handler we should
@@ -446,6 +446,25 @@ void StackGuard::RequestRuntimeProfilerTick() {
}
+void StackGuard::RequestCodeReadyEvent() {
+ ASSERT(FLAG_parallel_recompilation);
+ if (ExecutionAccess::TryLock(isolate_)) {
+ thread_local_.interrupt_flags_ |= CODE_READY;
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+ }
+ ExecutionAccess::Unlock(isolate_);
+ }
+}
+
+
+bool StackGuard::IsCodeReadyEvent() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
+}
+
+
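The request/check/continue trio above follows V8's usual interrupt pattern: set a bit in a flag word and collapse the stack limit so the next stack check traps into the runtime, which then handles and clears the bit. A single-threaded model (locking and the real limit handling elided):

    #include <cstdint>
    #include <cstdio>

    enum InterruptFlag { GC_REQUEST = 1 << 6, CODE_READY = 1 << 7 };

    struct StackGuard {
      // An impossible limit guarantees the next stack check fails and
      // control enters the runtime, which inspects interrupt_flags_.
      static const uintptr_t kInterruptLimit = static_cast<uintptr_t>(-1);
      uintptr_t jslimit_ = 0x1000;
      int interrupt_flags_ = 0;

      void Request(InterruptFlag flag) {
        interrupt_flags_ |= flag;
        jslimit_ = kInterruptLimit;
      }
      bool Check(InterruptFlag flag) const {
        return (interrupt_flags_ & flag) != 0;
      }
      void Continue(InterruptFlag flag) { interrupt_flags_ &= ~flag; }
    };

    int main() {
      StackGuard guard;
      guard.Request(CODE_READY);
      std::printf("code ready: %d\n", guard.Check(CODE_READY));  // 1
      guard.Continue(CODE_READY);
      std::printf("code ready: %d\n", guard.Check(CODE_READY));  // 0
      return 0;
    }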
bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
@@ -661,7 +680,7 @@ Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
Handle<String> flags,
bool* exc) {
Handle<JSFunction> function = Handle<JSFunction>(
- pattern->GetIsolate()->global_context()->regexp_function());
+ pattern->GetIsolate()->native_context()->regexp_function());
Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral(
function, pattern, flags, exc);
if (*exc) return Handle<JSRegExp>();
@@ -707,7 +726,7 @@ Handle<JSFunction> Execution::InstantiateFunction(
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
Object* elm =
- isolate->global_context()->function_cache()->
+ isolate->native_context()->function_cache()->
GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
@@ -832,6 +851,11 @@ Object* Execution::DebugBreakHelper() {
return isolate->heap()->undefined_value();
}
+ // Ignore debug break if debugger is not active.
+ if (!isolate->debugger()->IsDebuggerActive()) {
+ return isolate->heap()->undefined_value();
+ }
+
StackLimitCheck check(isolate);
if (check.HasOverflowed()) {
return isolate->heap()->undefined_value();
@@ -846,7 +870,7 @@ Object* Execution::DebugBreakHelper() {
if (JSFunction::cast(fun)->IsBuiltin()) {
return isolate->heap()->undefined_value();
}
- GlobalObject* global = JSFunction::cast(fun)->context()->global();
+ GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
// Don't stop in debugger functions.
if (isolate->debug()->IsDebugGlobal(global)) {
return isolate->heap()->undefined_value();
@@ -906,6 +930,17 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(GC_REQUEST);
}
+ if (stack_guard->IsCodeReadyEvent()) {
+ ASSERT(FLAG_parallel_recompilation);
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** CODE_READY event received.\n");
+ }
+ stack_guard->Continue(CODE_READY);
+ }
+ if (!stack_guard->IsTerminateExecution()) {
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ }
+
isolate->counters()->stack_interrupts()->Increment();
// If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
if (FLAG_count_based_interrupts ||
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 01e4b9da4f..9f5d9ff2cd 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -42,7 +42,8 @@ enum InterruptFlag {
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
RUNTIME_PROFILER_TICK = 1 << 5,
- GC_REQUEST = 1 << 6
+ GC_REQUEST = 1 << 6,
+ CODE_READY = 1 << 7
};
@@ -195,6 +196,8 @@ class StackGuard {
void TerminateExecution();
bool IsRuntimeProfilerTick();
void RequestRuntimeProfilerTick();
+ bool IsCodeReadyEvent();
+ void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
bool IsDebugBreak();
void DebugBreak();
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
new file mode 100644
index 0000000000..7ae090c987
--- /dev/null
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -0,0 +1,153 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "statistics-extension.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const StatisticsExtension::kSource =
+ "native function getV8Statistics();";
+
+
+v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ ASSERT(strcmp(*v8::String::AsciiValue(str), "getV8Statistics") == 0);
+ return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
+}
+
+
+static void AddCounter(v8::Local<v8::Object> object,
+ StatsCounter* counter,
+ const char* name) {
+ if (counter->Enabled()) {
+ object->Set(v8::String::New(name),
+ v8::Number::New(*counter->GetInternalPointer()));
+ }
+}
+
+static void AddNumber(v8::Local<v8::Object> object,
+ intptr_t value,
+ const char* name) {
+ object->Set(v8::String::New(name),
+ v8::Number::New(static_cast<double>(value)));
+}
+
+
+v8::Handle<v8::Value> StatisticsExtension::GetCounters(
+ const v8::Arguments& args) {
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+
+ if (args.Length() > 0) { // GC if first argument evaluates to true.
+ if (args[0]->IsBoolean() && args[0]->ToBoolean()->Value()) {
+ heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
+ }
+ }
+
+ Counters* counters = isolate->counters();
+ v8::Local<v8::Object> result = v8::Object::New();
+
+#define ADD_COUNTER(name, caption) \
+ AddCounter(result, counters->name(), #name);
+
+ STATS_COUNTER_LIST_1(ADD_COUNTER)
+ STATS_COUNTER_LIST_2(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ AddCounter(result, counters->count_of_##name(), "count_of_" #name); \
+ AddCounter(result, counters->size_of_##name(), "size_of_" #name);
+
+ INSTANCE_TYPE_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ AddCounter(result, counters->count_of_CODE_TYPE_##name(), \
+ "count_of_CODE_TYPE_" #name); \
+ AddCounter(result, counters->size_of_CODE_TYPE_##name(), \
+ "size_of_CODE_TYPE_" #name);
+
+ CODE_KIND_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ AddCounter(result, counters->count_of_FIXED_ARRAY_##name(), \
+ "count_of_FIXED_ARRAY_" #name); \
+ AddCounter(result, counters->size_of_FIXED_ARRAY_##name(), \
+ "size_of_FIXED_ARRAY_" #name);
+
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+
+ AddNumber(result, isolate->memory_allocator()->Size(),
+ "total_committed_bytes");
+ AddNumber(result, heap->new_space()->Size(),
+ "new_space_live_bytes");
+ AddNumber(result, heap->new_space()->Available(),
+ "new_space_available_bytes");
+ AddNumber(result, heap->new_space()->CommittedMemory(),
+ "new_space_commited_bytes");
+ AddNumber(result, heap->old_pointer_space()->Size(),
+ "old_pointer_space_live_bytes");
+ AddNumber(result, heap->old_pointer_space()->Available(),
+ "old_pointer_space_available_bytes");
+ AddNumber(result, heap->old_pointer_space()->CommittedMemory(),
+ "old_pointer_space_commited_bytes");
+ AddNumber(result, heap->old_data_space()->Size(),
+ "old_data_space_live_bytes");
+ AddNumber(result, heap->old_data_space()->Available(),
+ "old_data_space_available_bytes");
+ AddNumber(result, heap->old_data_space()->CommittedMemory(),
+ "old_data_space_commited_bytes");
+ AddNumber(result, heap->code_space()->Size(),
+ "code_space_live_bytes");
+ AddNumber(result, heap->code_space()->Available(),
+ "code_space_available_bytes");
+ AddNumber(result, heap->code_space()->CommittedMemory(),
+ "code_space_commited_bytes");
+ AddNumber(result, heap->cell_space()->Size(),
+ "cell_space_live_bytes");
+ AddNumber(result, heap->cell_space()->Available(),
+ "cell_space_available_bytes");
+ AddNumber(result, heap->cell_space()->CommittedMemory(),
+ "cell_space_commited_bytes");
+ AddNumber(result, heap->lo_space()->Size(),
+ "lo_space_live_bytes");
+ AddNumber(result, heap->lo_space()->Available(),
+ "lo_space_available_bytes");
+ AddNumber(result, heap->lo_space()->CommittedMemory(),
+ "lo_space_commited_bytes");
+ AddNumber(result, heap->amount_of_external_allocated_memory(),
+ "amount_of_external_allocated_memory");
+ return result;
+}
+
+
+void StatisticsExtension::Register() {
+ static StatisticsExtension statistics_extension;
+ static v8::DeclareExtension declaration(&statistics_extension);
+}
+
+} } // namespace v8::internal
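A context created with this extension exposes getV8Statistics() to scripts. A hedged sketch of wiring it up through the embedding API of this V8 era (assumes the extension is linked in and registered via StatisticsExtension::Register(); error handling omitted):

    #include <v8.h>

    int main() {
      v8::HandleScope scope;
      const char* names[] = { "v8/statistics" };
      v8::ExtensionConfiguration config(1, names);
      v8::Persistent<v8::Context> context = v8::Context::New(&config);
      v8::Context::Scope context_scope(context);

      // getV8Statistics(true) runs a full GC first, then returns an object
      // holding the enabled counters and the *_bytes heap numbers.
      v8::Handle<v8::Script> script =
          v8::Script::Compile(v8::String::New("getV8Statistics(true);"));
      v8::Handle<v8::Value> stats = script->Run();
      (void) stats;
      context.Dispose();
      return 0;
    }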
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
new file mode 100644
index 0000000000..433c4cf687
--- /dev/null
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -0,0 +1,49 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
+#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class StatisticsExtension : public v8::Extension {
+ public:
+ StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> GetCounters(const v8::Arguments& args);
+ static void Register();
+ private:
+ static const char* const kSource;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_STATISTICS_EXTENSION_H_
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 28b318a8f4..462af590d2 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -115,8 +115,7 @@ Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(number_of_descriptors,
- DescriptorArray::MAY_BE_SHARED),
+ DescriptorArray::Allocate(number_of_descriptors),
DescriptorArray);
}
@@ -285,19 +284,27 @@ Handle<String> Factory::NewExternalStringFromTwoByte(
}
-Handle<Context> Factory::NewGlobalContext() {
+Handle<Context> Factory::NewNativeContext() {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateGlobalContext(),
+ isolate()->heap()->AllocateNativeContext(),
Context);
}
-Handle<Context> Factory::NewModuleContext(Handle<Context> previous,
+Handle<Context> Factory::NewGlobalContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateModuleContext(*previous, *scope_info),
+ isolate()->heap()->AllocateGlobalContext(*function, *scope_info),
+ Context);
+}
+
+
+Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateModuleContext(*scope_info),
Context);
}
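
The three renamed constructors track V8's new context taxonomy: the context formerly called the global context is now the native context, NewGlobalContext takes on a new meaning (a context created from a closure and its scope info), and module contexts no longer thread a previous context through. A side-by-side sketch, with fn and scope_info as placeholder handles:

Handle<Context> native = factory->NewNativeContext();               // was NewGlobalContext()
Handle<Context> global = factory->NewGlobalContext(fn, scope_info); // new signature and meaning
Handle<Context> module = factory->NewModuleContext(scope_info);     // 'previous' argument dropped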
@@ -466,14 +473,15 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
}
-Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->CopyDropDescriptors(), Map);
+Handle<Map> Factory::CopyWithPreallocatedFieldDescriptors(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(
+ isolate(), src->CopyWithPreallocatedFieldDescriptors(), Map);
}
Handle<Map> Factory::CopyMap(Handle<Map> src,
int extra_inobject_properties) {
- Handle<Map> copy = CopyMapDropDescriptors(src);
+ Handle<Map> copy = CopyWithPreallocatedFieldDescriptors(src);
// Check that we do not overflow the instance size when adding the
// extra inobject properties.
int instance_size_delta = extra_inobject_properties * kPointerSize;
@@ -496,10 +504,8 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
}
-Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(),
- src->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED),
- Map);
+Handle<Map> Factory::CopyMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(isolate(), src->Copy(), Map);
}
@@ -554,18 +560,27 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
}
result->set_context(*context);
- if (!function_info->bound()) {
+
+ int index = function_info->SearchOptimizedCodeMap(context->native_context());
+ if (!function_info->bound() && index < 0) {
int number_of_literals = function_info->num_literals();
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
if (number_of_literals > 0) {
- // Store the object, regexp and array functions in the literals
- // array prefix. These functions will be used when creating
- // object, regexp and array literals in this function.
- literals->set(JSFunction::kLiteralGlobalContextIndex,
- context->global_context());
+ // Store the native context in the literals array prefix. This
+ // context will be used when creating object, regexp and array
+ // literals in this function.
+ literals->set(JSFunction::kLiteralNativeContextIndex,
+ context->native_context());
}
result->set_literals(*literals);
}
+
+ if (index > 0) {
+ // Caching of optimized code enabled and optimized code found.
+ function_info->InstallFromOptimizedCodeMap(*result, index);
+ return result;
+ }
+
if (V8::UseCrankshaft() &&
FLAG_always_opt &&
result->is_compiled() &&
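
The optimized-code cache introduced here is split across two hunks, so a condensed restatement may help (same names and index tests as above; comments paraphrase the intent):

int index = function_info->SearchOptimizedCodeMap(context->native_context());
if (!function_info->bound() && index < 0) {
  // Cache miss: allocate fresh literals and record the native context
  // in the literals array prefix.
}
if (index > 0) {
  // Cache hit: install the optimized code already recorded for this
  // native context and return early.
  function_info->InstallFromOptimizedCodeMap(*result, index);
  return result;
}
// Otherwise fall through to the FLAG_always_opt compilation path.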
@@ -699,7 +714,7 @@ Handle<String> Factory::EmergencyNewError(const char* type,
MaybeObject* maybe_arg = args->GetElement(i);
Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
const char* arg = *arg_str->ToCString();
- Vector<char> v2(p, space);
+ Vector<char> v2(p, static_cast<int>(space));
OS::StrNCpy(v2, arg, space);
space -= Min(space, strlen(arg));
p = &buffer[kBufferSize] - space;
@@ -879,97 +894,12 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
-MUST_USE_RESULT static inline MaybeObject* DoCopyInsert(
- DescriptorArray* array,
- String* key,
- Object* value,
- PropertyAttributes attributes) {
- CallbacksDescriptor desc(key, value, attributes);
- MaybeObject* obj = array->CopyInsert(&desc, REMOVE_TRANSITIONS);
- return obj;
-}
-
-
-// Allocate the new array.
-Handle<DescriptorArray> Factory::CopyAppendForeignDescriptor(
- Handle<DescriptorArray> array,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(isolate(),
- DoCopyInsert(*array, *key, *value, attributes),
- DescriptorArray);
-}
-
-
Handle<String> Factory::SymbolFromString(Handle<String> value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->LookupSymbol(*value), String);
}
-Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
- Handle<DescriptorArray> array,
- Handle<Object> descriptors) {
- v8::NeanderArray callbacks(descriptors);
- int nof_callbacks = callbacks.length();
- Handle<DescriptorArray> result =
- NewDescriptorArray(array->number_of_descriptors() + nof_callbacks);
-
- // Number of descriptors added to the result so far.
- int descriptor_count = 0;
-
- // Ensure that marking will not progress and change color of objects.
- DescriptorArray::WhitenessWitness witness(*result);
-
- // Copy the descriptors from the array.
- for (int i = 0; i < array->number_of_descriptors(); i++) {
- if (!array->IsNullDescriptor(i)) {
- DescriptorArray::CopyFrom(result, descriptor_count++, array, i, witness);
- }
- }
-
- // Number of duplicates detected.
- int duplicates = 0;
-
- // Fill in new callback descriptors. Process the callbacks from
- // back to front so that the last callback with a given name takes
- // precedence over previously added callbacks with that name.
- for (int i = nof_callbacks - 1; i >= 0; i--) {
- Handle<AccessorInfo> entry =
- Handle<AccessorInfo>(AccessorInfo::cast(callbacks.get(i)));
- // Ensure the key is a symbol before writing into the instance descriptor.
- Handle<String> key =
- SymbolFromString(Handle<String>(String::cast(entry->name())));
- // Check if a descriptor with this name already exists before writing.
- if (result->LinearSearch(EXPECT_UNSORTED, *key, descriptor_count) ==
- DescriptorArray::kNotFound) {
- CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
- result->Set(descriptor_count, &desc, witness);
- descriptor_count++;
- } else {
- duplicates++;
- }
- }
-
- // If duplicates were detected, allocate a result of the right size
- // and transfer the elements.
- if (duplicates > 0) {
- int number_of_descriptors = result->number_of_descriptors() - duplicates;
- Handle<DescriptorArray> new_result =
- NewDescriptorArray(number_of_descriptors);
- for (int i = 0; i < number_of_descriptors; i++) {
- DescriptorArray::CopyFrom(new_result, i, result, i, witness);
- }
- result = new_result;
- }
-
- // Sort the result before returning.
- result->Sort(witness);
- return result;
-}
-
-
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
@@ -978,10 +908,11 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
-Handle<JSModule> Factory::NewJSModule() {
+Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
+ Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSModule(), JSModule);
+ isolate()->heap()->AllocateJSModule(*context, *scope_info), JSModule);
}
@@ -1088,7 +1019,7 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
-void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
+void Factory::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
object->SetIdentityHash(hash, ALLOW_CREATION));
@@ -1188,7 +1119,7 @@ Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<Object> prototype) {
Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
- fun->set_context(isolate()->context()->global_context());
+ fun->set_context(isolate()->context()->native_context());
return fun;
}
@@ -1214,7 +1145,7 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
LanguageMode language_mode) {
Handle<JSFunction> fun =
NewFunctionWithoutPrototypeHelper(name, language_mode);
- fun->set_context(isolate()->context()->global_context());
+ fun->set_context(isolate()->context()->native_context());
return fun;
}
@@ -1225,8 +1156,8 @@ Handle<Object> Factory::ToObject(Handle<Object> object) {
Handle<Object> Factory::ToObject(Handle<Object> object,
- Handle<Context> global_context) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(*global_context), Object);
+ Handle<Context> native_context) {
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(*native_context), Object);
}
@@ -1353,20 +1284,15 @@ Handle<JSFunction> Factory::CreateApiFunction(
result->shared()->DontAdaptArguments();
// Recursively copy parent templates' accessors, 'data' may be modified.
- Handle<DescriptorArray> array =
- Handle<DescriptorArray>(map->instance_descriptors());
while (true) {
Handle<Object> props = Handle<Object>(obj->property_accessors());
if (!props->IsUndefined()) {
- array = CopyAppendCallbackDescriptors(array, props);
+ Map::CopyAppendCallbackDescriptors(map, props);
}
Handle<Object> parent = Handle<Object>(obj->parent_template());
if (parent->IsUndefined()) break;
obj = Handle<FunctionTemplateInfo>::cast(parent);
}
- if (!array->IsEmpty()) {
- map->set_instance_descriptors(*array);
- }
ASSERT(result->shared()->IsApiFunction());
return result;
@@ -1403,7 +1329,7 @@ Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
Handle<FixedArray> keys) {
if (context->map_cache()->IsUndefined()) {
- // Allocate the new map cache for the global context.
+ // Allocate the new map cache for the native context.
Handle<MapCache> new_cache = NewMapCache(24);
context->set_map_cache(*new_cache);
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index bb435456b0..e617abb6d1 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -160,12 +160,15 @@ class Factory {
const ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
- Handle<Context> NewGlobalContext();
+ Handle<Context> NewNativeContext();
- // Create a module context.
- Handle<Context> NewModuleContext(Handle<Context> previous,
+ // Create a global context.
+ Handle<Context> NewGlobalContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info);
+ // Create a module context.
+ Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
+
// Create a function context.
Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
@@ -223,13 +226,12 @@ class Factory {
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
- Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
+ Handle<Map> CopyWithPreallocatedFieldDescriptors(Handle<Map> map);
// Copy the map adding more inobject properties if possible without
// overflowing the instance size.
Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
-
- Handle<Map> CopyMapDropTransitions(Handle<Map> map);
+ Handle<Map> CopyMap(Handle<Map> map);
Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
ElementsKind elements_kind);
@@ -267,7 +269,8 @@ class Factory {
Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
// JS modules are pretenured.
- Handle<JSModule> NewJSModule();
+ Handle<JSModule> NewJSModule(Handle<Context> context,
+ Handle<ScopeInfo> scope_info);
// JS arrays are pretenured when allocated by the parser.
Handle<JSArray> NewJSArray(
@@ -298,7 +301,7 @@ class Factory {
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
- void SetIdentityHash(Handle<JSObject> object, Object* hash);
+ void SetIdentityHash(Handle<JSObject> object, Smi* hash);
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@@ -332,7 +335,7 @@ class Factory {
Handle<Object> ToObject(Handle<Object> object);
Handle<Object> ToObject(Handle<Object> object,
- Handle<Context> global_context);
+ Handle<Context> native_context);
// Interface for creating error objects.
@@ -386,12 +389,6 @@ class Factory {
Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code);
- Handle<DescriptorArray> CopyAppendForeignDescriptor(
- Handle<DescriptorArray> array,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
Handle<String> NumberToString(Handle<Object> number);
Handle<String> Uint32ToString(uint32_t value);
@@ -464,7 +461,7 @@ class Factory {
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
#endif
- // Return a map using the map cache in the global context.
+ // Return a map using the map cache in the native context.
// The key is an ordered set of property names.
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
Handle<FixedArray> keys);
@@ -503,14 +500,10 @@ class Factory {
Handle<String> name,
LanguageMode language_mode);
- Handle<DescriptorArray> CopyAppendCallbackDescriptors(
- Handle<DescriptorArray> array,
- Handle<Object> descriptors);
-
// Create a new map cache.
Handle<MapCache> NewMapCache(int at_least_space_for);
- // Update the map cache in the global context with (keys, map)
+ // Update the map cache in the native context with (keys, map)
Handle<MapCache> AddToMapCache(Handle<Context> context,
Handle<FixedArray> keys,
Handle<Map> map);
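
The header now pairs the renamed copy helper with an overloaded CopyMap; a side-by-side sketch of the old and new spellings (map and extra_inobject_props are placeholders):

Handle<Map> a = factory->CopyWithPreallocatedFieldDescriptors(map);  // was CopyMapDropDescriptors()
Handle<Map> b = factory->CopyMap(map, extra_inobject_props);         // unchanged
Handle<Map> c = factory->CopyMap(map);                               // was CopyMapDropTransitions()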
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 2b4c53cd2d..d3ea89bdb7 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -132,9 +132,9 @@ public:
// Flags for language modes and experimental language features.
DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es5_readonly, false,
+DEFINE_bool(es5_readonly, true,
"activate correct semantics for inheriting readonliness")
-DEFINE_bool(es52_globals, false,
+DEFINE_bool(es52_globals, true,
"activate new semantics for global var declarations")
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
@@ -152,7 +152,7 @@ DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
// Flags for experimental implementation features.
-DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
+DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(clever_optimizations,
true,
@@ -206,12 +206,27 @@ DEFINE_bool(array_index_dehoisting, false,
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
+DEFINE_bool(lookup_sample_by_shared, true,
+ "when picking a function to optimize, watch for shared function "
+ "info, not JSFunction itself")
+DEFINE_bool(cache_optimized_code, true,
+ "cache optimized code for closures")
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
+DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
+DEFINE_bool(opt_safe_uint32_operations, true,
+ "allow uint32 values on optimize frames if they are used only in"
+ "safe operations")
+
+DEFINE_bool(parallel_recompilation, false,
+ "optimizing hot functions asynchronously on a separate thread")
+DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
+DEFINE_int(parallel_recompilation_queue_length, 2,
+ "the length of the parallel compilation queue")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
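
Most of the new knobs above default to off (parallel recompilation in particular), so an embedder that wants them must flip them explicitly. A hedged sketch using the public v8::V8::SetFlagsFromString entry point, which parses the same syntax as the command line:

#include <string.h>
#include "v8.h"

void EnableParallelRecompilation() {
  // A queue length of 4 is an arbitrary example value; the default is 2.
  const char flags[] =
      "--parallel_recompilation --parallel_recompilation_queue_length=4";
  v8::V8::SetFlagsFromString(flags, static_cast<int>(strlen(flags)));
}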
@@ -228,7 +243,8 @@ DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
"weight back edges by jump distance for interrupt triggering")
-DEFINE_int(interrupt_budget, 5900,
+ // 0x1700 fits in the immediate field of an ARM instruction.
+DEFINE_int(interrupt_budget, 0x1700,
"execution budget before interrupt is triggered")
DEFINE_int(type_info_threshold, 15,
"percentage of ICs that must have type info to allow optimization")
@@ -263,7 +279,9 @@ DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available - this implies "
- "enabling ARMv7 instructions (ARM only)")
+ "enabling ARMv7 and VFP2 instructions (ARM only)")
+DEFINE_bool(enable_vfp2, true,
+ "enable use of VFP2 instructions if available")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_fpu, true,
@@ -307,8 +325,8 @@ DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
-DEFINE_bool(trace_bailout, false,
- "print reasons for falling back to using the classic V8 backend")
+DEFINE_int(max_opt_count, 10,
+ "maximum number of optimization attempts before giving up.")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -348,12 +366,17 @@ DEFINE_bool(trace_gc, false,
DEFINE_bool(trace_gc_nvp, false,
"print one detailed trace line in name=value format "
"after each garbage collection")
+DEFINE_bool(trace_gc_ignore_scavenger, false,
+ "do not print trace line after scavenger collection")
DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(trace_fragmentation, false,
"report fragmentation for old pointer and data pages")
+DEFINE_bool(trace_external_memory, false,
+ "print amount of external allocated memory after each time "
+ "it is adjusted.")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
@@ -362,13 +385,12 @@ DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
+DEFINE_bool(track_gc_object_stats, false,
+ "track object counts and memory usage")
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
-
-DEFINE_bool(send_idle_notification, false,
- "Send idle notifcation between stress runs.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
@@ -402,6 +424,7 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
@@ -446,6 +469,10 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
"file in which to serialize heap")
#endif
+// mksnapshot.cc
+DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
+ " the snapshot (mksnapshot only)")
+
//
// Dev shell flags
//
@@ -532,6 +559,8 @@ DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
DEFINE_bool(code_stats, false, "report code statistics after GC")
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
+DEFINE_bool(verify_native_context_separation, false,
+ "verify that code holds on to at most one native context after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
DEFINE_bool(print_global_handles, false, "report global handles after GC")
@@ -604,6 +633,8 @@ DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
+DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
+ "Specify the name of the file for fake gc mmap used in ll_prof")
//
// Disassembler only flags
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 5720cbda34..bca0eff58d 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -31,7 +31,7 @@
#include "v8.h"
#include "platform.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "string-stream.h"
@@ -343,6 +343,7 @@ static Flag* FindFlag(const char* name) {
int FlagList::SetFlagsFromCommandLine(int* argc,
char** argv,
bool remove_flags) {
+ int return_code = 0;
// parse arguments
for (int i = 1; i < *argc;) {
int j = i; // j > 0
@@ -368,7 +369,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
} else {
fprintf(stderr, "Error: unrecognized flag %s\n"
"Try --help for options\n", arg);
- return j;
+ return_code = j;
+ break;
}
}
@@ -382,7 +384,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
fprintf(stderr, "Error: missing value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
- return j;
+ return_code = j;
+ break;
}
}
@@ -424,7 +427,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
- return j;
+ return_code = j;
+ break;
}
// remove the flag & value from the command
@@ -451,7 +455,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
exit(0);
}
// parsed all flags successfully
- return 0;
+ return return_code;
}
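
The net effect is a shift from fail-fast to fail-soft parsing: the first unrecognized or malformed flag no longer aborts the loop; its argv index is recorded, the remaining flags are still applied, and the recorded index is returned at the end (0 when everything parsed). A hedged caller sketch:

#include <stdio.h>

// Hypothetical argv: { "shell", "--use_strict", "--bogus", "--trace_gc" }
// Before this patch: returns 2 and --trace_gc is never processed.
// After this patch: still returns 2, but both --use_strict and
// --trace_gc take effect before the call returns.
int first_bad =
    v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
if (first_bad != 0) {
  fprintf(stderr, "warning: skipped bad flag at argv[%d]\n", first_bad);
}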
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index b7e028634f..18dc54164a 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -789,7 +789,7 @@ void JavaScriptFrame::PrintTop(FILE* file,
ROBUST_STRING_TRAVERSAL);
PrintF(file, " at %s:%d", *c_script_name, line);
} else {
- PrintF(file, "at <unknown>:%d", line);
+ PrintF(file, " at <unknown>:%d", line);
}
} else {
PrintF(file, " at <unknown>:<unknown>");
@@ -832,12 +832,23 @@ void FrameSummary::Print() {
}
+JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
+ int literal_id) {
+ if (literal_id == Translation::kSelfLiteralId) {
+ return JSFunction::cast(function());
+ }
+
+ return JSFunction::cast(literal_array->get(literal_id));
+}
+
+
void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
ASSERT(frames->length() == 0);
ASSERT(is_optimized());
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+ FixedArray* literal_array = data->LiteralArray();
// BUG(3243555): Since we don't have a lazy-deopt registered at
// throw-statements, we can't use the translation at the call-site of
@@ -864,11 +875,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::JS_FRAME) {
i--;
- int ast_id = it.Next();
- int function_id = it.Next();
+ BailoutId ast_id = BailoutId(it.Next());
+ JSFunction* function = LiteralAt(literal_array, it.Next());
it.Next(); // Skip height.
- JSFunction* function =
- JSFunction::cast(data->LiteralArray()->get(function_id));
// The translation commands are ordered and the receiver is always
// at the first position. Since we are always at a call when we need
@@ -975,6 +984,7 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+ FixedArray* literal_array = data->LiteralArray();
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
@@ -990,10 +1000,8 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
if (opcode == Translation::JS_FRAME) {
jsframe_count--;
it.Next(); // Skip ast id.
- int function_id = it.Next();
+ JSFunction* function = LiteralAt(literal_array, it.Next());
it.Next(); // Skip height.
- JSFunction* function =
- JSFunction::cast(data->LiteralArray()->get(function_id));
functions->Add(function);
} else {
// Skip over operands to advance to the next opcode.
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 2d45932d09..30f7e1f00e 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -577,6 +577,8 @@ class OptimizedFrame : public JavaScriptFrame {
inline explicit OptimizedFrame(StackFrameIterator* iterator);
private:
+ JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
+
friend class StackFrameIterator;
};
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 4da4e531ee..9592e0afa2 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -36,6 +36,7 @@
#include "prettyprinter.h"
#include "scopes.h"
#include "scopeinfo.h"
+#include "snapshot.h"
#include "stub-cache.h"
namespace v8 {
@@ -303,7 +304,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
- FullCodeGenerator cgen(&masm, info, isolate->zone());
+ FullCodeGenerator cgen(&masm, info);
cgen.Generate();
if (cgen.HasStackOverflow()) {
ASSERT(!isolate->has_pending_exception());
@@ -315,7 +316,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable() &&
!info->function()->flags()->Contains(kDontOptimize) &&
- info->function()->scope()->AllowsLazyRecompilation());
+ info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateTypeFeedbackCells(code);
@@ -352,7 +353,7 @@ unsigned FullCodeGenerator::EmitStackCheckTable() {
unsigned length = stack_checks_.length();
__ dd(length);
for (unsigned i = 0; i < length; ++i) {
- __ dd(stack_checks_[i].id);
+ __ dd(stack_checks_[i].id.ToInt());
__ dd(stack_checks_[i].pc_and_state);
}
return offset;
@@ -367,7 +368,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationOutputData> data = isolate()->factory()->
NewDeoptimizationOutputData(length, TENURED);
for (int i = 0; i < length; i++) {
- data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
+ data->SetAstId(i, bailout_entries_[i].id);
data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
}
code->set_deoptimization_data(*data);
@@ -382,6 +383,20 @@ void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
}
+void FullCodeGenerator::Initialize() {
+ // The generation of debug code must match between the snapshot code and the
+ // code that is generated later. This is assumed by the debugger when it is
+ // calculating PC offsets after generating a debug version of code. Therefore
+ // we disable the production of debug code in the full compiler if we are
+ // either generating a snapshot or we booted from a snapshot.
+ generate_debug_code_ = FLAG_debug_code &&
+ !Serializer::enabled() &&
+ !Snapshot::HaveASnapshotToStartFrom();
+ masm_->set_emit_debug_code(generate_debug_code_);
+ masm_->set_predictable_code_size(true);
+}
+
+
void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
if (type_feedback_cells_.is_empty()) return;
int length = type_feedback_cells_.length();
@@ -389,7 +404,7 @@ void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
isolate()->factory()->NewFixedArray(array_size, TENURED));
for (int i = 0; i < length; i++) {
- cache->SetAstId(i, Smi::FromInt(type_feedback_cells_[i].ast_id));
+ cache->SetAstId(i, type_feedback_cells_[i].ast_id);
cache->SetCell(i, *type_feedback_cells_[i].cell);
}
TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
@@ -420,7 +435,7 @@ void FullCodeGenerator::RecordJSReturnSite(Call* call) {
}
-void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
// There's no need to prepare this code for bailouts from already optimized
// code or code that can't be optimized.
if (!info_->HasDeoptimizationSupport()) return;
@@ -445,13 +460,13 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
void FullCodeGenerator::RecordTypeFeedbackCell(
- unsigned id, Handle<JSGlobalPropertyCell> cell) {
+ TypeFeedbackId id, Handle<JSGlobalPropertyCell> cell) {
TypeFeedbackCellEntry entry = { id, cell };
type_feedback_cells_.Add(entry, zone());
}
-void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
+void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a
// state.
ASSERT(masm_->pc_offset() > 0);
@@ -589,27 +604,20 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
- Handle<JSModule> instance = module->interface()->Instance();
- ASSERT(!instance.is_null());
-
// Allocate a module context statically.
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+ Interface* interface = module->interface();
+ Handle<JSModule> instance = interface->Instance();
- // Generate code for module creation and linking.
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
- if (scope_info->HasContext()) {
- // Set up module context.
- __ Push(scope_info);
- __ Push(instance);
- __ CallRuntime(Runtime::kPushModuleContext, 2);
- StoreToFrameField(
- StandardFrameConstants::kContextOffset, context_register());
- }
+ // Set up module context.
+ __ Push(instance);
+ __ CallRuntime(Runtime::kPushModuleContext, 1);
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
Comment cmnt(masm_, "[ Declarations");
@@ -617,42 +625,21 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
}
scope_ = saved_scope;
- if (scope_info->HasContext()) {
- // Pop module context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(
- StandardFrameConstants::kContextOffset, context_register());
- }
-
- // Populate module instance object.
- const PropertyAttributes attr =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
- for (Interface::Iterator it = module->interface()->iterator();
- !it.done(); it.Advance()) {
- if (it.interface()->IsModule()) {
- Handle<Object> value = it.interface()->Instance();
- ASSERT(!value.is_null());
- JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
- } else {
- // TODO(rossberg): set proper getters instead of undefined...
- // instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
- Handle<Object> value(isolate()->heap()->undefined_value());
- JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
- }
- }
- USE(instance->PreventExtensions());
+ // Pop module context.
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
}
void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
- // Noting to do.
+ // Nothing to do.
// The instance object is resolved statically through the module's interface.
}
void FullCodeGenerator::VisitModulePath(ModulePath* module) {
- // Noting to do.
+ // Nothing to do.
// The instance object is resolved statically through the module's interface.
}
@@ -822,7 +809,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
Comment cmnt(masm_, is_logical_and ? "[ Logical AND" : "[ Logical OR");
Expression* left = expr->left();
Expression* right = expr->right();
- int right_id = expr->RightId();
+ BailoutId right_id = expr->RightId();
Label done;
if (context()->IsTest()) {
@@ -916,25 +903,36 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
Scope* saved_scope = scope();
// Push a block context when entering a block with block scoped variables.
if (stmt->scope() != NULL) {
- { Comment cmnt(masm_, "[ Extend block context");
- scope_ = stmt->scope();
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
- int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
- PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
+ scope_ = stmt->scope();
+ if (scope_->is_module_scope()) {
+ // If this block is a module body, then we have already allocated and
+ // initialized the declarations earlier. Just push the context.
+ ASSERT(!scope_->interface()->Instance().is_null());
+ __ Push(scope_->interface()->Instance());
+ __ CallRuntime(Runtime::kPushModuleContext, 1);
+ StoreToFrameField(
+ StandardFrameConstants::kContextOffset, context_register());
+ } else {
+ { Comment cmnt(masm_, "[ Extend block context");
+ Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+ int heap_slots =
+ scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+ __ Push(scope_info);
+ PushFunctionArgumentForContextAllocation();
+ if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+ FastNewBlockContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kPushBlockContext, 2);
+ }
+
+ // Replace the context stored in the frame.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+ }
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(scope_->declarations());
}
-
- // Replace the context stored in the frame.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope_->declarations());
}
}
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 928de47b31..89b51f9582 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -77,8 +77,7 @@ class FullCodeGenerator: public AstVisitor {
TOS_REG
};
- FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
- Zone* zone)
+ FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
: masm_(masm),
info_(info),
scope_(info->scope()),
@@ -87,12 +86,18 @@ class FullCodeGenerator: public AstVisitor {
globals_(NULL),
context_(NULL),
bailout_entries_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0, zone),
- stack_checks_(2, zone), // There's always at least one.
+ ? info->function()->ast_node_count() : 0,
+ info->zone()),
+ stack_checks_(2, info->zone()), // There's always at least one.
type_feedback_cells_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0, zone),
+ ? info->function()->ast_node_count() : 0,
+ info->zone()),
ic_total_count_(0),
- zone_(zone) { }
+ zone_(info->zone()) {
+ Initialize();
+ }
+
+ void Initialize();
static bool MakeCode(CompilationInfo* info);
@@ -112,6 +117,21 @@ class FullCodeGenerator: public AstVisitor {
Zone* zone() const { return zone_; }
+ static const int kMaxBackEdgeWeight = 127;
+
+#if V8_TARGET_ARCH_IA32
+ static const int kBackEdgeDistanceUnit = 100;
+#elif V8_TARGET_ARCH_X64
+ static const int kBackEdgeDistanceUnit = 162;
+#elif V8_TARGET_ARCH_ARM
+ static const int kBackEdgeDistanceUnit = 142;
+#elif V8_TARGET_ARCH_MIPS
+ static const int kBackEdgeDistanceUnit = 142;
+#else
+#error Unsupported target architecture.
+#endif
+
+
private:
class Breakable;
class Iteration;
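
kMaxBackEdgeWeight and the per-architecture kBackEdgeDistanceUnit govern how aggressively loop back edges decrement the profiling counter. A hedged sketch of the computation as a platform backend performs it under --weighted_back_edges (back_edge_target is assumed to be the bound label at the loop header):

int weight = 1;
if (FLAG_weighted_back_edges) {
  ASSERT(back_edge_target->is_bound());
  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
  weight = Min(kMaxBackEdgeWeight,
               Max(1, distance / kBackEdgeDistanceUnit));
}
// The profiling counter is decremented by 'weight' at each back edge;
// when it underflows, an interrupt fires and the function becomes a
// candidate for optimization.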
@@ -397,11 +417,12 @@ class FullCodeGenerator: public AstVisitor {
// Bailout support.
void PrepareForBailout(Expression* node, State state);
- void PrepareForBailoutForId(unsigned id, State state);
+ void PrepareForBailoutForId(BailoutId id, State state);
// Cache cell support. This associates AST ids with global property cells
// that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(unsigned id, Handle<JSGlobalPropertyCell> cell);
+ void RecordTypeFeedbackCell(TypeFeedbackId id,
+ Handle<JSGlobalPropertyCell> cell);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -428,7 +449,7 @@ class FullCodeGenerator: public AstVisitor {
// of code inside the loop.
void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
// Record the OSR AST id corresponding to a stack check in the code.
- void RecordStackCheck(unsigned osr_ast_id);
+ void RecordStackCheck(BailoutId osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
@@ -519,7 +540,7 @@ class FullCodeGenerator: public AstVisitor {
void CallIC(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -590,12 +611,12 @@ class FullCodeGenerator: public AstVisitor {
Handle<FixedArray> handler_table() { return handler_table_; }
struct BailoutEntry {
- unsigned id;
+ BailoutId id;
unsigned pc_and_state;
};
struct TypeFeedbackCellEntry {
- unsigned ast_id;
+ TypeFeedbackId ast_id;
Handle<JSGlobalPropertyCell> cell;
};
@@ -790,6 +811,7 @@ class FullCodeGenerator: public AstVisitor {
int ic_total_count_;
Handle<FixedArray> handler_table_;
Handle<JSGlobalPropertyCell> profiling_counter_;
+ bool generate_debug_code_;
Zone* zone_;
friend class NestedStatement;
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 97b033f848..00ecd63d2a 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -203,6 +203,7 @@ typedef byte* Address;
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
+#define V8PRIuPTR V8_PTR_PREFIX "u"
// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if defined(__APPLE__) && defined(__MACH__)
@@ -360,6 +361,20 @@ F FUNCTION_CAST(Address addr) {
#define MUST_USE_RESULT
#endif
+
+// Define DISABLE_ASAN macros.
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define DISABLE_ASAN __attribute__((no_address_safety_analysis))
+#endif
+#endif
+
+
+#ifndef DISABLE_ASAN
+#define DISABLE_ASAN
+#endif
+
+
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
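
DISABLE_ASAN expands to the no_address_safety_analysis attribute when building under AddressSanitizer and to nothing otherwise, so code that reads out of bounds on purpose (conservative stack or heap scanning, say) can opt out of instrumentation. A minimal, hypothetical sketch:

// Hypothetical helper: dereferences a slot that ASan's bookkeeping
// would otherwise flag, e.g. while scanning a foreign stack region.
DISABLE_ASAN static uintptr_t ReadWordUnchecked(uintptr_t* slot) {
  return *slot;
}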
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index a5c81cec56..130798647b 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -149,25 +149,31 @@ T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation() {
+ Isolate* isolate = Isolate::Current();
v8::ImplementationUtilities::HandleScopeData* current =
- Isolate::Current()->handle_scope_data();
+ isolate->handle_scope_data();
- // Shrink the current handle scope to make it impossible to do
- // handle allocations without an explicit handle scope.
- current->limit = current->next;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ // Shrink the current handle scope to make it impossible to do
+ // handle allocations without an explicit handle scope.
+ current->limit = current->next;
- level_ = current->level;
- current->level = 0;
+ level_ = current->level;
+ current->level = 0;
+ }
}
inline NoHandleAllocation::~NoHandleAllocation() {
- // Restore state in current handle scope to re-enable handle
- // allocations.
- v8::ImplementationUtilities::HandleScopeData* data =
- Isolate::Current()->handle_scope_data();
- ASSERT_EQ(0, data->level);
- data->level = level_;
+ if (active_) {
+ // Restore state in current handle scope to re-enable handle
+ // allocations.
+ v8::ImplementationUtilities::HandleScopeData* data =
+ Isolate::Current()->handle_scope_data();
+ ASSERT_EQ(0, data->level);
+ data->level = level_;
+ }
}
#endif
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index def1604ac7..6aa7a6a876 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -165,7 +165,7 @@ void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
func->shared()->set_expected_nof_properties(nof);
if (func->has_initial_map()) {
Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMapDropTransitions(
+ func->GetIsolate()->factory()->CopyMap(
Handle<Map>(func->initial_map()));
new_initial_map->set_unused_property_fields(nof);
func->set_initial_map(*new_initial_map);
@@ -561,6 +561,9 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
result = enum_fun(info);
}
}
+#if ENABLE_EXTRA_CHECKS
+ CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
+#endif
return result;
}
@@ -581,6 +584,9 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
// Leaving JavaScript.
VMState state(isolate, EXTERNAL);
result = enum_fun(info);
+#if ENABLE_EXTRA_CHECKS
+ CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
+#endif
}
}
return result;
@@ -604,7 +610,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
- isolate->context()->global_context()->arguments_boilerplate(),
+ isolate->context()->native_context()->arguments_boilerplate(),
isolate);
Handle<JSFunction> arguments_function = Handle<JSFunction>(
JSFunction::cast(arguments_boilerplate->map()->constructor()),
@@ -701,75 +707,106 @@ Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result) {
- int index = 0;
Isolate* isolate = object->GetIsolate();
if (object->HasFastProperties()) {
if (object->map()->instance_descriptors()->HasEnumCache()) {
- isolate->counters()->enum_cache_hits()->Increment();
+ int own_property_count = object->map()->EnumLength();
+
+ // Mark that we have an enum cache if we are allowed to cache it.
+ if (cache_result && own_property_count == Map::kInvalidEnumCache) {
+ int num_enum = object->map()->NumberOfDescribedProperties(DONT_ENUM);
+ object->map()->SetEnumLength(num_enum);
+ }
+
DescriptorArray* desc = object->map()->instance_descriptors();
- return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()),
- isolate);
+ Handle<FixedArray> keys(FixedArray::cast(desc->GetEnumCache()), isolate);
+
+ isolate->counters()->enum_cache_hits()->Increment();
+ return keys;
}
- isolate->counters()->enum_cache_misses()->Increment();
+
Handle<Map> map(object->map());
- int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
+ if (map->instance_descriptors()->IsEmpty()) {
+ isolate->counters()->enum_cache_hits()->Increment();
+ if (cache_result) map->SetEnumLength(0);
+ return isolate->factory()->empty_fixed_array();
+ }
- Handle<FixedArray> indices;
- Handle<FixedArray> sort_array2;
+ isolate->counters()->enum_cache_misses()->Increment();
- if (cache_result) {
- indices = isolate->factory()->NewFixedArray(num_enum);
- sort_array2 = isolate->factory()->NewFixedArray(num_enum);
- }
+ int num_enum = map->NumberOfDescribedProperties(DONT_ENUM);
+
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
+ Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum);
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
+ int index = 0;
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsProperty(i) && !descs->GetDetails(i).IsDontEnum()) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (!details.IsDontEnum()) {
storage->set(index, descs->GetKey(i));
- PropertyDetails details = descs->GetDetails(i);
- sort_array->set(index, Smi::FromInt(details.index()));
if (!indices.is_null()) {
if (details.type() != FIELD) {
indices = Handle<FixedArray>();
- sort_array2 = Handle<FixedArray>();
} else {
int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
if (field_index >= map->inobject_properties()) {
field_index = -(field_index - map->inobject_properties() + 1);
}
indices->set(index, Smi::FromInt(field_index));
- sort_array2->set(index, Smi::FromInt(details.index()));
}
}
index++;
}
}
- storage->SortPairs(*sort_array, sort_array->length());
- if (!indices.is_null()) {
- indices->SortPairs(*sort_array2, sort_array2->length());
- }
+ ASSERT(index == storage->length());
+
+ Handle<FixedArray> bridge_storage =
+ isolate->factory()->NewFixedArray(
+ DescriptorArray::kEnumCacheBridgeLength);
+ DescriptorArray* desc = object->map()->instance_descriptors();
+ desc->SetEnumCache(*bridge_storage,
+ *storage,
+ indices.is_null() ? Object::cast(Smi::FromInt(0))
+ : Object::cast(*indices));
if (cache_result) {
- Handle<FixedArray> bridge_storage =
- isolate->factory()->NewFixedArray(
- DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage,
- *storage,
- indices.is_null() ? Object::cast(Smi::FromInt(0))
- : Object::cast(*indices));
+ object->map()->SetEnumLength(index);
}
- ASSERT(storage->length() == index);
return storage;
} else {
- int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
- object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
+ Handle<StringDictionary> dictionary(object->property_dictionary());
+
+ int length = dictionary->NumberOfElements();
+ if (length == 0) {
+ return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
+ }
+
+ // The enumeration array is generated by allocating an array big enough to
+ // hold all properties that have been seen, whether they are deleted or
+ // not. Subsequently all visible properties are added to the array. If some
+ // properties were not visible, the array is trimmed so it only contains
+ // visible properties. Compared to adding elements and then sorting by
+ // index, this has linear rather than n*log(n) complexity.
+
+ // By comparing the monotonic NextEnumerationIndex to the NumberOfElements,
+ // we can predict the number of holes in the final array. If there will be
+ // more than 50% holes, regenerate the enumeration indices to reduce the
+ // number of holes to a minimum. This avoids allocating a large array if
+ // many properties were added but subsequently deleted.
+ int next_enumeration = dictionary->NextEnumerationIndex();
+ if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) {
+ StringDictionary::DoGenerateNewEnumerationIndices(dictionary);
+ next_enumeration = dictionary->NextEnumerationIndex();
+ }
+
+ Handle<FixedArray> storage =
+ isolate->factory()->NewFixedArray(next_enumeration);
+
+ storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage));
+ ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_ENUM));
return storage;
}
}
@@ -958,4 +995,47 @@ int Utf8Length(Handle<String> str) {
return len;
}
+
+DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
+ : impl_(isolate->handle_scope_implementer()) {
+ ASSERT(impl_->isolate() == Isolate::Current());
+ impl_->BeginDeferredScope();
+ v8::ImplementationUtilities::HandleScopeData* data =
+ impl_->isolate()->handle_scope_data();
+ Object** new_next = impl_->GetSpareOrNewBlock();
+ Object** new_limit = &new_next[kHandleBlockSize];
+ ASSERT(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
+ impl_->blocks()->Add(new_next);
+
+#ifdef DEBUG
+ prev_level_ = data->level;
+#endif
+ data->level++;
+ prev_limit_ = data->limit;
+ prev_next_ = data->next;
+ data->next = new_next;
+ data->limit = new_limit;
+}
+
+
+DeferredHandleScope::~DeferredHandleScope() {
+ impl_->isolate()->handle_scope_data()->level--;
+ ASSERT(handles_detached_);
+ ASSERT(impl_->isolate()->handle_scope_data()->level == prev_level_);
+}
+
+
+DeferredHandles* DeferredHandleScope::Detach() {
+ DeferredHandles* deferred = impl_->Detach(prev_limit_);
+ v8::ImplementationUtilities::HandleScopeData* data =
+ impl_->isolate()->handle_scope_data();
+ data->next = prev_next_;
+ data->limit = prev_limit_;
+#ifdef DEBUG
+ handles_detached_ = true;
+#endif
+ return deferred;
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 960696b5fb..b35693e95f 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -95,6 +95,10 @@ class Handle {
};
+class DeferredHandles;
+class HandleScopeImplementer;
+
+
// A stack-allocated class that governs a number of local handles.
// After a handle scope has been created, all local handles will be
// allocated within that handle scope until either the handle scope is
@@ -156,8 +160,37 @@ class HandleScope {
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
+ friend class v8::internal::DeferredHandles;
friend class v8::HandleScope;
+ friend class v8::internal::HandleScopeImplementer;
friend class v8::ImplementationUtilities;
+ friend class v8::internal::Isolate;
+};
+
+
+class DeferredHandles;
+
+
+class DeferredHandleScope {
+ public:
+ explicit DeferredHandleScope(Isolate* isolate);
+ // The DeferredHandles object returned stores the Handles created
+ // since the creation of this DeferredHandleScope. The Handles are
+ // alive as long as the DeferredHandles object is alive.
+ DeferredHandles* Detach();
+ ~DeferredHandleScope();
+
+ private:
+ Object** prev_limit_;
+ Object** prev_next_;
+ HandleScopeImplementer* impl_;
+
+#ifdef DEBUG
+ bool handles_detached_;
+ int prev_level_;
+#endif
+
+ friend class HandleScopeImplementer;
};
@@ -216,7 +249,7 @@ Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
// if none exists.
Handle<JSValue> GetScriptWrapper(Handle<Script> script);
-// Script line number computations.
+// Script line number computations. Note that the line number is zero-based.
void InitScriptLineEnds(Handle<Script> script);
// For string calculates an array of line end positions. If the string
// does not end with a new line character, this character may optionally be
@@ -294,6 +327,7 @@ class NoHandleAllocation BASE_EMBEDDED {
inline ~NoHandleAllocation();
private:
int level_;
+ bool active_;
#endif
};
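
DeferredHandleScope is the mechanism that lets handles outlive their creating scope: handles allocated inside it are kept alive by the DeferredHandles object returned from Detach(), which appears tied to the parallel-recompilation support added elsewhere in this patch. A hedged usage sketch (Detach() must run before the scope is destroyed, per the destructor's assertion in handles.cc):

DeferredHandles* CaptureHandles(Isolate* isolate) {
  DeferredHandleScope scope(isolate);
  // Handles created here are backed by the deferred block...
  Handle<Object> undefined(isolate->heap()->undefined_value());
  // ...and stay valid for as long as the returned object is kept alive.
  return scope.Detach();
}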
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index 6f76e9f7f4..11f6ace7d8 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -59,7 +59,8 @@ class TemplateHashMapImpl {
struct Entry {
void* key;
void* value;
- uint32_t hash; // the full hash value for key
+ uint32_t hash; // The full hash value for key
+ int order; // If you never remove entries, this is the insertion order.
};
// If an entry with matching key is found, Lookup()
@@ -140,6 +141,7 @@ TemplateHashMapImpl<AllocationPolicy>::Lookup(
p->key = key;
p->value = NULL;
p->hash = hash;
+ p->order = occupancy_;
occupancy_++;
// Grow the map if we reached >= 80% occupancy.
@@ -297,7 +299,9 @@ void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
// Rehash all current entries.
for (Entry* p = map; n > 0; p++) {
if (p->key != NULL) {
- Lookup(p->key, p->hash, true, allocator)->value = p->value;
+ Entry* entry = Lookup(p->key, p->hash, true, allocator);
+ entry->value = p->value;
+ entry->order = p->order;
n--;
}
}
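
The new order field snapshots occupancy_ at insertion time and is preserved across Resize(), so insertion order can be reconstructed afterwards, with the caveat from the comment above: removals can recycle order values. A hedged sketch, assuming the HashMap typedef from this header:

#include <algorithm>
#include <vector>

static bool ByInsertionOrder(const HashMap::Entry* a,
                             const HashMap::Entry* b) {
  return a->order < b->order;
}

// Collect the live entries and sort them back into insertion order.
static void CollectInInsertionOrder(HashMap* map,
                                    std::vector<HashMap::Entry*>* out) {
  for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
    out->push_back(p);
  }
  std::sort(out->begin(), out->end(), ByInsertionOrder);
}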
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 9d79db2466..4a827fef17 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -468,10 +468,12 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
+ } else {
+ // Give up and reset the counters in case of an overflow.
+ amount_of_external_allocated_memory_ = 0;
+ amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
- intptr_t amount_since_last_global_gc =
- amount_of_external_allocated_memory_ -
- amount_of_external_allocated_memory_at_last_global_gc_;
+ intptr_t amount_since_last_global_gc = PromotedExternalMemorySize();
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
}
@@ -479,8 +481,19 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
// Avoid underflow.
if (amount >= 0) {
amount_of_external_allocated_memory_ = amount;
+ } else {
+ // Give up and reset the counters in case of an overflow.
+ amount_of_external_allocated_memory_ = 0;
+ amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
}
+ if (FLAG_trace_external_memory) {
+ PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
+ PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
+ " amount=%6" V8_PTR_PREFIX "d KB, isolate=0x%08" V8PRIxPTR ".\n",
+ change_in_bytes / 1024, amount_of_external_allocated_memory_ / 1024,
+ reinterpret_cast<intptr_t>(isolate()));
+ }
ASSERT(amount_of_external_allocated_memory_ >= 0);
return amount_of_external_allocated_memory_;
}
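
For embedders the visible contract is unchanged, but the new clamping means a mis-balanced delta now resets the counters instead of wedging them, and --trace_external_memory (added above) logs every adjustment. A hedged sketch of the intended usage pattern, with buffer_size as a placeholder:

#include <stdlib.h>
#include "v8.h"

void WrapExternalBuffer(size_t buffer_size) {
  void* backing_store = malloc(buffer_size);
  // Tell the GC about memory retained outside the V8 heap so it can
  // schedule collections; the delta must be paid back on release.
  v8::V8::AdjustAmountOfExternalAllocatedMemory(
      static_cast<intptr_t>(buffer_size));
  // ... use the buffer, then release it:
  free(backing_store);
  v8::V8::AdjustAmountOfExternalAllocatedMemory(
      -static_cast<intptr_t>(buffer_size));
}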
@@ -757,37 +770,47 @@ double GCTracer::SizeOfHeapObjects() {
}
-#ifdef DEBUG
DisallowAllocationFailure::DisallowAllocationFailure() {
+#ifdef DEBUG
old_state_ = HEAP->disallow_allocation_failure_;
HEAP->disallow_allocation_failure_ = true;
+#endif
}
DisallowAllocationFailure::~DisallowAllocationFailure() {
+#ifdef DEBUG
HEAP->disallow_allocation_failure_ = old_state_;
-}
#endif
+}
#ifdef DEBUG
AssertNoAllocation::AssertNoAllocation() {
- old_state_ = HEAP->allow_allocation(false);
+ Isolate* isolate = ISOLATE;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ old_state_ = isolate->heap()->allow_allocation(false);
+ }
}
AssertNoAllocation::~AssertNoAllocation() {
- HEAP->allow_allocation(old_state_);
+ if (active_) HEAP->allow_allocation(old_state_);
}
DisableAssertNoAllocation::DisableAssertNoAllocation() {
- old_state_ = HEAP->allow_allocation(true);
+ Isolate* isolate = ISOLATE;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ old_state_ = isolate->heap()->allow_allocation(true);
+ }
}
DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- HEAP->allow_allocation(old_state_);
+ if (active_) HEAP->allow_allocation(old_state_);
}
#else
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index c37c084fa9..9ba769212d 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -139,6 +139,7 @@ Heap::Heap()
previous_survival_rate_trend_(Heap::STABLE),
survival_rate_trend_(Heap::STABLE),
max_gc_pause_(0),
+ total_gc_time_ms_(0),
max_alive_after_gc_(0),
min_in_mutator_(kMaxInt),
alive_after_last_gc_(0),
@@ -155,7 +156,8 @@ Heap::Heap()
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
promotion_queue_(this),
configured_(false),
- chunks_queued_for_free_(NULL) {
+ chunks_queued_for_free_(NULL),
+ relocation_mutex_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -173,12 +175,14 @@ Heap::Heap()
}
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
- global_contexts_list_ = NULL;
+ native_contexts_list_ = NULL;
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
+
+ ClearObjectStats(true);
}
@@ -320,48 +324,53 @@ void Heap::ReportStatisticsBeforeGC() {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- isolate_->memory_allocator()->Size(),
- isolate_->memory_allocator()->Available());
- PrintF("New space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- Heap::new_space_.Size(),
- new_space_.Available());
- PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- old_pointer_space_->Size(),
- old_pointer_space_->Available(),
- old_pointer_space_->Waste());
- PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- old_data_space_->Size(),
- old_data_space_->Available(),
- old_data_space_->Waste());
- PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- code_space_->Size(),
- code_space_->Available(),
- code_space_->Waste());
- PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- map_space_->Size(),
- map_space_->Available(),
- map_space_->Waste());
- PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- cell_space_->Size(),
- cell_space_->Available(),
- cell_space_->Waste());
- PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- lo_space_->Size(),
- lo_space_->Available());
+ PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB\n",
+ isolate_->memory_allocator()->Size() / KB,
+ isolate_->memory_allocator()->Available() / KB);
+ PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ new_space_.Size() / KB,
+ new_space_.Available() / KB,
+ new_space_.CommittedMemory() / KB);
+ PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ old_pointer_space_->SizeOfObjects() / KB,
+ old_pointer_space_->Available() / KB,
+ old_pointer_space_->CommittedMemory() / KB);
+ PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ old_data_space_->SizeOfObjects() / KB,
+ old_data_space_->Available() / KB,
+ old_data_space_->CommittedMemory() / KB);
+ PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ code_space_->SizeOfObjects() / KB,
+ code_space_->Available() / KB,
+ code_space_->CommittedMemory() / KB);
+ PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ map_space_->SizeOfObjects() / KB,
+ map_space_->Available() / KB,
+ map_space_->CommittedMemory() / KB);
+ PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ cell_space_->SizeOfObjects() / KB,
+ cell_space_->Available() / KB,
+ cell_space_->CommittedMemory() / KB);
+ PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ lo_space_->SizeOfObjects() / KB,
+ lo_space_->Available() / KB,
+ lo_space_->CommittedMemory() / KB);
+ PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_);
}
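Throughout this file the patch also switches tracing from PrintF to PrintPID, which prefixes each line with the process id so interleaved --trace-gc output from several processes stays attributable, and reports sizes in KB instead of raw bytes. A rough standalone model of such a helper (POSIX getpid() assumed; V8's real implementation lives elsewhere in the tree):

#include <cstdarg>
#include <cstdio>
#include <unistd.h>  // getpid(); assumes a POSIX platform

// printf with a "[pid] " prefix, so concurrent GC traces can be told apart.
static void PrintPIDSketch(const char* format, ...) {
  std::printf("[%d] ", static_cast<int>(getpid()));
  va_list args;
  va_start(args, format);
  std::vprintf(format, args);
  va_end(args);
}

int main() {
  // Prints e.g. "[12345] New space, used:    512 KB".
  PrintPIDSketch("New space, used: %6d KB\n", 512);
}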
@@ -440,6 +449,56 @@ void Heap::GarbageCollectionEpilogue() {
symbol_table()->Capacity());
isolate_->counters()->number_of_symbols()->Set(
symbol_table()->NumberOfElements());
+
+ if (CommittedMemory() > 0) {
+ isolate_->counters()->external_fragmentation_total()->AddSample(
+ static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+
+ isolate_->counters()->heap_fraction_map_space()->AddSample(
+ static_cast<int>(
+ (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_cell_space()->AddSample(
+ static_cast<int>(
+ (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+
+ isolate_->counters()->heap_sample_total_committed()->AddSample(
+ static_cast<int>(CommittedMemory() / KB));
+ isolate_->counters()->heap_sample_total_used()->AddSample(
+ static_cast<int>(SizeOfObjects() / KB));
+ isolate_->counters()->heap_sample_map_space_committed()->AddSample(
+ static_cast<int>(map_space()->CommittedMemory() / KB));
+ isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
+ static_cast<int>(cell_space()->CommittedMemory() / KB));
+ }
+
+#define UPDATE_COUNTERS_FOR_SPACE(space) \
+ isolate_->counters()->space##_bytes_available()->Set( \
+ static_cast<int>(space()->Available())); \
+ isolate_->counters()->space##_bytes_committed()->Set( \
+ static_cast<int>(space()->CommittedMemory())); \
+ isolate_->counters()->space##_bytes_used()->Set( \
+ static_cast<int>(space()->SizeOfObjects()));
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
+ if (space()->CommittedMemory() > 0) { \
+ isolate_->counters()->external_fragmentation_##space()->AddSample( \
+ static_cast<int>(100 - \
+ (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
+ }
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+ UPDATE_COUNTERS_FOR_SPACE(space) \
+ UPDATE_FRAGMENTATION_FOR_SPACE(space)
+
+ UPDATE_COUNTERS_FOR_SPACE(new_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
+#undef UPDATE_COUNTERS_FOR_SPACE
+#undef UPDATE_FRAGMENTATION_FOR_SPACE
+#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
+
#if defined(DEBUG)
ReportStatisticsAfterGC();
#endif // DEBUG
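The new histogram samples boil down to one formula: external fragmentation is the share of committed memory not occupied by live objects, in percent, guarded against a zero denominator exactly as UPDATE_FRAGMENTATION_FOR_SPACE does. Worked in isolation:

#include <cstdio>

// Percent of committed memory not occupied by live objects; this is the
// quantity the external_fragmentation_* histograms above sample.
static int ExternalFragmentationPercent(long used, long committed) {
  if (committed <= 0) return 0;  // same guard as the macro above
  return static_cast<int>(100 - (used * 100.0) / committed);
}

int main() {
  // 3 MB of objects in 4 MB of committed pages -> 25% fragmentation.
  std::printf("%d%%\n", ExternalFragmentationPercent(3L << 20, 4L << 20));
}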
@@ -696,7 +755,7 @@ void Heap::EnsureFromSpaceIsCommitted() {
void Heap::ClearJSFunctionResultCaches() {
if (isolate_->bootstrapper()->IsActive()) return;
- Object* context = global_contexts_list_;
+ Object* context = native_contexts_list_;
while (!context->IsUndefined()) {
// Get the caches for this context. GC can happen when the context
// is not fully initialized, so the caches can be undefined.
@@ -723,7 +782,7 @@ void Heap::ClearNormalizedMapCaches() {
return;
}
- Object* context = global_contexts_list_;
+ Object* context = native_contexts_list_;
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
@@ -852,8 +911,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// have to limit maximal capacity of the young generation.
new_space_high_promotion_mode_active_ = true;
if (FLAG_trace_gc) {
- PrintF("Limited new space size due to high promotion rate: %d MB\n",
- new_space_.InitialCapacity() / MB);
+ PrintPID("Limited new space size due to high promotion rate: %d MB\n",
+ new_space_.InitialCapacity() / MB);
}
} else if (new_space_high_promotion_mode_active_ &&
IsStableOrDecreasingSurvivalTrend() &&
@@ -863,8 +922,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// to grow again.
new_space_high_promotion_mode_active_ = false;
if (FLAG_trace_gc) {
- PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
- new_space_.MaximumCapacity() / MB);
+ PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
+ new_space_.MaximumCapacity() / MB);
}
}
@@ -943,7 +1002,8 @@ void Heap::MarkCompactPrologue() {
isolate_->keyed_lookup_cache()->Clear();
isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
- StringSplitCache::Clear(string_split_cache());
+ RegExpResultsCache::Clear(string_split_cache());
+ RegExpResultsCache::Clear(regexp_multiple_cache());
isolate_->compilation_cache()->MarkCompactPrologue();
@@ -1159,6 +1219,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
+ RelocationLock relocation_lock(this);
#ifdef DEBUG
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
@@ -1225,18 +1286,18 @@ void Heap::Scavenge() {
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* cell = cell_iterator.Next();
- cell != NULL; cell = cell_iterator.Next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+ for (HeapObject* heap_object = cell_iterator.Next();
+ heap_object != NULL;
+ heap_object = cell_iterator.Next()) {
+ if (heap_object->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
+ Address value_address = cell->ValueAddress();
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
}
}
- // Scavenge object reachable from the global contexts list directly.
- scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
+  // Scavenge objects reachable from the native contexts list directly.
+ scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
@@ -1396,7 +1457,7 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Object* undefined = undefined_value();
Object* head = undefined;
Context* tail = NULL;
- Object* candidate = global_contexts_list_;
+ Object* candidate = native_contexts_list_;
// We don't record weak slots during marking or scavenges.
// Instead we do it once when we complete mark-compact cycle.
@@ -1469,7 +1530,7 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
}
// Update the head of the list of contexts.
- global_contexts_list_ = head;
+ native_contexts_list_ = head;
}
@@ -1595,7 +1656,7 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
- table_.Register(kVisitGlobalContext,
+ table_.Register(kVisitNativeContext,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
template VisitSpecialized<Context::kSize>);
@@ -1681,7 +1742,7 @@ class ScavengingVisitor : public StaticVisitorBase {
RecordCopiedObject(heap, target);
HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Isolate* isolate = heap->isolate();
- if (isolate->logger()->is_logging() ||
+ if (isolate->logger()->is_logging_code_events() ||
CpuProfiler::is_profiling(isolate)) {
if (target->IsSharedFunctionInfo()) {
PROFILE(isolate, SharedFunctionInfoMoveEvent(
@@ -1989,9 +2050,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRawMap();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
// Map::cast cannot be used due to uninitialized map field.
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
@@ -2004,6 +2064,7 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
+ reinterpret_cast<Map*>(result)->set_bit_field3(0);
return result;
}
@@ -2012,9 +2073,8 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRawMap();
+ if (!maybe_result->To(&result)) return maybe_result;
Map* map = reinterpret_cast<Map*>(result);
map->set_map_no_write_barrier(meta_map());
@@ -2026,12 +2086,13 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_instance_size(instance_size);
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
- map->init_instance_descriptors();
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
- map->init_prototype_transitions(undefined_value());
+ map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
+ int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache);
+ map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
// If the map object is aligned fill the padding area with Smi 0 objects.
@@ -2076,8 +2137,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
{ MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
if (!maybe_info->To(&info)) return maybe_info;
}
- info->set_ic_total_count(0);
- info->set_ic_with_type_info_count(0);
+ info->initialize_storage();
info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
return info;
@@ -2165,17 +2225,14 @@ bool Heap::CreateInitialMaps() {
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Fix the instance_descriptors for the existing maps.
- meta_map()->init_instance_descriptors();
meta_map()->set_code_cache(empty_fixed_array());
- meta_map()->init_prototype_transitions(undefined_value());
+ meta_map()->init_back_pointer(undefined_value());
- fixed_array_map()->init_instance_descriptors();
fixed_array_map()->set_code_cache(empty_fixed_array());
- fixed_array_map()->init_prototype_transitions(undefined_value());
+ fixed_array_map()->init_back_pointer(undefined_value());
- oddball_map()->init_instance_descriptors();
oddball_map()->set_code_cache(empty_fixed_array());
- oddball_map()->init_prototype_transitions(undefined_value());
+ oddball_map()->init_back_pointer(undefined_value());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@@ -2383,9 +2440,16 @@ bool Heap::CreateInitialMaps() {
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
- Map* global_context_map = Map::cast(obj);
- global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
- set_global_context_map(global_context_map);
+ set_global_context_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
+ AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ Map* native_context_map = Map::cast(obj);
+ native_context_map->set_dictionary_map(true);
+ native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
+ set_native_context_map(native_context_map);
{ MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
SharedFunctionInfo::kAlignedSize);
@@ -2698,12 +2762,18 @@ bool Heap::CreateInitialObjects() {
set_single_character_string_cache(FixedArray::cast(obj));
// Allocate cache for string split.
- { MaybeObject* maybe_obj =
- AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
+ { MaybeObject* maybe_obj = AllocateFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_string_split_cache(FixedArray::cast(obj));
+ { MaybeObject* maybe_obj = AllocateFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_regexp_multiple_cache(FixedArray::cast(obj));
+
// Allocate cache for external strings pointing to native source code.
{ MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2729,70 +2799,98 @@ bool Heap::CreateInitialObjects() {
}
-Object* StringSplitCache::Lookup(
- FixedArray* cache, String* string, String* pattern) {
- if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
- uint32_t hash = string->Hash();
- uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
+Object* RegExpResultsCache::Lookup(Heap* heap,
+ String* key_string,
+ Object* key_pattern,
+ ResultsCacheType type) {
+ FixedArray* cache;
+ if (!key_string->IsSymbol()) return Smi::FromInt(0);
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ ASSERT(key_pattern->IsString());
+ if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
+ cache = heap->string_split_cache();
+ } else {
+ ASSERT(type == REGEXP_MULTIPLE_INDICES);
+ ASSERT(key_pattern->IsFixedArray());
+ cache = heap->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string->Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == string &&
- cache->get(index + kPatternOffset) == pattern) {
+ if (cache->get(index + kStringOffset) == key_string &&
+ cache->get(index + kPatternOffset) == key_pattern) {
return cache->get(index + kArrayOffset);
}
- index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
- if (cache->get(index + kStringOffset) == string &&
- cache->get(index + kPatternOffset) == pattern) {
+ index =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache->get(index + kStringOffset) == key_string &&
+ cache->get(index + kPatternOffset) == key_pattern) {
return cache->get(index + kArrayOffset);
}
return Smi::FromInt(0);
}
-void StringSplitCache::Enter(Heap* heap,
- FixedArray* cache,
- String* string,
- String* pattern,
- FixedArray* array) {
- if (!string->IsSymbol() || !pattern->IsSymbol()) return;
- uint32_t hash = string->Hash();
- uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
+void RegExpResultsCache::Enter(Heap* heap,
+ String* key_string,
+ Object* key_pattern,
+ FixedArray* value_array,
+ ResultsCacheType type) {
+ FixedArray* cache;
+ if (!key_string->IsSymbol()) return;
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ ASSERT(key_pattern->IsString());
+ if (!key_pattern->IsSymbol()) return;
+ cache = heap->string_split_cache();
+ } else {
+ ASSERT(type == REGEXP_MULTIPLE_INDICES);
+ ASSERT(key_pattern->IsFixedArray());
+ cache = heap->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string->Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index + kStringOffset, string);
- cache->set(index + kPatternOffset, pattern);
- cache->set(index + kArrayOffset, array);
+ cache->set(index + kStringOffset, key_string);
+ cache->set(index + kPatternOffset, key_pattern);
+ cache->set(index + kArrayOffset, value_array);
} else {
uint32_t index2 =
- ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index2 + kStringOffset, string);
- cache->set(index2 + kPatternOffset, pattern);
- cache->set(index2 + kArrayOffset, array);
+ cache->set(index2 + kStringOffset, key_string);
+ cache->set(index2 + kPatternOffset, key_pattern);
+ cache->set(index2 + kArrayOffset, value_array);
} else {
cache->set(index2 + kStringOffset, Smi::FromInt(0));
cache->set(index2 + kPatternOffset, Smi::FromInt(0));
cache->set(index2 + kArrayOffset, Smi::FromInt(0));
- cache->set(index + kStringOffset, string);
- cache->set(index + kPatternOffset, pattern);
- cache->set(index + kArrayOffset, array);
+ cache->set(index + kStringOffset, key_string);
+ cache->set(index + kPatternOffset, key_pattern);
+ cache->set(index + kArrayOffset, value_array);
}
}
- if (array->length() < 100) { // Limit how many new symbols we want to make.
- for (int i = 0; i < array->length(); i++) {
- String* str = String::cast(array->get(i));
+ // If the array is a reasonably short list of substrings, convert it into a
+ // list of symbols.
+ if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
+ for (int i = 0; i < value_array->length(); i++) {
+ String* str = String::cast(value_array->get(i));
Object* symbol;
MaybeObject* maybe_symbol = heap->LookupSymbol(str);
if (maybe_symbol->ToObject(&symbol)) {
- array->set(i, symbol);
+ value_array->set(i, symbol);
}
}
}
- array->set_map_no_write_barrier(heap->fixed_cow_array_map());
+ // Convert backing store to a copy-on-write array.
+ value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
}
-void StringSplitCache::Clear(FixedArray* cache) {
- for (int i = 0; i < kStringSplitCacheSize; i++) {
+void RegExpResultsCache::Clear(FixedArray* cache) {
+ for (int i = 0; i < kRegExpResultsCacheSize; i++) {
cache->set(i, Smi::FromInt(0));
}
}
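The renamed cache keeps StringSplitCache's layout: a flat FixedArray treated as a two-way set-associative table. Each entry is four slots wide, the key's hash selects a primary entry, and exactly one overflow entry (the next group, modulo table size) is probed before Enter evicts. The index arithmetic, reproduced standalone with the same constants:

#include <cstdint>
#include <cstdio>

const uint32_t kCacheSize = 0x100;         // kRegExpResultsCacheSize
const uint32_t kEntriesPerCacheEntry = 4;  // kArrayEntriesPerCacheEntry

static uint32_t PrimaryIndex(uint32_t hash) {
  // Mask into the table, then round down to a 4-slot entry boundary.
  return (hash & (kCacheSize - 1)) & ~(kEntriesPerCacheEntry - 1);
}

static uint32_t SecondaryIndex(uint32_t primary) {
  // The single fallback probe used by both Lookup and Enter.
  return (primary + kEntriesPerCacheEntry) & (kCacheSize - 1);
}

int main() {
  uint32_t hash = 0x5F3759DFu;  // any string hash
  uint32_t p = PrimaryIndex(hash);
  std::printf("probe entry %u, then %u\n", p, SecondaryIndex(p));
}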
@@ -2822,7 +2920,7 @@ void Heap::AllocateFullSizeNumberStringCache() {
// The idea is to have a small number string cache in the snapshot to keep
// boot-time memory usage down. If we expand the number string cache already
// while creating the snapshot then that didn't work out.
- ASSERT(!Serializer::enabled());
+ ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
MaybeObject* maybe_obj =
AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
Object* new_cache;
@@ -3010,6 +3108,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
+ share->ClearOptimizedCodeMap();
share->set_scope_info(ScopeInfo::Empty());
Code* construct_stub =
isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
@@ -3663,30 +3762,27 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// from the function's context, since the function can be from a
// different context.
JSFunction* object_function =
- function->context()->global_context()->object_function();
+ function->context()->native_context()->object_function();
// Each function prototype gets a copy of the object function map.
// This avoids unwanted sharing of maps between prototypes of different
// constructors.
Map* new_map;
ASSERT(object_function->has_initial_map());
- { MaybeObject* maybe_map =
- object_function->initial_map()->CopyDropTransitions(
- DescriptorArray::MAY_BE_SHARED);
- if (!maybe_map->To<Map>(&new_map)) return maybe_map;
- }
+ MaybeObject* maybe_map = object_function->initial_map()->Copy();
+ if (!maybe_map->To(&new_map)) return maybe_map;
+
Object* prototype;
- { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
+ MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
+ if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
+
// When creating the prototype for the function we must set its
// constructor to the function.
- Object* result;
- { MaybeObject* maybe_result =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
- constructor_symbol(), function, DONT_ENUM);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_failure =
+ JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
+ constructor_symbol(), function, DONT_ENUM);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
return prototype;
}
@@ -3716,12 +3812,12 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
!JSFunction::cast(callee)->shared()->is_classic_mode();
if (strict_mode_callee) {
boilerplate =
- isolate()->context()->global_context()->
+ isolate()->context()->native_context()->
strict_mode_arguments_boilerplate();
arguments_object_size = kArgumentsObjectSizeStrict;
} else {
boilerplate =
- isolate()->context()->global_context()->arguments_boilerplate();
+ isolate()->context()->native_context()->arguments_boilerplate();
arguments_object_size = kArgumentsObjectSize;
}
@@ -3787,21 +3883,18 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// suggested by the function.
int instance_size = fun->shared()->CalculateInstanceSize();
int in_object_properties = fun->shared()->CalculateInObjectProperties();
- Object* map_obj;
- { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
- if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
- }
+ Map* map;
+ MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
+ if (!maybe_map->To(&map)) return maybe_map;
// Fetch or allocate prototype.
Object* prototype;
if (fun->has_instance_prototype()) {
prototype = fun->instance_prototype();
} else {
- { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
+ MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
+ if (!maybe_prototype->To(&prototype)) return maybe_prototype;
}
- Map* map = Map::cast(map_obj);
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
@@ -3820,22 +3913,17 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
fun->shared()->ForbidInlineConstructor();
} else {
DescriptorArray* descriptors;
- { MaybeObject* maybe_descriptors_obj =
- DescriptorArray::Allocate(count, DescriptorArray::MAY_BE_SHARED);
- if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
- return maybe_descriptors_obj;
- }
- }
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
+ if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
+
DescriptorArray::WhitenessWitness witness(descriptors);
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsSymbol());
- FieldDescriptor field(name, i, NONE);
- field.SetEnumerationIndex(i);
+ FieldDescriptor field(name, i, NONE, i + 1);
descriptors->Set(i, &field, witness);
}
- descriptors->SetNextEnumerationIndex(count);
- descriptors->SortUnchecked(witness);
+ descriptors->Sort();
// The descriptors may contain duplicates because the compiler does not
// guarantee the uniqueness of property names (it would have required
@@ -3844,7 +3932,8 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
if (HasDuplicates(descriptors)) {
fun->shared()->ForbidInlineConstructor();
} else {
- map->set_instance_descriptors(descriptors);
+ MaybeObject* maybe_failure = map->InitializeDescriptors(descriptors);
+ if (maybe_failure->IsFailure()) return maybe_failure;
map->set_pre_allocated_property_fields(count);
map->set_unused_property_fields(in_object_properties - count);
}
@@ -3951,13 +4040,18 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
}
-MaybeObject* Heap::AllocateJSModule() {
+MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
if (!maybe_map->To(&map)) return maybe_map;
// Allocate the object based on the map.
- return AllocateJSObjectFromMap(map, TENURED);
+ JSModule* module;
+ MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
+ if (!maybe_module->To(&module)) return maybe_module;
+ module->set_context(context);
+ module->set_scope_info(scope_info);
+ return module;
}
@@ -4071,6 +4165,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(constructor->has_initial_map());
Map* map = constructor->initial_map();
+ ASSERT(map->is_dictionary_map());
// Make sure no field properties are described in the initial map.
// This guarantees us that normalizing the properties does not
@@ -4088,13 +4183,11 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
// Allocate a dictionary object for backing storage.
- Object* obj;
- { MaybeObject* maybe_obj =
- StringDictionary::Allocate(
- map->NumberOfDescribedProperties() * 2 + initial_size);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- StringDictionary* dictionary = StringDictionary::cast(obj);
+ StringDictionary* dictionary;
+ MaybeObject* maybe_dictionary =
+ StringDictionary::Allocate(
+ map->NumberOfDescribedProperties() * 2 + initial_size);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
@@ -4102,36 +4195,32 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d =
- PropertyDetails(details.attributes(), CALLBACKS, details.index());
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ CALLBACKS,
+ details.descriptor_index());
Object* value = descs->GetCallbacksObject(i);
- { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
+ MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
+ if (!maybe_value->ToObject(&value)) return maybe_value;
- Object* result;
- { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = StringDictionary::cast(result);
+ MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
+ if (!maybe_added->To(&dictionary)) return maybe_added;
}
// Allocate the global object and initialize it with the backing store.
- { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- JSObject* global = JSObject::cast(obj);
+ JSObject* global;
+ MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
+ if (!maybe_global->To(&global)) return maybe_global;
+
InitializeJSObjectFromMap(global, dictionary, map);
// Create a new map for the global object.
- { MaybeObject* maybe_obj = map->CopyDropDescriptors();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
+ Map* new_map;
+ MaybeObject* maybe_map = map->CopyDropDescriptors();
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ new_map->set_dictionary_map(true);
// Set up the global object as a normalized object.
global->set_map(new_map);
- global->map()->clear_instance_descriptors();
global->set_properties(dictionary);
// Make sure result is a global object with properties in dictionary.
@@ -4260,7 +4349,7 @@ MaybeObject* Heap::ReinitializeJSReceiver(
map->set_function_with_prototype(true);
InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
JSFunction::cast(object)->set_context(
- isolate()->context()->global_context());
+ isolate()->context()->native_context());
}
// Put in filler if the new object is smaller than the old.
@@ -4555,10 +4644,10 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
MaybeObject* Heap::AllocateJSArray(
ElementsKind elements_kind,
PretenureFlag pretenure) {
- Context* global_context = isolate()->context()->global_context();
- JSFunction* array_function = global_context->array_function();
+ Context* native_context = isolate()->context()->native_context();
+ JSFunction* array_function = native_context->array_function();
Map* map = array_function->initial_map();
- Object* maybe_map_array = global_context->js_array_maps();
+ Object* maybe_map_array = native_context->js_array_maps();
if (!maybe_map_array->IsUndefined()) {
Object* maybe_transitioned_map =
FixedArray::cast(maybe_map_array)->get(elements_kind);
@@ -4841,33 +4930,50 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
}
-MaybeObject* Heap::AllocateGlobalContext() {
+MaybeObject* Heap::AllocateNativeContext() {
Object* result;
{ MaybeObject* maybe_result =
- AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
+ AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(global_context_map());
+ context->set_map_no_write_barrier(native_context_map());
context->set_js_array_maps(undefined_value());
- ASSERT(context->IsGlobalContext());
+ ASSERT(context->IsNativeContext());
ASSERT(result->IsContext());
return result;
}
-MaybeObject* Heap::AllocateModuleContext(Context* previous,
+MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
ScopeInfo* scope_info) {
Object* result;
{ MaybeObject* maybe_result =
- AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
+ AllocateFixedArray(scope_info->ContextLength(), TENURED);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(module_context_map());
- context->set_previous(previous);
+ context->set_map_no_write_barrier(global_context_map());
+ context->set_closure(function);
+ context->set_previous(function->context());
context->set_extension(scope_info);
- context->set_global(previous->global());
+ context->set_global_object(function->context()->global_object());
+ ASSERT(context->IsGlobalContext());
+ ASSERT(result->IsContext());
+ return context;
+}
+
+
+MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
+ Object* result;
+ { MaybeObject* maybe_result =
+ AllocateFixedArray(scope_info->ContextLength(), TENURED);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Context* context = reinterpret_cast<Context*>(result);
+ context->set_map_no_write_barrier(module_context_map());
+ // Context links will be set later.
+ context->set_extension(Smi::FromInt(0));
return context;
}
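All three allocators follow the file's MaybeObject protocol: an allocation may return a retry-after-GC failure instead of an object, so every call site must unwrap before use and propagate the failure otherwise. A toy model of that protocol (deliberately not V8's real types):

#include <cstdio>

// Stand-in for MaybeObject*: either a payload or a failure to propagate.
struct MaybeResult {
  void* payload;  // null means "allocation failed, retry after GC"
  bool To(void** out) {
    if (payload == nullptr) return false;
    *out = payload;
    return true;
  }
};

static MaybeResult AllocateSketch(bool succeed) {
  static int storage;
  return MaybeResult{succeed ? static_cast<void*>(&storage) : nullptr};
}

static MaybeResult CallerSketch() {
  void* context;
  MaybeResult maybe = AllocateSketch(true);
  // The idiom used throughout the hunks above: on failure, return the
  // failure object itself so the caller can retry after a GC.
  if (!maybe.To(&context)) return maybe;
  std::printf("context allocated\n");
  return maybe;
}

int main() { CallerSketch(); }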
@@ -4882,8 +4988,8 @@ MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
context->set_map_no_write_barrier(function_context_map());
context->set_closure(function);
context->set_previous(function->context());
- context->set_extension(NULL);
- context->set_global(function->context()->global());
+ context->set_extension(Smi::FromInt(0));
+ context->set_global_object(function->context()->global_object());
return context;
}
@@ -4903,7 +5009,7 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
context->set_closure(function);
context->set_previous(previous);
context->set_extension(name);
- context->set_global(previous->global());
+ context->set_global_object(previous->global_object());
context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
return context;
}
@@ -4921,7 +5027,7 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
context->set_closure(function);
context->set_previous(previous);
context->set_extension(extension);
- context->set_global(previous->global());
+ context->set_global_object(previous->global_object());
return context;
}
@@ -4939,7 +5045,7 @@ MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
context->set_closure(function);
context->set_previous(previous);
context->set_extension(scope_info);
- context->set_global(previous->global());
+ context->set_global_object(previous->global_object());
return context;
}
@@ -5302,37 +5408,9 @@ void Heap::Verify() {
cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
-
- VerifyNoAccessorPairSharing();
-}
-
-
-void Heap::VerifyNoAccessorPairSharing() {
- // Verification is done in 2 phases: First we mark all AccessorPairs, checking
- // that we mark only unmarked pairs, then we clear all marks, restoring the
- // initial state. We use the Smi tag of the AccessorPair's getter as the
- // marking bit, because we can never see a Smi as the getter.
- for (int phase = 0; phase < 2; phase++) {
- HeapObjectIterator iter(map_space());
- for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) {
- if (obj->IsMap()) {
- DescriptorArray* descs = Map::cast(obj)->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetType(i) == CALLBACKS &&
- descs->GetValue(i)->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i));
- uintptr_t before = reinterpret_cast<intptr_t>(accessors->getter());
- uintptr_t after = (phase == 0) ?
- ((before & ~kSmiTagMask) | kSmiTag) :
- ((before & ~kHeapObjectTag) | kHeapObjectTag);
- CHECK(before != after);
- accessors->set_getter(reinterpret_cast<Object*>(after));
- }
- }
- }
- }
- }
}
+
+
#endif // DEBUG
@@ -5689,6 +5767,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over local handles in handle scopes.
isolate_->handle_scope_implementer()->Iterate(v);
+ isolate_->IterateDeferredHandles(v);
v->Synchronize(VisitorSynchronization::kHandleScope);
// Iterate over the builtin code objects and code stubs in the
@@ -5751,8 +5830,8 @@ bool Heap::ConfigureHeap(int max_semispace_size,
if (max_semispace_size < Page::kPageSize) {
max_semispace_size = Page::kPageSize;
if (FLAG_trace_gc) {
- PrintF("Max semispace size cannot be less than %dkbytes\n",
- Page::kPageSize >> 10);
+ PrintPID("Max semispace size cannot be less than %dkbytes\n",
+ Page::kPageSize >> 10);
}
}
max_semispace_size_ = max_semispace_size;
@@ -5767,8 +5846,8 @@ bool Heap::ConfigureHeap(int max_semispace_size,
if (max_semispace_size_ > reserved_semispace_size_) {
max_semispace_size_ = reserved_semispace_size_;
if (FLAG_trace_gc) {
- PrintF("Max semispace size cannot be more than %dkbytes\n",
- reserved_semispace_size_ >> 10);
+ PrintPID("Max semispace size cannot be more than %dkbytes\n",
+ reserved_semispace_size_ >> 10);
}
}
} else {
@@ -5793,7 +5872,7 @@ bool Heap::ConfigureHeap(int max_semispace_size,
max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
- external_allocation_limit_ = 10 * max_semispace_size_;
+ external_allocation_limit_ = 16 * max_semispace_size_;
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
@@ -6143,7 +6222,7 @@ bool Heap::SetUp(bool create_heap_objects) {
// Create initial objects
if (!CreateInitialObjects()) return false;
- global_contexts_list_ = undefined_value();
+ native_contexts_list_ = undefined_value();
}
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
@@ -6151,6 +6230,8 @@ bool Heap::SetUp(bool create_heap_objects) {
store_buffer()->SetUp();
+ if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+
return true;
}
@@ -6183,6 +6264,7 @@ void Heap::TearDown() {
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
PrintF("max_gc_pause=%d ", get_max_gc_pause());
+ PrintF("total_gc_time=%d ", total_gc_time_ms_);
PrintF("min_in_mutator=%d ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
get_max_alive_after_gc());
@@ -6236,6 +6318,8 @@ void Heap::TearDown() {
isolate_->memory_allocator()->TearDown();
+ delete relocation_mutex_;
+
#ifdef DEBUG
delete debug_utils_;
debug_utils_ = NULL;
@@ -6653,7 +6737,7 @@ void PathTracer::TracePathFrom(Object** root) {
ASSERT((search_target_ == kAnyGlobalObject) ||
search_target_->IsHeapObject());
found_target_in_trace_ = false;
- object_stack_.Clear();
+ Reset();
MarkVisitor mark_visitor(this);
MarkRecursively(root, &mark_visitor);
@@ -6665,8 +6749,8 @@ void PathTracer::TracePathFrom(Object** root) {
}
-static bool SafeIsGlobalContext(HeapObject* obj) {
- return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
+static bool SafeIsNativeContext(HeapObject* obj) {
+ return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
}
@@ -6688,7 +6772,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
return;
}
- bool is_global_context = SafeIsGlobalContext(obj);
+ bool is_native_context = SafeIsNativeContext(obj);
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6698,7 +6782,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
// Scan the object body.
- if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
+ if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
// This is specialized to scan Context's properly.
Object** start = reinterpret_cast<Object**>(obj->address() +
Context::kHeaderSize);
@@ -6757,11 +6841,7 @@ void PathTracer::ProcessResults() {
for (int i = 0; i < object_stack_.length(); i++) {
if (i > 0) PrintF("\n |\n |\n V\n\n");
Object* obj = object_stack_[i];
-#ifdef OBJECT_PRINT
obj->Print();
-#else
- obj->ShortPrint();
-#endif
}
PrintF("=====================================\n");
}
@@ -6770,6 +6850,15 @@ void PathTracer::ProcessResults() {
#ifdef DEBUG
+// Triggers a depth-first traversal of reachable objects from one
+// given root object and finds a path to a specific heap object and
+// prints it.
+void Heap::TracePathToObjectFrom(Object* target, Object* root) {
+ PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
+ tracer.VisitPointer(&root);
+}
+
+
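The new overload narrows the search to a single root instead of iterating all roots, which helps when you already suspect which object is keeping the target alive. A hypothetical debug-build call site (the function and variable names here are placeholders, not identifiers from the patch; assumes V8-internal headers are visible):

#ifdef DEBUG
// Prints the first path found from suspect_root to leaked, if any.
void DumpRetainerPath(v8::internal::Heap* heap,
                      v8::internal::Object* leaked,
                      v8::internal::Object* suspect_root) {
  heap->TracePathToObjectFrom(leaked, suspect_root);
}
#endif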
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
@@ -6857,6 +6946,7 @@ GCTracer::~GCTracer() {
// Update cumulative GC statistics if required.
if (FLAG_print_cumulative_gc_stat) {
+ heap_->total_gc_time_ms_ += time;
heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
heap_->alive_after_last_gc_);
@@ -6864,9 +6954,13 @@ GCTracer::~GCTracer() {
heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
static_cast<int>(spent_in_mutator_));
}
+ } else if (FLAG_trace_gc_verbose) {
+ heap_->total_gc_time_ms_ += time;
}
- PrintF("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+ if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
+
+ PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
if (!FLAG_trace_gc_nvp) {
int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
@@ -6908,9 +7002,7 @@ GCTracer::~GCTracer() {
PrintF(".\n");
} else {
PrintF("pause=%d ", time);
- PrintF("mutator=%d ",
- static_cast<int>(spent_in_mutator_));
-
+ PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
PrintF("gc=");
switch (collector_) {
case SCAVENGER:
@@ -7176,4 +7268,63 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
+
+void Heap::ClearObjectStats(bool clear_last_time_stats) {
+ memset(object_counts_, 0, sizeof(object_counts_));
+ memset(object_sizes_, 0, sizeof(object_sizes_));
+ if (clear_last_time_stats) {
+ memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
+ memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
+ }
+}
+
+
+static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
+
+
+void Heap::CheckpointObjectStats() {
+ ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
+ Counters* counters = isolate()->counters();
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ counters->count_of_##name()->Increment( \
+ static_cast<int>(object_counts_[name])); \
+ counters->count_of_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[name])); \
+ counters->size_of_##name()->Increment( \
+ static_cast<int>(object_sizes_[name])); \
+ counters->size_of_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[name]));
+ INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+ int index;
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
+ counters->count_of_CODE_TYPE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_TYPE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_TYPE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_TYPE_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
+ counters->count_of_FIXED_ARRAY_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_FIXED_ARRAY_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_FIXED_ARRAY_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_FIXED_ARRAY_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+
+ memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+ memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+ ClearObjectStats();
+}
+
} } // namespace v8::internal
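CheckpointObjectStats reconciles the per-GC tallies with long-lived counters without ever resetting the counters: each checkpoint increments by the new tally and decrements by whatever was reported last time, then snapshots current into last and clears current. The pattern in isolation:

#include <cassert>
#include <cstring>

const int kKinds = 3;
long counter[kKinds];    // externally visible running counters
long current[kKinds];    // tallied since the last checkpoint
long last_time[kKinds];  // what was reported at the previous checkpoint

// Same add-new/subtract-old dance as ADJUST_LAST_TIME_OBJECT_COUNT above.
static void Checkpoint() {
  for (int i = 0; i < kKinds; i++) {
    counter[i] += current[i] - last_time[i];
  }
  std::memcpy(last_time, current, sizeof(current));
  std::memset(current, 0, sizeof(current));  // the ClearObjectStats() step
}

int main() {
  current[0] = 10;
  Checkpoint();
  assert(counter[0] == 10);
  current[0] = 7;  // the next GC cycle tallies from scratch
  Checkpoint();
  assert(counter[0] == 7);  // counter tracks the newest tally
}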
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index f80f2a7804..cb167d30aa 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -64,7 +64,7 @@ namespace internal {
V(Map, ascii_symbol_map, AsciiSymbolMap) \
V(Map, ascii_string_map, AsciiStringMap) \
V(Map, heap_number_map, HeapNumberMap) \
- V(Map, global_context_map, GlobalContextMap) \
+ V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, code_map, CodeMap) \
V(Map, scope_info_map, ScopeInfoMap) \
@@ -87,6 +87,7 @@ namespace internal {
V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
+ V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
V(Object, termination_exception, TerminationException) \
V(Smi, hash_seed, HashSeed) \
V(Map, string_map, StringMap) \
@@ -130,6 +131,7 @@ namespace internal {
V(Map, with_context_map, WithContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, module_context_map, ModuleContextMap) \
+ V(Map, global_context_map, GlobalContextMap) \
V(Map, oddball_map, OddballMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
@@ -150,7 +152,9 @@ namespace internal {
V(Smi, real_stack_limit, RealStackLimit) \
V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -531,7 +535,8 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSObject(
JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateJSModule();
+ MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
+ ScopeInfo* scope_info);
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
@@ -821,13 +826,16 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateHashTable(
int length, PretenureFlag pretenure = NOT_TENURED);
- // Allocate a global (but otherwise uninitialized) context.
- MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
+ // Allocate a native (but otherwise uninitialized) context.
+ MUST_USE_RESULT MaybeObject* AllocateNativeContext();
- // Allocate a module context.
- MUST_USE_RESULT MaybeObject* AllocateModuleContext(Context* previous,
+ // Allocate a global context.
+ MUST_USE_RESULT MaybeObject* AllocateGlobalContext(JSFunction* function,
ScopeInfo* scope_info);
+ // Allocate a module context.
+ MUST_USE_RESULT MaybeObject* AllocateModuleContext(ScopeInfo* scope_info);
+
// Allocate a function context.
MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
JSFunction* function);
@@ -1101,8 +1109,8 @@ class Heap {
#endif
void AddGCPrologueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCPrologueCallback(GCEpilogueCallback callback);
+ GCPrologueCallback callback, GCType gc_type_filter);
+ void RemoveGCPrologueCallback(GCPrologueCallback callback);
void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
@@ -1149,13 +1157,13 @@ class Heap {
// not match the empty string.
String* hidden_symbol() { return hidden_symbol_; }
- void set_global_contexts_list(Object* object) {
- global_contexts_list_ = object;
+ void set_native_contexts_list(Object* object) {
+ native_contexts_list_ = object;
}
- Object* global_contexts_list() { return global_contexts_list_; }
+ Object* native_contexts_list() { return native_contexts_list_; }
// Number of mark-sweeps.
- int ms_count() { return ms_count_; }
+ unsigned int ms_count() { return ms_count_; }
// Iterates over all roots in the heap.
void IterateRoots(ObjectVisitor* v, VisitMode mode);
@@ -1226,9 +1234,9 @@ class Heap {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
- // Get address of global contexts list for serialization support.
- Object** global_contexts_list_address() {
- return &global_contexts_list_;
+ // Get address of native contexts list for serialization support.
+ Object** native_contexts_list_address() {
+ return &native_contexts_list_;
}
#ifdef DEBUG
@@ -1238,10 +1246,6 @@ class Heap {
// Verify the heap is in its normal state before or after a GC.
void Verify();
- // Verify that AccessorPairs are not shared, i.e. make sure that they have
- // exactly one pointer to them.
- void VerifyNoAccessorPairSharing();
-
void OldPointerSpaceCheckStoreBuffer();
void MapSpaceCheckStoreBuffer();
void LargeObjectSpaceCheckStoreBuffer();
@@ -1290,6 +1294,7 @@ class Heap {
return disallow_allocation_failure_;
}
+ void TracePathToObjectFrom(Object* target, Object* root);
void TracePathToObject(Object* target);
void TracePathToGlobal();
#endif
@@ -1393,15 +1398,15 @@ class Heap {
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
-
#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION
+ // Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+
kSymbolTableRootIndex,
kStrongRootListLength = kSymbolTableRootIndex,
kRootListLength
@@ -1588,6 +1593,16 @@ class Heap {
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
+ void SetGetterStubDeoptPCOffset(int pc_offset) {
+ ASSERT(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+ }
+
+ void SetSetterStubDeoptPCOffset(int pc_offset) {
+ ASSERT(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+ }
+
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -1601,6 +1616,60 @@ class Heap {
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
+ intptr_t amount_of_external_allocated_memory() {
+ return amount_of_external_allocated_memory_;
+ }
+
+ // ObjectStats are kept in two arrays, counts and sizes. Related stats are
+ // stored in a contiguous linear buffer. Stats groups are stored one after
+ // another.
+ enum {
+ FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
+ FIRST_FIXED_ARRAY_SUB_TYPE =
+ FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1,
+ OBJECT_STATS_COUNT =
+ FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
+ };
+
+ void RecordObjectStats(InstanceType type, int sub_type, size_t size) {
+ ASSERT(type <= LAST_TYPE);
+ if (sub_type < 0) {
+ object_counts_[type]++;
+ object_sizes_[type] += size;
+ } else {
+ if (type == CODE_TYPE) {
+ ASSERT(sub_type <= Code::LAST_CODE_KIND);
+ object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
+ object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
+ } else if (type == FIXED_ARRAY_TYPE) {
+ ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++;
+ object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size;
+ }
+ }
+ }
+
+ void CheckpointObjectStats();
+
+ // We don't use a ScopedLock here since we want to lock the heap
+ // only when FLAG_parallel_recompilation is true.
+ class RelocationLock {
+ public:
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_parallel_recompilation) {
+ heap_->relocation_mutex_->Lock();
+ }
+ }
+ ~RelocationLock() {
+ if (FLAG_parallel_recompilation) {
+ heap_->relocation_mutex_->Unlock();
+ }
+ }
+
+ private:
+ Heap* heap_;
+ };
+
private:
Heap();
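RelocationLock is deliberately not a ScopedLock, as the comment in the hunk says: the mutex is created only when parallel recompilation is enabled (see the SetUp change earlier), so single-threaded configurations pay nothing, and Scavenge() holds the lock for its whole duration so the recompilation thread never observes half-moved objects. The conditional-guard shape, standalone:

#include <mutex>

static bool FLAG_parallel_recompilation = true;  // stand-in for V8's flag
static std::mutex relocation_mutex;              // Heap::relocation_mutex_

// Lock only when the feature that introduces the second thread is enabled.
class RelocationLockSketch {
 public:
  RelocationLockSketch() {
    if (FLAG_parallel_recompilation) relocation_mutex.lock();
  }
  ~RelocationLockSketch() {
    if (FLAG_parallel_recompilation) relocation_mutex.unlock();
  }
};

static void ScavengeSketch() {
  RelocationLockSketch lock;  // held for the whole scavenge, as in the patch
  // ... move objects; the optimizer thread is excluded while this runs ...
}

int main() { ScavengeSketch(); }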
@@ -1653,7 +1722,7 @@ class Heap {
// Returns the amount of external memory registered since last global gc.
intptr_t PromotedExternalMemorySize();
- int ms_count_; // how many mark-sweep collections happened
+ unsigned int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
// For post mortem debugging.
@@ -1725,7 +1794,7 @@ class Heap {
// last GC.
int old_gen_exhausted_;
- Object* global_contexts_list_;
+ Object* native_contexts_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
@@ -1994,14 +2063,24 @@ class Heap {
void AdvanceIdleIncrementalMarking(intptr_t step_size);
+ void ClearObjectStats(bool clear_last_time_stats = false);
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
+ // Object counts and used memory by InstanceType
+ size_t object_counts_[OBJECT_STATS_COUNT];
+ size_t object_counts_last_time_[OBJECT_STATS_COUNT];
+ size_t object_sizes_[OBJECT_STATS_COUNT];
+ size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
+
// Maximum GC pause.
int max_gc_pause_;
+ // Total time spent in GC.
+ int total_gc_time_ms_;
+
// Maximum size of objects alive after GC.
intptr_t max_alive_after_gc_;
@@ -2046,6 +2125,8 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
+ Mutex* relocation_mutex_;
+
friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;
@@ -2054,7 +2135,7 @@ class Heap {
friend class Page;
friend class Isolate;
friend class MarkCompactCollector;
- friend class StaticMarkingVisitor;
+ friend class MarkCompactMarkingVisitor;
friend class MapCompact;
DISALLOW_COPY_AND_ASSIGN(Heap);
@@ -2094,10 +2175,26 @@ class HeapStats {
};
+class DisallowAllocationFailure {
+ public:
+ inline DisallowAllocationFailure();
+ inline ~DisallowAllocationFailure();
+
+#ifdef DEBUG
+ private:
+ bool old_state_;
+#endif
+};
+
+
class AlwaysAllocateScope {
public:
inline AlwaysAllocateScope();
inline ~AlwaysAllocateScope();
+
+ private:
+ // Implicitly disable artificial allocation failures.
+ DisallowAllocationFailure disallow_allocation_failure_;
};
@@ -2322,9 +2419,11 @@ class DescriptorLookupCache {
static int Hash(DescriptorArray* array, String* name) {
// Uses only lower 32 bits if pointers are larger.
uint32_t array_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array))
+ >> kPointerSizeLog2;
uint32_t name_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
+ >> kPointerSizeLog2;
return (array_hash ^ name_hash) % kLength;
}
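The hash fix stops throwing away entropy: heap pointers are at least pointer-aligned, so their low kPointerSizeLog2 bits are always zero, and shifting by exactly that amount (3 on 64-bit targets, rather than the old fixed 2) discards only the constant bits. Reproduced standalone:

#include <cstdint>
#include <cstdio>

const int kLength = 64;          // cache rows, as in DescriptorLookupCache
const int kPointerSizeLog2 = 3;  // 64-bit build; heap pointers are 8-aligned

static int HashSketch(const void* array, const void* name) {
  // Shift away the always-zero alignment bits before mixing, mirroring
  // the corrected Hash() above.
  uint32_t a = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array))
               >> kPointerSizeLog2;
  uint32_t n = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
               >> kPointerSizeLog2;
  return (a ^ n) % kLength;
}

int main() {
  int x, y;  // stack objects stand in for heap objects here
  std::printf("row %d\n", HashSketch(&x, &y));
}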
@@ -2342,18 +2441,6 @@ class DescriptorLookupCache {
};
-#ifdef DEBUG
-class DisallowAllocationFailure {
- public:
- inline DisallowAllocationFailure();
- inline ~DisallowAllocationFailure();
-
- private:
- bool old_state_;
-};
-#endif
-
-
// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
@@ -2369,6 +2456,7 @@ class AssertNoAllocation {
#ifdef DEBUG
private:
bool old_state_;
+ bool active_;
#endif
};
@@ -2381,6 +2469,7 @@ class DisableAssertNoAllocation {
#ifdef DEBUG
private:
bool old_state_;
+ bool active_;
#endif
};
@@ -2500,24 +2589,31 @@ class GCTracer BASE_EMBEDDED {
};
-class StringSplitCache {
+class RegExpResultsCache {
public:
- static Object* Lookup(FixedArray* cache, String* string, String* pattern);
+ enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
+
+ // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
+ // On success, the returned result is guaranteed to be a COW-array.
+ static Object* Lookup(Heap* heap,
+ String* key_string,
+ Object* key_pattern,
+ ResultsCacheType type);
+ // Attempt to add value_array to the cache specified by type. On success,
+ // value_array is turned into a COW-array.
static void Enter(Heap* heap,
- FixedArray* cache,
- String* string,
- String* pattern,
- FixedArray* array);
+ String* key_string,
+ Object* key_pattern,
+ FixedArray* value_array,
+ ResultsCacheType type);
static void Clear(FixedArray* cache);
- static const int kStringSplitCacheSize = 0x100;
+ static const int kRegExpResultsCacheSize = 0x100;
private:
static const int kArrayEntriesPerCacheEntry = 4;
static const int kStringOffset = 0;
static const int kPatternOffset = 1;
static const int kArrayOffset = 2;
-
- static MaybeObject* WrapFixedArrayInJSArray(Object* fixed_array);
};
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index db3c3f3f78..0192a763f0 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -156,6 +156,20 @@ void Range::Union(Range* other) {
}
+void Range::CombinedMax(Range* other) {
+ upper_ = Max(upper_, other->upper_);
+ lower_ = Max(lower_, other->lower_);
+ set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
+}
+
+
+void Range::CombinedMin(Range* other) {
+ upper_ = Min(upper_, other->upper_);
+ lower_ = Min(lower_, other->lower_);
+ set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
+}
+
+
void Range::Sar(int32_t value) {
int32_t bits = value & 0x1F;
lower_ = lower_ >> bits;
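
The new CombinedMax/CombinedMin helpers above are interval arithmetic for the range analysis: if a lies in [a_lo, a_hi] and b in [b_lo, b_hi], then max(a, b) lies in [max(a_lo, b_lo), max(a_hi, b_hi)], and min symmetrically. A standalone sketch:

#include <algorithm>
#include <cassert>

struct Range { int lower, upper; };

// max(a, b) is at least max(a.lower, b.lower), since it is >= both
// operands, and at most max(a.upper, b.upper).
Range CombinedMax(Range a, Range b) {
  return { std::max(a.lower, b.lower), std::max(a.upper, b.upper) };
}

int main() {
  Range r = CombinedMax({-5, 3}, {1, 10});
  assert(r.lower == 1 && r.upper == 10);  // max(a, b) >= b >= 1 always holds
}
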
@@ -861,12 +875,14 @@ HValue* HBitwise::Canonicalize() {
int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
if (left()->IsConstant() &&
HConstant::cast(left())->HasInteger32Value() &&
- HConstant::cast(left())->Integer32Value() == nop_constant) {
+ HConstant::cast(left())->Integer32Value() == nop_constant &&
+ !right()->CheckFlag(kUint32)) {
return right();
}
if (right()->IsConstant() &&
HConstant::cast(right())->HasInteger32Value() &&
- HConstant::cast(right())->Integer32Value() == nop_constant) {
+ HConstant::cast(right())->Integer32Value() == nop_constant &&
+ !left()->CheckFlag(kUint32)) {
return left();
}
return this;
@@ -878,7 +894,9 @@ HValue* HBitNot::Canonicalize() {
if (value()->IsBitNot()) {
HValue* result = HBitNot::cast(value())->value();
ASSERT(result->representation().IsInteger32());
- return result;
+ if (!result->CheckFlag(kUint32)) {
+ return result;
+ }
}
return this;
}
@@ -941,7 +959,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
// introduced.
if (value()->representation().IsInteger32()) return value();
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_X64)
if (value()->IsDiv() && (value()->UseCount() == 1)) {
// TODO(2038): Implement this optimization for non ARM architectures.
HDiv* hdiv = HDiv::cast(value());
@@ -1072,6 +1091,11 @@ void HCheckInstanceType::PrintDataTo(StringStream* stream) {
}
+void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) {
+ stream->Add("[receiver_prototype=%p,holder=%p]", *prototype(), *holder());
+}
+
+
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@@ -1100,6 +1124,7 @@ Range* HChange::InferRange(Zone* zone) {
Range* input_range = value()->range();
if (from().IsInteger32() &&
to().IsTagged() &&
+ !value()->CheckFlag(HInstruction::kUint32) &&
input_range != NULL && input_range->IsInSmiRange()) {
set_type(HType::Smi());
}
@@ -1232,6 +1257,24 @@ Range* HMod::InferRange(Zone* zone) {
}
+Range* HMathMinMax::InferRange(Zone* zone) {
+ if (representation().IsInteger32()) {
+ Range* a = left()->range();
+ Range* b = right()->range();
+ Range* res = a->Copy(zone);
+ if (operation_ == kMathMax) {
+ res->CombinedMax(b);
+ } else {
+ ASSERT(operation_ == kMathMin);
+ res->CombinedMin(b);
+ }
+ return res;
+ } else {
+ return HValue::InferRange(zone);
+ }
+}
+
+
void HPhi::PrintTo(StringStream* stream) {
stream->Add("[");
for (int i = 0; i < OperandCount(); ++i) {
@@ -1346,7 +1389,7 @@ void HPhi::ResetInteger32Uses() {
void HSimulate::PrintDataTo(StringStream* stream) {
- stream->Add("id=%d", ast_id());
+ stream->Add("id=%d", ast_id().ToInt());
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
@@ -1375,45 +1418,82 @@ void HDeoptimize::PrintDataTo(StringStream* stream) {
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name = function()->debug_name()->ToCString();
- stream->Add("%s, id=%d", *name, function()->id());
+ stream->Add("%s, id=%d", *name, function()->id().ToInt());
+}
+
+
+static bool IsInteger32(double value) {
+ double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
+ return BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(value);
}
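
The IsInteger32 helper factored out above detects integer-valued doubles via a double-to-int32-to-double roundtrip, comparing bit patterns rather than values; the bit comparison is what rejects -0.0, which an ordinary == would wrongly accept as 0. A sketch with memcmp standing in for BitCast (NaN and out-of-int32-range inputs are avoided here, since casting those to int32_t is undefined in portable C++):

#include <cassert>
#include <cstdint>
#include <cstring>

// memcmp stands in for V8's BitCast<int64_t> comparison.
static bool IsInteger32(double value) {
  double roundtrip = static_cast<double>(static_cast<int32_t>(value));
  return std::memcmp(&roundtrip, &value, sizeof(double)) == 0;
}

int main() {
  assert(IsInteger32(42.0));    // survives the roundtrip bit-for-bit
  assert(!IsInteger32(42.5));   // truncates to 42.0
  assert(!IsInteger32(-0.0));   // roundtrips to +0.0, whose bits differ
}
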
HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
has_int32_value_(false),
- has_double_value_(false),
- int32_value_(0),
- double_value_(0) {
+ has_double_value_(false) {
set_representation(r);
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
- double roundtrip_value = static_cast<double>(static_cast<int32_t>(n));
- has_int32_value_ = BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(n);
- if (has_int32_value_) int32_value_ = static_cast<int32_t>(n);
+ has_int32_value_ = IsInteger32(n);
+ int32_value_ = DoubleToInt32(n);
double_value_ = n;
has_double_value_ = true;
}
}
+HConstant::HConstant(int32_t integer_value, Representation r)
+ : has_int32_value_(true),
+ has_double_value_(true),
+ int32_value_(integer_value),
+ double_value_(FastI2D(integer_value)) {
+ set_representation(r);
+ SetFlag(kUseGVN);
+}
+
+
+HConstant::HConstant(double double_value, Representation r)
+ : has_int32_value_(IsInteger32(double_value)),
+ has_double_value_(true),
+ int32_value_(DoubleToInt32(double_value)),
+ double_value_(double_value) {
+ set_representation(r);
+ SetFlag(kUseGVN);
+}
+
+
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
+ if (handle_.is_null()) {
+ ASSERT(has_int32_value_ || has_double_value_);
+ if (has_int32_value_) return new(zone) HConstant(int32_value_, r);
+ return new(zone) HConstant(double_value_, r);
+ }
return new(zone) HConstant(handle_, r);
}
HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
- if (!has_double_value_) return NULL;
- int32_t truncated = NumberToInt32(*handle_);
- return new(zone) HConstant(FACTORY->NewNumberFromInt(truncated),
- Representation::Integer32());
+ if (has_int32_value_) {
+ if (handle_.is_null()) {
+ return new(zone) HConstant(int32_value_, Representation::Integer32());
+ } else {
+ // Re-use the existing Handle if possible.
+ return new(zone) HConstant(handle_, Representation::Integer32());
+ }
+ } else if (has_double_value_) {
+ return new(zone) HConstant(DoubleToInt32(double_value_),
+ Representation::Integer32());
+ } else {
+ return NULL;
+ }
}
-bool HConstant::ToBoolean() const {
+bool HConstant::ToBoolean() {
// Converts the constant's boolean value according to
// ECMAScript section 9.2 ToBoolean conversion.
if (HasInteger32Value()) return Integer32Value() != 0;
@@ -1421,17 +1501,25 @@ bool HConstant::ToBoolean() const {
double v = DoubleValue();
return v != 0 && !isnan(v);
}
- if (handle()->IsTrue()) return true;
- if (handle()->IsFalse()) return false;
- if (handle()->IsUndefined()) return false;
- if (handle()->IsNull()) return false;
- if (handle()->IsString() &&
- String::cast(*handle())->length() == 0) return false;
+ Handle<Object> literal = handle();
+ if (literal->IsTrue()) return true;
+ if (literal->IsFalse()) return false;
+ if (literal->IsUndefined()) return false;
+ if (literal->IsNull()) return false;
+ if (literal->IsString() && String::cast(*literal)->length() == 0) {
+ return false;
+ }
return true;
}
void HConstant::PrintDataTo(StringStream* stream) {
- handle()->ShortPrint(stream);
+ if (has_int32_value_) {
+ stream->Add("%d ", int32_value_);
+ } else if (has_double_value_) {
+ stream->Add("%f ", FmtElm(double_value_));
+ } else {
+ handle()->ShortPrint(stream);
+ }
}
@@ -1638,13 +1726,10 @@ static bool PrototypeChainCanNeverResolve(
}
LookupResult lookup(isolate);
- JSObject::cast(current)->map()->LookupInDescriptors(NULL, *name, &lookup);
- if (lookup.IsFound()) {
- if (lookup.type() != MAP_TRANSITION) return false;
- } else if (!lookup.IsCacheable()) {
- return false;
- }
-
+ Map* map = JSObject::cast(current)->map();
+ map->LookupDescriptor(NULL, *name, &lookup);
+ if (lookup.IsFound()) return false;
+ if (!lookup.IsCacheable()) return false;
current = JSObject::cast(current)->GetPrototype();
}
return true;
@@ -1669,7 +1754,7 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(map->GetIsolate());
- map->LookupInDescriptors(NULL, *name, &lookup);
+ map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
case FIELD: {
@@ -1685,20 +1770,24 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
case CONSTANT_FUNCTION:
types_.Add(types->at(i), zone);
break;
- case MAP_TRANSITION:
- if (!map->has_named_interceptor() &&
- PrototypeChainCanNeverResolve(map, name)) {
- negative_lookups.Add(types->at(i), zone);
- }
+ case CALLBACKS:
break;
- default:
+ case TRANSITION:
+ case INTERCEPTOR:
+ case NONEXISTENT:
+ case NORMAL:
+ case HANDLER:
+ UNREACHABLE();
break;
}
- } else if (lookup.IsCacheable()) {
- if (!map->has_named_interceptor() &&
- PrototypeChainCanNeverResolve(map, name)) {
- negative_lookups.Add(types->at(i), zone);
- }
+ } else if (lookup.IsCacheable() &&
+ // For dicts the lookup on the map will fail, but the object may
+ // contain the property, so we cannot generate a negative lookup
+ // (which would just be a map check and return undefined).
+ !map->is_dictionary_map() &&
+ !map->has_named_interceptor() &&
+ PrototypeChainCanNeverResolve(map, name)) {
+ negative_lookups.Add(types->at(i), zone);
}
}
@@ -1757,7 +1846,8 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("]");
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
if (RequiresHoleCheck()) {
stream->Add(" check_hole");
}
@@ -1782,7 +1872,8 @@ void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("]");
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
}
@@ -1811,6 +1902,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
new(block()->zone()) HCheckMapValue(object(), names_cache->map());
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
+ key_load->key(),
key_load->key());
map_check->InsertBefore(this);
index->InsertBefore(this);
@@ -1871,7 +1963,8 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
}
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("]");
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
}
@@ -2079,6 +2172,10 @@ HType HPhi::CalculateInferredType() {
HType HConstant::CalculateInferredType() {
+ if (has_int32_value_) {
+ return Smi::IsValid(int32_value_) ? HType::Smi() : HType::HeapNumber();
+ }
+ if (has_double_value_) return HType::HeapNumber();
return HType::TypeFromValue(handle_);
}
@@ -2226,6 +2323,13 @@ HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
+HValue* HMathFloorOfDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+ visited->Add(id());
+ SetFlag(kBailoutOnMinusZero);
+ return NULL;
+}
+
+
HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
visited->Add(id());
if (range() == NULL || range()->CanBeMinusZero()) {
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 780d57d61a..6c938cd4bf 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -124,7 +124,6 @@ class LChunkBuilder;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
@@ -140,7 +139,9 @@ class LChunkBuilder;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
V(MathFloorOfDiv) \
+ V(MathMinMax) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
@@ -170,6 +171,7 @@ class LChunkBuilder;
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(Sub) \
V(ThisFunction) \
@@ -224,6 +226,16 @@ class LChunkBuilder;
virtual Opcode opcode() const { return HValue::k##type; }
+#ifdef DEBUG
+#define ASSERT_ALLOCATION_DISABLED do { \
+ OptimizingCompilerThread* thread = \
+ ISOLATE->optimizing_compiler_thread(); \
+ ASSERT(thread->IsOptimizerThread() || !HEAP->IsAllocationAllowed()); \
+ } while (0)
+#else
+#define ASSERT_ALLOCATION_DISABLED do {} while (0)
+#endif
+
class Range: public ZoneObject {
public:
Range()
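
Both expansions of ASSERT_ALLOCATION_DISABLED above are wrapped in do { ... } while (0), the usual idiom for making a multi-statement macro behave as a single statement so it composes with an unbraced if/else. A minimal illustration with a hypothetical macro:

#include <cstdio>

// Hypothetical macro; the wrapper makes the two statements expand to a
// single statement, completed by the caller's semicolon.
#define LOG_TWICE(msg) do { \
    std::puts(msg);         \
    std::puts(msg);         \
  } while (0)

int main(int argc, char**) {
  if (argc > 1)
    LOG_TWICE("got arguments");  // safe under an unbraced if...
  else
    std::puts("no arguments");   // ...so this else still binds correctly
}
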
@@ -276,6 +288,8 @@ class Range: public ZoneObject {
void Intersect(Range* other);
void Union(Range* other);
+ void CombinedMax(Range* other);
+ void CombinedMin(Range* other);
void AddConstant(int32_t value);
void Sar(int32_t value);
@@ -549,7 +563,14 @@ class HValue: public ZoneObject {
kIsArguments,
kTruncatingToInt32,
kIsDead,
- kLastFlag = kIsDead
+ // Instructions allowed to produce full-range unsigned integer values
+ // are marked with the kUint32 flag. If an arithmetic shift or a load
+ // from an EXTERNAL_UNSIGNED_INT_ELEMENTS array is not so marked, it
+ // will deoptimize if the result does not fit the signed integer range.
+ // HGraph::ComputeSafeUint32Operations is responsible for setting this
+ // flag.
+ kUint32,
+ kLastFlag = kUint32
};
STATIC_ASSERT(kLastFlag < kBitsPerInt);
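
The comment above is the heart of the kUint32 machinery: values in [2^31, 2^32) are valid uint32 results but have no int32 representation, so an unmarked instruction that promises a signed result must deoptimize when it produces one. A short two's-complement illustration:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t raw = 0x80000000u;  // 2^31: a perfectly valid uint32 value
  int32_t as_int32 = static_cast<int32_t>(raw);
  // Prints "2147483648 -> -2147483648" on two's-complement targets: the
  // same 32 bits denote a different number once interpreted as signed.
  std::printf("%u -> %d\n", raw, as_int32);
}
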
@@ -857,9 +878,14 @@ class HInstruction: public HValue {
void InsertBefore(HInstruction* next);
void InsertAfter(HInstruction* previous);
+ // The position is a write-once variable.
int position() const { return position_; }
bool has_position() const { return position_ != RelocInfo::kNoPosition; }
- void set_position(int position) { position_ = position; }
+ void set_position(int position) {
+ ASSERT(!has_position());
+ ASSERT(position != RelocInfo::kNoPosition);
+ position_ = position;
+ }
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
@@ -1281,7 +1307,7 @@ class HClampToUint8: public HUnaryOperation {
class HSimulate: public HInstruction {
public:
- HSimulate(int ast_id, int pop_count, Zone* zone)
+ HSimulate(BailoutId ast_id, int pop_count, Zone* zone)
: ast_id_(ast_id),
pop_count_(pop_count),
values_(2, zone),
@@ -1291,9 +1317,9 @@ class HSimulate: public HInstruction {
virtual void PrintDataTo(StringStream* stream);
- bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; }
- int ast_id() const { return ast_id_; }
- void set_ast_id(int id) {
+ bool HasAstId() const { return !ast_id_.IsNone(); }
+ BailoutId ast_id() const { return ast_id_; }
+ void set_ast_id(BailoutId id) {
ASSERT(!HasAstId());
ast_id_ = id;
}
@@ -1341,7 +1367,7 @@ class HSimulate: public HInstruction {
// use lists are correctly updated.
SetOperandAt(values_.length() - 1, value);
}
- int ast_id_;
+ BailoutId ast_id_;
int pop_count_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
@@ -1385,20 +1411,29 @@ class HStackCheck: public HTemplateInstruction<1> {
};
+enum InliningKind {
+ NORMAL_RETURN, // Normal function/method call and return.
+ DROP_EXTRA_ON_RETURN, // Drop an extra value from the environment on return.
+ CONSTRUCT_CALL_RETURN, // Either use allocated receiver or return value.
+ GETTER_CALL_RETURN, // Returning from a getter, need to restore context.
+ SETTER_CALL_RETURN // Use the RHS of the assignment as the return value.
+};
+
+
class HEnterInlined: public HTemplateInstruction<0> {
public:
HEnterInlined(Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
CallKind call_kind,
- bool is_construct,
+ InliningKind inlining_kind,
Variable* arguments_var,
ZoneList<HValue*>* arguments_values)
: closure_(closure),
arguments_count_(arguments_count),
function_(function),
call_kind_(call_kind),
- is_construct_(is_construct),
+ inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
arguments_values_(arguments_values) {
}
@@ -1409,7 +1444,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
int arguments_count() const { return arguments_count_; }
FunctionLiteral* function() const { return function_; }
CallKind call_kind() const { return call_kind_; }
- bool is_construct() const { return is_construct_; }
+ InliningKind inlining_kind() const { return inlining_kind_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -1425,7 +1460,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
int arguments_count_;
FunctionLiteral* function_;
CallKind call_kind_;
- bool is_construct_;
+ InliningKind inlining_kind_;
Variable* arguments_var_;
ZoneList<HValue*>* arguments_values_;
};
@@ -1469,7 +1504,7 @@ class HPushArgument: public HUnaryOperation {
class HThisFunction: public HTemplateInstruction<0> {
public:
- explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
+ HThisFunction() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@@ -1478,18 +1513,10 @@ class HThisFunction: public HTemplateInstruction<0> {
return Representation::None();
}
- Handle<JSFunction> closure() const { return closure_; }
-
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
- virtual bool DataEquals(HValue* other) {
- HThisFunction* b = HThisFunction::cast(other);
- return *closure() == *b->closure();
- }
-
- private:
- Handle<JSFunction> closure_;
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1881,6 +1908,7 @@ class HJSArrayLength: public HTemplateInstruction<2> {
class HFixedArrayBaseLength: public HUnaryOperation {
public:
explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
+ set_type(HType::Smi());
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnArrayLengths);
@@ -1897,6 +1925,26 @@ class HFixedArrayBaseLength: public HUnaryOperation {
};
+class HMapEnumLength: public HUnaryOperation {
+ public:
+ explicit HMapEnumLength(HValue* value) : HUnaryOperation(value) {
+ set_type(HType::Smi());
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
class HElementsKind: public HUnaryOperation {
public:
explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
@@ -2021,14 +2069,18 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
};
-class HLoadElements: public HUnaryOperation {
+class HLoadElements: public HTemplateInstruction<2> {
public:
- explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
+ HLoadElements(HValue* value, HValue* typecheck) {
+ SetOperandAt(0, value);
+ SetOperandAt(1, typecheck);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnElementsPointer);
}
+ HValue* value() { return OperandAt(0); }
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2289,8 +2341,10 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
return Representation::None();
}
+ virtual void PrintDataTo(StringStream* stream);
+
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->IsAllocationAllowed());
+ ASSERT_ALLOCATION_DISABLED;
intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
return hash;
@@ -2459,20 +2513,42 @@ class HArgumentsObject: public HTemplateInstruction<0> {
class HConstant: public HTemplateInstruction<0> {
public:
HConstant(Handle<Object> handle, Representation r);
+ HConstant(int32_t value, Representation r);
+ HConstant(double value, Representation r);
- Handle<Object> handle() const { return handle_; }
+ Handle<Object> handle() {
+ if (handle_.is_null()) {
+ handle_ = FACTORY->NewNumber(double_value_, TENURED);
+ }
+ ASSERT(has_int32_value_ || !handle_->IsSmi());
+ return handle_;
+ }
bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
bool ImmortalImmovable() const {
+ if (has_int32_value_) {
+ return false;
+ }
+ if (has_double_value_) {
+ if (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
+ isnan(double_value_)) {
+ return true;
+ }
+ return false;
+ }
+
+ ASSERT(!handle_.is_null());
Heap* heap = HEAP;
+ // We should have handled minus_zero_value and nan_value in the
+ // has_double_value_ clause above.
+ ASSERT(*handle_ != heap->minus_zero_value());
+ ASSERT(*handle_ != heap->nan_value());
if (*handle_ == heap->undefined_value()) return true;
if (*handle_ == heap->null_value()) return true;
if (*handle_ == heap->true_value()) return true;
if (*handle_ == heap->false_value()) return true;
if (*handle_ == heap->the_hole_value()) return true;
- if (*handle_ == heap->minus_zero_value()) return true;
- if (*handle_ == heap->nan_value()) return true;
if (*handle_ == heap->empty_string()) return true;
return false;
}
@@ -2482,18 +2558,14 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual bool IsConvertibleToInteger() const {
- if (handle_->IsSmi()) return true;
- if (handle_->IsHeapNumber() &&
- (HeapNumber::cast(*handle_)->value() ==
- static_cast<double>(NumberToInt32(*handle_)))) return true;
- return false;
+ return has_int32_value_;
}
virtual bool EmitAtUses() { return !representation().IsDouble(); }
virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
- bool IsInteger() const { return handle_->IsSmi(); }
+ bool IsInteger() { return handle()->IsSmi(); }
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
HConstant* CopyToTruncatedInt32(Zone* zone) const;
bool HasInteger32Value() const { return has_int32_value_; }
@@ -2506,24 +2578,35 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(HasDoubleValue());
return double_value_;
}
- bool HasNumberValue() const { return has_int32_value_ || has_double_value_; }
+ bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
ASSERT(HasNumberValue());
- if (has_int32_value_) return int32_value_;
- return DoubleToInt32(double_value_);
+ // Irrespective of whether a numeric HConstant can be safely
+ // represented as an int32, we store the (in some cases lossy)
+ // representation of the number in int32_value_.
+ return int32_value_;
}
- bool HasStringValue() const { return handle_->IsString(); }
- bool ToBoolean() const;
+ bool ToBoolean();
+
+ bool IsUint32() {
+ return HasInteger32Value() && (Integer32Value() >= 0);
+ }
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->allow_allocation(false));
- intptr_t hash = reinterpret_cast<intptr_t>(*handle());
- // Prevent smis from having fewer hash values when truncated to
- // the least significant bits.
- const int kShiftSize = kSmiShiftSize + kSmiTagSize;
- STATIC_ASSERT(kShiftSize != 0);
- return hash ^ (hash >> kShiftSize);
+ ASSERT_ALLOCATION_DISABLED;
+ intptr_t hash;
+
+ if (has_int32_value_) {
+ hash = static_cast<intptr_t>(int32_value_);
+ } else if (has_double_value_) {
+ hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_));
+ } else {
+ ASSERT(!handle_.is_null());
+ hash = reinterpret_cast<intptr_t>(*handle_);
+ }
+
+ return hash;
}
#ifdef DEBUG
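
The rewritten Hashcode hashes by value class: int32s directly, doubles through their bit pattern, and only handle-based constants by pointer. Hashing a double's bits rather than its value matches the bitwise DataEquals below: +0.0 and -0.0 hash differently, and NaN hashes consistently even though NaN != NaN. A sketch of the double case, with memcpy standing in for BitCast:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Hash the representation, not the value, so -0.0 and NaN behave
// sensibly as hash-table keys.
static long long HashDouble(double d) {
  int64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<long long>(bits);
}

int main() {
  std::printf("%d\n", HashDouble(0.0) != HashDouble(-0.0));  // 1: distinct
  double n = std::nan("");
  std::printf("%d\n", HashDouble(n) == HashDouble(n));       // 1: stable
}
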
@@ -2537,15 +2620,32 @@ class HConstant: public HTemplateInstruction<0> {
virtual bool DataEquals(HValue* other) {
HConstant* other_constant = HConstant::cast(other);
- return handle().is_identical_to(other_constant->handle());
+ if (has_int32_value_) {
+ return other_constant->has_int32_value_ &&
+ int32_value_ == other_constant->int32_value_;
+ } else if (has_double_value_) {
+ return other_constant->has_double_value_ &&
+ BitCast<int64_t>(double_value_) ==
+ BitCast<int64_t>(other_constant->double_value_);
+ } else {
+ ASSERT(!handle_.is_null());
+ return !other_constant->handle_.is_null() &&
+ *handle_ == *other_constant->handle_;
+ }
}
private:
+ // If this is a numerical constant, handle_ either points to the
+ // HeapObject the constant originated from or is null. If the
+ // constant is non-numeric, handle_ always points to a valid
+ // constant HeapObject.
Handle<Object> handle_;
- // The following two values represent the int32 and the double value of the
- // given constant if there is a lossless conversion between the constant
- // and the specific representation.
+ // We store the HConstant in the most specific form that is safe.
+ // The two flags, has_int32_value_ and has_double_value_, tell us
+ // whether int32_value_ and double_value_ hold valid, safe
+ // representations of the constant. has_int32_value_ implies
+ // has_double_value_ but not the converse.
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
int32_t int32_value_;
@@ -2706,16 +2806,42 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
};
+enum BoundsCheckKeyMode {
+ DONT_ALLOW_SMI_KEY,
+ ALLOW_SMI_KEY
+};
+
+
class HBoundsCheck: public HTemplateInstruction<2> {
public:
- HBoundsCheck(HValue* index, HValue* length) {
+ HBoundsCheck(HValue* index, HValue* length,
+ BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY)
+ : key_mode_(key_mode) {
SetOperandAt(0, index);
SetOperandAt(1, length);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int arg_index) {
+ if (key_mode_ == DONT_ALLOW_SMI_KEY ||
+ !length()->representation().IsTagged()) {
+ return Representation::Integer32();
+ }
+ // If the index is tagged and isn't constant, then allow the length
+ // to be tagged, since it is usually already tagged from loading it out of
+ // the length field of a JSArray. This allows for direct comparison without
+ // untagging.
+ if (index()->representation().IsTagged() && !index()->IsConstant()) {
+ return Representation::Tagged();
+ }
+ // Also allow the length to be tagged if the index is constant, because
+ // it can be tagged to allow direct comparison.
+ if (index()->IsConstant() &&
+ index()->representation().IsInteger32() &&
+ arg_index == 1) {
+ return Representation::Tagged();
+ }
return Representation::Integer32();
}
@@ -2728,6 +2854,7 @@ class HBoundsCheck: public HTemplateInstruction<2> {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+ BoundsCheckKeyMode key_mode_;
};
@@ -2782,8 +2909,11 @@ class HMathFloorOfDiv: public HBinaryOperation {
: HBinaryOperation(context, left, right) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
+ SetFlag(kCanOverflow);
}
+ virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
@@ -3388,6 +3518,47 @@ class HDiv: public HArithmeticBinaryOperation {
};
+class HMathMinMax: public HArithmeticBinaryOperation {
+ public:
+ enum Operation { kMathMin, kMathMax };
+
+ HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
+ : HArithmeticBinaryOperation(context, left, right),
+ operation_(op) { }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return index == 0
+ ? Representation::Tagged()
+ : representation();
+ }
+
+ virtual Representation InferredRepresentation() {
+ if (left()->representation().IsInteger32() &&
+ right()->representation().IsInteger32()) {
+ return Representation::Integer32();
+ }
+ return Representation::Double();
+ }
+
+ virtual bool IsCommutative() const { return true; }
+
+ Operation operation() { return operation_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
+
+ protected:
+ virtual bool DataEquals(HValue* other) {
+ return other->IsMathMinMax() &&
+ HMathMinMax::cast(other)->operation_ == operation_;
+ }
+
+ virtual Range* InferRange(Zone* zone);
+
+ private:
+ Operation operation_;
+};
+
+
class HBitwise: public HBitwiseBinaryOperation {
public:
HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
@@ -3484,11 +3655,11 @@ class HSar: public HBitwiseBinaryOperation {
class HOsrEntry: public HTemplateInstruction<0> {
public:
- explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
+ explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
SetGVNFlag(kChangesOsrEntries);
}
- int ast_id() const { return ast_id_; }
+ BailoutId ast_id() const { return ast_id_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -3497,7 +3668,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
private:
- int ast_id_;
+ BailoutId ast_id_;
};
@@ -3596,7 +3767,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->allow_allocation(false));
+ ASSERT_ALLOCATION_DISABLED;
return reinterpret_cast<intptr_t>(*cell_);
}
@@ -3983,10 +4154,11 @@ class ArrayInstructionInterface {
};
class HLoadKeyedFastElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedFastElement(HValue* obj,
HValue* key,
+ HValue* dependency,
ElementsKind elements_kind = FAST_ELEMENTS)
: bit_field_(0) {
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
@@ -3997,6 +4169,7 @@ class HLoadKeyedFastElement
}
SetOperandAt(0, obj);
SetOperandAt(1, key);
+ SetOperandAt(2, dependency);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnArrayElements);
SetFlag(kUseGVN);
@@ -4004,6 +4177,7 @@ class HLoadKeyedFastElement
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
+ HValue* dependency() { return OperandAt(2); }
uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
void SetIndexOffset(uint32_t index_offset) {
bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
@@ -4020,9 +4194,9 @@ class HLoadKeyedFastElement
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
+ if (index == 0) return Representation::Tagged();
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
}
virtual void PrintDataTo(StringStream* stream);
@@ -4052,17 +4226,19 @@ enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
class HLoadKeyedFastDoubleElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedFastDoubleElement(
HValue* elements,
HValue* key,
+ HValue* dependency,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: index_offset_(0),
is_dehoisted_(false),
hole_check_mode_(hole_check_mode) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
+ SetOperandAt(2, dependency);
set_representation(Representation::Double());
SetGVNFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
@@ -4070,6 +4246,7 @@ class HLoadKeyedFastDoubleElement
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
+ HValue* dependency() { return OperandAt(2); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
@@ -4079,9 +4256,9 @@ class HLoadKeyedFastDoubleElement
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
+ if (index == 0) return Representation::Tagged();
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
}
bool RequiresHoleCheck() {
@@ -4108,16 +4285,18 @@ class HLoadKeyedFastDoubleElement
class HLoadKeyedSpecializedArrayElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
+ HValue* dependency,
ElementsKind elements_kind)
: elements_kind_(elements_kind),
index_offset_(0),
is_dehoisted_(false) {
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
+ SetOperandAt(2, dependency);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
set_representation(Representation::Double());
@@ -4133,15 +4312,15 @@ class HLoadKeyedSpecializedArrayElement
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32, but the base pointer
- // for the element load is a naked pointer.
- return index == 0
- ? Representation::External()
- : Representation::Integer32();
+ // The key is supposed to be Integer32.
+ if (index == 0) return Representation::External();
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
}
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
+ HValue* dependency() { return OperandAt(2); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
@@ -4797,10 +4976,12 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
class HRegExpLiteral: public HMaterializedLiteral<1> {
public:
HRegExpLiteral(HValue* context,
+ Handle<FixedArray> literals,
Handle<String> pattern,
Handle<String> flags,
int literal_index)
: HMaterializedLiteral<1>(literal_index, 0),
+ literals_(literals),
pattern_(pattern),
flags_(flags) {
SetOperandAt(0, context);
@@ -4808,6 +4989,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
}
HValue* context() { return OperandAt(0); }
+ Handle<FixedArray> literals() { return literals_; }
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
@@ -4819,6 +5001,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
private:
+ Handle<FixedArray> literals_;
Handle<String> pattern_;
Handle<String> flags_;
};
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 2c8a0f659c..75344bb513 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -133,10 +133,10 @@ HDeoptimize* HBasicBlock::CreateDeoptimize(
}
-HSimulate* HBasicBlock::CreateSimulate(int ast_id) {
+HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
- ASSERT(ast_id == AstNode::kNoNumber ||
+ ASSERT(ast_id.IsNone() ||
environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
@@ -166,7 +166,8 @@ void HBasicBlock::Finish(HControlInstruction* end) {
void HBasicBlock::Goto(HBasicBlock* block, FunctionState* state) {
- bool drop_extra = state != NULL && state->drop_extra();
+ bool drop_extra = state != NULL &&
+ state->inlining_kind() == DROP_EXTRA_ON_RETURN;
bool arguments_pushed = state != NULL && state->arguments_pushed();
if (block->IsInlineReturnTarget()) {
@@ -174,24 +175,24 @@ void HBasicBlock::Goto(HBasicBlock* block, FunctionState* state) {
last_environment_ = last_environment()->DiscardInlined(drop_extra);
}
- AddSimulate(AstNode::kNoNumber);
+ AddSimulate(BailoutId::None());
HGoto* instr = new(zone()) HGoto(block);
Finish(instr);
}
void HBasicBlock::AddLeaveInlined(HValue* return_value,
- HBasicBlock* target,
FunctionState* state) {
- bool drop_extra = state != NULL && state->drop_extra();
- bool arguments_pushed = state != NULL && state->arguments_pushed();
+ HBasicBlock* target = state->function_return();
+ bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN;
+ bool arguments_pushed = state->arguments_pushed();
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
AddInstruction(new(zone()) HLeaveInlined(arguments_pushed));
last_environment_ = last_environment()->DiscardInlined(drop_extra);
last_environment()->Push(return_value);
- AddSimulate(AstNode::kNoNumber);
+ AddSimulate(BailoutId::None());
HGoto* instr = new(zone()) HGoto(target);
Finish(instr);
}
@@ -204,7 +205,7 @@ void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
}
-void HBasicBlock::SetJoinId(int ast_id) {
+void HBasicBlock::SetJoinId(BailoutId ast_id) {
int length = predecessors_.length();
ASSERT(length > 0);
for (int i = 0; i < length; i++) {
@@ -527,7 +528,8 @@ void HGraph::Verify(bool do_full_verify) const {
// Check that all join blocks have predecessors that end with an
// unconditional goto and agree on their environment node id.
if (block->predecessors()->length() >= 2) {
- int id = block->predecessors()->first()->last_environment()->ast_id();
+ BailoutId id =
+ block->predecessors()->first()->last_environment()->ast_id();
for (int k = 0; k < block->predecessors()->length(); k++) {
HBasicBlock* predecessor = block->predecessors()->at(k);
ASSERT(predecessor->end()->IsGoto());
@@ -568,9 +570,9 @@ void HGraph::Verify(bool do_full_verify) const {
HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
- Object* value) {
+ Handle<Object> value) {
if (!pointer->is_set()) {
- HConstant* constant = new(zone()) HConstant(Handle<Object>(value),
+ HConstant* constant = new(zone()) HConstant(value,
Representation::Tagged());
constant->InsertAfter(GetConstantUndefined());
pointer->set(constant);
@@ -579,34 +581,45 @@ HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
}
+HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer,
+ int32_t value) {
+ if (!pointer->is_set()) {
+ HConstant* constant =
+ new(zone()) HConstant(value, Representation::Integer32());
+ constant->InsertAfter(GetConstantUndefined());
+ pointer->set(constant);
+ }
+ return pointer->get();
+}
+
+
HConstant* HGraph::GetConstant1() {
- return GetConstant(&constant_1_, Smi::FromInt(1));
+ return GetConstantInt32(&constant_1_, 1);
}
HConstant* HGraph::GetConstantMinus1() {
- return GetConstant(&constant_minus1_, Smi::FromInt(-1));
+ return GetConstantInt32(&constant_minus1_, -1);
}
HConstant* HGraph::GetConstantTrue() {
- return GetConstant(&constant_true_, isolate()->heap()->true_value());
+ return GetConstant(&constant_true_, isolate()->factory()->true_value());
}
HConstant* HGraph::GetConstantFalse() {
- return GetConstant(&constant_false_, isolate()->heap()->false_value());
+ return GetConstant(&constant_false_, isolate()->factory()->false_value());
}
HConstant* HGraph::GetConstantHole() {
- return GetConstant(&constant_hole_, isolate()->heap()->the_hole_value());
+ return GetConstant(&constant_hole_, isolate()->factory()->the_hole_value());
}
HGraphBuilder::HGraphBuilder(CompilationInfo* info,
- TypeFeedbackOracle* oracle,
- Zone* zone)
+ TypeFeedbackOracle* oracle)
: function_state_(NULL),
initial_function_state_(this, info, oracle, NORMAL_RETURN),
ast_context_(NULL),
@@ -614,8 +627,8 @@ HGraphBuilder::HGraphBuilder(CompilationInfo* info,
graph_(NULL),
current_block_(NULL),
inlined_count_(0),
- globals_(10, zone),
- zone_(zone),
+ globals_(10, info->zone()),
+ zone_(info->zone()),
inline_bailout_(false) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
@@ -625,7 +638,7 @@ HGraphBuilder::HGraphBuilder(CompilationInfo* info,
HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
HBasicBlock* second,
- int join_id) {
+ BailoutId join_id) {
if (first == NULL) {
return second;
} else if (second == NULL) {
@@ -674,64 +687,27 @@ void HBasicBlock::FinishExit(HControlInstruction* instruction) {
}
-HGraph::HGraph(CompilationInfo* info, Zone* zone)
+HGraph::HGraph(CompilationInfo* info)
: isolate_(info->isolate()),
next_block_id_(0),
entry_block_(NULL),
- blocks_(8, zone),
- values_(16, zone),
+ blocks_(8, info->zone()),
+ values_(16, info->zone()),
phi_list_(NULL),
- zone_(zone),
- is_recursive_(false) {
+ uint32_instructions_(NULL),
+ info_(info),
+ zone_(info->zone()),
+ is_recursive_(false),
+ use_optimistic_licm_(false),
+ type_change_checksum_(0) {
start_environment_ =
- new(zone) HEnvironment(NULL, info->scope(), info->closure(), zone);
- start_environment_->set_ast_id(AstNode::kFunctionEntryId);
+ new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ start_environment_->set_ast_id(BailoutId::FunctionEntry());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
}
-Handle<Code> HGraph::Compile(CompilationInfo* info, Zone* zone) {
- int values = GetMaximumValueID();
- if (values > LUnallocated::kMaxVirtualRegisters) {
- if (FLAG_trace_bailout) {
- PrintF("Not enough virtual registers for (values).\n");
- }
- return Handle<Code>::null();
- }
- LAllocator allocator(values, this);
- LChunkBuilder builder(info, this, &allocator);
- LChunk* chunk = builder.Build();
- if (chunk == NULL) return Handle<Code>::null();
-
- if (!allocator.Allocate(chunk)) {
- if (FLAG_trace_bailout) {
- PrintF("Not enough virtual registers (regalloc).\n");
- }
- return Handle<Code>::null();
- }
-
- MacroAssembler assembler(info->isolate(), NULL, 0);
- LCodeGen generator(chunk, &assembler, info, zone);
-
- chunk->MarkEmptyBlocks();
-
- if (generator.GenerateCode()) {
- if (FLAG_trace_codegen) {
- PrintF("Crankshaft Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info);
- Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
- Handle<Code> code =
- CodeGenerator::MakeCodeEpilogue(&assembler, flags, info);
- generator.FinishCode(code);
- CodeGenerator::PrintCode(code, info);
- return code;
- }
- return Handle<Code>::null();
-}
-
-
HBasicBlock* HGraph::CreateBasicBlock() {
HBasicBlock* result = new(zone()) HBasicBlock(this);
blocks_.Add(result, zone());
@@ -752,66 +728,319 @@ void HGraph::Canonicalize() {
}
}
+// Block ordering was implemented with two mutually recursive methods,
+// HGraph::Postorder and HGraph::PostorderLoopBlocks.
+// The recursion could lead to stack overflow, so the algorithm has been
+// implemented iteratively.
+// At a high level the algorithm looks like this:
+//
+// Postorder(block, loop_header) : {
+// if (block has already been visited or is of another loop) return;
+// mark block as visited;
+// if (block is a loop header) {
+// VisitLoopMembers(block, loop_header);
+// VisitSuccessorsOfLoopHeader(block);
+// } else {
+// VisitSuccessors(block)
+// }
+// put block in result list;
+// }
+//
+// VisitLoopMembers(block, outer_loop_header) {
+// foreach (block b in block loop members) {
+// VisitSuccessorsOfLoopMember(b, outer_loop_header);
+// if (b is loop header) VisitLoopMembers(b);
+// }
+// }
+//
+// VisitSuccessorsOfLoopMember(block, outer_loop_header) {
+// foreach (block b in block successors) Postorder(b, outer_loop_header)
+// }
+//
+// VisitSuccessorsOfLoopHeader(block) {
+// foreach (block b in block successors) Postorder(b, block)
+// }
+//
+// VisitSuccessors(block, loop_header) {
+// foreach (block b in block successors) Postorder(b, loop_header)
+// }
+//
+// The ordering is started by calling Postorder(entry, NULL).
+//
+// Each instance of PostorderProcessor represents the "stack frame" of the
+// recursion, and in particular keeps the iteration state of the "Visit..."
+// loop it represents.
+// To recycle memory we keep all the frames in a doubly linked list, but
+// this means that we cannot use constructors to initialize the frames.
+//
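
Stripped of the loop-membership bookkeeping, the rewrite described above is the classic conversion of a recursive DFS into a loop over explicit frames: each frame records exactly what the recursive version kept on the machine stack, namely the current node and the index of the next successor to visit. A minimal sketch on a plain graph:

#include <cstddef>
#include <vector>

struct Node {
  std::vector<Node*> successors;
  bool visited = false;
};

// Iterative postorder DFS: each Frame plays the role of one recursive call.
std::vector<Node*> Postorder(Node* entry) {
  struct Frame { Node* node; std::size_t next; };
  std::vector<Node*> order;
  std::vector<Frame> stack;
  stack.push_back({entry, 0});
  entry->visited = true;
  while (!stack.empty()) {
    Frame& top = stack.back();
    if (top.next == top.node->successors.size()) {
      order.push_back(top.node);  // all successors handled: emit the node
      stack.pop_back();
      continue;
    }
    Node* s = top.node->successors[top.next++];
    if (!s->visited) {
      s->visited = true;
      stack.push_back({s, 0});  // the "recursive call"; top is dead after this
    }
  }
  return order;
}

The PostorderProcessor class below additionally recycles frames through its parent/child links instead of popping them, which is why its fields are reset by the "Setup..." methods rather than by constructors.
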
+class PostorderProcessor : public ZoneObject {
+ public:
+ // Back link (towards the stack bottom).
+ PostorderProcessor* parent() { return father_; }
+ // Forward link (towards the stack top).
+ PostorderProcessor* child() { return child_; }
+ HBasicBlock* block() { return block_; }
+ HLoopInformation* loop() { return loop_; }
+ HBasicBlock* loop_header() { return loop_header_; }
-void HGraph::OrderBlocks() {
- HPhase phase("H_Block ordering");
- BitVector visited(blocks_.length(), zone());
+ static PostorderProcessor* CreateEntryProcessor(Zone* zone,
+ HBasicBlock* block,
+ BitVector* visited) {
+ PostorderProcessor* result = new(zone) PostorderProcessor(NULL);
+ return result->SetupSuccessors(zone, block, NULL, visited);
+ }
- ZoneList<HBasicBlock*> reverse_result(8, zone());
- HBasicBlock* start = blocks_[0];
- Postorder(start, &visited, &reverse_result, NULL);
+ PostorderProcessor* PerformStep(Zone* zone,
+ BitVector* visited,
+ ZoneList<HBasicBlock*>* order) {
+ PostorderProcessor* next =
+ PerformNonBacktrackingStep(zone, visited, order);
+ if (next != NULL) {
+ return next;
+ } else {
+ return Backtrack(zone, visited, order);
+ }
+ }
- blocks_.Rewind(0);
- int index = 0;
- for (int i = reverse_result.length() - 1; i >= 0; --i) {
- HBasicBlock* b = reverse_result[i];
- blocks_.Add(b, zone());
- b->set_block_id(index++);
+ private:
+ explicit PostorderProcessor(PostorderProcessor* father)
+ : father_(father), child_(NULL), successor_iterator(NULL) { }
+
+ // Each enum value names the loop whose state is kept by this instance.
+ enum LoopKind {
+ NONE,
+ SUCCESSORS,
+ SUCCESSORS_OF_LOOP_HEADER,
+ LOOP_MEMBERS,
+ SUCCESSORS_OF_LOOP_MEMBER
+ };
+
+ // Each "Setup..." method is like a constructor for a cycle state.
+ PostorderProcessor* SetupSuccessors(Zone* zone,
+ HBasicBlock* block,
+ HBasicBlock* loop_header,
+ BitVector* visited) {
+ if (block == NULL || visited->Contains(block->block_id()) ||
+ block->parent_loop_header() != loop_header) {
+ kind_ = NONE;
+ block_ = NULL;
+ loop_ = NULL;
+ loop_header_ = NULL;
+ return this;
+ } else {
+ block_ = block;
+ loop_ = NULL;
+ visited->Add(block->block_id());
+
+ if (block->IsLoopHeader()) {
+ kind_ = SUCCESSORS_OF_LOOP_HEADER;
+ loop_header_ = block;
+ InitializeSuccessors();
+ PostorderProcessor* result = Push(zone);
+ return result->SetupLoopMembers(zone, block, block->loop_information(),
+ loop_header);
+ } else {
+ ASSERT(block->IsFinished());
+ kind_ = SUCCESSORS;
+ loop_header_ = loop_header;
+ InitializeSuccessors();
+ return this;
+ }
+ }
}
-}
+ PostorderProcessor* SetupLoopMembers(Zone* zone,
+ HBasicBlock* block,
+ HLoopInformation* loop,
+ HBasicBlock* loop_header) {
+ kind_ = LOOP_MEMBERS;
+ block_ = block;
+ loop_ = loop;
+ loop_header_ = loop_header;
+ InitializeLoopMembers();
+ return this;
+ }
-void HGraph::PostorderLoopBlocks(HLoopInformation* loop,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header) {
- for (int i = 0; i < loop->blocks()->length(); ++i) {
- HBasicBlock* b = loop->blocks()->at(i);
- for (HSuccessorIterator it(b->end()); !it.Done(); it.Advance()) {
- Postorder(it.Current(), visited, order, loop_header);
+ PostorderProcessor* SetupSuccessorsOfLoopMember(
+ HBasicBlock* block,
+ HLoopInformation* loop,
+ HBasicBlock* loop_header) {
+ kind_ = SUCCESSORS_OF_LOOP_MEMBER;
+ block_ = block;
+ loop_ = loop;
+ loop_header_ = loop_header;
+ InitializeSuccessors();
+ return this;
+ }
+
+ // This method "allocates" a new stack frame.
+ PostorderProcessor* Push(Zone* zone) {
+ if (child_ == NULL) {
+ child_ = new(zone) PostorderProcessor(this);
+ }
+ return child_;
+ }
+
+ void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) {
+ ASSERT(block_->end()->FirstSuccessor() == NULL ||
+ order->Contains(block_->end()->FirstSuccessor()) ||
+ block_->end()->FirstSuccessor()->IsLoopHeader());
+ ASSERT(block_->end()->SecondSuccessor() == NULL ||
+ order->Contains(block_->end()->SecondSuccessor()) ||
+ block_->end()->SecondSuccessor()->IsLoopHeader());
+ order->Add(block_, zone);
+ }
+
+ // This method is the basic step used to walk up the stack.
+ PostorderProcessor* Pop(Zone* zone,
+ BitVector* visited,
+ ZoneList<HBasicBlock*>* order) {
+ switch (kind_) {
+ case SUCCESSORS:
+ case SUCCESSORS_OF_LOOP_HEADER:
+ ClosePostorder(order, zone);
+ return father_;
+ case LOOP_MEMBERS:
+ return father_;
+ case SUCCESSORS_OF_LOOP_MEMBER:
+ if (block()->IsLoopHeader() && block() != loop_->loop_header()) {
+ // In this case we need to run the LOOP_MEMBERS loop, so we
+ // initialize it and return this instead of the parent.
+ return SetupLoopMembers(zone, block(),
+ block()->loop_information(), loop_header_);
+ } else {
+ return father_;
+ }
+ case NONE:
+ return father_;
}
- if (b->IsLoopHeader() && b != loop->loop_header()) {
- PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
+ UNREACHABLE();
+ return NULL;
+ }
+
+ // Walks up the stack.
+ PostorderProcessor* Backtrack(Zone* zone,
+ BitVector* visited,
+ ZoneList<HBasicBlock*>* order) {
+ PostorderProcessor* parent = Pop(zone, visited, order);
+ while (parent != NULL) {
+ PostorderProcessor* next =
+ parent->PerformNonBacktrackingStep(zone, visited, order);
+ if (next != NULL) {
+ return next;
+ } else {
+ parent = parent->Pop(zone, visited, order);
+ }
}
+ return NULL;
}
-}
+ PostorderProcessor* PerformNonBacktrackingStep(
+ Zone* zone,
+ BitVector* visited,
+ ZoneList<HBasicBlock*>* order) {
+ HBasicBlock* next_block;
+ switch (kind_) {
+ case SUCCESSORS:
+ next_block = AdvanceSuccessors();
+ if (next_block != NULL) {
+ PostorderProcessor* result = Push(zone);
+ return result->SetupSuccessors(zone, next_block,
+ loop_header_, visited);
+ }
+ break;
+ case SUCCESSORS_OF_LOOP_HEADER:
+ next_block = AdvanceSuccessors();
+ if (next_block != NULL) {
+ PostorderProcessor* result = Push(zone);
+ return result->SetupSuccessors(zone, next_block,
+ block(), visited);
+ }
+ break;
+ case LOOP_MEMBERS:
+ next_block = AdvanceLoopMembers();
+ if (next_block != NULL) {
+ PostorderProcessor* result = Push(zone);
+ return result->SetupSuccessorsOfLoopMember(next_block,
+ loop_, loop_header_);
+ }
+ break;
+ case SUCCESSORS_OF_LOOP_MEMBER:
+ next_block = AdvanceSuccessors();
+ if (next_block != NULL) {
+ PostorderProcessor* result = Push(zone);
+ return result->SetupSuccessors(zone, next_block,
+ loop_header_, visited);
+ }
+ break;
+ case NONE:
+ return NULL;
+ }
+ return NULL;
+ }
+
+ // The following two methods implement a "foreach b in successors" cycle.
+ void InitializeSuccessors() {
+ loop_index = 0;
+ loop_length = 0;
+ successor_iterator = HSuccessorIterator(block_->end());
+ }
-void HGraph::Postorder(HBasicBlock* block,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header) {
- if (block == NULL || visited->Contains(block->block_id())) return;
- if (block->parent_loop_header() != loop_header) return;
- visited->Add(block->block_id());
- if (block->IsLoopHeader()) {
- PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- Postorder(it.Current(), visited, order, block);
+ HBasicBlock* AdvanceSuccessors() {
+ if (!successor_iterator.Done()) {
+ HBasicBlock* result = successor_iterator.Current();
+ successor_iterator.Advance();
+ return result;
}
- } else {
- ASSERT(block->IsFinished());
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- Postorder(it.Current(), visited, order, loop_header);
+ return NULL;
+ }
+
+ // The following two methods implement a "foreach b in loop members" cycle.
+ void InitializeLoopMembers() {
+ loop_index = 0;
+ loop_length = loop_->blocks()->length();
+ }
+
+ HBasicBlock* AdvanceLoopMembers() {
+ if (loop_index < loop_length) {
+ HBasicBlock* result = loop_->blocks()->at(loop_index);
+ loop_index++;
+ return result;
+ } else {
+ return NULL;
}
}
- ASSERT(block->end()->FirstSuccessor() == NULL ||
- order->Contains(block->end()->FirstSuccessor()) ||
- block->end()->FirstSuccessor()->IsLoopHeader());
- ASSERT(block->end()->SecondSuccessor() == NULL ||
- order->Contains(block->end()->SecondSuccessor()) ||
- block->end()->SecondSuccessor()->IsLoopHeader());
- order->Add(block, zone());
+
+ LoopKind kind_;
+ PostorderProcessor* father_;
+ PostorderProcessor* child_;
+ HLoopInformation* loop_;
+ HBasicBlock* block_;
+ HBasicBlock* loop_header_;
+ int loop_index;
+ int loop_length;
+ HSuccessorIterator successor_iterator;
+};
+
+
+void HGraph::OrderBlocks() {
+ HPhase phase("H_Block ordering");
+ BitVector visited(blocks_.length(), zone());
+
+ ZoneList<HBasicBlock*> reverse_result(8, zone());
+ HBasicBlock* start = blocks_[0];
+ PostorderProcessor* postorder =
+ PostorderProcessor::CreateEntryProcessor(zone(), start, &visited);
+ while (postorder != NULL) {
+ postorder = postorder->PerformStep(zone(), &visited, &reverse_result);
+ }
+ blocks_.Rewind(0);
+ int index = 0;
+ for (int i = reverse_result.length() - 1; i >= 0; --i) {
+ HBasicBlock* b = reverse_result[i];
+ blocks_.Add(b, zone());
+ b->set_block_id(index++);
+ }
}
@@ -1487,15 +1716,15 @@ class HGlobalValueNumberer BASE_EMBEDDED {
block_side_effects_(graph->blocks()->length(), graph->zone()),
loop_side_effects_(graph->blocks()->length(), graph->zone()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
- ASSERT(info->isolate()->heap()->allow_allocation(false));
+#ifdef DEBUG
+ ASSERT(info->isolate()->optimizing_compiler_thread()->IsOptimizerThread() ||
+ !info->isolate()->heap()->IsAllocationAllowed());
+#endif
block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
graph_->zone());
loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
graph_->zone());
}
- ~HGlobalValueNumberer() {
- ASSERT(!info_->isolate()->heap()->allow_allocation(true));
- }
// Returns true if values with side effects are removed.
bool Analyze();
@@ -1673,6 +1902,8 @@ GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
void HGlobalValueNumberer::LoopInvariantCodeMotion() {
+ TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
+ graph_->use_optimistic_licm() ? "yes" : "no");
for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph_->blocks()->at(i);
if (block->IsLoopHeader()) {
@@ -1716,6 +1947,9 @@ void HGlobalValueNumberer::ProcessLoopBlock(
*GetGVNFlagsString(instr->gvn_flags()),
*GetGVNFlagsString(loop_kills));
bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
+ if (can_hoist && !graph()->use_optimistic_licm()) {
+ can_hoist = block->IsLoopSuccessorDominator();
+ }
if (instr->IsTransitionElementsKind()) {
// It's possible to hoist transitions out of a loop as long as the
// hoisting wouldn't move the transition past an instruction that has a
@@ -1803,7 +2037,7 @@ void HGlobalValueNumberer::ProcessLoopBlock(
bool HGlobalValueNumberer::AllowCodeMotion() {
- return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
+ return info()->shared_info()->opt_count() + 1 < FLAG_max_opt_count;
}
@@ -2331,8 +2565,8 @@ void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
break;
}
- // For multiplication and division, we must propagate to the left and
- // the right side.
+ // For multiplication, division, and Math.min/max(), we must propagate
+ // to the left and the right side.
if (current->IsMul()) {
HMul* mul = HMul::cast(current);
mul->EnsureAndPropagateNotMinusZero(visited);
@@ -2343,6 +2577,11 @@ void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
div->EnsureAndPropagateNotMinusZero(visited);
PropagateMinusZeroChecks(div->left(), visited);
PropagateMinusZeroChecks(div->right(), visited);
+ } else if (current->IsMathMinMax()) {
+ HMathMinMax* minmax = HMathMinMax::cast(current);
+ visited->Add(minmax->id());
+ PropagateMinusZeroChecks(minmax->left(), visited);
+ PropagateMinusZeroChecks(minmax->right(), visited);
}
current = current->EnsureAndPropagateNotMinusZero(visited);
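
Propagating minus-zero checks through HMathMinMax matters because -0 compares equal to 0 yet is observable, and JavaScript defines Math.min(0, -0) as -0; a bailout required on the result of min/max therefore has to reach both operands. A short illustration of why an equality test cannot catch it:

#include <cmath>
#include <cstdio>

int main() {
  double pz = 0.0, mz = -0.0;
  std::printf("%d\n", pz == mz);                 // 1: == cannot tell them apart
  std::printf("%d %d\n", (int)std::signbit(pz),  // 0 1: but the sign
              (int)std::signbit(mz));            // is still there...
  std::printf("%g\n", 1.0 / mz);                 // ...and observable: -inf
}
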
@@ -2492,6 +2731,228 @@ void HGraph::MarkDeoptimizeOnUndefined() {
}
+// Discover instructions that can be marked with the kUint32 flag, allowing
+// them to produce full-range uint32 values.
+class Uint32Analysis BASE_EMBEDDED {
+ public:
+ explicit Uint32Analysis(Zone* zone) : zone_(zone), phis_(4, zone) { }
+
+ void Analyze(HInstruction* current);
+
+ void UnmarkUnsafePhis();
+
+ private:
+ bool IsSafeUint32Use(HValue* val, HValue* use);
+ bool Uint32UsesAreSafe(HValue* uint32val);
+ bool CheckPhiOperands(HPhi* phi);
+ void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist);
+
+ Zone* zone_;
+ ZoneList<HPhi*> phis_;
+};
+
+
+bool Uint32Analysis::IsSafeUint32Use(HValue* val, HValue* use) {
+ // Operations that operate on bits are safe.
+ if (use->IsBitwise() ||
+ use->IsShl() ||
+ use->IsSar() ||
+ use->IsShr() ||
+ use->IsBitNot()) {
+ return true;
+ } else if (use->IsChange() || use->IsSimulate()) {
+ // Conversions and deoptimization have special support for uint32.
+ return true;
+ } else if (use->IsStoreKeyedSpecializedArrayElement()) {
+ // Storing a value into an external integer array is a bit-level operation.
+ HStoreKeyedSpecializedArrayElement* store =
+ HStoreKeyedSpecializedArrayElement::cast(use);
+
+ if (store->value() == val) {
+ // Clamping or a conversion to double should have been inserted.
+ ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+// Iterate over all uses and verify that they are uint32-safe: they either
+// do not distinguish between int32 and uint32 due to their bitwise nature,
+// or they have special support for uint32 values.
+// Encountered phis are optimistically treated as safe uint32 uses,
+// marked with the kUint32 flag and collected in the phis_ list. A separate
+// pass is performed later by UnmarkUnsafePhis to clear kUint32 from phis
+// that are not actually uint32-safe (this requires fixed-point iteration).
+bool Uint32Analysis::Uint32UsesAreSafe(HValue* uint32val) {
+ bool collect_phi_uses = false;
+ for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+
+ if (use->IsPhi()) {
+ if (!use->CheckFlag(HInstruction::kUint32)) {
+        // There is a phi use of this value from a phi that is not yet
+        // collected in the phis_ array; a separate pass is required.
+ collect_phi_uses = true;
+ }
+
+ // Optimistically treat phis as uint32 safe.
+ continue;
+ }
+
+ if (!IsSafeUint32Use(uint32val, use)) {
+ return false;
+ }
+ }
+
+ if (collect_phi_uses) {
+ for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+
+      // This phi use was not yet collected in the phis_ array; mark it
+      // optimistically and collect it for the separate verification pass.
+ if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
+ use->SetFlag(HInstruction::kUint32);
+ phis_.Add(HPhi::cast(use), zone_);
+ }
+ }
+ }
+
+ return true;
+}
+
+
+// Analyze an instruction and mark it with kUint32 if all its uses are
+// uint32 safe.
+void Uint32Analysis::Analyze(HInstruction* current) {
+ if (Uint32UsesAreSafe(current)) current->SetFlag(HInstruction::kUint32);
+}
+
+
+// Check if all operands to the given phi are marked with kUint32 flag.
+bool Uint32Analysis::CheckPhiOperands(HPhi* phi) {
+ if (!phi->CheckFlag(HInstruction::kUint32)) {
+ // This phi is not uint32 safe. No need to check operands.
+ return false;
+ }
+
+ for (int j = 0; j < phi->OperandCount(); j++) {
+ HValue* operand = phi->OperandAt(j);
+ if (!operand->CheckFlag(HInstruction::kUint32)) {
+      // Lazily mark constants that fit into the uint32 range with kUint32.
+ if (operand->IsConstant() &&
+ HConstant::cast(operand)->IsUint32()) {
+ operand->SetFlag(HInstruction::kUint32);
+ continue;
+ }
+
+ // This phi is not safe, some operands are not uint32 values.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+// Remove the kUint32 flag from the phi itself and its operands. If any
+// operand was a phi marked with kUint32, place it into the worklist for
+// transitive clearing of the kUint32 flag.
+void Uint32Analysis::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
+ phi->ClearFlag(HInstruction::kUint32);
+ for (int j = 0; j < phi->OperandCount(); j++) {
+ HValue* operand = phi->OperandAt(j);
+ if (operand->CheckFlag(HInstruction::kUint32)) {
+ operand->ClearFlag(HInstruction::kUint32);
+ if (operand->IsPhi()) {
+ worklist->Add(HPhi::cast(operand), zone_);
+ }
+ }
+ }
+}
+
+
+void Uint32Analysis::UnmarkUnsafePhis() {
+ // No phis were collected. Nothing to do.
+ if (phis_.length() == 0) return;
+
+ // Worklist used to transitively clear kUint32 from phis that
+ // are used as arguments to other phis.
+ ZoneList<HPhi*> worklist(phis_.length(), zone_);
+
+  // A phi can be used as a uint32 value if and only if all its operands are
+  // uint32 values and all its uses are uint32 safe.
+
+  // Iterate over the collected phis and unmark those that are unsafe. When
+  // unmarking a phi, unmark its operands as well, adding any operand that is
+  // itself a phi to the worklist. Phis that are still marked as safe are
+  // shifted down so that all safe phis form a prefix of the phis_ array.
+ int phi_count = 0;
+ for (int i = 0; i < phis_.length(); i++) {
+ HPhi* phi = phis_[i];
+
+ if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
+ phis_[phi_count++] = phi;
+ } else {
+ UnmarkPhi(phi, &worklist);
+ }
+ }
+
+  // Now the phis_ array contains only those phis that have safe
+  // non-phi uses. Start transitively clearing the kUint32 flag from
+  // the operands of discovered unsafe phis until only safe phis remain.
+ while (!worklist.is_empty()) {
+ while (!worklist.is_empty()) {
+ HPhi* phi = worklist.RemoveLast();
+ UnmarkPhi(phi, &worklist);
+ }
+
+    // Check if any operands of the remaining safe phis were unmarked,
+    // turning a safe phi unsafe. The same value can flow into several phis.
+ int new_phi_count = 0;
+ for (int i = 0; i < phi_count; i++) {
+ HPhi* phi = phis_[i];
+
+ if (CheckPhiOperands(phi)) {
+ phis_[new_phi_count++] = phi;
+ } else {
+ UnmarkPhi(phi, &worklist);
+ }
+ }
+ phi_count = new_phi_count;
+ }
+}
+
+
+void HGraph::ComputeSafeUint32Operations() {
+ if (!FLAG_opt_safe_uint32_operations || uint32_instructions_ == NULL) {
+ return;
+ }
+
+ Uint32Analysis analysis(zone());
+ for (int i = 0; i < uint32_instructions_->length(); ++i) {
+ HInstruction* current = uint32_instructions_->at(i);
+ if (current->IsLinked() && current->representation().IsInteger32()) {
+ analysis.Analyze(current);
+ }
+ }
+
+  // Some phis might have been optimistically marked with the kUint32 flag.
+  // Remove this flag from those phis that are unsafe, and propagate the
+  // information transitively, potentially clearing the kUint32 flag from
+  // non-phi operations that are used as operands of unsafe phis.
+ analysis.UnmarkUnsafePhis();
+}
+
+
void HGraph::ComputeMinusZeroChecks() {
BitVector visited(GetMaximumValueID(), zone());
for (int i = 0; i < blocks_.length(); ++i) {
@@ -2521,12 +2982,12 @@ void HGraph::ComputeMinusZeroChecks() {
FunctionState::FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
- ReturnHandlingFlag return_handling)
+ InliningKind inlining_kind)
: owner_(owner),
compilation_info_(info),
oracle_(oracle),
call_context_(NULL),
- return_handling_(return_handling),
+ inlining_kind_(inlining_kind),
function_return_(NULL),
test_context_(NULL),
entry_(NULL),
@@ -2539,10 +3000,13 @@ FunctionState::FunctionState(HGraphBuilder* owner,
HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
if_true->MarkAsInlineReturnTarget();
if_false->MarkAsInlineReturnTarget();
- Expression* cond = TestContext::cast(owner->ast_context())->condition();
+ TestContext* outer_test_context = TestContext::cast(owner->ast_context());
+ Expression* cond = outer_test_context->condition();
+ TypeFeedbackOracle* outer_oracle = outer_test_context->oracle();
// The AstContext constructor pushed on the context stack. This newed
// instance is the reason that AstContext can't be BASE_EMBEDDED.
- test_context_ = new TestContext(owner, cond, if_true, if_false);
+ test_context_ =
+ new TestContext(owner, cond, outer_oracle, if_true, if_false);
} else {
function_return_ = owner->graph()->CreateBasicBlock();
function_return()->MarkAsInlineReturnTarget();
@@ -2618,14 +3082,15 @@ void TestContext::ReturnValue(HValue* value) {
}
-void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
}
-void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+void EffectContext::ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
@@ -2637,7 +3102,7 @@ void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
}
-void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
return owner()->Bailout("bad value context for arguments object value");
@@ -2648,7 +3113,7 @@ void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
}
-void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
return owner()->Bailout("bad value context for arguments object value");
@@ -2668,7 +3133,7 @@ void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
}
-void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
HGraphBuilder* builder = owner();
builder->AddInstruction(instr);
@@ -2683,7 +3148,7 @@ void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
}
-void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
@@ -2707,8 +3172,8 @@ void TestContext::BuildBranch(HValue* value) {
}
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- unsigned test_id = condition()->test_id();
- ToBooleanStub::Types expected(builder->oracle()->ToBooleanTypes(test_id));
+ TypeFeedbackId test_id = condition()->test_id();
+ ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
builder->current_block()->Finish(test);
@@ -2734,11 +3199,7 @@ void TestContext::BuildBranch(HValue* value) {
void HGraphBuilder::Bailout(const char* reason) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *name, reason);
- }
+ info()->set_bailout_reason(reason);
SetStackOverflow();
}
@@ -2766,17 +3227,14 @@ void HGraphBuilder::VisitForTypeOf(Expression* expr) {
void HGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
- TestContext for_test(this, expr, true_block, false_block);
+ TestContext for_test(this, expr, oracle(), true_block, false_block);
Visit(expr);
}
-HValue* HGraphBuilder::VisitArgument(Expression* expr) {
- VisitForValue(expr);
- if (HasStackOverflow() || current_block() == NULL) return NULL;
- HValue* value = Pop();
- Push(AddInstruction(new(zone()) HPushArgument(value)));
- return value;
+void HGraphBuilder::VisitArgument(Expression* expr) {
+ CHECK_ALIVE(VisitForValue(expr));
+ Push(AddInstruction(new(zone()) HPushArgument(Pop())));
}
@@ -2795,7 +3253,7 @@ void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new(zone()) HGraph(info(), zone());
+ graph_ = new(zone()) HGraph(info());
if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
{
@@ -2831,7 +3289,7 @@ HGraph* HGraphBuilder::CreateGraph() {
HEnvironment* initial_env = environment()->CopyWithoutHistory();
HBasicBlock* body_entry = CreateBasicBlock(initial_env);
current_block()->Goto(body_entry);
- body_entry->SetJoinId(AstNode::kFunctionEntryId);
+ body_entry->SetJoinId(BailoutId::FunctionEntry());
set_current_block(body_entry);
// Handle implicit declaration of the function name in named function
@@ -2840,7 +3298,7 @@ HGraph* HGraphBuilder::CreateGraph() {
VisitVariableDeclaration(scope->function());
}
VisitDeclarations(scope->declarations());
- AddSimulate(AstNode::kDeclarationsId);
+ AddSimulate(BailoutId::Declarations());
HValue* context = environment()->LookupContext();
AddInstruction(
@@ -2854,50 +3312,78 @@ HGraph* HGraphBuilder::CreateGraph() {
current_block()->FinishExit(instr);
set_current_block(NULL);
}
+
+ // If the checksum of the number of type info changes is the same as the
+ // last time this function was compiled, then this recompile is likely not
+ // due to missing/inadequate type feedback, but rather too aggressive
+ // optimization. Disable optimistic LICM in that case.
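+  // In other words: if this function deoptimized and is being recompiled
+  // even though its type feedback did not change, the previously generated
+  // optimized code was too aggressive, so be more conservative this time.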
+ Handle<Code> unoptimized_code(info()->shared_info()->code());
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
+ Handle<TypeFeedbackInfo> type_info(
+ Handle<TypeFeedbackInfo>::cast(maybe_type_info));
+ int checksum = type_info->own_type_change_checksum();
+ int composite_checksum = graph()->update_type_change_checksum(checksum);
+ graph()->set_use_optimistic_licm(
+ !type_info->matches_inlined_type_change_checksum(composite_checksum));
+ type_info->set_inlined_type_change_checksum(composite_checksum);
}
- graph()->OrderBlocks();
- graph()->AssignDominators();
+ return graph();
+}
+
+bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
+ *bailout_reason = SmartArrayPointer<char>();
+ OrderBlocks();
+ AssignDominators();
#ifdef DEBUG
// Do a full verify after building the graph and computing dominators.
- graph()->Verify(true);
+ Verify(true);
#endif
- graph()->PropagateDeoptimizingMark();
- if (!graph()->CheckConstPhiUses()) {
- Bailout("Unsupported phi use of const variable");
- return NULL;
+ PropagateDeoptimizingMark();
+ if (!CheckConstPhiUses()) {
+ *bailout_reason = SmartArrayPointer<char>(StrDup(
+ "Unsupported phi use of const variable"));
+ return false;
}
- graph()->EliminateRedundantPhis();
- if (!graph()->CheckArgumentsPhiUses()) {
- Bailout("Unsupported phi use of arguments");
- return NULL;
+ EliminateRedundantPhis();
+ if (!CheckArgumentsPhiUses()) {
+ *bailout_reason = SmartArrayPointer<char>(StrDup(
+ "Unsupported phi use of arguments"));
+ return false;
}
- if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
- graph()->CollectPhis();
+ if (FLAG_eliminate_dead_phis) EliminateUnreachablePhis();
+ CollectPhis();
- if (graph()->has_osr_loop_entry()) {
- const ZoneList<HPhi*>* phis = graph()->osr_loop_entry()->phis();
+ if (has_osr_loop_entry()) {
+ const ZoneList<HPhi*>* phis = osr_loop_entry()->phis();
for (int j = 0; j < phis->length(); j++) {
HPhi* phi = phis->at(j);
- graph()->osr_values()->at(phi->merged_index())->set_incoming_value(phi);
+ osr_values()->at(phi->merged_index())->set_incoming_value(phi);
}
}
- HInferRepresentation rep(graph());
+ HInferRepresentation rep(this);
rep.Analyze();
- graph()->MarkDeoptimizeOnUndefined();
- graph()->InsertRepresentationChanges();
+ MarkDeoptimizeOnUndefined();
+ InsertRepresentationChanges();
+
+ InitializeInferredTypes();
- graph()->InitializeInferredTypes();
- graph()->Canonicalize();
+  // Must be performed before canonicalization to ensure that Canonicalize
+  // will not remove semantically meaningful ToInt32 operations, e.g. a
+  // BIT_OR with zero.
+ ComputeSafeUint32Operations();
+
+ Canonicalize();
// Perform common subexpression elimination and loop-invariant code motion.
if (FLAG_use_gvn) {
- HPhase phase("H_Global value numbering", graph());
- HGlobalValueNumberer gvn(graph(), info());
+ HPhase phase("H_Global value numbering", this);
+ HGlobalValueNumberer gvn(this, info());
bool removed_side_effects = gvn.Analyze();
// Trigger a second analysis pass to further eliminate duplicate values that
// could only be discovered by removing side-effect-generating instructions
@@ -2909,19 +3395,19 @@ HGraph* HGraphBuilder::CreateGraph() {
}
if (FLAG_use_range) {
- HRangeAnalysis rangeAnalysis(graph());
+ HRangeAnalysis rangeAnalysis(this);
rangeAnalysis.Analyze();
}
- graph()->ComputeMinusZeroChecks();
+ ComputeMinusZeroChecks();
// Eliminate redundant stack checks on backwards branches.
- HStackCheckEliminator sce(graph());
+ HStackCheckEliminator sce(this);
sce.Process();
- graph()->EliminateRedundantBoundsChecks();
- graph()->DehoistSimpleArrayIndexComputations();
+ EliminateRedundantBoundsChecks();
+ DehoistSimpleArrayIndexComputations();
- return graph();
+ return true;
}
@@ -3012,7 +3498,8 @@ class BoundsCheckBbData: public ZoneObject {
int32_t LowerOffset() const { return lower_offset_; }
int32_t UpperOffset() const { return upper_offset_; }
HBasicBlock* BasicBlock() const { return basic_block_; }
- HBoundsCheck* Check() const { return check_; }
+ HBoundsCheck* LowerCheck() const { return lower_check_; }
+ HBoundsCheck* UpperCheck() const { return upper_check_; }
BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
@@ -3020,76 +3507,85 @@ class BoundsCheckBbData: public ZoneObject {
return offset >= LowerOffset() && offset <= UpperOffset();
}
- // This method removes new_check and modifies the current check so that it
- // also "covers" what new_check covered.
- // The obvious precondition is that new_check follows Check() in the
- // same basic block, and that new_offset is not covered (otherwise we
- // could simply remove new_check).
- // As a consequence LowerOffset() or UpperOffset() change (the covered
+ bool HasSingleCheck() { return lower_check_ == upper_check_; }
+
+  // The goal of this method is to modify either upper_offset_ or
+  // lower_offset_ so that new_offset is also covered (the covered
+  // range grows).
//
- // In the general case the check covering the current range should be like
- // these two checks:
- // 0 <= Key()->IndexBase() + LowerOffset()
- // Key()->IndexBase() + UpperOffset() < Key()->Length()
- //
- // We can transform the second check like this:
- // Key()->IndexBase() + LowerOffset() <
- // Key()->Length() + (LowerOffset() - UpperOffset())
- // so we can handle both checks with a single unsigned comparison.
+ // The precondition is that new_check follows UpperCheck() and
+ // LowerCheck() in the same basic block, and that new_offset is not
+ // covered (otherwise we could simply remove new_check).
//
- // The bulk of this method changes Check()->index() and Check()->length()
- // replacing them with new HAdd instructions to perform the transformation
- // described above.
+ // If HasSingleCheck() is true then new_check is added as "second check"
+ // (either upper or lower; note that HasSingleCheck() becomes false).
+ // Otherwise one of the current checks is modified so that it also covers
+ // new_offset, and new_check is removed.
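+  //
+  // Worked example: with lower_offset_ == 0 and upper_offset_ == 3, a new
+  // check at offset 7 grows the range to [0, 7]. If both bounds were covered
+  // by a single check, new_check itself becomes the upper check; otherwise
+  // the existing upper check's index is rewritten via a new HAdd.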
void CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
ASSERT(new_check->index()->representation().IsInteger32());
+ bool keep_new_check = false;
if (new_offset > upper_offset_) {
upper_offset_ = new_offset;
+ if (HasSingleCheck()) {
+ keep_new_check = true;
+ upper_check_ = new_check;
+ } else {
+ BuildOffsetAdd(upper_check_,
+ &added_upper_index_,
+ &added_upper_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ upper_check_->SetOperandAt(0, added_upper_index_);
+ }
} else if (new_offset < lower_offset_) {
lower_offset_ = new_offset;
+ if (HasSingleCheck()) {
+ keep_new_check = true;
+ lower_check_ = new_check;
+ } else {
+ BuildOffsetAdd(lower_check_,
+ &added_lower_index_,
+ &added_lower_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ lower_check_->SetOperandAt(0, added_lower_index_);
+ }
} else {
ASSERT(false);
}
- BuildOffsetAdd(&added_index_,
- &added_index_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- lower_offset_);
- Check()->SetOperandAt(0, added_index_);
- BuildOffsetAdd(&added_length_,
- &added_length_offset_,
- Key()->Length(),
- new_check->length()->representation(),
- lower_offset_ - upper_offset_);
- Check()->SetOperandAt(1, added_length_);
-
- new_check->DeleteAndReplaceWith(NULL);
+ if (!keep_new_check) {
+ new_check->DeleteAndReplaceWith(NULL);
+ }
}
void RemoveZeroOperations() {
- RemoveZeroAdd(&added_index_, &added_index_offset_);
- RemoveZeroAdd(&added_length_, &added_length_offset_);
+ RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
+ RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
}
BoundsCheckBbData(BoundsCheckKey* key,
int32_t lower_offset,
int32_t upper_offset,
HBasicBlock* bb,
- HBoundsCheck* check,
+ HBoundsCheck* lower_check,
+ HBoundsCheck* upper_check,
BoundsCheckBbData* next_in_bb,
BoundsCheckBbData* father_in_dt)
: key_(key),
lower_offset_(lower_offset),
upper_offset_(upper_offset),
basic_block_(bb),
- check_(check),
- added_index_offset_(NULL),
- added_index_(NULL),
- added_length_offset_(NULL),
- added_length_(NULL),
+ lower_check_(lower_check),
+ upper_check_(upper_check),
+ added_lower_index_(NULL),
+ added_lower_offset_(NULL),
+ added_upper_index_(NULL),
+ added_upper_offset_(NULL),
next_in_bb_(next_in_bb),
father_in_dt_(father_in_dt) { }
@@ -3098,29 +3594,30 @@ class BoundsCheckBbData: public ZoneObject {
int32_t lower_offset_;
int32_t upper_offset_;
HBasicBlock* basic_block_;
- HBoundsCheck* check_;
- HConstant* added_index_offset_;
- HAdd* added_index_;
- HConstant* added_length_offset_;
- HAdd* added_length_;
+ HBoundsCheck* lower_check_;
+ HBoundsCheck* upper_check_;
+ HAdd* added_lower_index_;
+ HConstant* added_lower_offset_;
+ HAdd* added_upper_index_;
+ HConstant* added_upper_offset_;
BoundsCheckBbData* next_in_bb_;
BoundsCheckBbData* father_in_dt_;
- void BuildOffsetAdd(HAdd** add,
+ void BuildOffsetAdd(HBoundsCheck* check,
+ HAdd** add,
HConstant** constant,
HValue* original_value,
Representation representation,
int32_t new_offset) {
HConstant* new_constant = new(BasicBlock()->zone())
- HConstant(Handle<Object>(Smi::FromInt(new_offset)),
- Representation::Integer32());
+ HConstant(new_offset, Representation::Integer32());
if (*add == NULL) {
- new_constant->InsertBefore(Check());
+ new_constant->InsertBefore(check);
*add = new(BasicBlock()->zone()) HAdd(NULL,
original_value,
new_constant);
(*add)->AssumeRepresentation(representation);
- (*add)->InsertBefore(Check());
+ (*add)->InsertBefore(check);
} else {
new_constant->InsertBefore(*add);
(*constant)->DeleteAndReplaceWith(new_constant);
@@ -3193,6 +3690,7 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
offset,
bb,
check,
+ check,
bb_data_list,
NULL);
*data_p = bb_data_list;
@@ -3211,7 +3709,8 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
new_lower_offset,
new_upper_offset,
bb,
- check,
+ data->LowerCheck(),
+ data->UpperCheck(),
bb_data_list,
data);
table->Insert(key, bb_data_list, zone());
@@ -3237,7 +3736,6 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
void HGraph::EliminateRedundantBoundsChecks() {
HPhase phase("H_Eliminate bounds checks", this);
- AssertNoAllocation no_gc;
BoundsCheckTable checks_table(zone());
EliminateRedundantBoundsChecks(entry_block(), &checks_table);
}
@@ -3337,7 +3835,7 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
}
-void HGraphBuilder::AddSimulate(int ast_id) {
+void HGraphBuilder::AddSimulate(BailoutId ast_id) {
ASSERT(current_block() != NULL);
current_block()->AddSimulate(ast_id);
}
@@ -3581,28 +4079,29 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ FunctionState* state = function_state();
AstContext* context = call_context();
if (context == NULL) {
// Not an inlined return, so an actual one.
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* result = environment()->Pop();
current_block()->FinishExit(new(zone()) HReturn(result));
- } else if (function_state()->is_construct()) {
- // Return from an inlined construct call. In a test context the return
- // value will always evaluate to true, in a value context the return value
- // needs to be a JSObject.
+ } else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
+    // Return from an inlined construct call. In a test context the return
+    // value will always evaluate to true; in a value context the return
+    // value needs to be a JSObject.
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(test->if_true(), function_state());
+ current_block()->Goto(test->if_true(), state);
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), function_state());
+ current_block()->Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = Pop();
- HValue* receiver = environment()->Lookup(0);
+ HValue* receiver = environment()->arguments_environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
new(zone()) HHasInstanceTypeAndBranch(return_value,
FIRST_SPEC_OBJECT_TYPE,
@@ -3612,31 +4111,36 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
typecheck->SetSuccessorAt(0, if_spec_object);
typecheck->SetSuccessorAt(1, not_spec_object);
current_block()->Finish(typecheck);
- if_spec_object->AddLeaveInlined(return_value,
- function_return(),
- function_state());
- not_spec_object->AddLeaveInlined(receiver,
- function_return(),
- function_state());
+ if_spec_object->AddLeaveInlined(return_value, state);
+ not_spec_object->AddLeaveInlined(receiver, state);
+ }
+ } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
+    // Return from an inlined setter call. The returned value is never used;
+    // the value of an assignment is always the value of its RHS.
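+    // Illustrative example: in `r = (o.x = v)`, r is always v, regardless of
+    // what the inlined setter for x returns.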
+ CHECK_ALIVE(VisitForEffect(stmt->expression()));
+ if (context->IsTest()) {
+ HValue* rhs = environment()->arguments_environment()->Lookup(1);
+ context->ReturnValue(rhs);
+ } else if (context->IsEffect()) {
+ current_block()->Goto(function_return(), state);
+ } else {
+ ASSERT(context->IsValue());
+ HValue* rhs = environment()->arguments_environment()->Lookup(1);
+ current_block()->AddLeaveInlined(rhs, state);
}
} else {
- // Return from an inlined function, visit the subexpression in the
+ // Return from a normal inlined function. Visit the subexpression in the
// expression context of the call.
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
- VisitForControl(stmt->expression(),
- test->if_true(),
- test->if_false());
+ VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), function_state());
+ current_block()->Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
- HValue* return_value = Pop();
- current_block()->AddLeaveInlined(return_value,
- function_return(),
- function_state());
+ current_block()->AddLeaveInlined(Pop(), state);
}
}
set_current_block(NULL);
@@ -3711,7 +4215,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
// 2. Build all the tests, with dangling true branches
- int default_id = AstNode::kNoNumber;
+ BailoutId default_id = BailoutId::None();
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) {
@@ -3764,9 +4268,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HBasicBlock* last_block = current_block();
if (not_string_block != NULL) {
- int join_id = (default_id != AstNode::kNoNumber)
- ? default_id
- : stmt->ExitId();
+ BailoutId join_id = !default_id.IsNone() ? default_id : stmt->ExitId();
last_block = CreateJoin(last_block, not_string_block, join_id);
}
@@ -3856,7 +4358,7 @@ bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
non_osr_entry->Goto(loop_predecessor);
set_current_block(osr_entry);
- int osr_entry_id = statement->OsrEntryId();
+ BailoutId osr_entry_id = statement->OsrEntryId();
int first_expression_index = environment()->first_expression_index();
int length = environment()->length();
ZoneList<HUnknownOSRValue*>* osr_values =
@@ -4080,15 +4582,14 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
map,
DescriptorArray::kEnumCacheBridgeCacheIndex));
- HInstruction* array_length = AddInstruction(
- new(zone()) HFixedArrayBaseLength(array));
+ HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map));
HInstruction* start_index = AddInstruction(new(zone()) HConstant(
Handle<Object>(Smi::FromInt(0)), Representation::Integer32()));
Push(map);
Push(array);
- Push(array_length);
+ Push(enum_length);
Push(start_index);
HInstruction* index_cache = AddInstruction(
@@ -4128,7 +4629,8 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HValue* key = AddInstruction(
new(zone()) HLoadKeyedFastElement(
environment()->ExpressionStackAt(2), // Enum cache.
- environment()->ExpressionStackAt(0))); // Iteration index.
+ environment()->ExpressionStackAt(0), // Iteration index.
+ environment()->ExpressionStackAt(0)));
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
@@ -4283,8 +4785,7 @@ HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
}
Handle<GlobalObject> global(info()->global_object());
global->Lookup(*var->name(), lookup);
- if (!lookup->IsFound() ||
- lookup->type() != NORMAL ||
+ if (!lookup->IsNormal() ||
(is_store && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
return kUseGeneric;
@@ -4314,8 +4815,9 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Variable* variable = expr->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- if (variable->mode() == LET || variable->mode() == CONST_HARMONY) {
- return Bailout("reference to global harmony declared variable");
+ if (IsLexicalVariableMode(variable->mode())) {
+ // TODO(rossberg): should this be an ASSERT?
+ return Bailout("reference to global lexical variable");
}
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
@@ -4360,9 +4862,8 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::LOCAL: {
HValue* value = environment()->Lookup(variable);
if (value == graph()->GetConstantHole()) {
- ASSERT(variable->mode() == CONST ||
- variable->mode() == CONST_HARMONY ||
- variable->mode() == LET);
+ ASSERT(IsDeclaredVariableMode(variable->mode()) &&
+ variable->mode() != VAR);
return Bailout("reference to uninitialized variable");
}
return ast_context()->ReturnValue(value);
@@ -4394,9 +4895,12 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ Handle<JSFunction> closure = function_state()->compilation_info()->closure();
+ Handle<FixedArray> literals(closure->literals());
HValue* context = environment()->LookupContext();
HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
+ literals,
expr->pattern(),
expr->flags(),
expr->literal_index());
@@ -4404,6 +4908,86 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+static void LookupInPrototypes(Handle<Map> map,
+ Handle<String> name,
+ LookupResult* lookup) {
+ while (map->prototype()->IsJSObject()) {
+ Handle<JSObject> holder(JSObject::cast(map->prototype()));
+ if (!holder->HasFastProperties()) break;
+ map = Handle<Map>(holder->map());
+ map->LookupDescriptor(*holder, *name, lookup);
+ if (lookup->IsFound()) return;
+ }
+ lookup->NotFound();
+}
+
+
+// Tries to find a JavaScript accessor of the given name in the prototype
+// chain starting at the given map. Returns true iff there is one, and if so
+// also returns the corresponding AccessorPair and its holder (which can be
+// null when the accessor is found directly in the given map).
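+// Illustrative example: for the JS literal `{ get x() { return 1; } }`, the
+// map's descriptor for "x" holds an AccessorPair whose getter slot is a
+// JSFunction.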
+static bool LookupAccessorPair(Handle<Map> map,
+ Handle<String> name,
+ Handle<AccessorPair>* accessors,
+ Handle<JSObject>* holder) {
+ LookupResult lookup(map->GetIsolate());
+
+ // Check for a JavaScript accessor directly in the map.
+ map->LookupDescriptor(NULL, *name, &lookup);
+ if (lookup.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup.GetValueFromMap(*map));
+ if (!callback->IsAccessorPair()) return false;
+ *accessors = Handle<AccessorPair>::cast(callback);
+ *holder = Handle<JSObject>();
+ return true;
+ }
+
+ // Everything else, e.g. a field, can't be an accessor call.
+ if (lookup.IsFound()) return false;
+
+ // Check for a JavaScript accessor somewhere in the proto chain.
+ LookupInPrototypes(map, name, &lookup);
+ if (lookup.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup.GetValue());
+ if (!callback->IsAccessorPair()) return false;
+ *accessors = Handle<AccessorPair>::cast(callback);
+ *holder = Handle<JSObject>(lookup.holder());
+ return true;
+ }
+
+ // We haven't found a JavaScript accessor anywhere.
+ return false;
+}
+
+
+static bool LookupGetter(Handle<Map> map,
+ Handle<String> name,
+ Handle<JSFunction>* getter,
+ Handle<JSObject>* holder) {
+ Handle<AccessorPair> accessors;
+ if (LookupAccessorPair(map, name, &accessors, holder) &&
+ accessors->getter()->IsJSFunction()) {
+ *getter = Handle<JSFunction>(JSFunction::cast(accessors->getter()));
+ return true;
+ }
+ return false;
+}
+
+
+static bool LookupSetter(Handle<Map> map,
+ Handle<String> name,
+ Handle<JSFunction>* setter,
+ Handle<JSObject>* holder) {
+ Handle<AccessorPair> accessors;
+ if (LookupAccessorPair(map, name, &accessors, holder) &&
+ accessors->setter()->IsJSFunction()) {
+ *setter = Handle<JSFunction>(JSFunction::cast(accessors->setter()));
+ return true;
+ }
+ return false;
+}
+
+
// Determines whether the given array or object literal boilerplate satisfies
// all limits to be considered for fast deep-copying and computes the total
// size of all objects that are part of the graph.
@@ -4521,8 +5105,23 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
+ Handle<Map> map = property->GetReceiverType();
+ Handle<String> name = property->key()->AsPropertyName();
HInstruction* store;
- CHECK_ALIVE(store = BuildStoreNamed(literal, value, property));
+ if (map.is_null()) {
+ // If we don't know the monomorphic type, do a generic store.
+ CHECK_ALIVE(store = BuildStoreNamedGeneric(literal, name, value));
+ } else {
+#if DEBUG
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ ASSERT(!LookupSetter(map, name, &setter, &holder));
+#endif
+ CHECK_ALIVE(store = BuildStoreNamedMonomorphic(literal,
+ name,
+ value,
+ map));
+ }
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(key->id());
} else {
@@ -4619,7 +5218,9 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
- elements = new(zone()) HLoadElements(literal);
+    // Pass in literal as a dummy dependency, since the receiver always has
+    // elements.
+ elements = new(zone()) HLoadElements(literal, literal);
AddInstruction(elements);
HValue* key = AddInstruction(
@@ -4662,10 +5263,17 @@ static bool ComputeLoadStoreField(Handle<Map> type,
Handle<String> name,
LookupResult* lookup,
bool is_store) {
- type->LookupInDescriptors(NULL, *name, lookup);
- if (!lookup->IsFound()) return false;
- if (lookup->type() == FIELD) return true;
- return is_store && (lookup->type() == MAP_TRANSITION) &&
+ // If we directly find a field, the access can be inlined.
+ type->LookupDescriptor(NULL, *name, lookup);
+ if (lookup->IsField()) return true;
+
+ // For a load, we are out of luck if there is no such field.
+ if (!is_store) return false;
+
+ // 2nd chance: A store into a non-existent field can still be inlined if we
+ // have a matching transition and some room left in the object.
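+  // Illustrative example: in `function Point(x) { this.x = x; }`, the first
+  // store to this.x finds no field on the initial map, but the map
+  // transition that adds field "x" allows the store to be inlined as a
+  // transitioning store.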
+ type->LookupTransition(NULL, *name, lookup);
+ return lookup->IsTransitionToField(*type) &&
(type->unused_property_fields() > 0);
}
@@ -4673,8 +5281,8 @@ static bool ComputeLoadStoreField(Handle<Map> type,
static int ComputeLoadStoreFieldIndex(Handle<Map> type,
Handle<String> name,
LookupResult* lookup) {
- ASSERT(lookup->type() == FIELD || lookup->type() == MAP_TRANSITION);
- if (lookup->type() == FIELD) {
+ ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
+ if (lookup->IsField()) {
return lookup->GetLocalFieldIndexFromMap(*type);
} else {
Map* transition = lookup->GetTransitionMapFromMap(*type);
@@ -4686,20 +5294,20 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type,
HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
- Handle<Map> type,
+ Handle<Map> map,
LookupResult* lookup,
bool smi_and_map_check) {
ASSERT(lookup->IsFound());
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, type, zone()));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
}
  // If the property does not exist yet, we have to check that it wasn't made
  // read-only or turned into a setter by intervening modifications on the
  // prototype chain.
- if (!lookup->IsProperty() && type->prototype()->IsJSReceiver()) {
- Object* proto = type->prototype();
+ if (!lookup->IsProperty() && map->prototype()->IsJSReceiver()) {
+ Object* proto = map->prototype();
// First check that the prototype chain isn't affected already.
LookupResult proto_result(isolate());
proto->Lookup(*name, &proto_result);
@@ -4718,24 +5326,24 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
}
ASSERT(proto->IsJSObject());
AddInstruction(new(zone()) HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(type->prototype())),
+ Handle<JSObject>(JSObject::cast(map->prototype())),
Handle<JSObject>(JSObject::cast(proto))));
}
- int index = ComputeLoadStoreFieldIndex(type, name, lookup);
+ int index = ComputeLoadStoreFieldIndex(map, name, lookup);
bool is_in_object = index < 0;
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
- offset += type->instance_size();
+ offset += map->instance_size();
} else {
offset += FixedArray::kHeaderSize;
}
HStoreNamedField* instr =
new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
- if (lookup->type() == MAP_TRANSITION) {
- Handle<Map> transition(lookup->GetTransitionMapFromMap(*type));
+ if (lookup->IsTransitionToField(*map)) {
+ Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
instr->set_transition(transition);
// TODO(fschneider): Record the new map type of the object in the IR to
// enable elimination of redundant checks after the transition store.
@@ -4758,44 +5366,31 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
}
-HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
+HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
HValue* value,
- ObjectLiteral::Property* prop) {
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
-
- LookupResult lookup(isolate());
- Handle<Map> type = prop->GetReceiverType();
- bool is_monomorphic = prop->IsMonomorphic() &&
- ComputeLoadStoreField(type, name, &lookup, true);
-
- return is_monomorphic
- ? BuildStoreNamedField(object, name, value, type, &lookup,
- true) // Needs smi and map check.
- : BuildStoreNamedGeneric(object, name, value);
+ Handle<Map> map,
+ Handle<JSFunction> setter,
+ Handle<JSObject> holder) {
+ AddCheckConstantFunction(holder, object, map, true);
+ AddInstruction(new(zone()) HPushArgument(object));
+ AddInstruction(new(zone()) HPushArgument(value));
+ return new(zone()) HCallConstantFunction(setter, 2);
}
-HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
- HValue* value,
- Expression* expr) {
- Property* prop = (expr->AsProperty() != NULL)
- ? expr->AsProperty()
- : expr->AsAssignment()->target()->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
-
+HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map) {
+ // Handle a store to a known field.
LookupResult lookup(isolate());
- SmallMapList* types = expr->GetReceiverTypes();
- bool is_monomorphic = expr->IsMonomorphic() &&
- ComputeLoadStoreField(types->first(), name, &lookup, true);
+ if (ComputeLoadStoreField(map, name, &lookup, true)) {
+ // true = needs smi and map check.
+ return BuildStoreNamedField(object, name, value, map, &lookup, true);
+ }
- return is_monomorphic
- ? BuildStoreNamedField(object, name, value, types->first(), &lookup,
- true) // Needs smi and map check.
- : BuildStoreNamedGeneric(object, name, value);
+ // No luck, do a generic store.
+ return BuildStoreNamedGeneric(object, name, value);
}
@@ -4835,10 +5430,11 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
// Use monomorphic load if property lookup results in the same field index
// for all maps. Requires special map check on the set of all handled maps.
+ AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* instr;
if (count == types->length() && is_monomorphic_field) {
AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
- instr = BuildLoadNamedField(object, expr, map, &lookup, false);
+ instr = BuildLoadNamedField(object, map, &lookup, false);
} else {
HValue* context = environment()->LookupContext();
instr = new(zone()) HLoadNamedFieldPolymorphic(context,
@@ -4935,36 +5531,63 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
expr->RecordTypeFeedback(oracle(), zone());
CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* value = NULL;
- HInstruction* instr = NULL;
-
if (prop->key()->IsPropertyName()) {
// Named store.
CHECK_ALIVE(VisitForValue(expr->value()));
- value = Pop();
- HValue* object = Pop();
+ HValue* value = environment()->ExpressionStackAt(0);
+ HValue* object = environment()->ExpressionStackAt(1);
Literal* key = prop->key()->AsLiteral();
Handle<String> name = Handle<String>::cast(key->handle());
ASSERT(!name.is_null());
+ HInstruction* instr = NULL;
SmallMapList* types = expr->GetReceiverTypes();
- if (expr->IsMonomorphic()) {
- CHECK_ALIVE(instr = BuildStoreNamed(object, value, expr));
+ bool monomorphic = expr->IsMonomorphic();
+ Handle<Map> map;
+ if (monomorphic) {
+ map = types->first();
+ if (map->is_dictionary_map()) monomorphic = false;
+ }
+ if (monomorphic) {
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ AddCheckConstantFunction(holder, object, map, true);
+ if (FLAG_inline_accessors && TryInlineSetter(setter, expr, value)) {
+ return;
+ }
+ Drop(2);
+ AddInstruction(new(zone()) HPushArgument(object));
+ AddInstruction(new(zone()) HPushArgument(value));
+ instr = new(zone()) HCallConstantFunction(setter, 2);
+ } else {
+ Drop(2);
+ CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
+ name,
+ value,
+ map));
+ }
} else if (types != NULL && types->length() > 1) {
- HandlePolymorphicStoreNamedField(expr, object, value, types, name);
- return;
-
+ Drop(2);
+ return HandlePolymorphicStoreNamedField(expr, object, value, types, name);
} else {
+ Drop(2);
instr = BuildStoreNamedGeneric(object, name, value);
}
+ Push(value);
+ instr->set_position(expr->position());
+ AddInstruction(instr);
+ if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ return ast_context()->ReturnValue(Pop());
+
} else {
// Keyed store.
CHECK_ALIVE(VisitForValue(prop->key()));
CHECK_ALIVE(VisitForValue(expr->value()));
- value = Pop();
+ HValue* value = Pop();
HValue* key = Pop();
HValue* object = Pop();
bool has_side_effects = false;
@@ -4977,11 +5600,6 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
}
- Push(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
- return ast_context()->ReturnValue(Pop());
}
@@ -4991,7 +5609,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
HValue* value,
int position,
- int ast_id) {
+ BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
@@ -5108,18 +5726,31 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (prop->key()->IsPropertyName()) {
// Named property.
CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* obj = Top();
-
- HInstruction* load = NULL;
- if (prop->IsMonomorphic()) {
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map = prop->GetReceiverTypes()->first();
- load = BuildLoadNamed(obj, prop, map, name);
+ HValue* object = Top();
+
+ Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+ Handle<Map> map;
+ HInstruction* load;
+ bool monomorphic = prop->IsMonomorphic();
+ if (monomorphic) {
+ map = prop->GetReceiverTypes()->first();
+      // We can't generate code for a monomorphic dict-mode load, so just
+      // pretend it is not monomorphic.
+ }
+ if (monomorphic) {
+ Handle<JSFunction> getter;
+ Handle<JSObject> holder;
+ if (LookupGetter(map, name, &getter, &holder)) {
+ load = BuildCallGetter(object, map, getter, holder);
+ } else {
+ load = BuildLoadNamedMonomorphic(object, name, prop, map);
+ }
} else {
- load = BuildLoadNamedGeneric(obj, prop);
+ load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(expr->CompoundLoadId());
+ if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -5130,7 +5761,21 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
HInstruction* store;
- CHECK_ALIVE(store = BuildStoreNamed(obj, instr, prop));
+ if (!monomorphic) {
+ // If we don't know the monomorphic type, do a generic store.
+ CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, instr));
+ } else {
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ store = BuildCallSetter(object, instr, map, setter, holder);
+ } else {
+ CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
+ name,
+ instr,
+ map));
+ }
+ }
AddInstruction(store);
// Drop the simulated receiver and value. Return the value.
Drop(2);
@@ -5147,11 +5792,11 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, expr->CompoundLoadId(), RelocInfo::kNoPosition,
+ obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(expr->CompoundLoadId());
+ if (has_side_effects) AddSimulate(prop->LoadId());
CHECK_ALIVE(VisitForValue(expr->value()));
@@ -5331,20 +5976,19 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- Property* expr,
- Handle<Map> type,
+ Handle<Map> map,
LookupResult* lookup,
bool smi_and_map_check) {
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, type, zone()));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
}
- int index = lookup->GetLocalFieldIndexFromMap(*type);
+ int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
- int offset = (index * kPointerSize) + type->instance_size();
+ int offset = (index * kPointerSize) + map->instance_size();
return new(zone()) HLoadNamedField(object, true, offset);
} else {
// Non-negative property indices are in the properties array.
@@ -5354,39 +5998,50 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
}
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
+HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
+ Handle<String> name,
Property* expr) {
if (expr->IsUninitialized() && !FLAG_always_opt) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
}
- ASSERT(expr->key()->IsPropertyName());
- Handle<Object> name = expr->key()->AsLiteral()->handle();
HValue* context = environment()->LookupContext();
- return new(zone()) HLoadNamedGeneric(context, obj, name);
+ return new(zone()) HLoadNamedGeneric(context, object, name);
+}
+
+
+HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
+ Handle<Map> map,
+ Handle<JSFunction> getter,
+ Handle<JSObject> holder) {
+ AddCheckConstantFunction(holder, object, map, true);
+ AddInstruction(new(zone()) HPushArgument(object));
+ return new(zone()) HCallConstantFunction(getter, 1);
}
-HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
- Property* expr,
- Handle<Map> map,
- Handle<String> name) {
+HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
+ Handle<String> name,
+ Property* expr,
+ Handle<Map> map) {
+ // Handle a load from a known field.
+ ASSERT(!map->is_dictionary_map());
LookupResult lookup(isolate());
- map->LookupInDescriptors(NULL, *name, &lookup);
- if (lookup.IsFound() && lookup.type() == FIELD) {
- return BuildLoadNamedField(obj,
- expr,
- map,
- &lookup,
- true);
- } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
- AddInstruction(new(zone()) HCheckNonSmi(obj));
- AddInstruction(HCheckMaps::NewWithTransitions(obj, map, zone()));
+ map->LookupDescriptor(NULL, *name, &lookup);
+ if (lookup.IsField()) {
+ return BuildLoadNamedField(object, map, &lookup, true);
+ }
+
+ // Handle a load of a constant known function.
+ if (lookup.IsConstantFunction()) {
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
return new(zone()) HConstant(function, Representation::Tagged());
- } else {
- return BuildLoadNamedGeneric(obj, expr);
}
+
+ // No luck, do a generic load.
+ return BuildLoadNamedGeneric(object, name, expr);
}
@@ -5401,6 +6056,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
HValue* val,
+ HValue* dependency,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
@@ -5443,8 +6099,14 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
external_elements, checked_key, val, elements_kind);
} else {
ASSERT(val == NULL);
- return new(zone()) HLoadKeyedSpecializedArrayElement(
- external_elements, checked_key, elements_kind);
+ HLoadKeyedSpecializedArrayElement* load =
+ new(zone()) HLoadKeyedSpecializedArrayElement(
+ external_elements, checked_key, dependency, elements_kind);
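+    // Loads from external uint32 arrays can yield values above kMaxInt that
+    // do not fit into an int32. Record them so ComputeSafeUint32Operations
+    // can later check whether all their uses tolerate full uint32 values.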
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
+ }
+ return load;
}
}
@@ -5452,6 +6114,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
+ HValue* load_dependency,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
@@ -5480,10 +6143,11 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
OMIT_HOLE_CHECK :
PERFORM_HOLE_CHECK;
if (IsFastDoubleElementsKind(elements_kind)) {
- return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, mode);
+ return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key,
+ load_dependency, mode);
} else { // Smi or Object elements.
return new(zone()) HLoadKeyedFastElement(elements, checked_key,
- elements_kind);
+ load_dependency, elements_kind);
}
}
@@ -5494,22 +6158,38 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* dependency,
Handle<Map> map,
bool is_store) {
- HInstruction* mapcheck =
- AddInstruction(new(zone()) HCheckMaps(object, map, zone(), dependency));
+ HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
+ zone(), dependency);
+ AddInstruction(mapcheck);
+ if (dependency) {
+ mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+ }
+ return BuildUncheckedMonomorphicElementAccess(object, key, val,
+ mapcheck, map, is_store);
+}
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ Handle<Map> map,
+ bool is_store) {
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
// on a HElementsTransition instruction. The flag can also be removed if the
// map to check has FAST_HOLEY_ELEMENTS, since there can be no further
  // ElementsKind transitions. Finally, the dependency can be removed for
  // FAST_ELEMENTS stores, since a transition to HOLEY elements won't change
  // the generated store code.
- if (dependency ||
- (map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
+ if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
(map->elements_kind() == FAST_ELEMENTS && is_store)) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
}
bool fast_smi_only_elements = map->has_fast_smi_elements();
bool fast_elements = map->has_fast_object_elements();
- HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
+ HInstruction* elements =
+ AddInstruction(new(zone()) HLoadElements(object, mapcheck));
if (is_store && (fast_elements || fast_smi_only_elements)) {
HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map(), zone());
@@ -5520,12 +6200,14 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HInstruction* checked_key = NULL;
if (map->has_external_array_elements()) {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
HLoadExternalArrayPointer* external_elements =
new(zone()) HLoadExternalArrayPointer(elements);
AddInstruction(external_elements);
- return BuildExternalArrayElementAccess(external_elements, checked_key,
- val, map->elements_kind(), is_store);
+ return BuildExternalArrayElementAccess(
+ external_elements, checked_key, val, mapcheck,
+ map->elements_kind(), is_store);
}
ASSERT(fast_smi_only_elements ||
fast_elements ||
@@ -5536,17 +6218,71 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
} else {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
}
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- return BuildFastElementAccess(elements, checked_key, val,
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
+ return BuildFastElementAccess(elements, checked_key, val, mapcheck,
map->elements_kind(), is_store);
}
+HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ SmallMapList* maps) {
+ // For polymorphic loads of similar elements kinds (i.e. all tagged or all
+ // double), always use the "worst case" code without a transition. This is
+ // much faster than transitioning the elements to the worst case, trading a
+ // HTransitionElements for a HCheckMaps, and avoiding mutation of the array.
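+  // For example, a mix of FAST_SMI_ELEMENTS and FAST_HOLEY_ELEMENTS maps
+  // consolidates to FAST_HOLEY_ELEMENTS, whose load code handles both, while
+  // a mix of double and tagged elements kinds cannot be consolidated.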
+ bool has_double_maps = false;
+ bool has_smi_or_object_maps = false;
+ bool has_js_array_access = false;
+ bool has_non_js_array_access = false;
+ Handle<Map> most_general_consolidated_map;
+ for (int i = 0; i < maps->length(); ++i) {
+ Handle<Map> map = maps->at(i);
+ // Don't allow mixing of JSArrays with JSObjects.
+ if (map->instance_type() == JS_ARRAY_TYPE) {
+ if (has_non_js_array_access) return NULL;
+ has_js_array_access = true;
+ } else if (has_js_array_access) {
+ return NULL;
+ } else {
+ has_non_js_array_access = true;
+ }
+ // Don't allow mixed, incompatible elements kinds.
+ if (map->has_fast_double_elements()) {
+ if (has_smi_or_object_maps) return NULL;
+ has_double_maps = true;
+ } else if (map->has_fast_smi_or_object_elements()) {
+ if (has_double_maps) return NULL;
+ has_smi_or_object_maps = true;
+ } else {
+ return NULL;
+ }
+    // Remember the most general elements kind; the code for its load will
+    // properly handle all of the more specific cases.
+ if ((i == 0) || IsMoreGeneralElementsKindTransition(
+ most_general_consolidated_map->elements_kind(),
+ map->elements_kind())) {
+ most_general_consolidated_map = map;
+ }
+ }
+ if (!has_double_maps && !has_smi_or_object_maps) return NULL;
+
+ HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
+ AddInstruction(check_maps);
+ HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
+ object, key, val, check_maps, most_general_consolidated_map, false);
+ return instr;
+}
+
+
HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
Expression* prop,
- int ast_id,
+ BailoutId ast_id,
int position,
bool is_store,
bool* has_side_effects) {
@@ -5555,6 +6291,19 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
SmallMapList* maps = prop->GetReceiverTypes();
bool todo_external_array = false;
+ if (!is_store) {
+ HInstruction* consolidated_load =
+ TryBuildConsolidatedElementLoad(object, key, val, maps);
+ if (consolidated_load != NULL) {
+ AddInstruction(consolidated_load);
+ *has_side_effects |= consolidated_load->HasObservableSideEffects();
+ if (position != RelocInfo::kNoPosition) {
+ consolidated_load->set_position(position);
+ }
+ return consolidated_load;
+ }
+ }
+
static const int kNumElementTypes = kElementsKindCount;
bool type_todo[kNumElementTypes];
for (int i = 0; i < kNumElementTypes; ++i) {
@@ -5616,17 +6365,19 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
object, key, val, transition, untransitionable_map, is_store));
}
*has_side_effects |= instr->HasObservableSideEffects();
- instr->set_position(position);
+ if (position != RelocInfo::kNoPosition) instr->set_position(position);
return is_store ? NULL : instr;
}
- AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
+ HInstruction* checkspec =
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
HBasicBlock* join = graph()->CreateBasicBlock();
HInstruction* elements_kind_instr =
AddInstruction(new(zone()) HElementsKind(object));
HCompareConstantEqAndBranch* elements_kind_branch = NULL;
- HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
+ HInstruction* elements =
+ AddInstruction(new(zone()) HLoadElements(object, checkspec));
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
@@ -5689,9 +6440,11 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HInstruction* length;
length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck,
HType::Smi()));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind, is_store));
+ elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
if (!is_store) {
Push(access);
}
@@ -5704,9 +6457,11 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind, is_store));
+ elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
} else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -5715,10 +6470,11 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
}
} else { // External array elements.
access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val, elements_kind, is_store));
+ external_elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
}
*has_side_effects |= access->HasObservableSideEffects();
- access->set_position(position);
+ if (position != RelocInfo::kNoPosition) access->set_position(position);
if (!is_store) {
Push(access);
}
@@ -5739,7 +6495,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
HValue* key,
HValue* val,
Expression* expr,
- int ast_id,
+ BailoutId ast_id,
int position,
bool is_store,
bool* has_side_effects) {
@@ -5765,7 +6521,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
instr = BuildLoadKeyedGeneric(obj, key);
}
}
- instr->set_position(position);
+ if (position != RelocInfo::kNoPosition) instr->set_position(position);
AddInstruction(instr);
*has_side_effects = instr->HasObservableSideEffects();
return instr;
@@ -5912,15 +6668,27 @@ void HGraphBuilder::VisitProperty(Property* expr) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
SmallMapList* types = expr->GetReceiverTypes();
- HValue* obj = Pop();
+ bool monomorphic = expr->IsMonomorphic();
+ Handle<Map> map;
if (expr->IsMonomorphic()) {
- instr = BuildLoadNamed(obj, expr, types->first(), name);
+ map = types->first();
+ if (map->is_dictionary_map()) monomorphic = false;
+ }
+ if (monomorphic) {
+ Handle<JSFunction> getter;
+ Handle<JSObject> holder;
+ if (LookupGetter(map, name, &getter, &holder)) {
+ AddCheckConstantFunction(holder, Top(), map, true);
+ if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
+ AddInstruction(new(zone()) HPushArgument(Pop()));
+ instr = new(zone()) HCallConstantFunction(getter, 1);
+ } else {
+ instr = BuildLoadNamedMonomorphic(Pop(), name, expr, map);
+ }
} else if (types != NULL && types->length() > 1) {
- AddInstruction(new(zone()) HCheckNonSmi(obj));
- HandlePolymorphicLoadNamedField(expr, obj, types, name);
- return;
+ return HandlePolymorphicLoadNamedField(expr, Pop(), types, name);
} else {
- instr = BuildLoadNamedGeneric(obj, expr);
+ instr = BuildLoadNamedGeneric(Pop(), name, expr);
}
} else {
@@ -5950,7 +6718,7 @@ void HGraphBuilder::VisitProperty(Property* expr) {
}
-void HGraphBuilder::AddCheckConstantFunction(Call* expr,
+void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
HValue* receiver,
Handle<Map> receiver_map,
bool smi_and_map_check) {
@@ -5962,10 +6730,9 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
AddInstruction(HCheckMaps::NewWithTransitions(receiver, receiver_map,
zone()));
}
- if (!expr->holder().is_null()) {
+ if (!holder.is_null()) {
AddInstruction(new(zone()) HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(receiver_map->prototype())),
- expr->holder()));
+ Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
}
}
@@ -6048,7 +6815,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
set_current_block(if_true);
expr->ComputeTarget(map, name);
- AddCheckConstantFunction(expr, receiver, map, false);
+ AddCheckConstantFunction(expr->holder(), receiver, map, false);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
Handle<JSFunction> caller = info()->closure();
SmartArrayPointer<char> caller_name =
@@ -6162,11 +6929,11 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
bool HGraphBuilder::TryInline(CallKind call_kind,
Handle<JSFunction> target,
- ZoneList<Expression*>* arguments,
- HValue* receiver,
- int ast_id,
- int return_id,
- ReturnHandlingFlag return_handling) {
+ int arguments_count,
+ HValue* implicit_return_value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ InliningKind inlining_kind) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -6223,13 +6990,13 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
}
// Parse and allocate variables.
- CompilationInfo target_info(target);
+ CompilationInfo target_info(target, zone());
if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
!Scope::Analyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
- target_shared->DisableOptimization();
+ target_shared->DisableOptimization("parse/scope error");
}
TraceInline(target, caller, "parse failure");
return false;
@@ -6311,24 +7078,25 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Save the pending call context and type feedback oracle. Set up new ones
// for the inlined function.
ASSERT(target_shared->has_deoptimization_support());
+ Handle<Code> unoptimized_code(target_shared->code());
TypeFeedbackOracle target_oracle(
- Handle<Code>(target_shared->code()),
- Handle<Context>(target->context()->global_context()),
+ unoptimized_code,
+ Handle<Context>(target->context()->native_context()),
isolate(),
zone());
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, &target_oracle, return_handling);
+ this, &target_info, &target_oracle, inlining_kind);
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
environment()->CopyForInlining(target,
- arguments->length(),
+ arguments_count,
function,
undefined,
call_kind,
- function_state()->is_construct());
+ function_state()->inlining_kind());
#ifdef V8_TARGET_ARCH_IA32
// IA32 only, overwrite the caller's context in the deoptimization
// environment with the correct one.
@@ -6360,10 +7128,10 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
HEnterInlined* enter_inlined =
new(zone()) HEnterInlined(target,
- arguments->length(),
+ arguments_count,
function,
call_kind,
- function_state()->is_construct(),
+ function_state()->inlining_kind(),
function->scope()->arguments(),
arguments_values);
function_state()->set_entry(enter_inlined);
@@ -6383,7 +7151,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Bail out if the inline function did, as we cannot residualize a call
// instead.
TraceInline(target, caller, "inline graph construction failed");
- target_shared->DisableOptimization();
+ target_shared->DisableOptimization("inlining bailed out");
inline_bailout_ = true;
delete target_state;
return true;
@@ -6392,30 +7160,51 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Update inlined nodes count.
inlined_count_ += nodes_added;
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
+ Handle<TypeFeedbackInfo> type_info(
+ Handle<TypeFeedbackInfo>::cast(maybe_type_info));
+ graph()->update_type_change_checksum(type_info->own_type_change_checksum());
+
TraceInline(target, caller, NULL);
if (current_block() != NULL) {
- // Add default return value (i.e. undefined for normal calls or the newly
- // allocated receiver for construct calls) if control can fall off the
- // body. In a test context, undefined is false and any JSObject is true.
- if (call_context()->IsValue()) {
- ASSERT(function_return() != NULL);
- HValue* return_value = function_state()->is_construct()
- ? receiver
- : undefined;
- current_block()->AddLeaveInlined(return_value,
- function_return(),
- function_state());
- } else if (call_context()->IsEffect()) {
- ASSERT(function_return() != NULL);
- current_block()->Goto(function_return(), function_state());
+ FunctionState* state = function_state();
+ if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
+ // Falling off the end of an inlined construct call. In a test context the
+ // return value will always evaluate to true; in a value context the
+ // return value is the newly allocated receiver.
+ if (call_context()->IsTest()) {
+ current_block()->Goto(inlined_test_context()->if_true(), state);
+ } else if (call_context()->IsEffect()) {
+ current_block()->Goto(function_return(), state);
+ } else {
+ ASSERT(call_context()->IsValue());
+ current_block()->AddLeaveInlined(implicit_return_value, state);
+ }
+ } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
+ // Falling off the end of an inlined setter call. The returned value is
+ // never used; the value of an assignment is always the value of the RHS
+ // of the assignment.
+ if (call_context()->IsTest()) {
+ inlined_test_context()->ReturnValue(implicit_return_value);
+ } else if (call_context()->IsEffect()) {
+ current_block()->Goto(function_return(), state);
+ } else {
+ ASSERT(call_context()->IsValue());
+ current_block()->AddLeaveInlined(implicit_return_value, state);
+ }
} else {
- ASSERT(call_context()->IsTest());
- ASSERT(inlined_test_context() != NULL);
- HBasicBlock* target = function_state()->is_construct()
- ? inlined_test_context()->if_true()
- : inlined_test_context()->if_false();
- current_block()->Goto(target, function_state());
+ // Falling off the end of a normal inlined function. This basically means
+ // returning undefined.
+ if (call_context()->IsTest()) {
+ current_block()->Goto(inlined_test_context()->if_false(), state);
+ } else if (call_context()->IsEffect()) {
+ current_block()->Goto(function_return(), state);
+ } else {
+ ASSERT(call_context()->IsValue());
+ current_block()->AddLeaveInlined(undefined, state);
+ }
}
}
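The three branches above encode one rule per inlining kind; a compilable toy summary (hypothetical names, not V8 code) of the implicit result when control falls off the end of an inlined body:

enum InlineKindSketch { NORMAL_RETURN, CONSTRUCT_CALL, SETTER_CALL };
static const char* ImplicitResult(InlineKindSketch kind) {
  switch (kind) {
    case CONSTRUCT_CALL: return "newly allocated receiver";  // value context
    case SETTER_CALL:    return "RHS of the assignment";     // value unused
    default:             return "undefined";                 // normal call
  }
}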
@@ -6463,7 +7252,7 @@ bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
return TryInline(call_kind,
expr->target(),
- expr->arguments(),
+ expr->arguments()->length(),
NULL,
expr->id(),
expr->ReturnId(),
@@ -6471,17 +7260,43 @@ bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
}
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr, HValue* receiver) {
+bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_FUNCTION,
expr->target(),
- expr->arguments(),
- receiver,
+ expr->arguments()->length(),
+ implicit_return_value,
expr->id(),
expr->ReturnId(),
CONSTRUCT_CALL_RETURN);
}
+bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+ Property* prop) {
+ return TryInline(CALL_AS_METHOD,
+ getter,
+ 0,
+ NULL,
+ prop->id(),
+ prop->LoadId(),
+ GETTER_CALL_RETURN);
+}
+
+
+bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+ Assignment* assignment,
+ HValue* implicit_return_value) {
+ return TryInline(CALL_AS_METHOD,
+ setter,
+ 1,
+ implicit_return_value,
+ assignment->id(),
+ assignment->AssignmentId(),
+ SETTER_CALL_RETURN);
+}
+
+
bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
@@ -6555,7 +7370,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathCos:
case kMathTan:
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
HValue* argument = Pop();
HValue* context = environment()->LookupContext();
Drop(1); // Receiver.
@@ -6568,7 +7383,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
break;
case kMathPow:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
@@ -6610,7 +7425,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
break;
case kMathRandom:
if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
Drop(1); // Receiver.
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -6623,82 +7438,15 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathMax:
case kMathMin:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
HValue* right = Pop();
HValue* left = Pop();
- Pop(); // Pop receiver.
-
- HValue* left_operand = left;
- HValue* right_operand = right;
-
- // If we do not have two integers, we convert to double for comparison.
- if (!left->representation().IsInteger32() ||
- !right->representation().IsInteger32()) {
- if (!left->representation().IsDouble()) {
- HChange* left_convert = new(zone()) HChange(
- left,
- Representation::Double(),
- false, // Do not truncate when converting to double.
- true); // Deoptimize for undefined.
- left_convert->SetFlag(HValue::kBailoutOnMinusZero);
- left_operand = AddInstruction(left_convert);
- }
- if (!right->representation().IsDouble()) {
- HChange* right_convert = new(zone()) HChange(
- right,
- Representation::Double(),
- false, // Do not truncate when converting to double.
- true); // Deoptimize for undefined.
- right_convert->SetFlag(HValue::kBailoutOnMinusZero);
- right_operand = AddInstruction(right_convert);
- }
- }
-
- ASSERT(left_operand->representation().Equals(
- right_operand->representation()));
- ASSERT(!left_operand->representation().IsTagged());
-
- Token::Value op = (id == kMathMin) ? Token::LT : Token::GT;
-
- HCompareIDAndBranch* compare =
- new(zone()) HCompareIDAndBranch(left_operand, right_operand, op);
- compare->SetInputRepresentation(left_operand->representation());
-
- HBasicBlock* return_left = graph()->CreateBasicBlock();
- HBasicBlock* return_right = graph()->CreateBasicBlock();
-
- compare->SetSuccessorAt(0, return_left);
- compare->SetSuccessorAt(1, return_right);
- current_block()->Finish(compare);
-
- set_current_block(return_left);
- Push(left);
- set_current_block(return_right);
- // The branch above always returns the right operand if either of
- // them is NaN, but the spec requires that max/min(NaN, X) = NaN.
- // We add another branch that checks if the left operand is NaN or not.
- if (left_operand->representation().IsDouble()) {
- // If left_operand != left_operand then it is NaN.
- HCompareIDAndBranch* compare_nan = new(zone()) HCompareIDAndBranch(
- left_operand, left_operand, Token::EQ);
- compare_nan->SetInputRepresentation(left_operand->representation());
- HBasicBlock* left_is_number = graph()->CreateBasicBlock();
- HBasicBlock* left_is_nan = graph()->CreateBasicBlock();
- compare_nan->SetSuccessorAt(0, left_is_number);
- compare_nan->SetSuccessorAt(1, left_is_nan);
- current_block()->Finish(compare_nan);
- set_current_block(left_is_nan);
- Push(left);
- set_current_block(left_is_number);
- Push(right);
- return_right = CreateJoin(left_is_number, left_is_nan, expr->id());
- } else {
- Push(right);
- }
-
- HBasicBlock* join = CreateJoin(return_left, return_right, expr->id());
- set_current_block(join);
- ast_context()->ReturnValue(Pop());
+ Drop(1); // Receiver.
+ HValue* context = environment()->LookupContext();
+ HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
+ : HMathMinMax::kMathMax;
+ HMathMinMax* result = new(zone()) HMathMinMax(context, left, right, op);
+ ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
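The HMathMinMax instruction now absorbs the NaN and signed-zero handling that the removed branch graph built explicitly; a standalone sketch of the semantics it must still preserve (an illustration with invented names, not V8 code):

#include <cmath>
static double JsMathMin(double a, double b) {
  // Math.min(NaN, x) and Math.min(x, NaN) are NaN -- this is the case the
  // removed compare_nan branch handled explicitly.
  if (std::isnan(a) || std::isnan(b)) return std::nan("");
  // Math.min(-0, +0) is -0; a plain a < b comparison cannot see the sign.
  if (a == b) return std::signbit(a) ? a : b;
  return a < b ? a : b;
}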
@@ -6739,7 +7487,7 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
VisitForValue(prop->obj());
if (HasStackOverflow() || current_block() == NULL) return true;
HValue* function = Top();
- AddCheckConstantFunction(expr, function, function_map, true);
+ AddCheckConstantFunction(expr->holder(), function, function_map, true);
Drop(1);
VisitForValue(args->at(0));
@@ -6858,7 +7606,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
call = PreProcessCall(
new(zone()) HCallNamed(context, name, argument_count));
} else {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
if (TryInlineCall(expr)) return;
call = PreProcessCall(
@@ -7059,8 +7807,8 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
- HValue* constructor = NULL;
- CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
+ CHECK_ALIVE(VisitArgument(expr->expression()));
+ HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HInstruction* call =
new(zone()) HCallNew(context, constructor, argument_count);
@@ -7118,7 +7866,6 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
int argument_count = expr->arguments()->length();
HCallRuntime* call =
new(zone()) HCallRuntime(context, name, function, argument_count);
- call->set_position(RelocInfo::kNoPosition);
Drop(argument_count);
return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -7373,8 +8120,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot::Mode mode =
- (var->mode() == LET || var->mode() == CONST_HARMONY)
+ HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), mode, after);
@@ -7399,24 +8145,49 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (returns_original_input) Push(graph_->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* obj = Top();
-
- HInstruction* load = NULL;
- if (prop->IsMonomorphic()) {
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map = prop->GetReceiverTypes()->first();
- load = BuildLoadNamed(obj, prop, map, name);
+ HValue* object = Top();
+
+ Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+ Handle<Map> map;
+ HInstruction* load;
+ bool monomorphic = prop->IsMonomorphic();
+ if (monomorphic) {
+ map = prop->GetReceiverTypes()->first();
+ if (map->is_dictionary_map()) monomorphic = false;
+ }
+ if (monomorphic) {
+ Handle<JSFunction> getter;
+ Handle<JSObject> holder;
+ if (LookupGetter(map, name, &getter, &holder)) {
+ load = BuildCallGetter(object, map, getter, holder);
+ } else {
+ load = BuildLoadNamedMonomorphic(object, name, prop, map);
+ }
} else {
- load = BuildLoadNamedGeneric(obj, prop);
+ load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(expr->CountId());
+ if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
HInstruction* store;
- CHECK_ALIVE(store = BuildStoreNamed(obj, after, prop));
+ if (!monomorphic) {
+ // If we don't know the monomorphic type, do a generic store.
+ CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, after));
+ } else {
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ store = BuildCallSetter(object, after, map, setter, holder);
+ } else {
+ CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
+ name,
+ after,
+ map));
+ }
+ }
AddInstruction(store);
// Overwrite the receiver in the bailout environment with the result
@@ -7437,11 +8208,11 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, expr->CountId(), RelocInfo::kNoPosition,
+ obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(expr->CountId());
+ if (has_side_effects) AddSimulate(prop->LoadId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
@@ -7526,6 +8297,18 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
break;
case Token::SHR:
instr = HShr::NewHShr(zone(), context, left, right);
+ if (FLAG_opt_safe_uint32_operations && instr->IsShr()) {
+ bool can_be_shift_by_zero = true;
+ if (right->IsConstant()) {
+ HConstant* right_const = HConstant::cast(right);
+ if (right_const->HasInteger32Value() &&
+ (right_const->Integer32Value() & 0x1f) != 0) {
+ can_be_shift_by_zero = false;
+ }
+ }
+
+ if (can_be_shift_by_zero) graph()->RecordUint32Instruction(instr);
+ }
break;
case Token::SHL:
instr = HShl::NewHShl(zone(), context, left, right);
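The SHR guard above records an instruction as a uint32 candidate only when the shift count might be zero modulo 32; a small standalone illustration of why that case alone can escape the int32 range (invented names, not V8 code):

#include <cstdint>
static uint32_t JsShr(int32_t x, int32_t s) {
  return static_cast<uint32_t>(x) >> (s & 0x1f);  // JS masks the shift count
}
// JsShr(-1, 0) == 0xFFFFFFFFu, which does not fit in an int32. For any shift
// amount with (s & 0x1f) != 0 the result is at most 0x7FFFFFFF and is
// therefore always a safe int32.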
@@ -7538,8 +8321,8 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
// for a smi operation. If one of the operands is a constant string
// do not generate code assuming it is a smi operation.
if (info.IsSmi() &&
- ((left->IsConstant() && HConstant::cast(left)->HasStringValue()) ||
- (right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
+ ((left->IsConstant() && HConstant::cast(left)->handle()->IsString()) ||
+ (right->IsConstant() && HConstant::cast(right)->handle()->IsString()))) {
return instr;
}
Representation rep = ToRepresentation(info);
@@ -7624,7 +8407,7 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- unsigned test_id = expr->left()->test_id();
+ TypeFeedbackId test_id = expr->left()->test_id();
ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
HBranch* test = is_logical_and
? new(zone()) HBranch(Top(), eval_right, empty_block, expected)
@@ -7757,7 +8540,7 @@ static bool MatchLiteralCompareTypeof(HValue* left,
if (left->IsTypeof() &&
Token::IsEqualityOp(op) &&
right->IsConstant() &&
- HConstant::cast(right)->HasStringValue()) {
+ HConstant::cast(right)->handle()->IsString()) {
*typeof_expr = HTypeof::cast(left);
*check = Handle<String>::cast(HConstant::cast(right)->handle());
return true;
@@ -7863,9 +8646,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Handle<GlobalObject> global(info()->global_object());
LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
- if (lookup.IsFound() &&
- lookup.type() == NORMAL &&
- lookup.GetValue()->IsJSFunction()) {
+ if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
@@ -7963,13 +8744,25 @@ void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
}
+HInstruction* HGraphBuilder::BuildThisFunction() {
+ // If we share optimized code between different closures, the
+ // this-function is not a constant, except inside an inlined body.
+ if (function_state()->outer() != NULL) {
+ return new(zone()) HConstant(
+ function_state()->compilation_info()->closure(),
+ Representation::Tagged());
+ } else {
+ return new(zone()) HThisFunction;
+ }
+}
+
+
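BuildThisFunction above distinguishes shared from inlined code; a toy analogue of why sharing forbids baking the closure into the code (illustration only, invented names):

struct ClosureSketch { int literals_id; const void* code; };
static int shared_code;  // stands in for one shared optimized code object
static ClosureSketch MakeClosure(int literals_id) {
  return ClosureSketch{literals_id, &shared_code};  // same code, new closure
}
// MakeClosure(1) and MakeClosure(2) share one code object, so that code
// cannot treat "this function" as a compile-time constant; only an inlined
// body, where the exact closure is known, can.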
void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- HThisFunction* self = new(zone()) HThisFunction(
- function_state()->compilation_info()->closure());
- return ast_context()->ReturnInstruction(self, expr->id());
+ HInstruction* instr = BuildThisFunction();
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -8196,7 +8989,7 @@ void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
if (function_state()->outer() != NULL) {
// We are generating the graph for an inlined function.
- HValue* value = function_state()->is_construct()
+ HValue* value = function_state()->inlining_kind() == CONSTRUCT_CALL_RETURN
? graph()->GetConstantTrue()
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
@@ -8600,7 +9393,7 @@ HEnvironment::HEnvironment(HEnvironment* outer,
outer_(outer),
pop_count_(0),
push_count_(0),
- ast_id_(AstNode::kNoNumber),
+ ast_id_(BailoutId::None()),
zone_(zone) {
Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
}
@@ -8636,7 +9429,7 @@ HEnvironment::HEnvironment(HEnvironment* outer,
outer_(outer),
pop_count_(0),
push_count_(0),
- ast_id_(AstNode::kNoNumber),
+ ast_id_(BailoutId::None()),
zone_(zone) {
}
@@ -8786,11 +9579,9 @@ HEnvironment* HEnvironment::CopyForInlining(
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind,
- bool is_construct) const {
+ InliningKind inlining_kind) const {
ASSERT(frame_type() == JS_FUNCTION);
- Zone* zone = closure()->GetIsolate()->zone();
-
// Outer environment is a copy of this one without the arguments.
int arity = function->scope()->num_parameters();
@@ -8798,11 +9589,19 @@ HEnvironment* HEnvironment::CopyForInlining(
outer->Drop(arguments + 1); // Including receiver.
outer->ClearHistory();
- if (is_construct) {
+ if (inlining_kind == CONSTRUCT_CALL_RETURN) {
// Create artificial constructor stub environment. The receiver should
// actually be the constructor function, but we pass the newly allocated
// object instead; DoComputeConstructStubFrame() relies on that.
outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
+ } else if (inlining_kind == GETTER_CALL_RETURN) {
+ // We need an additional StackFrame::INTERNAL frame for restoring the
+ // correct context.
+ outer = CreateStubEnvironment(outer, target, JS_GETTER, arguments);
+ } else if (inlining_kind == SETTER_CALL_RETURN) {
+ // We need an additional StackFrame::INTERNAL frame for temporarily saving
+ // the argument of the setter; see StoreStubCompiler::CompileStoreViaSetter.
+ outer = CreateStubEnvironment(outer, target, JS_SETTER, arguments);
}
if (arity != arguments) {
@@ -8811,7 +9610,7 @@ HEnvironment* HEnvironment::CopyForInlining(
}
HEnvironment* inner =
- new(zone) HEnvironment(outer, function->scope(), target, zone);
+ new(zone()) HEnvironment(outer, function->scope(), target, zone());
// Get the argument values from the original environment.
for (int i = 0; i <= arity; ++i) { // Include receiver.
HValue* push = (i <= arguments) ?
@@ -8822,7 +9621,7 @@ HEnvironment* HEnvironment::CopyForInlining(
// builtin function, pass undefined as the receiver for function
// calls (instead of the global receiver).
if ((target->shared()->native() || !function->is_classic_mode()) &&
- call_kind == CALL_AS_FUNCTION && !is_construct) {
+ call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN) {
inner->SetValueAt(0, undefined);
}
inner->SetValueAt(arity + 1, LookupContext());
@@ -8830,7 +9629,7 @@ HEnvironment* HEnvironment::CopyForInlining(
inner->SetValueAt(i, undefined);
}
- inner->set_ast_id(AstNode::kFunctionEntryId);
+ inner->set_ast_id(BailoutId::FunctionEntry());
return inner;
}
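CopyForInlining now inserts one artificial stub frame per inlining kind; a compact restatement of the branches above (hypothetical helper, not V8 code):

enum InlinedKindSketch { NORMAL_CALL, CONSTRUCT_CALL_K, GETTER_CALL_K, SETTER_CALL_K };
enum FrameKindSketch { NO_EXTRA_FRAME, CONSTRUCT_FRAME, GETTER_FRAME, SETTER_FRAME };
static FrameKindSketch ExtraStubFrame(InlinedKindSketch kind) {
  switch (kind) {
    case CONSTRUCT_CALL_K: return CONSTRUCT_FRAME;  // carries the new receiver
    case GETTER_CALL_K:    return GETTER_FRAME;     // restores the context
    case SETTER_CALL_K:    return SETTER_FRAME;     // saves the setter argument
    default:               return NO_EXTRA_FRAME;   // normal inlined calls
  }
}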
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 6fa3d1b9ff..7d23ac7306 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -118,14 +118,14 @@ class HBasicBlock: public ZoneObject {
bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
- void SetJoinId(int ast_id);
+ void SetJoinId(BailoutId ast_id);
void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
+ void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); }
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
@@ -135,9 +135,7 @@ class HBasicBlock: public ZoneObject {
// Add the inlined function exit sequence, adding an HLeaveInlined
// instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value,
- HBasicBlock* target,
- FunctionState* state = NULL);
+ void AddLeaveInlined(HValue* return_value, FunctionState* state);
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
@@ -168,7 +166,7 @@ class HBasicBlock: public ZoneObject {
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(int ast_id);
+ HSimulate* CreateSimulate(BailoutId ast_id);
HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
@@ -244,10 +242,11 @@ class HLoopInformation: public ZoneObject {
class BoundsCheckTable;
class HGraph: public ZoneObject {
public:
- HGraph(CompilationInfo* info, Zone* zone);
+ explicit HGraph(CompilationInfo* info);
Isolate* isolate() { return isolate_; }
Zone* zone() const { return zone_; }
+ CompilationInfo* info() const { return info_; }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@@ -259,6 +258,7 @@ class HGraph: public ZoneObject {
void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
+ void ComputeSafeUint32Operations();
bool ProcessArgumentsObject();
void EliminateRedundantPhis();
void EliminateUnreachablePhis();
@@ -280,8 +280,6 @@ class HGraph: public ZoneObject {
void CollectPhis();
- Handle<Code> Compile(CompilationInfo* info, Zone* zone);
-
void set_undefined_constant(HConstant* constant) {
undefined_constant_.set(constant);
}
@@ -312,6 +310,8 @@ class HGraph: public ZoneObject {
return NULL;
}
+ bool Optimize(SmartArrayPointer<char>* bailout_reason);
+
#ifdef DEBUG
void Verify(bool do_full_verify) const;
#endif
@@ -336,6 +336,19 @@ class HGraph: public ZoneObject {
osr_values_.set(values);
}
+ int update_type_change_checksum(int delta) {
+ type_change_checksum_ += delta;
+ return type_change_checksum_;
+ }
+
+ bool use_optimistic_licm() {
+ return use_optimistic_licm_;
+ }
+
+ void set_use_optimistic_licm(bool value) {
+ use_optimistic_licm_ = value;
+ }
+
void MarkRecursive() {
is_recursive_ = true;
}
@@ -344,17 +357,18 @@ class HGraph: public ZoneObject {
return is_recursive_;
}
+ void RecordUint32Instruction(HInstruction* instr) {
+ if (uint32_instructions_ == NULL) {
+ uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
+ }
+ uint32_instructions_->Add(instr, zone());
+ }
+
private:
- void Postorder(HBasicBlock* block,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header);
- void PostorderLoopBlocks(HLoopInformation* loop,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
- Object* value);
+ Handle<Object> value);
+ HConstant* GetConstantInt32(SetOncePointer<HConstant>* pointer,
+ int32_t integer_value);
void MarkAsDeoptimizingRecursively(HBasicBlock* block);
void InsertTypeConversions(HInstruction* instr);
@@ -377,6 +391,7 @@ class HGraph: public ZoneObject {
ZoneList<HBasicBlock*> blocks_;
ZoneList<HValue*> values_;
ZoneList<HPhi*>* phi_list_;
+ ZoneList<HInstruction*>* uint32_instructions_;
SetOncePointer<HConstant> undefined_constant_;
SetOncePointer<HConstant> constant_1_;
SetOncePointer<HConstant> constant_minus1_;
@@ -388,9 +403,12 @@ class HGraph: public ZoneObject {
SetOncePointer<HBasicBlock> osr_loop_entry_;
SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
+ CompilationInfo* info_;
Zone* zone_;
bool is_recursive_;
+ bool use_optimistic_licm_;
+ int type_change_checksum_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -400,7 +418,13 @@ Zone* HBasicBlock::zone() const { return graph_->zone(); }
// Type of stack frame an environment might refer to.
-enum FrameType { JS_FUNCTION, JS_CONSTRUCT, ARGUMENTS_ADAPTOR };
+enum FrameType {
+ JS_FUNCTION,
+ JS_CONSTRUCT,
+ JS_GETTER,
+ JS_SETTER,
+ ARGUMENTS_ADAPTOR
+};
class HEnvironment: public ZoneObject {
@@ -435,8 +459,8 @@ class HEnvironment: public ZoneObject {
int pop_count() const { return pop_count_; }
int push_count() const { return push_count_; }
- int ast_id() const { return ast_id_; }
- void set_ast_id(int id) { ast_id_ = id; }
+ BailoutId ast_id() const { return ast_id_; }
+ void set_ast_id(BailoutId id) { ast_id_ = id; }
int length() const { return values_.length(); }
bool is_special_index(int i) const {
@@ -514,7 +538,7 @@ class HEnvironment: public ZoneObject {
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind,
- bool is_construct) const;
+ InliningKind inlining_kind) const;
void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
@@ -578,7 +602,7 @@ class HEnvironment: public ZoneObject {
HEnvironment* outer_;
int pop_count_;
int push_count_;
- int ast_id_;
+ BailoutId ast_id_;
Zone* zone_;
};
@@ -607,13 +631,13 @@ class AstContext {
// Add a hydrogen instruction to the instruction stream (recording an
// environment simulation if necessary) and then fill this context with
// the instruction as value.
- virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
+ virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id) = 0;
// Finishes the current basic block: materializes a boolean in a value
// context, does nothing in an effect context, and generates a branch in a
// test context.
// Call this function in tail position in the Visit functions for
// expressions.
- virtual void ReturnControl(HControlInstruction* instr, int ast_id) = 0;
+ virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id) = 0;
void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
bool is_for_typeof() { return for_typeof_; }
@@ -648,8 +672,8 @@ class EffectContext: public AstContext {
virtual ~EffectContext();
virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
- virtual void ReturnControl(HControlInstruction* instr, int ast_id);
+ virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
};
@@ -661,8 +685,8 @@ class ValueContext: public AstContext {
virtual ~ValueContext();
virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
- virtual void ReturnControl(HControlInstruction* instr, int ast_id);
+ virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -675,17 +699,19 @@ class TestContext: public AstContext {
public:
TestContext(HGraphBuilder* owner,
Expression* condition,
+ TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
condition_(condition),
+ oracle_(oracle),
if_true_(if_true),
if_false_(if_false) {
}
virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
- virtual void ReturnControl(HControlInstruction* instr, int ast_id);
+ virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
@@ -693,6 +719,7 @@ class TestContext: public AstContext {
}
Expression* condition() const { return condition_; }
+ TypeFeedbackOracle* oracle() const { return oracle_; }
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
@@ -702,31 +729,24 @@ class TestContext: public AstContext {
void BuildBranch(HValue* value);
Expression* condition_;
+ TypeFeedbackOracle* oracle_;
HBasicBlock* if_true_;
HBasicBlock* if_false_;
};
-enum ReturnHandlingFlag {
- NORMAL_RETURN,
- DROP_EXTRA_ON_RETURN,
- CONSTRUCT_CALL_RETURN
-};
-
-
class FunctionState {
public:
FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
- ReturnHandlingFlag return_handling);
+ InliningKind inlining_kind);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
TypeFeedbackOracle* oracle() { return oracle_; }
AstContext* call_context() { return call_context_; }
- bool drop_extra() { return return_handling_ == DROP_EXTRA_ON_RETURN; }
- bool is_construct() { return return_handling_ == CONSTRUCT_CALL_RETURN; }
+ InliningKind inlining_kind() const { return inlining_kind_; }
HBasicBlock* function_return() { return function_return_; }
TestContext* test_context() { return test_context_; }
void ClearInlinedTestContext() {
@@ -756,11 +776,8 @@ class FunctionState {
// inlined. NULL when not inlining.
AstContext* call_context_;
- // Indicate whether we have to perform special handling on return from
- // inlined functions.
- // - DROP_EXTRA_ON_RETURN: Drop an extra value from the environment.
- // - CONSTRUCT_CALL_RETURN: Either use allocated receiver or return value.
- ReturnHandlingFlag return_handling_;
+ // The kind of call which is currently being inlined.
+ InliningKind inlining_kind_;
// When inlining in an effect or value context, this is the return block.
// It is NULL otherwise. When inlining in a test context, there are a
@@ -838,7 +855,7 @@ class HGraphBuilder: public AstVisitor {
BreakAndContinueScope* next_;
};
- HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle, Zone* zone);
+ HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
HGraph* CreateGraph();
@@ -857,7 +874,7 @@ class HGraphBuilder: public AstVisitor {
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(int ast_id);
+ void AddSimulate(BailoutId ast_id);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
@@ -867,7 +884,7 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* CreateJoin(HBasicBlock* first,
HBasicBlock* second,
- int join_id);
+ BailoutId join_id);
TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
@@ -875,6 +892,12 @@ class HGraphBuilder: public AstVisitor {
void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void* operator new(size_t size, Zone* zone) {
+ return zone->New(static_cast<int>(size));
+ }
+ void operator delete(void* pointer, Zone* zone) { }
+ void operator delete(void* pointer) { }
+
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -980,9 +1003,8 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
- // Visit an argument subexpression and emit a push to the outgoing
- // arguments. Returns the hydrogen value that was pushed.
- HValue* VisitArgument(Expression* expr);
+ // Visit an argument subexpression and emit a push to the outgoing arguments.
+ void VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
@@ -1031,14 +1053,18 @@ class HGraphBuilder: public AstVisitor {
int InliningAstSize(Handle<JSFunction> target);
bool TryInline(CallKind call_kind,
Handle<JSFunction> target,
- ZoneList<Expression*>* arguments,
- HValue* receiver,
- int ast_id,
- int return_id,
- ReturnHandlingFlag return_handling);
+ int arguments_count,
+ HValue* implicit_return_value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ InliningKind inlining_kind);
bool TryInlineCall(Call* expr, bool drop_extra = false);
- bool TryInlineConstruct(CallNew* expr, HValue* receiver);
+ bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
+ bool TryInlineGetter(Handle<JSFunction> getter, Property* prop);
+ bool TryInlineSetter(Handle<JSFunction> setter,
+ Assignment* assignment,
+ HValue* implicit_return_value);
bool TryInlineBuiltinMethodCall(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
@@ -1055,7 +1081,7 @@ class HGraphBuilder: public AstVisitor {
void HandleGlobalVariableAssignment(Variable* var,
HValue* value,
int position,
- int ast_id);
+ BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
@@ -1087,37 +1113,37 @@ class HGraphBuilder: public AstVisitor {
HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HLoadNamedField* BuildLoadNamedField(HValue* object,
- Property* expr,
- Handle<Map> type,
- LookupResult* result,
- bool smi_and_map_check);
- HInstruction* BuildLoadNamedGeneric(HValue* object, Property* expr);
- HInstruction* BuildLoadKeyedGeneric(HValue* object,
- HValue* key);
- HInstruction* BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- ElementsKind elements_kind,
- bool is_store);
HInstruction* BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
+ HValue* dependency,
ElementsKind elements_kind,
bool is_store);
+ HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
+ HValue* key,
+ HValue* val,
+ SmallMapList* maps);
+
+ HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ Handle<Map> map,
+ bool is_store);
+
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
HValue* dependency,
Handle<Map> map,
bool is_store);
+
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
Expression* prop,
- int ast_id,
+ BailoutId ast_id,
int position,
bool is_store,
bool* has_side_effects);
@@ -1126,37 +1152,62 @@ class HGraphBuilder: public AstVisitor {
HValue* key,
HValue* val,
Expression* expr,
- int ast_id,
+ BailoutId ast_id,
int position,
bool is_store,
bool* has_side_effects);
- HInstruction* BuildLoadNamed(HValue* object,
- Property* prop,
- Handle<Map> map,
- Handle<String> name);
- HInstruction* BuildStoreNamed(HValue* object,
- HValue* value,
- Expression* expr);
- HInstruction* BuildStoreNamed(HValue* object,
- HValue* value,
- ObjectLiteral::Property* prop);
+ HLoadNamedField* BuildLoadNamedField(HValue* object,
+ Handle<Map> map,
+ LookupResult* result,
+ bool smi_and_map_check);
+ HInstruction* BuildLoadNamedGeneric(HValue* object,
+ Handle<String> name,
+ Property* expr);
+ HInstruction* BuildCallGetter(HValue* object,
+ Handle<Map> map,
+ Handle<JSFunction> getter,
+ Handle<JSObject> holder);
+ HInstruction* BuildLoadNamedMonomorphic(HValue* object,
+ Handle<String> name,
+ Property* expr,
+ Handle<Map> map);
+ HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
+ HInstruction* BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
- Handle<Map> type,
+ Handle<Map> map,
LookupResult* lookup,
bool smi_and_map_check);
HInstruction* BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value);
+ HInstruction* BuildCallSetter(HValue* object,
+ HValue* value,
+ Handle<Map> map,
+ Handle<JSFunction> setter,
+ Handle<JSObject> holder);
+ HInstruction* BuildStoreNamedMonomorphic(HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map);
HInstruction* BuildStoreKeyedGeneric(HValue* object,
HValue* key,
HValue* value);
HValue* BuildContextChainWalk(Variable* var);
- void AddCheckConstantFunction(Call* expr,
+ HInstruction* BuildThisFunction();
+
+ void AddCheckConstantFunction(Handle<JSObject> holder,
HValue* receiver,
Handle<Map> receiver_map,
bool smi_and_map_check);
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 3cf0d005e9..0b47748d6b 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -150,10 +150,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}
@@ -338,9 +335,9 @@ void Assembler::emit(Handle<Object> handle) {
}
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, unsigned id) {
- if (rmode == RelocInfo::CODE_TARGET && id != kNoASTId) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, static_cast<intptr_t>(id));
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
+ if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
} else if (rmode != RelocInfo::NONE) {
RecordRelocInfo(rmode);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index a42f6324e3..ea68c5090f 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -1373,7 +1373,7 @@ void Assembler::bind_to(Label* L, int pos) {
ASSERT(offset_to_next <= 0);
// Relative address, relative to point after address.
int disp = pos - fixup_pos - sizeof(int8_t);
- ASSERT(0 <= disp && disp <= 127);
+ CHECK(0 <= disp && disp <= 127);
set_byte_at(fixup_pos, disp);
if (offset_to_next < 0) {
L->link_to(fixup_pos + offset_to_next, Label::kNear);
@@ -1440,7 +1440,7 @@ int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
void Assembler::call(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
ASSERT(RelocInfo::IsCodeTarget(rmode));
@@ -1938,6 +1938,16 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
}
+void Assembler::cvtsd2si(Register dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x2D);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2044,6 +2054,15 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 4ead80b0ec..f95e7b797c 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -587,6 +587,11 @@ class Assembler : public AssemblerBase {
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ // Avoids using instructions that vary in size in unpredictable ways between
+ // the snapshot and the running VM. This is needed by the full compiler so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
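A self-contained illustration of the assumption behind predictable code size (not the real assembler logic): variable-size encodings such as short vs. near jumps are suppressed so that a debug recompile lays out byte-identical code.

static int BranchEncodingSize(int distance, bool predictable_code_size) {
  // A branch reaching [-128, 127] normally uses the 2-byte short form; with
  // predictable code size we always pick the 6-byte near form so the layout
  // does not depend on distances that may change after recompilation.
  if (!predictable_code_size && distance >= -128 && distance <= 127) return 2;
  return 6;
}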
@@ -883,8 +888,8 @@ class Assembler : public AssemblerBase {
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId);
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
// Jumps
// unconditional jump to L
@@ -978,6 +983,7 @@ class Assembler : public AssemblerBase {
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
+ void cvtsd2si(Register dst, XMMRegister src);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
@@ -993,6 +999,7 @@ class Assembler : public AssemblerBase {
void sqrtsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
+ void orpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
@@ -1111,6 +1118,7 @@ class Assembler : public AssemblerBase {
protected:
bool emit_debug_code() const { return emit_debug_code_; }
bool predictable_code_size() const { return predictable_code_size_; }
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
@@ -1136,7 +1144,7 @@ class Assembler : public AssemblerBase {
inline void emit(Handle<Object> handle);
inline void emit(uint32_t x,
RelocInfo::Mode rmode,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId id = TypeFeedbackId::None());
inline void emit(const Immediate& x);
inline void emit_w(const Immediate& x);
@@ -1186,6 +1194,7 @@ class Assembler : public AssemblerBase {
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
+ bool predictable_code_size_;
friend class PositionsRecorder;
};
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index be46ff216f..9bc15e9098 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -74,6 +74,43 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
+
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
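A rough C++ analogue of Generate_ParallelRecompile above (invented names; the real builtin is assembly): enqueue the function for background recompilation, then tail-call its existing shared code so execution never blocks.

struct SharedCodeSketch { void (*entry)(); };
struct FunctionSketch { SharedCodeSketch* shared; };
static void EnqueueForRecompile(FunctionSketch* f) { (void)f; /* hand off to a compiler thread */ }
static void ParallelRecompileSketch(FunctionSketch* f) {
  EnqueueForRecompile(f);  // Runtime::kParallelRecompile
  f->shared->entry();      // tail call to the unoptimized shared code
}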
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -641,9 +678,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
__ mov(ebx, FieldOperand(ebx, kGlobalIndex));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
@@ -819,9 +856,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
__ mov(ebx, FieldOperand(ebx, kGlobalOffset));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index df04b289b4..140db8a718 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -66,9 +66,13 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in esi.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
__ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
// Get the function info from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
@@ -76,12 +80,12 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+ __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
@@ -94,11 +98,20 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
__ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
__ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(factory->undefined_value()));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ test(ebx, ebx);
+ __ j(not_zero, &check_optimized, Label::kNear);
+ }
+ __ bind(&install_unoptimized);
+ __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
+ Immediate(factory->undefined_value()));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
@@ -106,6 +119,68 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+ // ecx holds the native context, ebx points to a fixed array of 3-element
+ // entries (native context, optimized code, literals).
+ // The map must never be empty, so check the first entry.
+ Label install_optimized;
+ // Speculatively move code object into edx.
+ __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
+ __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
+ __ j(equal, &install_optimized);
+
+ // Iterate through the rest of the map backwards. edx holds an index as a Smi.
+ Label loop;
+ Label restore;
+ __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ bind(&loop);
+ // Do not double-check the first entry.
+ __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ j(equal, &restore);
+ __ sub(edx, Immediate(Smi::FromInt(
+ SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
+ __ j(not_equal, &loop, Label::kNear);
+ // Hit: fetch the optimized code.
+ __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+ // TODO(fschneider): Idea: store proper code pointers in the optimized code
+ // map and either unmangle them on marking or do nothing as the whole map is
+ // discarded on major GC anyway.
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
+
+ // Now link the function into the native context's list of optimized functions.
+ __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
+ // No need for write barrier as JSFunction (eax) is in the new space.
+
+ __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
+ // Copy the JSFunction (eax) into edx before issuing the write barrier, as
+ // the barrier clobbers all the registers passed to it.
+ __ mov(edx, eax);
+ __ RecordWriteContextSlot(
+ ecx,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ edx,
+ ebx,
+ kDontSaveFPRegs);
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ __ bind(&restore);
+ // Restore SharedFunctionInfo into edx.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ jmp(&install_unoptimized);
+
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ pop(ecx); // Temporarily remove return address.
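For orientation, the cache probe implemented by the assembly above can be read as the following C++ sketch. This is an illustrative reading only, not code from the patch; it assumes the (native context, optimized code, literals) triple layout described in the stub's comments, with SharedFunctionInfo::kEntryLength covering one triple.

static Code* LookupOptimizedCode(FixedArray* code_map,
                                 Context* native_context) {
  // Walk the map from the last entry down to the first, one triple at a
  // time, comparing each entry's context against the current one.
  for (int i = code_map->length() - SharedFunctionInfo::kEntryLength;
       i >= 0;
       i -= SharedFunctionInfo::kEntryLength) {
    if (code_map->get(i) == native_context) {
      // Hit: return the cached optimized code for this context.
      return Code::cast(code_map->get(i + 1));
    }
  }
  return NULL;  // Miss: the stub falls back to the unoptimized code.
}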
@@ -142,8 +217,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
// Copy the global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
+ __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);
// Initialize the rest of the slots to undefined.
__ mov(ebx, factory->undefined_value());
@@ -186,9 +261,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
@@ -198,7 +273,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(equal, message);
}
__ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
__ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
@@ -208,8 +283,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
// Copy the global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
+ __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
+ __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);
// Initialize the rest of the slots to the hole value.
if (slots_ == 1) {
@@ -3359,10 +3434,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// esp[0] = mapped parameter count (tagged)
// esp[8] = parameter count (tagged)
// esp[12] = address of receiver argument
- // Get the arguments boilerplate from the current (global) context into edi.
+ // Get the arguments boilerplate from the current native context into edi.
Label has_mapped_parameters, copy;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -3552,9 +3627,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
- // Get the arguments boilerplate from the current (global) context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
const int offset =
Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(edi, offset));
@@ -3673,7 +3748,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(edx, Immediate(2)); // edx was a smi.
// Check that the static offsets vector buffer is large enough.
- __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
+ __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize);
__ j(above, &runtime);
// ecx: RegExp data (FixedArray)
@@ -4067,11 +4142,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
- __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
Factory* factory = masm->isolate()->factory();
__ mov(ecx, Immediate(factory->empty_fixed_array()));
__ lea(ebx, Operand(eax, JSRegExpResult::kSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
@@ -7073,6 +7148,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
+ // FastNewClosureStub
+ { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
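The new register triple presumably corresponds to the RecordWriteContextSlot call emitted by FastNewClosureStub above; the mapping below is an inference from that code, not something stated in the patch:

// { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET }
//   object  = ecx (native context)
//   value   = edx (the newly allocated JSFunction)
//   scratch = ebx
// matching __ RecordWriteContextSlot(ecx, ..., edx, ebx, kDontSaveFPRegs),
// so the required RecordWriteStub is generated ahead of time.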
@@ -7397,6 +7474,38 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ masm->CallStub(&stub);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // Ecx is the only volatile register we must save.
+ __ push(ecx);
+
+ // Calculate and push the original stack pointer.
+ __ lea(eax, Operand(esp, kPointerSize));
+ __ push(eax);
+
+ // Calculate and push the function address.
+ __ mov(eax, Operand(eax, 0));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ __ push(eax);
+
+ // Call the entry hook.
+ int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
+ __ call(Operand(hook_location, RelocInfo::NONE));
+ __ add(esp, Immediate(2 * kPointerSize));
+
+ // Restore ecx.
+ __ pop(ecx);
+ __ ret(0);
+}
+
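For context, this stub calls through the statically stored entry_hook_ function pointer; on the embedder side the hook is a plain C callback taking the entered code's address and the location of the return address, mirroring what the stub pushes above. A minimal sketch, assuming the hook is installed through V8::SetFunctionEntryHook as exposed in this release's public API:

#include <v8.h>

// Hypothetical profiler callback; the two arguments mirror the stub's
// pushes: the function's code entry and the slot holding the return address.
static void OnEntry(uintptr_t function, uintptr_t return_addr_location) {
  // Record `function` in a profile buffer, for example.
}

// Must run before V8 generates any code; returns false otherwise (assumed).
bool InstallEntryHook() {
  return v8::V8::SetFunctionEntryHook(&OnEntry);
}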
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 326207fbc6..f50010b9d9 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -117,6 +117,10 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
+ // The optimized code is going to be patched, so we cannot use it
+ // anymore. Play it safe and reset the whole cache.
+ function->shared()->ClearOptimizedCodeMap();
+
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
AssertNoAllocation no_allocation;
@@ -194,8 +198,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
+ // Iterate over all the functions which share the same code object
+ // and make them use the unoptimized version.
+ Context* context = function->context()->native_context();
+ Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+ SharedFunctionInfo* shared = function->shared();
+ while (!element->IsUndefined()) {
+ JSFunction* func = JSFunction::cast(element);
+ // Grab the next element before ReplaceCode, as it alters the list.
+ element = func->next_function_link();
+ if (func->code() == code) {
+ func->ReplaceCode(shared->code());
+ }
+ }
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -284,11 +299,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@@ -310,7 +325,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// the ast id. Confusing.
ASSERT(bailout_id_ == ast_id);
- int bailout_id = LookupBailoutId(data, ast_id);
+ int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@@ -330,9 +345,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
+ int closure_id = iterator.Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@@ -456,15 +471,15 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(pc);
}
Code* continuation =
- function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
+ function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@@ -679,16 +694,143 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (FLAG_trace_deopt) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 1 stack entry for the return address + 4 stack entries from
+ // StackFrame::INTERNAL (FP, context, frame type, code object, see
+ // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
+ // entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be read from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
+
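As a quick check of the arithmetic above on ia32 (kPointerSize == 4) — a worked example, not code from the patch:

// Return address (1) + FP, context, frame type, code object (4):
static const unsigned kGetterStubFrameSize = (1 + 4 + 0) * 4;  // 20 bytes.
// A setter frame carries one extra slot for the implicit return value:
static const unsigned kSetterStubFrameSize = (1 + 4 + 1) * 4;  // 24 bytes.
// height_in_bytes is 0 in both cases, so output_frame_size equals these.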
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index b5ddcca192..008fdde7ea 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -553,6 +553,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
case 2: mnem = "not"; break;
case 3: mnem = "neg"; break;
case 4: mnem = "mul"; break;
+ case 5: mnem = "imul"; break;
case 7: mnem = "idiv"; break;
default: UnimplementedInstruction();
}
@@ -1266,6 +1267,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x56) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("orpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x57) {
data++;
int mod, regop, rm;
@@ -1463,6 +1472,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
switch (b2) {
case 0x2A: mnem = "cvtsi2sd"; break;
case 0x2C: mnem = "cvttsd2si"; break;
+ case 0x2D: mnem = "cvtsd2si"; break;
case 0x51: mnem = "sqrtsd"; break;
case 0x58: mnem = "addsd"; break;
case 0x59: mnem = "mulsd"; break;
@@ -1475,7 +1485,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (b2 == 0x2A) {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
data += PrintRightOperand(data);
- } else if (b2 == 0x2C) {
+ } else if (b2 == 0x2C || b2 == 0x2D) {
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 5a513fd483..7fb7cc3215 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -123,6 +123,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -176,10 +178,13 @@ void FullCodeGenerator::Generate() {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
+ Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
@@ -227,7 +232,7 @@ void FullCodeGenerator::Generate() {
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
- __ SafePush(Immediate(Smi::FromInt(num_parameters)));
+ __ push(Immediate(Smi::FromInt(num_parameters)));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@@ -257,7 +262,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@@ -272,7 +277,7 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
@@ -317,20 +322,12 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
// Self-optimization is a one-off thing: if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
- if (isolate()->IsDebuggerActive()) {
- // Detect debug break requests as soon as possible.
- reset_value = 10;
- }
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 100;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -342,7 +339,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -404,7 +401,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -760,7 +757,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
@@ -813,10 +810,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ push(esi);
__ push(Immediate(variable->name()));
// VariableDeclaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
- PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
- ? READ_ONLY : NONE;
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -1094,19 +1090,28 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a map in register eax. Get the enumeration cache from it.
+ Label no_descriptors;
__ bind(&use_cache);
+
+ __ EnumLength(edx, eax);
+ __ cmp(edx, Immediate(Smi::FromInt(0)));
+ __ j(equal, &no_descriptors);
+
__ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
- __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(eax); // Map.
- __ push(edx); // Enumeration cache.
- __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ push(eax); // Enumeration cache length (as smi).
+ __ push(ecx); // Enumeration cache.
+ __ push(edx); // Number of valid entries for the map in the enum cache.
__ push(Immediate(Smi::FromInt(0))); // Initial index.
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ add(esp, Immediate(kPointerSize));
+ __ jmp(&exit);
+
// We got a fixed array in register eax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
@@ -1115,7 +1120,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(ebx, cell);
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
@@ -1271,9 +1276,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ mov(temp, context);
}
__ bind(&next);
- // Terminate at global context.
+ // Terminate at native context.
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->global_context_map()));
+ Immediate(isolate()->factory()->native_context_map()));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
__ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
@@ -1559,7 +1564,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1585,7 +1590,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1797,11 +1802,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1857,14 +1862,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1885,7 +1890,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1970,7 +1976,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(edx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -2097,7 +2104,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, ecx);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2153,7 +2160,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2196,7 +2203,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2220,6 +2227,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForAccumulatorValue(expr->obj());
__ mov(edx, result_register());
EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(eax);
} else {
VisitForStackValue(expr->obj());
@@ -2234,7 +2242,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ic_total_count_++;
__ call(code, rmode, ast_id);
}
@@ -2258,7 +2266,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
SetSourcePosition(expr->position());
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2290,7 +2298,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2310,20 +2318,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code, but not in the snapshot.
- if (!Serializer::enabled()) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(ebx, cell);
- }
+ // Record call targets in unoptimized code.
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ mov(ebx, cell);
CallFunctionStub stub(arg_count, flags);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->id());
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
@@ -2495,24 +2501,18 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
SetSourcePosition(expr->position());
// Load function and argument count into edi and eax.
- __ SafeSet(eax, Immediate(arg_count));
+ __ Set(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- // Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(ebx, cell);
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
- }
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ mov(ebx, cell);
- CallConstructStub stub(flags);
+ CallConstructStub stub(RECORD_CALL_TARGET);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(eax);
@@ -2653,7 +2653,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(eax);
+ if (generate_debug_code_) __ AbortIfSmi(eax);
// Check whether this map has already been checked to be safe for default
// valueOf.
@@ -2681,9 +2681,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
STATIC_ASSERT(kPointerSize == 4);
__ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// Calculate location of the first key name.
- __ add(ebx,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
+ __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
Label entry, loop;
@@ -2692,7 +2690,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_symbol());
__ j(equal, if_false);
- __ add(ebx, Immediate(kPointerSize));
+ __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmp(ebx, ecx);
__ j(not_equal, &loop);
@@ -2705,9 +2703,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
__ JumpIfSmi(ecx, if_false);
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(edx,
- FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ FieldOperand(edx, GlobalObject::kNativeContextOffset));
__ cmp(ecx,
ContextOperand(edx,
Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
@@ -2853,7 +2851,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -2865,7 +2863,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
Label exit;
// Get the number of formal parameters.
- __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2878,7 +2876,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+ if (generate_debug_code_) __ AbortIfNotSmi(eax);
context()->Plug(eax);
}
@@ -2982,8 +2980,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ bind(&heapnumber_allocated);
__ PrepareCallCFunction(1, ebx);
- __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+ __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@@ -3070,19 +3068,18 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = eax;
Register result = eax;
Register scratch = ecx;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ JumpIfSmi(object, &not_date_object);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ Assert(equal, "Trying to get date field from non-date.");
-#endif
+ __ j(not_equal, &not_date_object);
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3098,8 +3095,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ mov(Operand(esp, 0), object);
__ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
+ __ jmp(&done);
}
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(result);
}
@@ -3370,10 +3371,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
- __ CmpObjectType(eax, JS_FUNCTION_PROXY_TYPE, ebx);
- __ j(equal, &proxy);
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &runtime);
// InvokeFunction requires the function in edi. Move it in there.
__ mov(edi, result_register());
@@ -3383,7 +3385,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(eax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3413,7 +3415,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ mov(eax, isolate()->factory()->undefined_value());
@@ -3426,9 +3428,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = eax;
Register cache = ebx;
Register tmp = ecx;
- __ mov(cache, ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
__ mov(cache,
- FieldOperand(cache, GlobalObject::kGlobalContextOffset));
+ FieldOperand(cache, GlobalObject::kNativeContextOffset));
__ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ mov(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
@@ -3498,7 +3500,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ AbortIfNotString(eax);
}
@@ -3523,9 +3525,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(eax);
- }
+ __ AbortIfNotString(eax);
__ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
__ IndexFromHash(eax, eax);
@@ -3599,7 +3599,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmp(index, array_length);
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
@@ -3827,7 +3827,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3985,7 +3985,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
context()->Plug(eax);
}
@@ -4043,7 +4044,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@@ -4106,7 +4107,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4140,7 +4141,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4157,7 +4158,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4365,7 +4366,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4447,7 +4448,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4489,6 +4490,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(edx, Operand::StaticVariable(has_pending_message));
+ __ SmiTag(edx);
__ push(edx);
ExternalReference pending_message_script =
@@ -4507,6 +4509,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
__ mov(Operand::StaticVariable(pending_message_script), edx);
__ pop(edx);
+ __ SmiUntag(edx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(Operand::StaticVariable(has_pending_message), edx);
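A note on the SmiTag/SmiUntag pair added in these two hunks. The presumed rationale (an inference, not stated in the patch) is that slots pushed on the JS stack are scanned as tagged values by the GC, and a raw boolean 1 has the low bit set and would look like a heap pointer; smi-tagging shifts it into a valid smi:

// EnterFinallyBlock: __ SmiTag(edx)   =>  edx <<= kSmiTagSize;  // 1 -> 2
// ExitFinallyBlock:  __ SmiUntag(edx) =>  edx >>= kSmiTagSize;  // 2 -> 1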
@@ -4554,7 +4557,6 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
return previous_;
}
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index a091ff1aa6..52d8fa1452 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -943,7 +943,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
- NORMAL,
+ Code::NORMAL,
argc);
Isolate* isolate = masm->isolate();
isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 2c6a916b04..2b42b13a1a 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -81,7 +81,7 @@ bool LCodeGen::GenerateCode() {
dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
!chunk()->graph()->is_recursive()) ||
- info()->osr_ast_id() != AstNode::kNoNumber;
+ !info()->osr_ast_id().IsNone();
return GeneratePrologue() &&
GenerateBody() &&
@@ -99,17 +99,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -135,6 +126,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -360,24 +353,24 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+ ASSERT(constant->HasInteger32Value());
+ return constant->Integer32Value();
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ return constant->handle();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
@@ -420,7 +413,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ int closure_id = *info()->closure() != *environment->closure()
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -428,11 +423,19 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@@ -444,7 +447,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i));
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -452,18 +456,23 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
+ false,
false);
}
}
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i));
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged) {
+ bool is_tagged,
+ bool is_uint32) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
@@ -472,6 +481,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
@@ -485,6 +496,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@@ -492,8 +505,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -552,9 +565,9 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
} else if (context->IsStackSlot()) {
__ mov(esi, ToOperand(context));
} else if (context->IsConstantOperand()) {
- Handle<Object> literal =
- chunk_->LookupLiteral(LConstantOperand::cast(context));
- __ LoadHeapObject(esi, Handle<Context>::cast(literal));
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
} else {
UNREACHABLE();
}
@@ -674,13 +687,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -1020,6 +1033,109 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ ASSERT(instr->InputAt(1)->IsConstantOperand());
+
+ Register dividend = ToRegister(instr->InputAt(0));
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+ Register result = ToRegister(instr->result());
+
+ switch (divisor) {
+ case 0:
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+
+ case 1:
+ __ Move(result, dividend);
+ return;
+
+ case -1:
+ __ Move(result, dividend);
+ __ neg(result);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ return;
+ }
+
+ uint32_t divisor_abs = abs(divisor);
+ if (IsPowerOf2(divisor_abs)) {
+ int32_t power = WhichPowerOf2(divisor_abs);
+ if (divisor < 0) {
+ // The input register (dividend) is clobbered.
+ // The sequence is tedious because neg(dividend) might overflow.
+ __ mov(result, dividend);
+ __ sar(dividend, 31);
+ __ neg(result);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ shl(dividend, 32 - power);
+ __ sar(result, power);
+ __ not_(dividend);
+ // Clear result.sign if dividend.sign is set.
+ __ and_(result, dividend);
+ } else {
+ __ Move(result, dividend);
+ __ sar(result, power);
+ }
+ } else {
+ ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+ ASSERT(ToRegister(instr->result()).is(edx));
+ Register scratch = ToRegister(instr->TempAt(0));
+
+ // Find b such that 2^b < divisor_abs < 2^(b+1).
+ unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
+ unsigned shift = 32 + b; // Effectively one extra bit of precision.
+ double multiplier_f =
+ static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
+ int64_t multiplier;
+ if (multiplier_f - floor(multiplier_f) < 0.5) {
+ multiplier = static_cast<int64_t>(floor(multiplier_f));
+ } else {
+ multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
+ }
+ // The multiplier is a uint32.
+ ASSERT(multiplier > 0 &&
+ multiplier < (static_cast<int64_t>(1) << 32));
+ __ mov(scratch, dividend);
+ if (divisor < 0 &&
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ mov(edx, static_cast<int32_t>(multiplier));
+ __ imul(edx);
+ if (static_cast<int32_t>(multiplier) < 0) {
+ __ add(edx, scratch);
+ }
+ Register reg_lo = eax;
+ Register reg_byte_scratch = scratch;
+ if (!reg_byte_scratch.is_byte_register()) {
+ __ xchg(reg_lo, reg_byte_scratch);
+ reg_lo = scratch;
+ reg_byte_scratch = eax;
+ }
+ if (divisor < 0) {
+ __ xor_(reg_byte_scratch, reg_byte_scratch);
+ __ cmp(reg_lo, 0x40000000);
+ __ setcc(above, reg_byte_scratch);
+ __ neg(edx);
+ __ sub(edx, reg_byte_scratch);
+ } else {
+ __ xor_(reg_byte_scratch, reg_byte_scratch);
+ __ cmp(reg_lo, 0xC0000000);
+ __ setcc(above_equal, reg_byte_scratch);
+ __ add(edx, reg_byte_scratch);
+ }
+ __ sar(edx, shift - 32);
+ }
+}
+
+
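The two flooring strategies above can be exercised in isolation. A minimal standalone sketch (not V8 code): an arithmetic shift handles power-of-two divisors, and a rounded reciprocal multiplier (shown for divisor 3, where round(2^33 / 3) = 0xAAAAAAAB) performs the same math as the emitted imul/sar sequence.

#include <cassert>
#include <cstdint>

// Power-of-two divisor: "sar result, power" already floors toward
// negative infinity, unlike idiv, which truncates toward zero.
int32_t FloorDivPow2(int32_t n, int power) {
  return n >> power;
}

// General case, instantiated for divisor 3: multiply by the rounded
// reciprocal round(2^33 / 3) and shift the 64-bit product right by 33.
uint32_t DivBy3(uint32_t n) {
  const uint64_t kMultiplier = 0xAAAAAAABull;  // round(2^33 / 3)
  return static_cast<uint32_t>((n * kMultiplier) >> 33);
}

int main() {
  assert(FloorDivPow2(-5, 1) == -3);  // floor(-2.5) == -3, not -2
  for (uint32_t n = 0; n < 100000; ++n) assert(DivBy3(n) == n / 3u);
  return 0;
}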
void LCodeGen::DoMulI(LMulI* instr) {
Register left = ToRegister(instr->InputAt(0));
LOperand* right = instr->InputAt(1);
@@ -1283,6 +1399,13 @@ void LCodeGen::DoFixedArrayBaseLength(
}
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->InputAt(0));
+ __ EnumLength(result, map);
+}
+
+
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
@@ -1326,11 +1449,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(object.is(result));
ASSERT(object.is(eax));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ test(object, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ Assert(equal, "Trying to get date field from non-date.");
-#endif
+ DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1390,6 +1512,67 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Label return_left;
+ Condition condition = (operation == HMathMinMax::kMathMin)
+ ? less_equal
+ : greater_equal;
+ if (right->IsConstantOperand()) {
+ Operand left_op = ToOperand(left);
+ Immediate right_imm = ToInteger32Immediate(right);
+ __ cmp(left_op, right_imm);
+ __ j(condition, &return_left, Label::kNear);
+ __ mov(left_op, right_imm);
+ } else {
+ Register left_reg = ToRegister(left);
+ Operand right_op = ToOperand(right);
+ __ cmp(left_reg, right_op);
+ __ j(condition, &return_left, Label::kNear);
+ __ mov(left_reg, right_op);
+ }
+ __ bind(&return_left);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+ XMMRegister left_reg = ToDoubleRegister(left);
+ XMMRegister right_reg = ToDoubleRegister(right);
+ __ ucomisd(left_reg, right_reg);
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(left_reg, xmm_scratch);
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ __ orpd(left_reg, right_reg);
+ } else {
+ // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
+ __ addsd(left_reg, right_reg);
+ }
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ ucomisd(left_reg, left_reg); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+ __ bind(&return_right);
+ __ movsd(left_reg, right_reg);
+
+ __ bind(&return_left);
+ }
+}
+
+
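The zero handling above exists because ucomisd reports -0.0 and +0.0 as equal, so the right zero has to be chosen bitwise: orpd merges the sign bits for min, and addsd yields +0 for max. A scalar sketch (not V8 code) of both identities:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

double MinOfZeros(double a, double b) {  // like orpd: -0 wins for min
  uint64_t ua, ub, ur;
  std::memcpy(&ua, &a, sizeof ua);
  std::memcpy(&ub, &b, sizeof ub);
  ur = ua | ub;  // OR keeps a set sign bit from either operand
  double r;
  std::memcpy(&r, &ur, sizeof r);
  return r;
}

double MaxOfZeros(double a, double b) {  // like addsd: +0 wins for max
  return a + b;  // (+0.0) + (-0.0) == +0.0 in round-to-nearest
}

int main() {
  assert(std::signbit(MinOfZeros(0.0, -0.0)));   // min(+0, -0) is -0
  assert(!std::signbit(MaxOfZeros(0.0, -0.0)));  // max(+0, -0) is +0
  return 0;
}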
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister left = ToDoubleRegister(instr->InputAt(0));
XMMRegister right = ToDoubleRegister(instr->InputAt(1));
@@ -1891,9 +2074,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AbortIfNotString(input);
__ mov(result, FieldOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -2305,9 +2486,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
+ type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsFound() && lookup.type() == FIELD) {
+ if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2319,7 +2500,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
+ } else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
} else {
@@ -2366,11 +2547,10 @@ static bool CompactEmit(SmallMapList* list,
Handle<Map> map = list->at(i);
// If the map has ElementsKind transitions, we will generate map checks
// for each kind in __ CompareMap(..., ALLOW_ELEMENTS_TRANSITION_MAPS).
- if (map->elements_transition_map() != NULL) return false;
+ if (map->HasElementsTransition()) return false;
LookupResult lookup(isolate);
- map->LookupInDescriptors(NULL, *name, &lookup);
- return lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION);
+ map->LookupDescriptor(NULL, *name, &lookup);
+ return lookup.IsField() || lookup.IsConstantFunction();
}
@@ -2544,6 +2724,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
__ mov(result,
BuildFastArrayOperand(instr->elements(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()));
@@ -2570,6 +2751,7 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
offset,
instr->additional_index());
@@ -2580,6 +2762,7 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -2590,11 +2773,19 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced during
+ // bound check elimination with the index argument to the bounds check, which
+ // can be tagged, so that case must be handled here, too.
+ if (key_representation.IsTagged() && (shift_size >= 1)) {
+ shift_size -= kSmiTagSize;
+ }
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
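The kSmiTagSize adjustment above works because a tagged smi key is already the index shifted left by one, so dropping one bit from the scale shift folds the untagging into the addressing arithmetic. A sketch (not V8 code; ElementOffset is an illustrative name):

#include <cassert>
#include <cstdint>

// (index << 1) << (shift_size - kSmiTagSize) == index << shift_size.
int32_t ElementOffset(int32_t tagged_key, int shift_size, int32_t base) {
  const int kSmiTagSize = 1;
  return base + (tagged_key << (shift_size - kSmiTagSize));
}

int main() {
  int32_t index = 7;
  int32_t tagged_key = index << 1;  // smi-tagged key
  assert(ElementOffset(tagged_key, 2, 0) == index << 2);  // 4-byte elements
  return 0;
}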
@@ -2616,11 +2807,19 @@ Operand LCodeGen::BuildFastArrayOperand(
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
@@ -2648,11 +2847,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(result, operand);
- __ test(result, Operand(result));
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(negative, instr->environment());
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ test(result, Operand(result));
+ DeoptimizeIf(negative, instr->environment());
+ }
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
@@ -2748,12 +2946,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok, Label::kNear);
+ __ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok, Label::kNear);
+ __ j(not_equal, &receiver_ok);
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
@@ -2773,7 +2971,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// if it's better to use it than to explicitly fetch it from the context
// here.
__ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
__ mov(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
@@ -2835,7 +3033,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -2865,7 +3063,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(result,
+ Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
}
@@ -2892,17 +3091,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(edi, function);
}
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
-
- if (change_context) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- } else {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
+ // Change context.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Set eax to arguments count if adaption is not needed. Assumes that eax
// is available to write to at this point.
@@ -3065,8 +3255,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
} else {
- Label negative_sign;
- Label done;
+ Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
@@ -3092,9 +3281,9 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DeoptimizeIf(equal, instr->environment());
__ jmp(&done, Label::kNear);
- // Non-zero negative reaches here
+ // Non-zero negative reaches here.
__ bind(&negative_sign);
- // Truncate, then compare and compensate
+ // Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
__ cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
@@ -3243,11 +3432,11 @@ void LCodeGen::DoRandom(LRandom* instr) {
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+ __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
- // ebx: FixedArray of the global context's random seeds
+ // ebx: FixedArray of the native context's random seeds
// Load state[0].
__ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
@@ -3559,10 +3748,36 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand) {
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
+ if (operand->IsRegister()) {
+ __ test(ToRegister(operand), Immediate(kSmiTagMask));
+ } else {
+ __ test(ToOperand(operand), Immediate(kSmiTagMask));
+ }
+ DeoptimizeIf(not_zero, environment);
+ }
+}
+
+
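The test against kSmiTagMask relies on smis carrying tag 0 in the low bit, so the zero flag is set exactly for smi values. A small sketch (not V8 code) of the predicate the test/DeoptimizeIf pair evaluates:

#include <cassert>
#include <cstdint>

inline bool IsSmi(int32_t value) {
  const int32_t kSmiTagMask = 1;  // low bit is the tag; 0 means smi
  return (value & kSmiTagMask) == 0;
}

int main() {
  assert(IsSmi(7 << 1));         // a tagged small integer
  assert(!IsSmi((7 << 1) | 1));  // low bit set: a heap object pointer
  return 0;
}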
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->length(),
+ instr->length());
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->index(),
+ instr->index());
if (instr->index()->IsConstantOperand()) {
- __ cmp(ToOperand(instr->length()),
- Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ cmp(ToOperand(instr->length()),
+ Immediate(Smi::FromInt(constant_index)));
+ } else {
+ __ cmp(ToOperand(instr->length()), Immediate(constant_index));
+ }
DeoptimizeIf(below_equal, instr->environment());
} else {
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
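With a tagged length, the constant index above has to be compared in tagged form as well; since Smi::FromInt just doubles the value on ia32, the comparison outcome is unchanged. A sketch (not V8 code):

#include <cassert>
#include <cstdint>

bool IndexInBounds(int32_t tagged_length, int32_t constant_index) {
  int32_t tagged_index = constant_index << 1;  // Smi::FromInt on ia32
  return tagged_index < tagged_length;  // doubling both sides keeps order
}

int main() {
  int32_t tagged_length = 10 << 1;
  assert(IndexInBounds(tagged_length, 9));
  assert(!IndexInBounds(tagged_length, 10));
  return 0;
}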
@@ -3574,11 +3789,19 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
@@ -3625,6 +3848,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Operand operand = BuildFastArrayOperand(
instr->object(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -3666,6 +3890,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -3865,12 +4090,27 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->InputAt(0);
+ LOperand* output = instr->result();
+ LOperand* temp = instr->TempAt(0);
+
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+}
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->InputAt(0),
+ SIGNED_INT32);
+ }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
@@ -3887,21 +4127,56 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->InputAt(0),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ cmp(reg, Immediate(Smi::kMaxValue));
+ __ j(above, deferred->entry());
+ __ SmiTag(reg);
+ __ bind(deferred->exit());
+}
+
+
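DoNumberTagU can tag inline only when the value fits the 31-bit smi payload; the unsigned compare against Smi::kMaxValue also diverts anything with bit 31 set to the deferred heap-number path. A sketch (not V8 code) of the fast-path test:

#include <cassert>
#include <cstdint>

const uint32_t kSmiMaxValue = (1u << 30) - 1;  // ia32 Smi::kMaxValue

bool CanTagInline(uint32_t value) {
  return value <= kSmiMaxValue;  // "above" in the cmp/j pair means: defer
}

int main() {
  assert(CanTagInline(1073741823u));   // largest smi
  assert(!CanTagInline(1073741824u));  // needs a heap number
  assert(!CanTagInline(0x80000000u));  // bit 31 set: also deferred
  return 0;
}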
+void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness) {
Label slow;
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
Label done;
- __ SmiUntag(reg);
- __ xor_(reg, 0x80000000);
- __ cvtsi2sd(xmm0, Operand(reg));
+
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ __ SmiUntag(reg);
+ __ xor_(reg, 0x80000000);
+ __ cvtsi2sd(xmm0, Operand(reg));
+ } else {
+ __ LoadUint32(xmm0, reg, xmm1);
+ }
+
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, no_reg, &slow);
__ jmp(&done, Label::kNear);
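The SmiUntag/xor pair in the SIGNED_INT32 branch recovers the pre-tag value: SmiTag is a left shift by one, so after an overflow only bit 31 of the arithmetically untagged value is wrong. A sketch (not V8 code):

#include <cassert>
#include <cstdint>

int32_t RecoverAfterSmiTagOverflow(int32_t tagged) {
  int32_t untagged = tagged >> 1;  // SmiUntag: arithmetic shift right
  return untagged ^ static_cast<int32_t>(0x80000000u);  // flip bit 31
}

int main() {
  int32_t value = 0x50000000;  // too big for a 31-bit smi payload
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  assert(RecoverAfterSmiTagOverflow(tagged) == value);
  return 0;
}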
@@ -4582,7 +4857,7 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
- Heap* heap = isolate()->heap();
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
@@ -4603,12 +4878,11 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
// Set up the parameters to the stub/runtime call.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+ __ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
- __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
+ __ push(Immediate(isolate()->factory()->empty_fixed_array()));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -4712,8 +4986,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<FixedDoubleArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
- int32_t value_low = value & 0xFFFFFFFF;
- int32_t value_high = value >> 32;
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
+ int32_t value_high = static_cast<int32_t>(value >> 32);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ mov(FieldOperand(result, total_offset), Immediate(value_low));
@@ -4827,15 +5101,13 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Label materialized;
// Registers will be used as follows:
- // edi = JS function.
// ecx = literals array.
// ebx = regexp literal.
// eax = regexp literal clone.
// esi = context.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(ecx, instr->hydrogen()->literals());
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, factory()->undefined_value());
__ j(not_equal, &materialized, Label::kNear);
@@ -5191,11 +5463,20 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ cmp(result, Immediate(Smi::FromInt(0)));
+ __ j(not_equal, &load_cache);
+ __ mov(result, isolate()->factory()->empty_fixed_array());
+ __ jmp(&done);
+
+ __ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ mov(result,
- FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ mov(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ bind(&done);
__ test(result, result);
DeoptimizeIf(equal, instr->environment());
}
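The rewritten DoForInCacheArray short-circuits maps whose EnumLength is zero instead of touching the descriptor array. A toy sketch (not V8 code; the stand-in types below do not reflect the real object layouts):

#include <cassert>
#include <vector>

struct ToyMap {
  int enum_length;              // what __ EnumLength(result, map) reads
  std::vector<int> enum_cache;  // stand-in for the descriptors' enum cache
};

const std::vector<int> kEmptyFixedArray;

const std::vector<int>& ForInCache(const ToyMap& map, bool* deopt) {
  *deopt = false;
  if (map.enum_length == 0) return kEmptyFixedArray;  // new fast path
  if (map.enum_cache.empty()) *deopt = true;  // mirrors test/DeoptimizeIf
  return map.enum_cache;
}

int main() {
  bool deopt = false;
  ToyMap empty_map{0, {1, 2}};
  assert(ForInCache(empty_map, &deopt).empty() && !deopt);
  ToyMap cached_map{2, {1, 2}};
  assert(ForInCache(cached_map, &deopt).size() == 2 && !deopt);
  return 0;
}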
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index b241aafb96..9058ede0eb 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -46,26 +46,25 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
- Zone* zone)
- : chunk_(chunk),
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : zone_(info->zone()),
+ chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4, zone),
- deoptimization_literals_(8, zone),
+ deoptimizations_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- translations_(zone),
- deferred_(8, zone),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
- safepoints_(zone),
- zone_(zone),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -106,7 +105,12 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -151,7 +155,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -167,7 +171,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
void Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -234,7 +238,8 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -247,6 +252,7 @@ class LCodeGen BASE_EMBEDDED {
double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index = 0);
@@ -284,6 +290,10 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
+ void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -330,7 +340,8 @@ class LCodeGen BASE_EMBEDDED {
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
- LChunk* const chunk_;
+ Zone* zone_;
+ LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -352,8 +363,6 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
- Zone* zone_;
-
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 60366f7447..f576e37e4f 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -366,7 +366,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(bool is_double) {
  // Skip a slot for a double-width slot.
if (is_double) {
spill_slot_count_++;
@@ -377,7 +377,7 @@ int LChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index, zone());
@@ -387,42 +387,6 @@ LOperand* LChunk::GetNextSpillSlot(bool is_double) {
}
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
@@ -474,84 +438,9 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap, zone());
- index = instructions_.length();
- instructions_.Add(instr, zone());
- } else {
- index = instructions_.length();
- instructions_.Add(instr, zone());
- instructions_.Add(gap, zone());
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map(), zone());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id(), zone());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(from, to, zone());
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
+LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
@@ -572,17 +461,8 @@ LChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LChunkBuilder::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -753,7 +633,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@@ -851,13 +731,16 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift by 0 and
// the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
}
}
}
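The special case for op == Token::SHR with shift 0 exists because x >>> 0 is the one shift whose result can fall outside int32 range; with FLAG_opt_safe_uint32_operations the value is tracked as uint32 rather than deoptimizing. A sketch (not V8 code) of the underlying fact:

#include <cassert>
#include <cstdint>

uint32_t ShrZero(int32_t n) {
  return static_cast<uint32_t>(n);  // JS "n >>> 0" on an int32 input
}

int main() {
  assert(ShrZero(-1) == 4294967295u);  // 2^32 - 1 has no int32 encoding
  return 0;
}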
@@ -991,8 +874,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result =
@@ -1017,7 +900,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
- result->AddValue(op, value->representation());
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@@ -1347,12 +1232,57 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- UNIMPLEMENTED();
+HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
+ // A value with an integer representation does not need to be transformed.
+ if (dividend->representation().IsInteger32()) {
+ return dividend;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (dividend->IsChange() &&
+ HChange::cast(dividend)->from().IsInteger32()) {
+ return HChange::cast(dividend)->value();
+ }
return NULL;
}
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+ if (divisor->IsConstant() &&
+ HConstant::cast(divisor)->HasInteger32Value()) {
+ HConstant* constant_val = HConstant::cast(divisor);
+ return constant_val->CopyToRepresentation(Representation::Integer32(),
+ divisor->block()->zone());
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ HValue* right = instr->right();
+ ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
+ LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
+ int32_t divisor_si = HConstant::cast(right)->Integer32Value();
+ if (divisor_si == 0) {
+ LOperand* dividend = UseRegister(instr->left());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
+ } else if (IsPowerOf2(abs(divisor_si))) {
+    // Use dividend as a temp if divisor < 0 && divisor != -1.
+ LOperand* dividend = divisor_si < -1 ? UseTempRegister(instr->left()) :
+ UseRegisterAtStart(instr->left());
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
+ return divisor_si < 0 ? AssignEnvironment(result) : result;
+ } else {
+    // Needs edx:eax, plus a temp register.
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* temp = TempRegister();
+ LInstruction* result = DefineFixed(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, temp), edx);
+ return divisor_si < 0 ? AssignEnvironment(result) : result;
+ }
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@@ -1461,6 +1391,26 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ right = UseOrConstantAtStart(instr->MostConstantOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+ return DefineSameAsFirst(minmax);
+}
+
+
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@@ -1637,6 +1587,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@@ -1654,7 +1610,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* date = UseFixed(instr->value(), eax);
LDateField* result =
new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1744,14 +1700,24 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
ASSERT(to.IsDouble());
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp = FixedTemp(xmm1);
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(Use(instr->value())));
+ }
}
}
UNREACHABLE();
@@ -1966,7 +1932,8 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
@@ -1978,7 +1945,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
@@ -1997,11 +1965,17 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstant(instr->key());
+
LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
@@ -2027,7 +2001,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegister(instr->object());
LOperand* val = needs_write_barrier
@@ -2044,7 +2019,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
@@ -2065,10 +2041,10 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* val = NULL;
if (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
@@ -2078,7 +2054,11 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
} else {
val = UseRegister(instr->value());
}
-
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstant(instr->key());
return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
key,
val);
@@ -2348,7 +2328,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
+ if (!pending_deoptimization_ast_id_.IsNone()) {
ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
@@ -2357,7 +2337,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
@@ -2386,7 +2366,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
- instr->is_construct());
+ instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index cd20631805..83a6c6f32b 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -102,6 +102,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -109,7 +110,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -126,11 +126,15 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
+ V(MathFloorOfDiv) \
+ V(MathMinMax) \
V(MathPowHalf) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
+ V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@@ -157,6 +161,7 @@ class LCodeGen;
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -252,8 +257,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
@@ -265,6 +268,11 @@ class LInstruction: public ZoneObject {
#endif
private:
+ // Iterator support.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -284,7 +292,6 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- int InputCount() { return I; }
LOperand* InputAt(int i) { return inputs_[i]; }
int TempCount() { return T; }
@@ -294,6 +301,9 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ virtual int InputCount() { return I; }
};
@@ -546,6 +556,21 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
+class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathFloorOfDiv(LOperand* left,
+ LOperand* right,
+ LOperand* temp = NULL) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -856,6 +881,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -994,6 +1020,16 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
};
+class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
@@ -1071,6 +1107,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
+class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@@ -1261,6 +1309,19 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
};
+inline static bool ExternalArrayOpRequiresTemp(
+ Representation key_representation,
+ ElementsKind elements_kind) {
+ // Operations that require the key to be divided by two to be converted into
+ // an index cannot fold the scale operation into a load and need an extra
+ // temp register to do the work.
+ return key_representation.IsTagged() &&
+ (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS);
+}
+
+
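ExternalArrayOpRequiresTemp encodes an addressing-mode limit: ia32 scales are 1, 2, 4 or 8, so a tagged key (index * 2) folds into the scale for 2-, 4- and 8-byte elements but would need an impossible half scale for byte-sized ones. A sketch (not V8 code) of that decision:

#include <cassert>

bool TaggedKeyNeedsUntag(int element_size_bytes) {
  // Effective scale would be element_size / 2; below 1 it cannot be
  // expressed in a scaled operand, so the key is untagged into a temp.
  return element_size_bytes < 2;
}

int main() {
  assert(TaggedKeyNeedsUntag(1));   // byte and pixel elements
  assert(!TaggedKeyNeedsUntag(4));  // e.g. external int elements
  return 0;
}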
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
@@ -1628,6 +1689,17 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
+class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -1638,6 +1710,17 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
+class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagD(LOperand* value, LOperand* temp) {
@@ -2303,74 +2386,19 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LChunk: public ZoneObject {
- public:
- LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- num_double_slots_(0),
- info_(info),
- graph_(graph),
- instructions_(32, graph->zone()),
- pointer_maps_(8, graph->zone()),
- inlined_closures_(1, graph->zone()) { }
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+class LPlatformChunk: public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph),
+ num_double_slots_(0) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
int num_double_slots() const { return num_double_slots_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure, zone());
- }
-
- Zone* zone() const { return graph_->zone(); }
private:
- int spill_slot_count_;
int num_double_slots_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2389,16 +2417,19 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
+ static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
private:
enum Status {
UNUSED,
@@ -2407,7 +2438,7 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@@ -2417,7 +2448,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2511,7 +2542,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@@ -2523,7 +2554,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
+ BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 2012a5ad9d..9c5f31e2cf 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -85,7 +85,7 @@ void MacroAssembler::RememberedSetHelper(
SaveFPRegsMode save_fp,
MacroAssembler::RememberedSetFinalAction and_then) {
Label done;
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
int3();
@@ -134,10 +134,7 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
Set(result_reg, Immediate(0));
ucomisd(input_reg, scratch_reg);
j(below, &done, Label::kNear);
- ExternalReference half_ref = ExternalReference::address_of_one_half();
- movdbl(scratch_reg, Operand::StaticVariable(half_ref));
- addsd(scratch_reg, input_reg);
- cvttsd2si(result_reg, Operand(scratch_reg));
+ cvtsd2si(result_reg, input_reg);
test(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
Set(result_reg, Immediate(255));
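The rewritten clamp drops the explicit add-one-half because cvtsd2si already rounds to nearest (ties to even) under the default MXCSR mode. A scalar sketch (not V8 code) of the new behavior:

#include <cassert>
#include <cmath>

int ClampDoubleToUint8(double x) {
  if (!(x > 0.0)) return 0;    // NaN and non-positive inputs map to 0
  if (x > 255.0) return 255;
  return static_cast<int>(std::nearbyint(x));  // default: nearest, ties even
}

int main() {
  assert(ClampDoubleToUint8(-3.5) == 0);
  assert(ClampDoubleToUint8(127.5) == 128);  // tie rounds to even
  assert(ClampDoubleToUint8(1000.0) == 255);
  return 0;
}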
@@ -155,6 +152,24 @@ void MacroAssembler::ClampUint8(Register reg) {
}
+static double kUint32Bias =
+ static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
+
+
+void MacroAssembler::LoadUint32(XMMRegister dst,
+ Register src,
+ XMMRegister scratch) {
+ Label done;
+ cmp(src, Immediate(0));
+ movdbl(scratch,
+ Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE));
+ cvtsi2sd(dst, src);
+ j(not_sign, &done, Label::kNear);
+ addsd(dst, scratch);
+ bind(&done);
+}
+
+
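LoadUint32 leans on cvtsi2sd being a signed conversion: a uint32 with the top bit set converts 2^32 too low, and the addsd of kUint32Bias repairs exactly that case. A scalar sketch (not V8 code):

#include <cassert>
#include <cstdint>

double Uint32ToDouble(uint32_t u) {
  const double kBias = 4294967296.0;  // 2^32, the value of kUint32Bias
  double d = static_cast<double>(static_cast<int32_t>(u));  // cvtsi2sd
  if (static_cast<int32_t>(u) < 0) d += kBias;  // sign set: add the bias
  return d;
}

int main() {
  assert(Uint32ToDouble(0x80000000u) == 2147483648.0);
  assert(Uint32ToDouble(0xFFFFFFFFu) == 4294967295.0);
  assert(Uint32ToDouble(42u) == 42.0);
  return 0;
}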
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
@@ -317,7 +332,7 @@ void MacroAssembler::RecordWrite(Register object,
return;
}
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
cmp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
@@ -982,23 +997,24 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
cmp(scratch, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
mov(scratch, FieldOperand(scratch, offset));
- mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
push(scratch);
- // Read the first word and compare to global_context_map.
+ // Read the first word and compare to native_context_map.
mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ cmp(scratch, isolate()->factory()->native_context_map());
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
pop(scratch);
}
// Check if both contexts are the same.
- cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens, save holder_reg on the stack so we can use it
@@ -1009,18 +1025,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check that the security token in the calling global object is
// compatible with the security token in the receiving global
// object.
- mov(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ mov(holder_reg,
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
cmp(holder_reg, isolate()->factory()->null_value());
Check(not_equal, "JSGlobalProxy::context() should not be null.");
push(holder_reg);
- // Read the first word and compare to global_context_map(),
+ // Read the first word and compare to native_context_map(),
mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ cmp(holder_reg, isolate()->factory()->native_context_map());
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
pop(holder_reg);
}
@@ -1707,7 +1724,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
}
-void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -1922,16 +1939,53 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+
+#if ENABLE_EXTRA_CHECKS
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = eax;
+ Register map = ecx;
+
+ JumpIfSmi(return_value, &ok, Label::kNear);
+ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ j(below, &ok, Label::kNear);
+
+ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ j(above_equal, &ok, Label::kNear);
+
+ cmp(map, isolate()->factory()->heap_number_map());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->undefined_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->true_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->false_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->null_value());
+ j(equal, &ok, Label::kNear);
+
+ Abort("API call returned invalid object");
+
+ bind(&ok);
+#endif
+
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
- bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
bind(&empty_handle);
// It was zero; the result is undefined.
mov(eax, isolate()->factory()->undefined_value());
jmp(&prologue);
+ bind(&promote_scheduled_exception);
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
ExternalReference::delete_handle_scope_extensions(isolate());
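The ENABLE_EXTRA_CHECKS block accepts exactly the values an API callback may legally hand back: a smi, any string (instance types below FIRST_NONSTRING_TYPE), any spec object (instance types at or above FIRST_SPEC_OBJECT_TYPE), a heap number, or one of the oddballs undefined, true, false and null; anything else aborts. The predicate in standalone form, with illustrative stand-ins for V8's InstanceType ordering:

    const int FIRST_NONSTRING_TYPE = 0x80;    // strings sort below this (stand-in)
    const int FIRST_SPEC_OBJECT_TYPE = 0xA0;  // JS receivers at or above (stand-in)

    struct Value {
      bool is_smi;
      int instance_type;   // only meaningful when !is_smi
      bool is_heap_number;
      bool is_oddball;     // undefined, true, false or null
    };

    bool IsValidApiReturn(const Value& v) {
      if (v.is_smi) return true;
      if (v.instance_type < FIRST_NONSTRING_TYPE) return true;     // any string
      if (v.instance_type >= FIRST_SPEC_OBJECT_TYPE) return true;  // JS receiver
      return v.is_heap_number || v.is_oddball;  // otherwise: Abort(...)
    }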
@@ -2169,7 +2223,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the JavaScript builtin function from the builtins object.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
mov(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
@@ -2218,8 +2272,8 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
mov(scratch, Operand(scratch,
@@ -2264,10 +2318,11 @@ void MacroAssembler::LoadInitialArrayMap(
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ mov(function,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ mov(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
mov(function, Operand(function, Context::SlotOffset(index)));
}
@@ -2518,12 +2573,19 @@ void MacroAssembler::Abort(const char* msg) {
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- mov(descriptors,
- FieldOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi);
+ Register temp = descriptors;
+ mov(temp, FieldOperand(map, Map::kTransitionsOrBackPointerOffset));
+
+ Label ok, fail;
+ CheckMap(temp,
+ isolate()->factory()->fixed_array_map(),
+ &fail,
+ DONT_DO_SMI_CHECK);
+ mov(descriptors, FieldOperand(temp, TransitionArray::kDescriptorsOffset));
+ jmp(&ok);
+ bind(&fail);
mov(descriptors, isolate()->factory()->empty_descriptor_array());
- bind(&not_smi);
+ bind(&ok);
}
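LoadInstanceDescriptors is updated for the new map layout: the descriptor array now hangs off a TransitionArray reached through the map's transitions-or-back-pointer slot, and a map whose slot fails the fixed-array-map check reports the canonical empty descriptor array. Roughly, under those assumptions (all types here are stand-ins):

    struct DescriptorArray;

    struct TransitionArray {
      DescriptorArray* descriptors;  // TransitionArray::kDescriptorsOffset
    };

    struct Map {
      void* transitions_or_back_pointer;
      bool holds_transition_array;   // stands in for the CheckMap probe
    };

    DescriptorArray* empty_descriptor_array() { return nullptr; }  // stand-in

    DescriptorArray* LoadInstanceDescriptors(const Map* map) {
      if (map->holds_transition_array) {
        return static_cast<TransitionArray*>(map->transitions_or_back_pointer)
            ->descriptors;
      }
      return empty_descriptor_array();  // the &fail path above
    }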
@@ -2786,7 +2848,7 @@ void MacroAssembler::EnsureNotWhite(
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
j(not_zero, &done, Label::kNear);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
push(mask_scratch);
@@ -2861,7 +2923,7 @@ void MacroAssembler::EnsureNotWhite(
and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
length);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
Check(less_equal, "Live Bytes Count overflow chunk size");
@@ -2871,40 +2933,43 @@ void MacroAssembler::EnsureNotWhite(
}
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ mov(dst, FieldOperand(map, Map::kBitField3Offset));
+ and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
+}
+
+
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
- Label next;
+ Label next, start;
mov(ecx, eax);
- bind(&next);
- // Check that there are no elements. Register ecx contains the
- // current JS object we've reached through the prototype chain.
- cmp(FieldOperand(ecx, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- j(not_equal, call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(edx, call_runtime);
- // Check that there is an enum cache in the non-empty instance
- // descriptors (edx). This is the case if the next enumeration
- // index field does not contain a smi.
- mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(edx, call_runtime);
+ EnumLength(edx, ebx);
+ cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
+ j(equal, call_runtime);
+
+ jmp(&start);
+
+ bind(&next);
+ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- cmp(ecx, eax);
- j(equal, &check_prototype, Label::kNear);
- mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- cmp(edx, isolate()->factory()->empty_fixed_array());
+ EnumLength(edx, ebx);
+ cmp(edx, Immediate(Smi::FromInt(0)));
+ j(not_equal, call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register ecx contains the current JS
+ // object we've reached through the prototype chain.
+ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ cmp(ecx, isolate()->factory()->empty_fixed_array());
j(not_equal, call_runtime);
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
cmp(ecx, isolate()->factory()->null_value());
j(not_equal, &next);
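The rewritten CheckEnumCache keys off the per-map enum length instead of probing descriptor arrays: the receiver needs a valid (non-sentinel) enum length, every prototype on the chain needs an enum length of zero, and every object must have empty elements. In sketch form, with plain structs standing in for V8's objects and an illustrative sentinel value:

    struct Map;

    struct Obj {
      Map* map;
      bool elements_empty;  // elements == empty_fixed_array
    };

    struct Map {
      int enum_length;      // decoded Map::EnumLengthBits
      Obj* prototype;       // nullptr stands in for the null value
    };

    const int kInvalidEnumCache = -1;  // illustrative sentinel

    bool CanUseEnumCache(Obj* receiver) {
      if (receiver->map->enum_length == kInvalidEnumCache) return false;
      for (Obj* o = receiver;;) {
        // For all objects but the receiver the cache must be empty.
        if (o != receiver && o->map->enum_length != 0) return false;
        if (!o->elements_empty) return false;
        Obj* proto = o->map->prototype;
        if (proto == nullptr) return true;  // reached the null prototype
        o = proto;
      }
    }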
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 5c7a6d6d26..7d475e7d7e 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -239,8 +239,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -467,6 +467,8 @@ class MacroAssembler: public Assembler {
j(not_carry, is_smi);
}
+ void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value,
Label* smi_label,
@@ -490,7 +492,15 @@ class MacroAssembler: public Assembler {
}
void LoadInstanceDescriptors(Register map, Register descriptors);
-
+ void EnumLength(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const int full_shift = Field::kShift + kSmiTagSize;
+ static const int low_mask = Field::kMask >> Field::kShift;
+ sar(reg, full_shift);
+ and_(reg, Immediate(low_mask));
+ }
void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
// Abort execution if argument is not a number. Used in debug code.
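DecodeField pulls a bit field out of a smi-tagged word: one arithmetic shift strips both the smi tag and the field's offset, and the mask is pre-shifted down to match. (EnumLength in the .cc file skips the shift entirely because EnumLengthBits sits at shift 0, so masking the tagged word directly still yields a valid smi.) A portable sketch, assuming a one-bit smi tag and V8-style kShift/kMask constants:

    #include <cstdint>

    const int kSmiTagSize = 1;  // low bit is the smi tag

    template <int kShift, int kMask>
    int32_t DecodeField(int32_t smi_tagged_word) {
      const int full_shift = kShift + kSmiTagSize;
      const int32_t low_mask = kMask >> kShift;
      return (smi_tagged_word >> full_shift) & low_mask;  // the sar + and pair
    }

    // Example: a 3-bit field at shift 4 would be DecodeField<4, 0x70>(word).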
@@ -688,7 +698,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
// Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);
@@ -961,7 +971,7 @@ inline Operand ContextOperand(Register context, int index) {
inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_INDEX);
+ return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
}
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 07782cc809..622dc4254d 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -316,6 +316,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// uncaptured. In either case succeed immediately.
__ j(equal, &fallthrough);
+ // Check that there are sufficient characters left in the input.
+ __ mov(eax, edi);
+ __ add(eax, ebx);
+ BranchOrBacktrack(greater, on_no_match);
+
if (mode_ == ASCII) {
Label success;
Label fail;
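The added instructions close a bounds hole in the ignore-case back-reference check. Assuming Irregexp's convention that edi holds the current position as a non-positive offset from the end of the subject and ebx the capture length at this point, the guard amounts to (a sketch of the intent, not the emitted code):

    bool EnoughInputLeft(int pos_from_end /* <= 0 */, int capture_length) {
      // eax = edi + ebx; if the sum is positive the reference would read
      // past the end of the input, so branch to on_no_match (backtrack).
      return pos_from_end + capture_length <= 0;
    }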
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 760fadc77d..7aea385863 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -34,14 +34,7 @@
namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerIA32() { }
- virtual ~RegExpMacroAssemblerIA32() { }
-};
-
-#else // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone* zone);
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 0e4ce20bd6..f5e2d05892 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -276,12 +276,12 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
// Check we're still in the same context.
- __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
- masm->isolate()->global());
+ __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
+ masm->isolate()->global_object());
__ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(masm->isolate()->global_context()->get(index)));
+ JSFunction::cast(masm->isolate()->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
@@ -1052,6 +1052,58 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+ Register dictionary = scratch1;
+ bool must_preserve_dictionary_reg = receiver.is(dictionary);
+
+ // Load the properties dictionary.
+ if (must_preserve_dictionary_reg) {
+ __ push(dictionary);
+ }
+ __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done, pop_and_miss;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &pop_and_miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&pop_and_miss);
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ jmp(miss);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch2 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch2;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(scratch3,
+ Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ cmp(scratch3, callback);
+ __ j(not_equal, miss);
+}
+
+
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1059,6 +1111,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -1069,6 +1122,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch1, scratch2, scratch3, callback, name, miss);
+ }
+
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3.is(reg));
__ pop(scratch3); // Get return address to place it below.
@@ -1157,7 +1215,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
@@ -1242,7 +1300,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), eax, holder_reg,
@@ -1415,7 +1473,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -1962,7 +2020,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2092,7 +2150,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2197,7 +2255,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2444,7 +2502,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2505,7 +2563,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2536,14 +2594,17 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -2551,19 +2612,14 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- esp[0] : return address
// -----------------------------------
Label miss;
+ // Check that the maps haven't changed, preserving the value register.
+ __ push(eax);
+ __ JumpIfSmi(edx, &miss);
+ CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
+ __ pop(eax); // restore value
- // Check that the map of the object hasn't changed.
- __ CheckMap(edx, Handle<Map>(object->map()),
- &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(edx, ebx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ pop(ebx); // remove the return address
__ push(edx); // receiver
@@ -2579,42 +2635,46 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Handle store cache miss.
__ bind(&miss);
+ __ pop(eax);
Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<JSObject> receiver,
- Handle<JSFunction> setter,
- Handle<String> name) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
-
{
- FrameScope scope(masm(), StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(eax);
- // Call the JavaScript getter with the receiver and the value on the stack.
- __ push(edx);
- __ push(eax);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ push(edx);
+ __ push(eax);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
// We have to return the passed value, not the return value of the setter.
__ pop(eax);
@@ -2623,13 +2683,41 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed, preserving the name register.
+ __ push(ecx);
+ __ JumpIfSmi(edx, &miss);
+ CheckPrototypes(receiver, edx, holder, ebx, ecx, edi, name, &miss);
+ __ pop(ecx);
+
+ GenerateStoreViaSetter(masm(), setter);
__ bind(&miss);
+ __ pop(ecx);
Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2675,7 +2763,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2723,7 +2811,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2762,7 +2850,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -2785,7 +2875,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -2820,7 +2910,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -2860,7 +2950,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2880,7 +2970,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2896,16 +2986,53 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, callback,
- name, &miss);
+ GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, no_reg,
+ callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(edx);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
}
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
Handle<String> name,
Handle<JSObject> receiver,
@@ -2922,25 +3049,13 @@ Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
__ JumpIfSmi(edx, &miss);
CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
- {
- FrameScope scope(masm(), StackFrame::INTERNAL);
-
- // Call the JavaScript getter with the receiver on the stack.
- __ push(edx);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
+ GenerateLoadViaGetter(masm(), getter);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2960,7 +3075,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2986,7 +3101,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -3034,7 +3149,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -3063,7 +3178,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -3086,15 +3201,15 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(ecx, Immediate(name));
__ j(not_equal, &miss);
- GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, callback,
- name, &miss);
+ GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, no_reg,
+ callback, name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3124,7 +3239,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -3155,7 +3270,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -3181,7 +3296,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3207,7 +3322,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3233,7 +3348,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3253,7 +3368,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3280,7 +3395,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index d169993e3b..b902b5386d 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -320,13 +320,17 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
target->ic_state());
// Not all Code objects have TypeFeedbackInfo.
- if (delta != 0 && host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ if (host->type_feedback_info()->IsTypeFeedbackInfo() && delta != 0) {
TypeFeedbackInfo* info =
TypeFeedbackInfo::cast(host->type_feedback_info());
- info->set_ic_with_type_info_count(
- info->ic_with_type_info_count() + delta);
+ info->change_ic_with_type_info_count(delta);
}
}
+ if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ TypeFeedbackInfo* info =
+ TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->change_own_type_change_checksum();
+ }
if (FLAG_watch_ic_patching) {
host->set_profiler_ticks(0);
Isolate::Current()->runtime_profiler()->NotifyICChanged();
@@ -435,9 +439,7 @@ static void LookupForRead(Handle<Object> object,
// Besides normal conditions (property not found or it's not
// an interceptor), bail out if lookup is not cacheable: we won't
// be able to IC it anyway and regular lookup should work fine.
- if (!lookup->IsFound()
- || (lookup->type() != INTERCEPTOR)
- || !lookup->IsCacheable()) {
+ if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
return;
}
@@ -447,14 +449,14 @@ static void LookupForRead(Handle<Object> object,
}
holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsProperty()) {
- ASSERT(lookup->type() != INTERCEPTOR);
+ if (lookup->IsFound()) {
+ ASSERT(!lookup->IsInterceptor());
return;
}
Handle<Object> proto(holder->GetPrototype());
if (proto->IsNull()) {
- lookup->NotFound();
+ ASSERT(!lookup->IsFound());
return;
}
@@ -535,7 +537,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
LookupResult lookup(isolate());
LookupForRead(object, name, &lookup);
- if (!lookup.IsProperty()) {
+ if (!lookup.IsFound()) {
// If the object does not have the requested property, check which
// exception we need to throw.
return IsContextual(object)
@@ -554,7 +556,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
Object::GetProperty(object, object, &lookup, name, &attr);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
- if (lookup.type() == INTERCEPTOR && attr == ABSENT) {
+ if (lookup.IsInterceptor() && attr == ABSENT) {
// If the object does not have the requested property, check which
// exception we need to throw.
return IsContextual(object)
@@ -902,7 +904,7 @@ MaybeObject* LoadIC::Load(State state,
LookupForRead(object, name, &lookup);
// If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsProperty()) {
+ if (!lookup.IsFound()) {
if (IsContextual(object)) {
return ReferenceError("not_defined", name);
}
@@ -915,8 +917,7 @@ MaybeObject* LoadIC::Load(State state,
}
PropertyAttributes attr;
- if (lookup.IsFound() &&
- (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
+ if (lookup.IsInterceptor() || lookup.IsHandler()) {
// Get the property.
Handle<Object> result =
Object::GetProperty(object, object, &lookup, name, &attr);
@@ -992,7 +993,6 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
if (callback->IsAccessorInfo()) {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
if (v8::ToCData<Address>(info->getter()) == 0) return;
- if (!holder->HasFastProperties()) return;
if (!info->IsCompatibleReceiver(*receiver)) return;
code = isolate()->stub_cache()->ComputeLoadCallback(
name, receiver, holder, info);
@@ -1169,7 +1169,7 @@ MaybeObject* KeyedLoadIC::Load(State state,
LookupForRead(object, name, &lookup);
// If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsProperty() && IsContextual(object)) {
+ if (!lookup.IsFound() && IsContextual(object)) {
return ReferenceError("not_defined", name);
}
@@ -1178,7 +1178,7 @@ MaybeObject* KeyedLoadIC::Load(State state,
}
PropertyAttributes attr;
- if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsInterceptor()) {
// Get the property.
Handle<Object> result =
Object::GetProperty(object, object, &lookup, name, &attr);
@@ -1269,7 +1269,6 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
Handle<AccessorInfo> callback =
Handle<AccessorInfo>::cast(callback_object);
if (v8::ToCData<Address>(callback->getter()) == 0) return;
- if (!holder->HasFastProperties()) return;
if (!callback->IsCompatibleReceiver(*receiver)) return;
code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
name, receiver, holder, callback);
@@ -1303,15 +1302,16 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
static bool StoreICableLookup(LookupResult* lookup) {
// Bail out if we didn't find a result.
- if (!lookup->IsFound() || lookup->type() == NULL_DESCRIPTOR) return false;
+ if (!lookup->IsFound()) return false;
// Bail out if inline caching is not allowed.
if (!lookup->IsCacheable()) return false;
// If the property is read-only, we leave the IC in its current state.
- if (lookup->IsReadOnly()) return false;
-
- return true;
+ if (lookup->IsTransition()) {
+ return !lookup->GetTransitionDetails().IsReadOnly();
+ }
+ return !lookup->IsReadOnly();
}
@@ -1319,11 +1319,16 @@ static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
LookupResult* lookup) {
receiver->LocalLookup(*name, lookup);
+ if (!lookup->IsFound()) {
+ receiver->map()->LookupTransition(*receiver, *name, lookup);
+ }
if (!StoreICableLookup(lookup)) {
- return false;
+ // 2nd chance: There can be accessors somewhere in the prototype chain.
+ receiver->Lookup(*name, lookup);
+ return lookup->IsPropertyCallbacks() && StoreICableLookup(lookup);
}
- if (lookup->type() == INTERCEPTOR &&
+ if (lookup->IsInterceptor() &&
receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
receiver->LocalLookupRealNamedProperty(*name, lookup);
return StoreICableLookup(lookup);
@@ -1390,12 +1395,13 @@ MaybeObject* StoreIC::Store(State state,
}
// Lookup the property locally in the receiver.
- if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+ if (!receiver->IsJSGlobalProxy()) {
LookupResult lookup(isolate());
if (LookupForWrite(receiver, name, &lookup)) {
- // Generate a stub for this store.
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ if (FLAG_use_ic) { // Generate a stub for this store.
+ UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ }
} else {
// Strict mode doesn't allow setting non-existent global property
// or an assignment to a read only property.
@@ -1439,10 +1445,10 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Object> value) {
ASSERT(!receiver->IsJSGlobalProxy());
ASSERT(StoreICableLookup(lookup));
+ ASSERT(lookup->IsFound());
+
// These are not cacheable, so we never see such LookupResults here.
- ASSERT(lookup->type() != HANDLER);
- // We get only called for properties or transitions, see StoreICableLookup.
- ASSERT(lookup->type() != NULL_DESCRIPTOR);
+ ASSERT(!lookup->IsHandler());
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -1462,14 +1468,6 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Map>::null(),
strict_mode);
break;
- case MAP_TRANSITION: {
- if (lookup->GetAttributes() != NONE) return;
- Handle<Map> transition(lookup->GetTransitionMap());
- int index = transition->PropertyIndexFor(*name);
- code = isolate()->stub_cache()->ComputeStoreField(
- name, receiver, index, transition, strict_mode);
- break;
- }
case NORMAL:
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
@@ -1487,20 +1485,20 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
case CALLBACKS: {
Handle<Object> callback(lookup->GetCallbackObject());
if (callback->IsAccessorInfo()) {
- ASSERT(*holder == *receiver); // LookupForWrite checks this.
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
if (v8::ToCData<Address>(info->setter()) == 0) return;
if (!holder->HasFastProperties()) return;
- ASSERT(info->IsCompatibleReceiver(*receiver));
+ if (!info->IsCompatibleReceiver(*receiver)) return;
code = isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, info, strict_mode);
+ name, receiver, holder, info, strict_mode);
} else if (callback->IsAccessorPair()) {
Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter());
if (!setter->IsJSFunction()) return;
if (holder->IsGlobalObject()) return;
if (!holder->HasFastProperties()) return;
code = isolate()->stub_cache()->ComputeStoreViaSetter(
- name, receiver, Handle<JSFunction>::cast(setter), strict_mode);
+ name, receiver, holder, Handle<JSFunction>::cast(setter),
+ strict_mode);
} else {
ASSERT(callback->IsForeign());
// No IC support for old-style native accessors.
@@ -1514,10 +1512,24 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
name, receiver, strict_mode);
break;
case CONSTANT_FUNCTION:
- case CONSTANT_TRANSITION:
return;
+ case TRANSITION: {
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ int descriptor = transition->LastAdded();
+
+ DescriptorArray* target_descriptors = transition->instance_descriptors();
+ PropertyDetails details = target_descriptors->GetDetails(descriptor);
+
+ if (details.type() != FIELD || details.attributes() != NONE) return;
+
+ int field_index = target_descriptors->GetFieldIndex(descriptor);
+ code = isolate()->stub_cache()->ComputeStoreField(
+ name, receiver, field_index, transition, strict_mode);
+
+ break;
+ }
+ case NONEXISTENT:
case HANDLER:
- case NULL_DESCRIPTOR:
UNREACHABLE();
return;
}
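The TRANSITION case replaces the old MAP_TRANSITION handling here (and, below, in KeyedStoreIC::UpdateCaches): rather than asking the lookup result for a transition map, the IC inspects the transition target's most recently added descriptor and only caches a store-field stub when that descriptor is a plain, attribute-free field. The gate it applies, with stand-ins for V8's PropertyDetails:

    enum PropertyType { FIELD, CONSTANT_FUNCTION, CALLBACKS };  // abridged

    struct PropertyDetails {
      PropertyType type;
      int attributes;  // 0 == NONE
    };

    bool CanCacheTransitionStore(const PropertyDetails& d) {
      return d.type == FIELD && d.attributes == 0;  // else: runtime path
    }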
@@ -1590,7 +1602,7 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
- if (target()->type() != NORMAL) {
+ if (target()->type() != Code::NORMAL) {
TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
return generic_stub;
}
@@ -1943,10 +1955,10 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
Handle<Object> value) {
ASSERT(!receiver->IsJSGlobalProxy());
ASSERT(StoreICableLookup(lookup));
+ ASSERT(lookup->IsFound());
+
// These are not cacheable, so we never see such LookupResults here.
- ASSERT(lookup->type() != HANDLER);
- // We get only called for properties or transitions, see StoreICableLookup.
- ASSERT(lookup->type() != NULL_DESCRIPTOR);
+ ASSERT(!lookup->IsHandler());
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -1964,20 +1976,25 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
name, receiver, lookup->GetFieldIndex(),
Handle<Map>::null(), strict_mode);
break;
- case MAP_TRANSITION:
- if (lookup->GetAttributes() == NONE) {
- Handle<Map> transition(lookup->GetTransitionMap());
- int index = transition->PropertyIndexFor(*name);
+ case TRANSITION: {
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ int descriptor = transition->LastAdded();
+
+ DescriptorArray* target_descriptors = transition->instance_descriptors();
+ PropertyDetails details = target_descriptors->GetDetails(descriptor);
+
+ if (details.type() == FIELD && details.attributes() == NONE) {
+ int field_index = target_descriptors->GetFieldIndex(descriptor);
code = isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, index, transition, strict_mode);
+ name, receiver, field_index, transition, strict_mode);
break;
}
// fall through.
+ }
case NORMAL:
case CONSTANT_FUNCTION:
case CALLBACKS:
case INTERCEPTOR:
- case CONSTANT_TRANSITION:
// Always rewrite to the generic case so that we do not
// repeatedly try to rewrite.
code = (strict_mode == kStrictMode)
@@ -1985,7 +2002,7 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
: generic_stub();
break;
case HANDLER:
- case NULL_DESCRIPTOR:
+ case NONEXISTENT:
UNREACHABLE();
return;
}
@@ -2120,7 +2137,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
// The length property has to be a writable callback property.
LookupResult debug_lookup(isolate);
receiver->LocalLookup(isolate->heap()->length_symbol(), &debug_lookup);
- ASSERT(debug_lookup.type() == CALLBACKS && !debug_lookup.IsReadOnly());
+ ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
#endif
Object* result;
@@ -2561,7 +2578,8 @@ CompareIC::State CompareIC::ComputeState(Code* target) {
Token::Value CompareIC::ComputeOperation(Code* target) {
ASSERT(target->major_key() == CodeStub::CompareIC);
- return static_cast<Token::Value>(target->compare_operation());
+ return static_cast<Token::Value>(
+ target->compare_operation() + Token::EQ);
}
@@ -2571,7 +2589,7 @@ const char* CompareIC::GetStateName(State state) {
case SMIS: return "SMIS";
case HEAP_NUMBERS: return "HEAP_NUMBERS";
case OBJECTS: return "OBJECTS";
- case KNOWN_OBJECTS: return "OBJECTS";
+ case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
case SYMBOLS: return "SYMBOLS";
case STRINGS: return "STRINGS";
case GENERIC: return "GENERIC";
diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h
index 2dae6f207d..5c2c8ab431 100644
--- a/deps/v8/src/incremental-marking-inl.h
+++ b/deps/v8/src/incremental-marking-inl.h
@@ -107,7 +107,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
// trace it. In this case we switch to non-incremental marking in
// order to finish off this marking phase.
if (FLAG_trace_gc) {
- PrintF("Hurrying incremental marking because of lack of progress\n");
+ PrintPID("Hurrying incremental marking because of lack of progress\n");
}
allocation_marking_factor_ = kMaxAllocationMarkingFactor;
}
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index 94afffa733..57f18b2e75 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -31,6 +31,8 @@
#include "code-stubs.h"
#include "compilation-cache.h"
+#include "objects-visiting.h"
+#include "objects-visiting-inl.h"
#include "v8conversions.h"
namespace v8 {
@@ -160,93 +162,82 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
}
-class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
+class IncrementalMarkingMarkingVisitor
+ : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
public:
- IncrementalMarkingMarkingVisitor(Heap* heap,
- IncrementalMarking* incremental_marking)
- : heap_(heap),
- incremental_marking_(incremental_marking) {
- }
+ static void Initialize() {
+ StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* target = rinfo->target_object();
- if (target->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- MarkObject(target);
- }
- }
+ table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
- void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
- && (target->ic_age() != heap_->global_ic_age())) {
- IC::Clear(rinfo->pc());
- target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- }
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
- MarkObject(target);
- }
+ table_.Register(kVisitJSFunction, &VisitJSFunction);
- void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
- MarkObject(target);
+ table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
- void VisitCodeEntry(Address entry_address) {
- Object* target = Code::GetObjectFromEntryAddress(entry_address);
- heap_->mark_compact_collector()->
- RecordCodeEntrySlot(entry_address, Code::cast(target));
- MarkObject(target);
+ static void VisitJSWeakMap(Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ VisitPointers(heap,
+ HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
+ HeapObject::RawField(object, JSWeakMap::kSize));
}
- void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
- if (shared->ic_age() != heap_->global_ic_age()) {
- shared->ResetForNewContext(heap_->global_ic_age());
+ static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+ if (shared->ic_age() != heap->global_ic_age()) {
+ shared->ResetForNewContext(heap->global_ic_age());
}
- }
-
- void VisitPointer(Object** p) {
+ FixedBodyVisitor<IncrementalMarkingMarkingVisitor,
+ SharedFunctionInfo::BodyDescriptor,
+ void>::Visit(map, object);
+ }
+
+ static inline void VisitJSFunction(Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ // Iterate over all fields in the body but take care in dealing with
+ // the code entry and skip weak fields.
+ VisitPointers(heap,
+ HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+ HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+ VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
+ VisitPointers(heap,
+ HeapObject::RawField(object,
+ JSFunction::kCodeEntryOffset + kPointerSize),
+ HeapObject::RawField(object,
+ JSFunction::kNonWeakFieldsEndOffset));
+ }
+
+ INLINE(static void VisitPointer(Heap* heap, Object** p)) {
Object* obj = *p;
if (obj->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordSlot(p, p, obj);
- MarkObject(obj);
+ heap->mark_compact_collector()->RecordSlot(p, p, obj);
+ MarkObject(heap, obj);
}
}
- void VisitPointers(Object** start, Object** end) {
+ INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
for (Object** p = start; p < end; p++) {
Object* obj = *p;
if (obj->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordSlot(start, p, obj);
- MarkObject(obj);
+ heap->mark_compact_collector()->RecordSlot(start, p, obj);
+ MarkObject(heap, obj);
}
}
}
- private:
- // Mark object pointed to by p.
- INLINE(void MarkObject(Object* obj)) {
+ INLINE(static void MarkObject(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+ if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
heap_object->Size());
}
} else if (Marking::IsWhite(mark_bit)) {
- incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+ heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
}
}
-
- Heap* heap_;
- IncrementalMarking* incremental_marking_;
};
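The marking visitor changes shape here: instead of a per-use ObjectVisitor instance with virtual dispatch, IncrementalMarkingMarkingVisitor specializes StaticMarkingVisitor, and visitation runs through a static table of function pointers filled in by Initialize(). The pattern, boiled down to a runnable toy (all names illustrative):

    #include <cstdio>

    struct HeapObject { int visitor_id; };

    template <typename StaticVisitor>
    class StaticVisitorBase {
     public:
      using Callback = void (*)(HeapObject*);
      static void Register(int id, Callback cb) { table_[id] = cb; }
      static void IterateBody(HeapObject* obj) { table_[obj->visitor_id](obj); }
     private:
      static Callback table_[8];
    };

    template <typename V>
    typename StaticVisitorBase<V>::Callback StaticVisitorBase<V>::table_[8];

    class MarkingVisitor : public StaticVisitorBase<MarkingVisitor> {
     public:
      static void Initialize() {
        Register(0, &VisitGeneric);
        Register(1, &VisitSharedFunctionInfo);  // specialized entry
      }
     private:
      static void VisitGeneric(HeapObject*) { std::printf("generic\n"); }
      static void VisitSharedFunctionInfo(HeapObject*) { std::printf("SFI\n"); }
    };

    int main() {
      MarkingVisitor::Initialize();       // cf. IncrementalMarking::Initialize()
      HeapObject sfi{1};
      MarkingVisitor::IterateBody(&sfi);  // dispatches through the table
    }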
@@ -290,6 +281,11 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
};
+void IncrementalMarking::Initialize() {
+ IncrementalMarkingMarkingVisitor::Initialize();
+}
+
+
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
bool is_marking,
bool is_compacting) {
@@ -623,24 +619,6 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
}
-void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
- v->VisitPointers(
- HeapObject::RawField(
- ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
- HeapObject::RawField(
- ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
-
- MarkCompactCollector* collector = heap_->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT;
- idx < Context::GLOBAL_CONTEXT_SLOTS;
- ++idx) {
- Object** slot =
- HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
- collector->RecordSlot(slot, slot, *slot);
- }
-}
-
-
void IncrementalMarking::Hurry() {
if (state() == MARKING) {
double start = 0.0;
@@ -651,8 +629,7 @@ void IncrementalMarking::Hurry() {
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
Map* filler_map = heap_->one_pointer_filler_map();
- Map* global_context_map = heap_->global_context_map();
- IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+ Map* native_context_map = heap_->native_context_map();
while (!marking_deque_.IsEmpty()) {
HeapObject* obj = marking_deque_.Pop();
@@ -661,9 +638,9 @@ void IncrementalMarking::Hurry() {
Map* map = obj->map();
if (map == filler_map) {
continue;
- } else if (map == global_context_map) {
- // Global contexts have weak fields.
- VisitGlobalContext(Context::cast(obj), &marking_visitor);
+ } else if (map == native_context_map) {
+ // Native contexts have weak fields.
+ IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
} else if (map->instance_type() == MAP_TYPE) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
@@ -676,12 +653,17 @@ void IncrementalMarking::Hurry() {
map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
marker_.MarkMapContents(map);
} else {
- marking_visitor.VisitPointers(
+ IncrementalMarkingMarkingVisitor::VisitPointers(
+ heap_,
HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
} else {
- obj->Iterate(&marking_visitor);
+ MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+ if (Marking::IsWhite(map_mark_bit)) {
+ WhiteToGreyAndPush(map, map_mark_bit);
+ }
+ IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
}
MarkBit mark_bit = Marking::MarkBitFrom(obj);
@@ -704,7 +686,7 @@ void IncrementalMarking::Hurry() {
PolymorphicCodeCache::kSize);
}
- Object* context = heap_->global_contexts_list();
+ Object* context = heap_->native_contexts_list();
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
@@ -814,8 +796,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
}
} else if (state_ == MARKING) {
Map* filler_map = heap_->one_pointer_filler_map();
- Map* global_context_map = heap_->global_context_map();
- IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+ Map* native_context_map = heap_->native_context_map();
while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
HeapObject* obj = marking_deque_.Pop();
@@ -832,15 +813,15 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
}
// TODO(gc) switch to static visitor instead of normal visitor.
- if (map == global_context_map) {
- // Global contexts have weak fields.
+ if (map == native_context_map) {
+ // Native contexts have weak fields.
Context* ctx = Context::cast(obj);
// We will mark cache black with a separate pass
// when we finish marking.
MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
- VisitGlobalContext(ctx, &marking_visitor);
+ IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
} else if (map->instance_type() == MAP_TYPE) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
@@ -853,25 +834,13 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
marker_.MarkMapContents(map);
} else {
- marking_visitor.VisitPointers(
+ IncrementalMarkingMarkingVisitor::VisitPointers(
+ heap_,
HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
- } else if (map->instance_type() == JS_FUNCTION_TYPE) {
- marking_visitor.VisitPointers(
- HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
- HeapObject::RawField(obj, JSFunction::kCodeEntryOffset));
-
- marking_visitor.VisitCodeEntry(
- obj->address() + JSFunction::kCodeEntryOffset);
-
- marking_visitor.VisitPointers(
- HeapObject::RawField(obj,
- JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(obj,
- JSFunction::kNonWeakFieldsEndOffset));
} else {
- obj->IterateBody(map->instance_type(), size, &marking_visitor);
+ IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
}
MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
@@ -892,8 +861,8 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
if (FLAG_trace_gc) {
- PrintF("Speed up marking after %d steps\n",
- static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+ PrintPID("Speed up marking after %d steps\n",
+ static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
}
speed_up = true;
}
@@ -907,7 +876,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
if (space_left_is_very_small ||
only_1_nth_of_space_that_was_available_still_left) {
- if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
+ if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
speed_up = true;
}
@@ -918,7 +887,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
if (size_of_old_space_multiplied_by_n_during_marking) {
speed_up = true;
if (FLAG_trace_gc) {
- PrintF("Speed up marking because of heap size increase\n");
+ PrintPID("Speed up marking because of heap size increase\n");
}
}
@@ -930,7 +899,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
// We try to scan at least twice the speed that we are allocating.
if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
if (FLAG_trace_gc) {
- PrintF("Speed up marking because marker was not keeping up\n");
+ PrintPID("Speed up marking because marker was not keeping up\n");
}
speed_up = true;
}
@@ -938,7 +907,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
if (speed_up) {
if (state_ != MARKING) {
if (FLAG_trace_gc) {
- PrintF("Postponing speeding up marking until marking starts\n");
+ PrintPID("Postponing speeding up marking until marking starts\n");
}
} else {
allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
@@ -946,7 +915,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
Min(kMaxAllocationMarkingFactor,
static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
if (FLAG_trace_gc) {
- PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+ PrintPID("Marking speed increased to %d\n", allocation_marking_factor_);
}
}
}
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index 39e8daed68..4cb356de8f 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -53,6 +53,8 @@ class IncrementalMarking {
explicit IncrementalMarking(Heap* heap);
+ static void Initialize();
+
void TearDown();
State state() {
@@ -215,8 +217,9 @@ class IncrementalMarking {
if (IsMarking()) {
if (allocation_marking_factor_ < kFastMarking) {
if (FLAG_trace_gc) {
- PrintF("Increasing marking speed to %d due to high promotion rate\n",
- static_cast<int>(kFastMarking));
+ PrintPID("Increasing marking speed to %d "
+ "due to high promotion rate\n",
+ static_cast<int>(kFastMarking));
}
allocation_marking_factor_ = kFastMarking;
}
@@ -258,8 +261,6 @@ class IncrementalMarking {
void EnsureMarkingDequeIsCommitted();
- void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
-
Heap* heap_;
State state_;
diff --git a/deps/v8/src/interface.cc b/deps/v8/src/interface.cc
index 86bb9d0bf4..336be82c60 100644
--- a/deps/v8/src/interface.cc
+++ b/deps/v8/src/interface.cc
@@ -124,8 +124,16 @@ void Interface::Unify(Interface* that, Zone* zone, bool* ok) {
*ok = true;
if (this == that) return;
- if (this->IsValue()) return that->MakeValue(ok);
- if (that->IsValue()) return this->MakeValue(ok);
+ if (this->IsValue()) {
+ that->MakeValue(ok);
+ if (*ok && this->IsConst()) that->MakeConst(ok);
+ return;
+ }
+ if (that->IsValue()) {
+ this->MakeValue(ok);
+ if (*ok && that->IsConst()) this->MakeConst(ok);
+ return;
+ }
#ifdef DEBUG
if (FLAG_print_interface_details) {
@@ -214,6 +222,8 @@ void Interface::Print(int n) {
if (IsUnknown()) {
PrintF("unknown\n");
+ } else if (IsConst()) {
+ PrintF("const\n");
} else if (IsValue()) {
PrintF("value\n");
} else if (IsModule()) {
diff --git a/deps/v8/src/interface.h b/deps/v8/src/interface.h
index 2670e7428d..94ef11ba5c 100644
--- a/deps/v8/src/interface.h
+++ b/deps/v8/src/interface.h
@@ -36,25 +36,41 @@ namespace internal {
// This class implements the following abstract grammar of interfaces
// (i.e. module types):
-// interface ::= UNDETERMINED | VALUE | MODULE(exports)
+// interface ::= UNDETERMINED | VALUE | CONST | MODULE(exports)
// exports ::= {name : interface, ...}
-// A frozen module type is one that is fully determined. Unification does not
-// allow adding additional exports to frozen interfaces.
-// Otherwise, unifying modules merges their exports.
+// A frozen type is one that is fully determined. Unification does not
+// allow turning non-const values into const, or adding additional exports
+// to frozen interfaces. Otherwise, unifying modules merges their exports.
// Undetermined types are unification variables that can be unified freely.
+// There is a natural subsort lattice that reflects the increase of knowledge:
+//
+// undetermined
+// // | \\ .
+// value (frozen) module
+// // \\ / \ //
+// const fr.value fr.module
+// \\ /
+// fr.const
+//
+// where the bold lines are the only transitions allowed.
class Interface : public ZoneObject {
public:
// ---------------------------------------------------------------------------
// Factory methods.
+ static Interface* NewUnknown(Zone* zone) {
+ return new(zone) Interface(NONE);
+ }
+
static Interface* NewValue() {
static Interface value_interface(VALUE + FROZEN); // Cached.
return &value_interface;
}
- static Interface* NewUnknown(Zone* zone) {
- return new(zone) Interface(NONE);
+ static Interface* NewConst() {
+ static Interface value_interface(VALUE + CONST + FROZEN); // Cached.
+ return &value_interface;
}
static Interface* NewModule(Zone* zone) {
@@ -80,6 +96,12 @@ class Interface : public ZoneObject {
if (*ok) Chase()->flags_ |= VALUE;
}
+ // Determine this interface to be an immutable interface.
+ void MakeConst(bool* ok) {
+ *ok = !IsModule() && (IsConst() || !IsFrozen());
+ if (*ok) Chase()->flags_ |= VALUE + CONST;
+ }
+
// Determine this interface to be a module interface.
void MakeModule(bool* ok) {
*ok = !IsValue();
@@ -107,6 +129,9 @@ class Interface : public ZoneObject {
// Check whether this is a value type.
bool IsValue() { return Chase()->flags_ & VALUE; }
+ // Check whether this is a constant type.
+ bool IsConst() { return Chase()->flags_ & CONST; }
+
// Check whether this is a module type.
bool IsModule() { return Chase()->flags_ & MODULE; }
@@ -161,8 +186,9 @@ class Interface : public ZoneObject {
enum Flags { // All flags are monotonic
NONE = 0,
VALUE = 1, // This type describes a value
- MODULE = 2, // This type describes a module
- FROZEN = 4 // This type is fully determined
+ CONST = 2, // This type describes a constant
+ MODULE = 4, // This type describes a module
+ FROZEN = 8 // This type is fully determined
};
int flags_;
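The new CONST flag slots into the monotonic flag set above and realizes the extra lattice points described in the comment. The precondition MakeConst enforces in this patch, !IsModule() && (IsConst() || !IsFrozen()), can be read off the flags directly; a standalone sketch (not V8 code) of the same check:

    enum Flags { NONE = 0, VALUE = 1, CONST = 2, MODULE = 4, FROZEN = 8 };

    // Sketch of the MakeConst precondition from the hunk above: a module
    // can never become const, and a frozen plain value is already fully
    // determined, so only const or not-yet-frozen interfaces may move.
    bool CanMakeConst(int flags) {
      bool is_module = (flags & MODULE) != 0;
      bool is_const  = (flags & CONST)  != 0;
      bool is_frozen = (flags & FROZEN) != 0;
      return !is_module && (is_const || !is_frozen);
    }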
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 8fcb370c3e..75e15a4541 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -477,6 +477,14 @@ void Isolate::Iterate(ObjectVisitor* v) {
Iterate(v, current_t);
}
+void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
+ for (DeferredHandles* deferred = deferred_handles_head_;
+ deferred != NULL;
+ deferred = deferred->next_) {
+ deferred->Iterate(visitor);
+ }
+}
+
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
// The ARM simulator has a separate JS stack. We therefore register
@@ -527,6 +535,24 @@ Handle<String> Isolate::StackTraceString() {
}
+void Isolate::PushStackTraceAndDie(unsigned int magic,
+ Object* object,
+ Map* map,
+ unsigned int magic2) {
+ const int kMaxStackTraceSize = 8192;
+ Handle<String> trace = StackTraceString();
+ char buffer[kMaxStackTraceSize];
+ int length = Min(kMaxStackTraceSize - 1, trace->length());
+ String::WriteToFlat(*trace, buffer, 0, length);
+ buffer[length] = '\0';
+ OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
+ magic, magic2,
+ static_cast<void*>(object), static_cast<void*>(map),
+ buffer);
+ OS::Abort();
+}
+
+
void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) {
if (capture_stack_trace_for_uncaught_exceptions_) {
// Capture stack trace for a detailed exception message.
@@ -781,16 +807,17 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
if (isolate->bootstrapper()->IsActive()) return YES;
if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
+ Object* receiver_context = JSGlobalProxy::cast(receiver)->native_context();
if (!receiver_context->IsContext()) return NO;
- // Get the global context of current top context.
- // avoid using Isolate::global_context() because it uses Handle.
- Context* global_context = isolate->context()->global()->global_context();
- if (receiver_context == global_context) return YES;
+  // Get the native context of the current top context. Avoid using
+  // Isolate::native_context() because it returns a Handle.
+ Context* native_context =
+ isolate->context()->global_object()->native_context();
+ if (receiver_context == native_context) return YES;
if (Context::cast(receiver_context)->security_token() ==
- global_context->security_token())
+ native_context->security_token())
return YES;
}
@@ -1136,12 +1163,12 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
"Extension or internal compilation error: %s in %s at line %d.\n",
*String::cast(exception)->ToCString(),
*String::cast(location->script()->name())->ToCString(),
- line_number);
+ line_number + 1);
} else {
OS::PrintError(
"Extension or internal compilation error in %s at line %d.\n",
*String::cast(location->script()->name())->ToCString(),
- line_number);
+ line_number + 1);
}
}
}
@@ -1205,7 +1232,7 @@ void Isolate::ReportPendingMessages() {
PropagatePendingExceptionToExternalTryCatch();
// If the pending exception is OutOfMemoryException set out_of_memory in
- // the global context. Note: We have to mark the global context here
+ // the native context. Note: We have to mark the native context here
// since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
// set it.
HandleScope scope;
@@ -1315,20 +1342,26 @@ bool Isolate::is_out_of_memory() {
}
+Handle<Context> Isolate::native_context() {
+ GlobalObject* global = thread_local_top()->context_->global_object();
+ return Handle<Context>(global->native_context());
+}
+
+
Handle<Context> Isolate::global_context() {
- GlobalObject* global = thread_local_top()->context_->global();
+ GlobalObject* global = thread_local_top()->context_->global_object();
return Handle<Context>(global->global_context());
}
-Handle<Context> Isolate::GetCallingGlobalContext() {
+Handle<Context> Isolate::GetCallingNativeContext() {
JavaScriptFrameIterator it;
#ifdef ENABLE_DEBUGGER_SUPPORT
if (debug_->InDebugger()) {
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
- if (context->global_context() == *debug_->debug_context()) {
+ if (context->native_context() == *debug_->debug_context()) {
it.Advance();
} else {
break;
@@ -1339,7 +1372,7 @@ Handle<Context> Isolate::GetCallingGlobalContext() {
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
- return Handle<Context>(context->global_context());
+ return Handle<Context>(context->native_context());
}
@@ -1470,6 +1503,7 @@ Isolate::Isolate()
descriptor_lookup_cache_(NULL),
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
+ runtime_zone_(this),
in_use_list_(0),
free_list_(0),
preallocated_storage_preallocated_(false),
@@ -1483,14 +1517,15 @@ Isolate::Isolate()
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
- context_exit_happened_(false) {
+ context_exit_happened_(false),
+ deferred_handles_head_(NULL),
+ optimizing_compiler_thread_(this) {
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
sizeof(isolate_addresses_[0]) * (kIsolateAddressCount + 1));
heap_.isolate_ = this;
- zone_.isolate_ = this;
stack_guard_.isolate_ = this;
// ThreadManager is initialized early to support locking an isolate
@@ -1547,6 +1582,11 @@ void Isolate::TearDown() {
thread_data_table_->RemoveAllThreads(this);
}
+ if (serialize_partial_snapshot_cache_ != NULL) {
+ delete[] serialize_partial_snapshot_cache_;
+ serialize_partial_snapshot_cache_ = NULL;
+ }
+
if (!IsDefaultIsolate()) {
delete this;
}
@@ -1560,6 +1600,8 @@ void Isolate::Deinit() {
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
+ if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
+
if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
// We must stop the logger before we tear down other components.
@@ -1595,6 +1637,26 @@ void Isolate::Deinit() {
}
+void Isolate::PushToPartialSnapshotCache(Object* obj) {
+ int length = serialize_partial_snapshot_cache_length();
+ int capacity = serialize_partial_snapshot_cache_capacity();
+
+ if (length >= capacity) {
+ int new_capacity = static_cast<int>((capacity + 10) * 1.2);
+ Object** new_array = new Object*[new_capacity];
+ for (int i = 0; i < length; i++) {
+ new_array[i] = serialize_partial_snapshot_cache()[i];
+ }
+ if (capacity != 0) delete[] serialize_partial_snapshot_cache();
+ set_serialize_partial_snapshot_cache(new_array);
+ set_serialize_partial_snapshot_cache_capacity(new_capacity);
+ }
+
+ serialize_partial_snapshot_cache()[length] = obj;
+ set_serialize_partial_snapshot_cache_length(length + 1);
+}
+
+
void Isolate::SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data) {
Thread::SetThreadLocal(isolate_key_, isolate);
@@ -1606,7 +1668,7 @@ Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
// Has to be called while counters_ are still alive.
- zone_.DeleteKeptSegment();
+ runtime_zone_.DeleteKeptSegment();
delete[] assembler_spare_buffer_;
assembler_spare_buffer_ = NULL;
@@ -1743,10 +1805,8 @@ bool Isolate::Init(Deserializer* des) {
ASSERT(Isolate::Current() == this);
TRACE_ISOLATE(init);
-#ifdef DEBUG
// The initialization process does not handle memory exhaustion.
DisallowAllocationFailure disallow_allocation_failure;
-#endif
InitializeLoggingAndCounters();
@@ -1778,7 +1838,7 @@ bool Isolate::Init(Deserializer* des) {
global_handles_ = new GlobalHandles(this);
bootstrapper_ = new Bootstrapper();
handle_scope_implementer_ = new HandleScopeImplementer(this);
- stub_cache_ = new StubCache(this, zone());
+ stub_cache_ = new StubCache(this, runtime_zone());
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
@@ -1812,6 +1872,11 @@ bool Isolate::Init(Deserializer* des) {
return false;
}
+ if (create_heap_objects) {
+ // Terminate the cache array with the sentinel so we can iterate.
+ PushToPartialSnapshotCache(heap_.undefined_value());
+ }
+
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
@@ -1838,7 +1903,7 @@ bool Isolate::Init(Deserializer* des) {
#endif
// If we are deserializing, read the state into the now-empty heap.
- if (des != NULL) {
+ if (!create_heap_objects) {
des->Deserialize();
}
stub_cache_->Initialize();
@@ -1853,7 +1918,7 @@ bool Isolate::Init(Deserializer* des) {
heap_.SetStackLimits();
// Quiet the heap NaN if needed on target platform.
- if (des != NULL) Assembler::QuietNaN(heap_.nan_value());
+ if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
@@ -1861,7 +1926,8 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
- if (des != NULL && (FLAG_log_code || FLAG_ll_prof)) {
+ if (create_heap_objects &&
+ (FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
HandleScope scope;
LOG(this, LogCodeObjects());
LOG(this, LogCompiledFunctions());
@@ -1876,6 +1942,7 @@ bool Isolate::Init(Deserializer* des) {
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
+ if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
return true;
}
@@ -1959,6 +2026,36 @@ void Isolate::Exit() {
}
+void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
+ deferred->next_ = deferred_handles_head_;
+ if (deferred_handles_head_ != NULL) {
+ deferred_handles_head_->previous_ = deferred;
+ }
+ deferred_handles_head_ = deferred;
+}
+
+
+void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
+#ifdef DEBUG
+ // In debug mode assert that the linked list is well-formed.
+ DeferredHandles* deferred_iterator = deferred;
+ while (deferred_iterator->previous_ != NULL) {
+ deferred_iterator = deferred_iterator->previous_;
+ }
+ ASSERT(deferred_handles_head_ == deferred_iterator);
+#endif
+ if (deferred_handles_head_ == deferred) {
+ deferred_handles_head_ = deferred_handles_head_->next_;
+ }
+ if (deferred->next_ != NULL) {
+ deferred->next_->previous_ = deferred->previous_;
+ }
+ if (deferred->previous_ != NULL) {
+ deferred->previous_->next_ = deferred->next_;
+ }
+}
+
+
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
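PushToPartialSnapshotCache above replaces the old fixed-capacity array with a heap-allocated one that grows geometrically: pad the capacity by 10, scale by 1.2, copy, and free the old array. Isolating just the growth step (a sketch of the arithmetic already in the hunk):

    // Growth step from PushToPartialSnapshotCache: capacities run
    // 0 -> 12 -> 26 -> 43 -> ..., so N pushes reallocate only O(log N)
    // times and cost O(N) element copies in total.
    static int NextCapacity(int capacity) {
      return static_cast<int>((capacity + 10) * 1.2);
    }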
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 5ca2b87f0e..1d7bc6fc6d 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -41,6 +41,7 @@
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
+#include "optimizing-compiler-thread.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "runtime.h"
@@ -307,8 +308,7 @@ class ThreadLocalTop BASE_EMBEDDED {
#define ISOLATE_INIT_ARRAY_LIST(V) \
/* SerializerDeserializer state. */ \
- V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
- V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
+ V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
@@ -320,6 +320,8 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
/* SerializerDeserializer state. */ \
V(int, serialize_partial_snapshot_cache_length, 0) \
+ V(int, serialize_partial_snapshot_cache_capacity, 0) \
+ V(Object**, serialize_partial_snapshot_cache, NULL) \
/* Assembler state. */ \
/* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
V(byte*, assembler_spare_buffer, NULL) \
@@ -327,7 +329,7 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
- /* function cache of the global context. */ \
+ /* function cache of the native context. */ \
V(int, next_serial_number, 0) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
V(bool, always_allow_natives_syntax, false) \
@@ -527,6 +529,11 @@ class Isolate {
thread_local_top_.save_context_ = save;
}
+ // Access to the map of "new Object()".
+ Map* empty_object_map() {
+ return context()->native_context()->object_function()->map();
+ }
+
// Access to current thread id.
ThreadId thread_id() { return thread_local_top_.thread_id_; }
void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
@@ -610,6 +617,9 @@ class Isolate {
(exception != heap()->termination_exception());
}
+ // Serializer.
+ void PushToPartialSnapshotCache(Object* obj);
+
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
return thread->c_entry_fp_;
@@ -634,8 +644,8 @@ class Isolate {
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
- Handle<GlobalObject> global() {
- return Handle<GlobalObject>(context()->global());
+ Handle<GlobalObject> global_object() {
+ return Handle<GlobalObject>(context()->global_object());
}
// Returns the global proxy object of the current context.
@@ -697,6 +707,10 @@ class Isolate {
void PrintStack(StringStream* accumulator);
void PrintStack();
Handle<String> StackTraceString();
+ NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
+ Object* object,
+ Map* map,
+ unsigned int magic2));
Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
@@ -754,12 +768,13 @@ class Isolate {
void IterateThread(ThreadVisitor* v, char* t);
- // Returns the current global context.
+ // Returns the current native and global context.
+ Handle<Context> native_context();
Handle<Context> global_context();
- // Returns the global context of the calling JavaScript code. That
- // is, the global context of the top-most JavaScript frame.
- Handle<Context> GetCallingGlobalContext();
+ // Returns the native context of the calling JavaScript code. That
+ // is, the native context of the top-most JavaScript frame.
+ Handle<Context> GetCallingNativeContext();
void RegisterTryCatchHandler(v8::TryCatch* that);
void UnregisterTryCatchHandler(v8::TryCatch* that);
@@ -793,12 +808,12 @@ class Isolate {
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
-#define GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
Handle<type> name() { \
- return Handle<type>(context()->global_context()->name()); \
+ return Handle<type>(context()->native_context()->name()); \
}
- GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSOR)
-#undef GLOBAL_CONTEXT_FIELD_ACCESSOR
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
+#undef NATIVE_CONTEXT_FIELD_ACCESSOR
Bootstrapper* bootstrapper() { return bootstrapper_; }
Counters* counters() {
@@ -850,7 +865,7 @@ class Isolate {
ASSERT(handle_scope_implementer_);
return handle_scope_implementer_;
}
- Zone* zone() { return &zone_; }
+ Zone* runtime_zone() { return &runtime_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
@@ -1046,6 +1061,14 @@ class Isolate {
date_cache_ = date_cache;
}
+ void IterateDeferredHandles(ObjectVisitor* visitor);
+ void LinkDeferredHandles(DeferredHandles* deferred_handles);
+ void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
+
+ OptimizingCompilerThread* optimizing_compiler_thread() {
+ return &optimizing_compiler_thread_;
+ }
+
private:
Isolate();
@@ -1196,7 +1219,7 @@ class Isolate {
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
- Zone zone_;
+ Zone runtime_zone_;
PreallocatedStorage in_use_list_;
PreallocatedStorage free_list_;
bool preallocated_storage_preallocated_;
@@ -1269,8 +1292,13 @@ class Isolate {
#undef ISOLATE_FIELD_OFFSET
#endif
+ DeferredHandles* deferred_handles_head_;
+ OptimizingCompilerThread optimizing_compiler_thread_;
+
friend class ExecutionAccess;
+ friend class HandleScopeImplementer;
friend class IsolateInitializer;
+ friend class OptimizingCompilerThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
@@ -1411,15 +1439,15 @@ class PostponeInterruptsScope BASE_EMBEDDED {
#define LOGGER (v8::internal::Isolate::Current()->logger())
-// Tells whether the global context is marked with out of memory.
+// Tells whether the native context is marked with out of memory.
inline bool Context::has_out_of_memory() {
- return global_context()->out_of_memory()->IsTrue();
+ return native_context()->out_of_memory()->IsTrue();
}
-// Mark the global context with out of memory.
+// Mark the native context with out of memory.
inline void Context::mark_out_of_memory() {
- global_context()->set_out_of_memory(HEAP->true_value());
+ native_context()->set_out_of_memory(HEAP->true_value());
}
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 7265165ac1..a4db130e25 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -290,7 +290,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
Handle<JSFunction> object_constructor(
- isolate()->global_context()->object_function());
+ isolate()->native_context()->object_function());
Handle<JSObject> json_object =
isolate()->factory()->NewJSObject(object_constructor);
ASSERT_EQ(c0_, '{');
@@ -326,7 +326,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
// Parse a JSON array. Position must be right at '['.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
- ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
+ ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
ZoneList<Handle<Object> > elements(4, zone());
ASSERT_EQ(c0_, '[');
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index ccef4456d6..85224b0f05 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -247,22 +247,23 @@ function BasicSerializeObject(value, stack, builder) {
}
builder.push("{");
var first = true;
- for (var p in value) {
- if (%HasLocalProperty(value, p)) {
- if (!first) {
- builder.push(%QuoteJSONStringComma(p));
- } else {
- builder.push(%QuoteJSONString(p));
- }
- builder.push(":");
- var before = builder.length;
- BasicJSONSerialize(p, value[p], stack, builder);
- if (before == builder.length) {
- builder.pop();
- builder.pop();
- } else {
- first = false;
- }
+ var keys = %ObjectKeys(value);
+ var len = keys.length;
+ for (var i = 0; i < len; i++) {
+ var p = keys[i];
+ if (!first) {
+ builder.push(%QuoteJSONStringComma(p));
+ } else {
+ builder.push(%QuoteJSONString(p));
+ }
+ builder.push(":");
+ var before = builder.length;
+ BasicJSONSerialize(p, value[p], stack, builder);
+ if (before == builder.length) {
+ builder.pop();
+ builder.pop();
+ } else {
+ first = false;
}
}
stack.pop();
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index cd51db80a4..e59170d5a3 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -167,7 +167,9 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
- Handle<String> flag_str) {
+ Handle<String> flag_str,
+ Zone* zone) {
+ ZoneScope zone_scope(zone, DELETE_ON_EXIT);
Isolate* isolate = re->GetIsolate();
JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
CompilationCache* compilation_cache = isolate->compilation_cache();
@@ -181,12 +183,11 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
return re;
}
pattern = FlattenGetString(pattern);
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &parse_result)) {
+ &parse_result, zone)) {
// Throw an exception if we fail to parse the pattern.
ThrowRegExpException(re,
pattern,
@@ -231,14 +232,13 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
- Handle<JSArray> last_match_info,
- Zone* zone) {
+ Handle<JSArray> last_match_info) {
switch (regexp->TypeTag()) {
case JSRegExp::ATOM:
return AtomExec(regexp, subject, index, last_match_info);
case JSRegExp::IRREGEXP: {
Handle<Object> result =
- IrregexpExec(regexp, subject, index, last_match_info, zone);
+ IrregexpExec(regexp, subject, index, last_match_info);
ASSERT(!result.is_null() ||
regexp->GetIsolate()->has_pending_exception());
return result;
@@ -278,11 +278,12 @@ static void SetAtomLastCapture(FixedArray* array,
}
-Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
- Handle<String> subject,
- int index,
- Handle<JSArray> last_match_info) {
- Isolate* isolate = re->GetIsolate();
+int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ int32_t* output,
+ int output_size) {
+ Isolate* isolate = regexp->GetIsolate();
ASSERT(0 <= index);
ASSERT(index <= subject->length());
@@ -290,15 +291,16 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
if (!subject->IsFlat()) FlattenString(subject);
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- String* needle = String::cast(re->DataAt(JSRegExp::kAtomPatternIndex));
+ String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
int needle_len = needle->length();
ASSERT(needle->IsFlat());
+ ASSERT_LT(0, needle_len);
- if (needle_len != 0) {
- if (index + needle_len > subject->length()) {
- return isolate->factory()->null_value();
- }
+ if (index + needle_len > subject->length()) {
+ return RegExpImpl::RE_FAILURE;
+ }
+ for (int i = 0; i < output_size; i += 2) {
String::FlatContent needle_content = needle->GetFlatContent();
String::FlatContent subject_content = subject->GetFlatContent();
ASSERT(needle_content.IsFlat());
@@ -323,15 +325,36 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
subject_content.ToUC16Vector(),
needle_content.ToUC16Vector(),
index)));
- if (index == -1) return isolate->factory()->null_value();
+ if (index == -1) {
+ return i / 2; // Return number of matches.
+ } else {
+ output[i] = index;
+ output[i+1] = index + needle_len;
+ index += needle_len;
+ }
}
- ASSERT(last_match_info->HasFastObjectElements());
+ return output_size / 2;
+}
- {
- NoHandleAllocation no_handles;
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- SetAtomLastCapture(array, *subject, index, index + needle_len);
- }
+
+Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
+ Handle<String> subject,
+ int index,
+ Handle<JSArray> last_match_info) {
+ Isolate* isolate = re->GetIsolate();
+
+ static const int kNumRegisters = 2;
+ STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize);
+ int32_t* output_registers = isolate->jsregexp_static_offsets_vector();
+
+ int res = AtomExecRaw(re, subject, index, output_registers, kNumRegisters);
+
+ if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
+
+ ASSERT_EQ(res, RegExpImpl::RE_SUCCESS);
+ NoHandleAllocation no_handles;
+ FixedArray* array = FixedArray::cast(last_match_info->elements());
+ SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
return last_match_info;
}
@@ -345,8 +368,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
// If compilation fails, an exception is thrown and this function
// returns false.
bool RegExpImpl::EnsureCompiledIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii,
- Zone* zone) {
+ Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii) {
Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
#ifdef V8_INTERPRETED_REGEXP
if (compiled_code->IsByteArray()) return true;
@@ -362,7 +384,7 @@ bool RegExpImpl::EnsureCompiledIrregexp(
ASSERT(compiled_code->IsSmi());
return true;
}
- return CompileIrregexp(re, sample_subject, is_ascii, zone);
+ return CompileIrregexp(re, sample_subject, is_ascii);
}
@@ -384,11 +406,10 @@ static bool CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
Handle<String> sample_subject,
- bool is_ascii,
- Zone* zone) {
+ bool is_ascii) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
// If we had a compilation error the last time this is saved at the
// saved code index.
@@ -419,8 +440,10 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
if (!pattern->IsFlat()) FlattenString(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
+ Zone* zone = isolate->runtime_zone();
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &compile_data)) {
+ &compile_data,
+ zone)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
ThrowRegExpException(re,
@@ -502,17 +525,20 @@ void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
- Handle<String> subject,
- Zone* zone) {
+ Handle<String> subject) {
if (!subject->IsFlat()) FlattenString(subject);
// Check the asciiness of the underlying storage.
bool is_ascii = subject->IsAsciiRepresentationUnderneath();
- if (!EnsureCompiledIrregexp(regexp, subject, is_ascii, zone)) return -1;
+ if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
#ifdef V8_INTERPRETED_REGEXP
// Byte-code regexp needs space allocated for all its registers.
- return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
+ // The result captures are copied to the start of the registers array
+ // if the match succeeds. This way those registers are not clobbered
+  // when we set the last match info from the last successful match.
+ return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data())) +
+ (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
#else // V8_INTERPRETED_REGEXP
// Native regexp only needs room to output captures. Registers are handled
// internally.
@@ -521,28 +547,11 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
}
-int RegExpImpl::GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
- int registers_per_match,
- int* max_matches) {
-#ifdef V8_INTERPRETED_REGEXP
- // Global loop in interpreted regexp is not implemented. Therefore we choose
- // the size of the offsets vector so that it can only store one match.
- *max_matches = 1;
- return registers_per_match;
-#else // V8_INTERPRETED_REGEXP
- int size = Max(registers_per_match, OffsetsVector::kStaticOffsetsVectorSize);
- *max_matches = size / registers_per_match;
- return size;
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-int RegExpImpl::IrregexpExecRaw(
- Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Vector<int> output,
- Zone* zone) {
+int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ int32_t* output,
+ int output_size) {
Isolate* isolate = regexp->GetIsolate();
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
@@ -554,15 +563,19 @@ int RegExpImpl::IrregexpExecRaw(
bool is_ascii = subject->IsAsciiRepresentationUnderneath();
#ifndef V8_INTERPRETED_REGEXP
- ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+ ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
- EnsureCompiledIrregexp(regexp, subject, is_ascii, zone);
+ EnsureCompiledIrregexp(regexp, subject, is_ascii);
Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
+ // The stack is used to allocate registers for the compiled regexp code.
+ // This means that in case of failure, the output registers array is left
+ // untouched and contains the capture results from the previous successful
+ // match. We can use that to set the last match info lazily.
NativeRegExpMacroAssembler::Result res =
NativeRegExpMacroAssembler::Match(code,
subject,
- output.start(),
- output.length(),
+ output,
+ output_size,
index,
isolate);
if (res != NativeRegExpMacroAssembler::RETRY) {
@@ -582,29 +595,36 @@ int RegExpImpl::IrregexpExecRaw(
// the, potentially, different subject (the string can switch between
// being internal and external, and even between being ASCII and UC16,
// but the characters are always the same).
- IrregexpPrepare(regexp, subject, zone);
+ IrregexpPrepare(regexp, subject);
is_ascii = subject->IsAsciiRepresentationUnderneath();
} while (true);
UNREACHABLE();
return RE_EXCEPTION;
#else // V8_INTERPRETED_REGEXP
- ASSERT(output.length() >= IrregexpNumberOfRegisters(*irregexp));
+ ASSERT(output_size >= IrregexpNumberOfRegisters(*irregexp));
// We must have done EnsureCompiledIrregexp, so we can get the number of
// registers.
- int* register_vector = output.start();
int number_of_capture_registers =
(IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
+ int32_t* raw_output = &output[number_of_capture_registers];
+ // We do not touch the actual capture result registers until we know there
+ // has been a match so that we can use those capture results to set the
+ // last match info.
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- register_vector[i] = -1;
+ raw_output[i] = -1;
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
IrregexpResult result = IrregexpInterpreter::Match(isolate,
byte_codes,
subject,
- register_vector,
+ raw_output,
index);
+ if (result == RE_SUCCESS) {
+ // Copy capture results to the start of the registers array.
+ memcpy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
+ }
if (result == RE_EXCEPTION) {
ASSERT(!isolate->has_pending_exception());
isolate->StackOverflow();
@@ -614,52 +634,44 @@ int RegExpImpl::IrregexpExecRaw(
}
-Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
+Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
Handle<String> subject,
int previous_index,
- Handle<JSArray> last_match_info,
- Zone* zone) {
- Isolate* isolate = jsregexp->GetIsolate();
- ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
+ Handle<JSArray> last_match_info) {
+ Isolate* isolate = regexp->GetIsolate();
+ ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
// Prepare space for the return values.
-#ifdef V8_INTERPRETED_REGEXP
-#ifdef DEBUG
+#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
if (FLAG_trace_regexp_bytecodes) {
- String* pattern = jsregexp->Pattern();
+ String* pattern = regexp->Pattern();
PrintF("\n\nRegexp match: /%s/\n\n", *(pattern->ToCString()));
PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
}
#endif
-#endif
- int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject, zone);
+ int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
if (required_registers < 0) {
// Compiling failed with an exception.
ASSERT(isolate->has_pending_exception());
return Handle<Object>::null();
}
- OffsetsVector registers(required_registers, isolate);
+ int32_t* output_registers = NULL;
+ if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ output_registers = NewArray<int32_t>(required_registers);
+ }
+ SmartArrayPointer<int32_t> auto_release(output_registers);
+ if (output_registers == NULL) {
+ output_registers = isolate->jsregexp_static_offsets_vector();
+ }
- int res = RegExpImpl::IrregexpExecRaw(jsregexp, subject, previous_index,
- Vector<int>(registers.vector(),
- registers.length()),
- zone);
+ int res = RegExpImpl::IrregexpExecRaw(
+ regexp, subject, previous_index, output_registers, required_registers);
if (res == RE_SUCCESS) {
- int capture_register_count =
- (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
- last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
- AssertNoAllocation no_gc;
- int* register_vector = registers.vector();
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- for (int i = 0; i < capture_register_count; i += 2) {
- SetCapture(array, i, register_vector[i]);
- SetCapture(array, i + 1, register_vector[i + 1]);
- }
- SetLastCaptureCount(array, capture_register_count);
- SetLastSubject(array, *subject);
- SetLastInput(array, *subject);
- return last_match_info;
+ int capture_count =
+ IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
+ return SetLastMatchInfo(
+ last_match_info, subject, capture_count, output_registers);
}
if (res == RE_EXCEPTION) {
ASSERT(isolate->has_pending_exception());
@@ -670,6 +682,146 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
}
+Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
+ Handle<String> subject,
+ int capture_count,
+ int32_t* match) {
+ int capture_register_count = (capture_count + 1) * 2;
+ last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
+ AssertNoAllocation no_gc;
+ FixedArray* array = FixedArray::cast(last_match_info->elements());
+ if (match != NULL) {
+ for (int i = 0; i < capture_register_count; i += 2) {
+ SetCapture(array, i, match[i]);
+ SetCapture(array, i + 1, match[i + 1]);
+ }
+ }
+ SetLastCaptureCount(array, capture_register_count);
+ SetLastSubject(array, *subject);
+ SetLastInput(array, *subject);
+ return last_match_info;
+}
+
+
+RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ bool is_global,
+ Isolate* isolate)
+ : register_array_(NULL),
+ register_array_size_(0),
+ regexp_(regexp),
+ subject_(subject) {
+#ifdef V8_INTERPRETED_REGEXP
+ bool interpreted = true;
+#else
+ bool interpreted = false;
+#endif // V8_INTERPRETED_REGEXP
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ static const int kAtomRegistersPerMatch = 2;
+ registers_per_match_ = kAtomRegistersPerMatch;
+ // There is no distinction between interpreted and native for atom regexps.
+ interpreted = false;
+ } else {
+ registers_per_match_ = RegExpImpl::IrregexpPrepare(regexp_, subject_);
+ if (registers_per_match_ < 0) {
+ num_matches_ = -1; // Signal exception.
+ return;
+ }
+ }
+
+ if (is_global && !interpreted) {
+ register_array_size_ =
+ Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
+ max_matches_ = register_array_size_ / registers_per_match_;
+ } else {
+ // Global loop in interpreted regexp is not implemented. We choose
+ // the size of the offsets vector so that it can only store one match.
+ register_array_size_ = registers_per_match_;
+ max_matches_ = 1;
+ }
+
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ register_array_ = NewArray<int32_t>(register_array_size_);
+ } else {
+ register_array_ = isolate->jsregexp_static_offsets_vector();
+ }
+
+ // Set state so that fetching the results the first time triggers a call
+ // to the compiled regexp.
+ current_match_index_ = max_matches_ - 1;
+ num_matches_ = max_matches_;
+ ASSERT(registers_per_match_ >= 2); // Each match has at least one capture.
+ ASSERT_GE(register_array_size_, registers_per_match_);
+ int32_t* last_match =
+ &register_array_[current_match_index_ * registers_per_match_];
+ last_match[0] = -1;
+ last_match[1] = 0;
+}
+
+
+RegExpImpl::GlobalCache::~GlobalCache() {
+ // Deallocate the register array if we allocated it in the constructor
+ // (as opposed to using the existing jsregexp_static_offsets_vector).
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ DeleteArray(register_array_);
+ }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::FetchNext() {
+ current_match_index_++;
+ if (current_match_index_ >= num_matches_) {
+ // Current batch of results exhausted.
+ // Fail if last batch was not even fully filled.
+ if (num_matches_ < max_matches_) {
+ num_matches_ = 0; // Signal failed match.
+ return NULL;
+ }
+
+ int32_t* last_match =
+ &register_array_[(current_match_index_ - 1) * registers_per_match_];
+ int last_end_index = last_match[1];
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
+ subject_,
+ last_end_index,
+ register_array_,
+ register_array_size_);
+ } else {
+ int last_start_index = last_match[0];
+ if (last_start_index == last_end_index) last_end_index++;
+ if (last_end_index > subject_->length()) {
+ num_matches_ = 0; // Signal failed match.
+ return NULL;
+ }
+ num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
+ subject_,
+ last_end_index,
+ register_array_,
+ register_array_size_);
+ }
+
+ if (num_matches_ <= 0) return NULL;
+ current_match_index_ = 0;
+ return register_array_;
+ } else {
+ return &register_array_[current_match_index_ * registers_per_match_];
+ }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
+ int index = current_match_index_ * registers_per_match_;
+ if (num_matches_ == 0) {
+ // After a failed match we shift back by one result.
+ index -= registers_per_match_;
+ }
+ return &register_array_[index];
+}
+
+
// -------------------------------------------------------------------
// Implementation of the Irregexp regular expression engine.
//
@@ -5987,7 +6139,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#else // V8_INTERPRETED_REGEXP
// Interpreted regexp implementation.
EmbeddedVector<byte, 1024> codes;
- RegExpMacroAssemblerIrregexp macro_assembler(codes);
+ RegExpMacroAssemblerIrregexp macro_assembler(codes, zone);
#endif // V8_INTERPRETED_REGEXP
// Inserted here, instead of in Assembler, because it depends on information
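The GlobalCache introduced above subsumes the removed GlobalOffsetsVectorSize logic: it sizes one register array for a whole batch of matches, runs the regexp to refill the batch, and hands out one match per call. Based only on the interface in this patch, the intended call pattern is roughly the following (a hedged sketch; the real call sites live in runtime code outside these hunks):

    void ForEachMatchSketch(Handle<JSRegExp> regexp,
                            Handle<String> subject,
                            Isolate* isolate) {
      RegExpImpl::GlobalCache cache(regexp, subject, true /* is_global */, isolate);
      if (cache.HasException()) return;   // Compilation failed; exception pending.
      while (true) {
        int32_t* match = cache.FetchNext();
        if (match == NULL) break;         // Batch empty: no more matches.
        // match[0]/match[1] bound the current match; further capture
        // registers follow in pairs.
      }
      // LastSuccessfulMatch() still points at the final match here, which
      // is what lets callers set the last match info lazily.
    }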
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 782c5b0b20..96825cef21 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -71,15 +71,15 @@ class RegExpImpl {
// Returns false if compilation fails.
static Handle<Object> Compile(Handle<JSRegExp> re,
Handle<String> pattern,
- Handle<String> flags);
+ Handle<String> flags,
+ Zone* zone);
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
static Handle<Object> Exec(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
- Handle<JSArray> lastMatchInfo,
- Zone* zone);
+ Handle<JSArray> lastMatchInfo);
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpInitialize(Handle<JSRegExp> re,
@@ -93,6 +93,14 @@ class RegExpImpl {
JSRegExp::Flags flags,
Handle<String> match_pattern);
+
+ static int AtomExecRaw(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ int32_t* output,
+ int output_size);
+
+
static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
@@ -105,17 +113,10 @@ class RegExpImpl {
// This ensures that the regexp is compiled for the subject, and that
// the subject is flat.
// Returns the number of integer spaces required by IrregexpExecOnce
- // as its "registers" argument. If the regexp cannot be compiled,
+ // as its "registers" argument. If the regexp cannot be compiled,
// an exception is set as pending, and this function returns negative.
static int IrregexpPrepare(Handle<JSRegExp> regexp,
- Handle<String> subject,
- Zone* zone);
-
- // Calculate the size of offsets vector for the case of global regexp
- // and the number of matches this vector is able to store.
- static int GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
- int registers_per_match,
- int* max_matches);
+ Handle<String> subject);
// Execute a regular expression on the subject, starting from index.
// If matching succeeds, return the number of matches. This can be larger
@@ -126,18 +127,56 @@ class RegExpImpl {
static int IrregexpExecRaw(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
- Vector<int> registers,
- Zone* zone);
+ int32_t* output,
+ int output_size);
// Execute an Irregexp bytecode pattern.
// On a successful match, the result is a JSArray containing
- // captured positions. On a failure, the result is the null value.
+ // captured positions. On a failure, the result is the null value.
// Returns an empty handle in case of an exception.
static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
- Handle<JSArray> lastMatchInfo,
- Zone* zone);
+ Handle<JSArray> lastMatchInfo);
+
+ // Set last match info. If match is NULL, then setting captures is omitted.
+ static Handle<JSArray> SetLastMatchInfo(Handle<JSArray> last_match_info,
+ Handle<String> subject,
+ int capture_count,
+ int32_t* match);
+
+
+ class GlobalCache {
+ public:
+ GlobalCache(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ bool is_global,
+ Isolate* isolate);
+
+ ~GlobalCache();
+
+ // Fetch the next entry in the cache for global regexp match results.
+ // This does not set the last match info. Upon failure, NULL is returned.
+ // The cause can be checked with Result(). The previous
+    // The cause can be checked with HasException(). The previous
+    // result is still available in memory when a failure happens.
+
+ int32_t* LastSuccessfulMatch();
+
+ inline bool HasException() { return num_matches_ < 0; }
+
+ private:
+ int num_matches_;
+ int max_matches_;
+ int current_match_index_;
+ int registers_per_match_;
+ // Pointer to the last set of captures.
+ int32_t* register_array_;
+ int register_array_size_;
+ Handle<JSRegExp> regexp_;
+ Handle<String> subject_;
+ };
+
// Array index in the lastMatchInfo array.
static const int kLastCaptureCount = 0;
@@ -198,32 +237,10 @@ class RegExpImpl {
  static const int kRegExpCompiledLimit = 1 * MB;
private:
- static String* last_ascii_string_;
- static String* two_byte_cached_string_;
-
static bool CompileIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii,
- Zone* zone);
+ Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
static inline bool EnsureCompiledIrregexp(
- Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii,
- Zone* zone);
-
-
- // Set the subject cache. The previous string buffer is not deleted, so the
- // caller should ensure that it doesn't leak.
- static void SetSubjectCache(String* subject,
- char* utf8_subject,
- int uft8_length,
- int character_position,
- int utf8_position);
-
- // A one element cache of the last utf8_subject string and its length. The
- // subject JS String object is cached in the heap. We also cache a
- // translation between position and utf8 position.
- static char* utf8_subject_cache_;
- static int utf8_length_cache_;
- static int utf8_position_;
- static int character_position_;
+ Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
};
@@ -1627,40 +1644,6 @@ class RegExpEngine: public AllStatic {
};
-class OffsetsVector {
- public:
- inline OffsetsVector(int num_registers, Isolate* isolate)
- : offsets_vector_length_(num_registers) {
- if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- vector_ = NewArray<int>(offsets_vector_length_);
- } else {
- vector_ = isolate->jsregexp_static_offsets_vector();
- }
- }
- inline ~OffsetsVector() {
- if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- DeleteArray(vector_);
- vector_ = NULL;
- }
- }
- inline int* vector() { return vector_; }
- inline int length() { return offsets_vector_length_; }
-
- static const int kStaticOffsetsVectorSize =
- Isolate::kJSRegexpStaticOffsetsVectorSize;
-
- private:
- static Address static_offsets_vector_address(Isolate* isolate) {
- return reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector());
- }
-
- int* vector_;
- int offsets_vector_length_;
-
- friend class ExternalReference;
-};
-
-
} } // namespace v8::internal
#endif // V8_JSREGEXP_H_
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index bbc405ba0b..91a98112b6 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -1064,7 +1064,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
bool LAllocator::Allocate(LChunk* chunk) {
ASSERT(chunk_ == NULL);
- chunk_ = chunk;
+ chunk_ = static_cast<LPlatformChunk*>(chunk);
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index d47e33595a..5b05263575 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -48,7 +48,7 @@ class BitVector;
class StringStream;
class LArgument;
-class LChunk;
+class LPlatformChunk;
class LOperand;
class LUnallocated;
class LConstantOperand;
@@ -455,7 +455,7 @@ class LAllocator BASE_EMBEDDED {
return &fixed_double_live_ranges_;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@@ -598,7 +598,7 @@ class LAllocator BASE_EMBEDDED {
Zone* zone_;
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
// During liveness analysis keep a mapping from block id to live_in sets
// for blocks already analyzed.
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index fd8b7965f1..eb2198d854 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -27,6 +27,23 @@
#include "v8.h"
#include "lithium.h"
+#include "scopes.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+#else
+#error "Unknown architecture."
+#endif
namespace v8 {
namespace internal {
@@ -156,7 +173,7 @@ void LParallelMove::PrintDataTo(StringStream* stream) const {
void LEnvironment::PrintTo(StringStream* stream) {
- stream->Add("[id=%d|", ast_id());
+ stream->Add("[id=%d|", ast_id().ToInt());
stream->Add("[parameters=%d|", parameter_count());
stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
for (int i = 0; i < values_.length(); ++i) {
@@ -240,4 +257,183 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
}
+LLabel* LChunk::GetLabel(int block_id) const {
+ HBasicBlock* block = graph_->blocks()->at(block_id);
+ int first_instruction = block->first_instruction_index();
+ return LLabel::cast(instructions_[first_instruction]);
+}
+
+
+int LChunk::LookupDestination(int block_id) const {
+ LLabel* cur = GetLabel(block_id);
+ while (cur->replacement() != NULL) {
+ cur = cur->replacement();
+ }
+ return cur->block_id();
+}
+
+Label* LChunk::GetAssemblyLabel(int block_id) const {
+ LLabel* label = GetLabel(block_id);
+ ASSERT(!label->HasReplacement());
+ return label->label();
+}
+
+void LChunk::MarkEmptyBlocks() {
+ HPhase phase("L_Mark empty blocks", this);
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ int first = block->first_instruction_index();
+ int last = block->last_instruction_index();
+ LInstruction* first_instr = instructions()->at(first);
+ LInstruction* last_instr = instructions()->at(last);
+
+ LLabel* label = LLabel::cast(first_instr);
+ if (last_instr->IsGoto()) {
+ LGoto* goto_instr = LGoto::cast(last_instr);
+ if (label->IsRedundant() &&
+ !label->is_loop_header()) {
+ bool can_eliminate = true;
+ for (int i = first + 1; i < last && can_eliminate; ++i) {
+ LInstruction* cur = instructions()->at(i);
+ if (cur->IsGap()) {
+ LGap* gap = LGap::cast(cur);
+ if (!gap->IsRedundant()) {
+ can_eliminate = false;
+ }
+ } else {
+ can_eliminate = false;
+ }
+ }
+
+ if (can_eliminate) {
+ label->set_replacement(GetLabel(goto_instr->block_id()));
+ }
+ }
+ }
+ }
+}
+
+
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+ LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+ int index = -1;
+ if (instr->IsControl()) {
+ instructions_.Add(gap, zone());
+ index = instructions_.length();
+ instructions_.Add(instr, zone());
+ } else {
+ index = instructions_.length();
+ instructions_.Add(instr, zone());
+ instructions_.Add(gap, zone());
+ }
+ if (instr->HasPointerMap()) {
+ pointer_maps_.Add(instr->pointer_map(), zone());
+ instr->pointer_map()->set_lithium_position(index);
+ }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+ return LConstantOperand::Create(constant->id(), zone());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+ // The receiver is at index 0, the first parameter at index 1, so we
+ // shift all parameter indexes down by the number of parameters, and
+ // make sure they end up negative so they are distinguishable from
+ // spill slots.
+ int result = index - info()->scope()->num_parameters() - 1;
+ ASSERT(result < 0);
+ return result;
+}
+
+
+// A parameter relative to ebp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+ ASSERT(-1 <= index); // -1 is the receiver.
+ return (1 + info()->scope()->num_parameters() - index) *
+ kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+ return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+ return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+ while (!IsGapAt(index)) index--;
+ return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+ GetGapAt(index)->GetOrCreateParallelMove(
+ LGap::START, zone())->AddMove(from, to, zone());
+}
+
+
+HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
+ return HConstant::cast(graph_->LookupValue(operand->index()));
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+ LConstantOperand* operand) const {
+ return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunk::NewChunk(HGraph* graph) {
+ NoHandleAllocation no_handles;
+ AssertNoAllocation no_gc;
+
+ int values = graph->GetMaximumValueID();
+ CompilationInfo* info = graph->info();
+ if (values > LUnallocated::kMaxVirtualRegisters) {
+ info->set_bailout_reason("not enough virtual registers for values");
+ return NULL;
+ }
+ LAllocator allocator(values, graph);
+ LChunkBuilder builder(info, graph, &allocator);
+ LChunk* chunk = builder.Build();
+ if (chunk == NULL) return NULL;
+
+ if (!allocator.Allocate(chunk)) {
+ info->set_bailout_reason("not enough virtual registers (regalloc)");
+ return NULL;
+ }
+
+ return chunk;
+}
+
+
+Handle<Code> LChunk::Codegen() {
+ MacroAssembler assembler(info()->isolate(), NULL, 0);
+ LCodeGen generator(this, &assembler, info());
+
+ MarkEmptyBlocks();
+
+ if (generator.GenerateCode()) {
+ if (FLAG_trace_codegen) {
+ PrintF("Crankshaft Compiler - ");
+ }
+ CodeGenerator::MakeCodePrologue(info());
+ Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ Handle<Code> code =
+ CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
+ generator.FinishCode(code);
+ CodeGenerator::PrintCode(code, info());
+ return code;
+ }
+ return Handle<Code>::null();
+}
+
+
} } // namespace v8::internal
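With NewChunk and Codegen hoisted into the shared lithium.cc, every back end gets the same two-phase pipeline: build lithium instructions and run the register allocator, then lower the chunk to machine code. Under the interfaces added here, the whole flow reduces to (a sketch; bailout reporting as in NewChunk above):

    // Sketch of the shared compile pipeline: NewChunk returns NULL on
    // bailout (too many virtual registers), Codegen returns a null
    // handle if code generation fails.
    Handle<Code> CompileGraphSketch(HGraph* graph) {
      LChunk* chunk = LChunk::NewChunk(graph);
      if (chunk == NULL) return Handle<Code>::null();
      return chunk->Codegen();
    }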
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 1f42b686a7..923a1594c9 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -455,7 +455,7 @@ class LEnvironment: public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
FrameType frame_type,
- int ast_id,
+ BailoutId ast_id,
int parameter_count,
int argument_count,
int value_count,
@@ -470,7 +470,8 @@ class LEnvironment: public ZoneObject {
parameter_count_(parameter_count),
pc_offset_(-1),
values_(value_count, zone),
- is_tagged_(value_count, closure->GetHeap()->isolate()->zone()),
+ is_tagged_(value_count, zone),
+ is_uint32_(value_count, zone),
spilled_registers_(NULL),
spilled_double_registers_(NULL),
outer_(outer),
@@ -481,7 +482,7 @@ class LEnvironment: public ZoneObject {
int arguments_stack_height() const { return arguments_stack_height_; }
int deoptimization_index() const { return deoptimization_index_; }
int translation_index() const { return translation_index_; }
- int ast_id() const { return ast_id_; }
+ BailoutId ast_id() const { return ast_id_; }
int parameter_count() const { return parameter_count_; }
int pc_offset() const { return pc_offset_; }
LOperand** spilled_registers() const { return spilled_registers_; }
@@ -491,17 +492,28 @@ class LEnvironment: public ZoneObject {
const ZoneList<LOperand*>* values() const { return &values_; }
LEnvironment* outer() const { return outer_; }
- void AddValue(LOperand* operand, Representation representation) {
+ void AddValue(LOperand* operand,
+ Representation representation,
+ bool is_uint32) {
values_.Add(operand, zone());
if (representation.IsTagged()) {
+ ASSERT(!is_uint32);
is_tagged_.Add(values_.length() - 1);
}
+
+ if (is_uint32) {
+ is_uint32_.Add(values_.length() - 1);
+ }
}
bool HasTaggedValueAt(int index) const {
return is_tagged_.Contains(index);
}
+ bool HasUint32ValueAt(int index) const {
+ return is_uint32_.Contains(index);
+ }
+
void Register(int deoptimization_index,
int translation_index,
int pc_offset) {
@@ -530,11 +542,12 @@ class LEnvironment: public ZoneObject {
int arguments_stack_height_;
int deoptimization_index_;
int translation_index_;
- int ast_id_;
+ BailoutId ast_id_;
int parameter_count_;
int pc_offset_;
ZoneList<LOperand*> values_;
BitVector is_tagged_;
+ BitVector is_uint32_;
// Allocation index indexed arrays of spill slot operands for registers
// that are also in spill slots at an OSR entry. NULL for environments
@@ -622,6 +635,69 @@ class DeepIterator BASE_EMBEDDED {
};
+class LPlatformChunk;
+class LGap;
+class LLabel;
+
+// Superclass providing data and behavior common to all the
+// arch-specific LPlatformChunk classes.
+class LChunk: public ZoneObject {
+ public:
+ static LChunk* NewChunk(HGraph* graph);
+
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ LConstantOperand* DefineConstantOperand(HConstant* constant);
+ HConstant* LookupConstant(LConstantOperand* operand) const;
+ Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+ int ParameterAt(int index);
+ int GetParameterStackSlot(int index) const;
+ int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+ const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+ void AddGapMove(int index, LOperand* from, LOperand* to);
+ LGap* GetGapAt(int index) const;
+ bool IsGapAt(int index) const;
+ int NearestGapPos(int index) const;
+ void MarkEmptyBlocks();
+ const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+ LLabel* GetLabel(int block_id) const;
+ int LookupDestination(int block_id) const;
+ Label* GetAssemblyLabel(int block_id) const;
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+ return &inlined_closures_;
+ }
+
+ void AddInlinedClosure(Handle<JSFunction> closure) {
+ inlined_closures_.Add(closure, zone());
+ }
+
+ Zone* zone() const { return info_->zone(); }
+
+ Handle<Code> Codegen();
+
+ protected:
+ LChunk(CompilationInfo* info, HGraph* graph)
+ : spill_slot_count_(0),
+ info_(info),
+ graph_(graph),
+ instructions_(32, graph->zone()),
+ pointer_maps_(8, graph->zone()),
+ inlined_closures_(1, graph->zone()) { }
+
+ int spill_slot_count_;
+
+ private:
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ ZoneList<LInstruction*> instructions_;
+ ZoneList<LPointerMap*> pointer_maps_;
+ ZoneList<Handle<JSFunction> > inlined_closures_;
+};
+
+
int ElementsKindToShiftSize(ElementsKind elements_kind);
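
The new is_uint32_ bit vector above parallels is_tagged_: each records, per environment slot, a property the deoptimizer needs when materializing values. A hedged standalone sketch of that bookkeeping, with std::set standing in for V8's BitVector and plain ints for operands (names are illustrative):

    #include <cassert>
    #include <set>
    #include <vector>

    // MiniEnvironment mimics LEnvironment's parallel flag vectors: values_
    // grows with each AddValue, and per-index flags live in side structures.
    class MiniEnvironment {
     public:
      void AddValue(int operand, bool is_tagged, bool is_uint32) {
        assert(!(is_tagged && is_uint32));  // mirrors the ASSERT in AddValue
        values_.push_back(operand);
        int index = static_cast<int>(values_.size()) - 1;
        if (is_tagged) is_tagged_.insert(index);
        if (is_uint32) is_uint32_.insert(index);
      }
      bool HasTaggedValueAt(int index) const {
        return is_tagged_.count(index) > 0;
      }
      bool HasUint32ValueAt(int index) const {
        return is_uint32_.count(index) > 0;
      }

     private:
      std::vector<int> values_;
      std::set<int> is_tagged_;
      std::set<int> is_uint32_;
    };

    int main() {
      MiniEnvironment env;
      env.AddValue(7, true, false);   // a tagged slot
      env.AddValue(8, false, true);   // an untagged uint32 slot
      assert(env.HasTaggedValueAt(0) && !env.HasUint32ValueAt(0));
      assert(env.HasUint32ValueAt(1) && !env.HasTaggedValueAt(1));
      return 0;
    }
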
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js
index 4463c93e2a..cfcdb818c9 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/liveedit-debugger.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,6 +47,8 @@ Debug.LiveEdit = new function() {
// Forward declaration for minifier.
var FunctionStatus;
+ var NEEDS_STEP_IN_PROPERTY_NAME = "stack_update_needs_step_in";
+
// Applies the change to the script.
// The change is in the form of a list of chunks encoded in a single array
// as a series of triplets (pos1_start, pos1_end, pos2_end)
@@ -161,7 +163,7 @@ Debug.LiveEdit = new function() {
// Our current implementation requires the client to manually issue a
// "step in" command to get the correct stack state.
- preview_description.stack_update_needs_step_in =
+ preview_description[NEEDS_STEP_IN_PROPERTY_NAME] =
preview_description.stack_modified;
// Start with breakpoints. Convert their line/column positions and
@@ -1078,6 +1080,18 @@ Debug.LiveEdit = new function() {
return ProcessOldNode(old_code_tree);
}
+ // Restarts the call frame and returns a value similar to what LiveEdit returns.
+ function RestartFrame(frame_mirror) {
+ var result = frame_mirror.restart();
+ if (IS_STRING(result)) {
+ throw new Failure("Failed to restart frame: " + result);
+ }
+ result = {};
+ result[NEEDS_STEP_IN_PROPERTY_NAME] = true;
+ return result;
+ }
+ // Function is public.
+ this.RestartFrame = RestartFrame;
// Functions are public for tests.
this.TestApi = {
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index e670b442b6..f35315438d 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -601,7 +601,7 @@ static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
PostponeInterruptsScope postpone(isolate);
// Build AST.
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
info.MarkAsGlobal();
// Parse and don't allow skipping lazy functions.
if (ParserApi::Parse(&info, kNoParsingFlags)) {
@@ -670,6 +670,7 @@ class JSArrayBasedStruct {
}
int GetSmiValueField(int field_position) {
Object* res = GetField(field_position);
+ CHECK(res->IsSmi());
return Smi::cast(res)->value();
}
@@ -714,14 +715,17 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
return this->GetSmiValueField(kParentIndexOffset_);
}
Handle<Code> GetFunctionCode() {
- Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
- JSValue::cast(this->GetField(kCodeOffset_))));
+ Object* element = this->GetField(kCodeOffset_);
+ CHECK(element->IsJSValue());
+ Handle<JSValue> value_wrapper(JSValue::cast(element));
+ Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
+ CHECK(raw_result->IsCode());
return Handle<Code>::cast(raw_result);
}
Handle<Object> GetCodeScopeInfo() {
- Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
- JSValue::cast(this->GetField(kCodeScopeInfoOffset_))));
- return raw_result;
+ Object* element = this->GetField(kCodeScopeInfoOffset_);
+ CHECK(element->IsJSValue());
+ return UnwrapJSValue(Handle<JSValue>(JSValue::cast(element)));
}
int GetStartPosition() {
return this->GetSmiValueField(kStartPositionOffset_);
@@ -771,8 +775,10 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
}
Handle<SharedFunctionInfo> GetInfo() {
Object* element = this->GetField(kSharedInfoOffset_);
+ CHECK(element->IsJSValue());
Handle<JSValue> value_wrapper(JSValue::cast(element));
Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
+ CHECK(raw_result->IsSharedFunctionInfo());
return Handle<SharedFunctionInfo>::cast(raw_result);
}
@@ -894,7 +900,6 @@ class FunctionInfoListener {
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
Isolate* isolate = Isolate::Current();
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
FunctionInfoListener listener;
Handle<Object> original_source = Handle<Object>(script->source());
@@ -923,37 +928,35 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
}
-// Visitor that collects all references to a particular code object,
-// including "CODE_TARGET" references in other code objects.
-// It works in context of ZoneScope.
-class ReferenceCollectorVisitor : public ObjectVisitor {
+// Visitor that finds all references to a particular code object,
+// including "CODE_TARGET" references in other code objects, and replaces
+// them on the fly.
+class ReplacingVisitor : public ObjectVisitor {
public:
- ReferenceCollectorVisitor(Code* original, Zone* zone)
- : original_(original),
- rvalues_(10, zone),
- reloc_infos_(10, zone),
- code_entries_(10, zone),
- zone_(zone) {
+ explicit ReplacingVisitor(Code* original, Code* substitution)
+ : original_(original), substitution_(substitution) {
}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (*p == original_) {
- rvalues_.Add(p, zone_);
+ *p = substitution_;
}
}
}
virtual void VisitCodeEntry(Address entry) {
if (Code::GetObjectFromEntryAddress(entry) == original_) {
- code_entries_.Add(entry, zone_);
+ Address substitution_entry = substitution_->instruction_start();
+ Memory::Address_at(entry) = substitution_entry;
}
}
virtual void VisitCodeTarget(RelocInfo* rinfo) {
if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
- reloc_infos_.Add(*rinfo, zone_);
+ Address substitution_entry = substitution_->instruction_start();
+ rinfo->set_target_address(substitution_entry);
}
}
@@ -961,57 +964,40 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
VisitCodeTarget(rinfo);
}
- // Post-visiting method that iterates over all collected references and
- // modifies them.
- void Replace(Code* substitution) {
- for (int i = 0; i < rvalues_.length(); i++) {
- *(rvalues_[i]) = substitution;
- }
- Address substitution_entry = substitution->instruction_start();
- for (int i = 0; i < reloc_infos_.length(); i++) {
- reloc_infos_[i].set_target_address(substitution_entry);
- }
- for (int i = 0; i < code_entries_.length(); i++) {
- Address entry = code_entries_[i];
- Memory::Address_at(entry) = substitution_entry;
- }
- }
-
private:
Code* original_;
- ZoneList<Object**> rvalues_;
- ZoneList<RelocInfo> reloc_infos_;
- ZoneList<Address> code_entries_;
- Zone* zone_;
+ Code* substitution_;
};
// Finds all references to original and replaces them with substitution.
-static void ReplaceCodeObject(Code* original, Code* substitution) {
- ASSERT(!HEAP->InNewSpace(substitution));
+static void ReplaceCodeObject(Handle<Code> original,
+ Handle<Code> substitution) {
+ // Perform a full GC in order to ensure that we are not in the middle of an
+ // incremental marking phase when we are replacing the code object.
+ // Since we are not in an incremental marking phase we can write pointers
+ // to code objects (that are never in new space) without worrying about
+ // write barriers.
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "liveedit.cc ReplaceCodeObject");
+
+ ASSERT(!HEAP->InNewSpace(*substitution));
- HeapIterator iterator;
AssertNoAllocation no_allocations_please;
- // A zone scope for ReferenceCollectorVisitor.
- ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
-
- ReferenceCollectorVisitor visitor(original, Isolate::Current()->zone());
+ ReplacingVisitor visitor(*original, *substitution);
// Iterate over all roots. Stack frames may have pointers into the original
// code, so we temporarily replace those pointers with offset numbers in the
// prologue/epilogue.
- {
- HEAP->IterateStrongRoots(&visitor, VISIT_ALL);
- }
+ HEAP->IterateRoots(&visitor, VISIT_ALL);
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
+ HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
obj->Iterate(&visitor);
}
-
- visitor.Replace(substitution);
}
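
The rewrite above replaces the collect-then-patch scheme with a single pass: once the forced full GC has ruled out concurrent incremental marking, each matching slot can be rewritten the moment the visitor sees it. A minimal sketch of that shape, with plain pointers standing in for V8's object slots (illustrative types, not V8's ObjectVisitor):

    #include <cassert>

    // Illustrative replace-on-the-fly visitor.
    struct MiniReplacingVisitor {
      void* original;
      void* substitution;
      void VisitPointers(void** start, void** end) {
        for (void** p = start; p < end; p++) {
          if (*p == original) *p = substitution;  // patch in place, no side lists
        }
      }
    };

    int main() {
      int a = 1, b = 2;
      void* slots[] = { &a, &b, &a };
      MiniReplacingVisitor visitor = { &a, &b };
      visitor.VisitPointers(slots, slots + 3);
      assert(slots[0] == &b && slots[2] == &b);  // every reference rewritten
      assert(slots[1] == &b);                    // non-matching slot untouched
      return 0;
    }
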
@@ -1095,8 +1081,8 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (IsJSFunctionCode(shared_info->code())) {
Handle<Code> code = compile_info_wrapper.GetFunctionCode();
- ReplaceCodeObject(shared_info->code(), *code);
- Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
+ ReplaceCodeObject(Handle<Code>(shared_info->code()), code);
+ Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
@@ -1147,6 +1133,7 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
Handle<Object> script_handle) {
Handle<SharedFunctionInfo> shared_info =
Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
+ CHECK(script_handle->IsScript() || script_handle->IsUndefined());
shared_info->set_script(*script_handle);
Isolate::Current()->compilation_cache()->Remove(shared_info);
@@ -1338,7 +1325,7 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
// on stack (it is safe to substitute the code object on stack, because
// we only change the structure of rinfo and leave instructions
// untouched).
- ReplaceCodeObject(info->code(), *patched_code);
+ ReplaceCodeObject(Handle<Code>(info->code()), patched_code);
}
}
@@ -1497,7 +1484,9 @@ static const char* DropFrames(Vector<StackFrame*> frames,
isolate->builtins()->builtin(
Builtins::kFrameDropper_LiveEdit)) {
// OK, we can drop our own code.
- *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+ pre_top_frame = frames[top_frame_index - 2];
+ top_frame = frames[top_frame_index - 1];
+ *mode = Debug::CURRENTLY_SET_MODE;
frame_has_padding = false;
} else if (pre_top_frame_code ==
isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
@@ -1512,6 +1501,15 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Here the stub is CEntry, it's not debug-only and can't be padded.
// If anyone would complain, a proxy padded stub could be added.
frame_has_padding = false;
+ } else if (pre_top_frame->type() == StackFrame::ARGUMENTS_ADAPTOR) {
+ // This must be an arguments adaptor that remains from the frame dropping
+ // and is still on the stack. A frame dropper frame must be above it.
+ ASSERT(frames[top_frame_index - 2]->LookupCode() ==
+ isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
+ pre_top_frame = frames[top_frame_index - 3];
+ top_frame = frames[top_frame_index - 2];
+ *mode = Debug::CURRENTLY_SET_MODE;
+ frame_has_padding = false;
} else {
return "Unknown structure of stack above changing function";
}
@@ -1595,17 +1593,36 @@ static bool IsDropableFrame(StackFrame* frame) {
return !frame->is_exit();
}
-// Fills result array with statuses of functions. Modifies the stack
-// removing all listed function if possible and if do_drop is true.
-static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
- Zone* zone) {
+
+// Describes a set of call frames that execute any of the listed functions.
+// Finding no such frames is not an error.
+class MultipleFunctionTarget {
+ public:
+ MultipleFunctionTarget(Handle<JSArray> shared_info_array,
+ Handle<JSArray> result)
+ : m_shared_info_array(shared_info_array),
+ m_result(result) {}
+ bool MatchActivation(StackFrame* frame,
+ LiveEdit::FunctionPatchabilityStatus status) {
+ return CheckActivation(m_shared_info_array, m_result, frame, status);
+ }
+ const char* GetNotFoundMessage() {
+ return NULL;
+ }
+ private:
+ Handle<JSArray> m_shared_info_array;
+ Handle<JSArray> m_result;
+};
+
+// Drops all call frames matched by the target and all frames above them.
+template<typename TARGET>
+static const char* DropActivationsInActiveThreadImpl(
+ TARGET& target, bool do_drop, Zone* zone) {
Isolate* isolate = Isolate::Current();
Debug* debug = isolate->debug();
- ZoneScope scope(isolate, DELETE_ON_EXIT);
+ ZoneScope scope(zone, DELETE_ON_EXIT);
Vector<StackFrame*> frames = CreateStackMap(zone);
- int array_len = Smi::cast(shared_info_array->length())->value();
int top_frame_index = -1;
int frame_index = 0;
@@ -1615,8 +1632,8 @@ static const char* DropActivationsInActiveThread(
top_frame_index = frame_index;
break;
}
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
+ if (target.MatchActivation(
+ frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
// We are still above break_frame. It is not a target frame;
// it is a problem.
return "Debugger mark-up on stack is not found";
@@ -1625,7 +1642,7 @@ static const char* DropActivationsInActiveThread(
if (top_frame_index == -1) {
// We haven't found break frame, but no function is blocking us anyway.
- return NULL;
+ return target.GetNotFoundMessage();
}
bool target_frame_found = false;
@@ -1638,8 +1655,8 @@ static const char* DropActivationsInActiveThread(
c_code_found = true;
break;
}
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
+ if (target.MatchActivation(
+ frame, LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
target_frame_found = true;
bottom_js_frame_index = frame_index;
}
@@ -1651,8 +1668,8 @@ static const char* DropActivationsInActiveThread(
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
if (frame->is_java_script()) {
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
+ if (target.MatchActivation(
+ frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
// Cannot drop frame under C frames.
return NULL;
}
@@ -1667,7 +1684,7 @@ static const char* DropActivationsInActiveThread(
if (!target_frame_found) {
// Nothing to drop.
- return NULL;
+ return target.GetNotFoundMessage();
}
Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
@@ -1690,6 +1707,23 @@ static const char* DropActivationsInActiveThread(
}
debug->FramesHaveBeenDropped(new_id, drop_mode,
restarter_frame_function_pointer);
+ return NULL;
+}
+
+// Fills the result array with statuses of functions. Modifies the stack,
+// removing all listed functions if possible and if do_drop is true.
+static const char* DropActivationsInActiveThread(
+ Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
+ Zone* zone) {
+ MultipleFunctionTarget target(shared_info_array, result);
+
+ const char* message =
+ DropActivationsInActiveThreadImpl(target, do_drop, zone);
+ if (message) {
+ return message;
+ }
+
+ int array_len = Smi::cast(shared_info_array->length())->value();
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
@@ -1766,6 +1800,50 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
}
+// Describes a single call frame as a target. Not finding this frame
+// is an error.
+class SingleFrameTarget {
+ public:
+ explicit SingleFrameTarget(JavaScriptFrame* frame)
+ : m_frame(frame),
+ m_saved_status(LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {}
+
+ bool MatchActivation(StackFrame* frame,
+ LiveEdit::FunctionPatchabilityStatus status) {
+ if (frame->fp() == m_frame->fp()) {
+ m_saved_status = status;
+ return true;
+ }
+ return false;
+ }
+ const char* GetNotFoundMessage() {
+ return "Failed to found requested frame";
+ }
+ LiveEdit::FunctionPatchabilityStatus saved_status() {
+ return m_saved_status;
+ }
+ private:
+ JavaScriptFrame* m_frame;
+ LiveEdit::FunctionPatchabilityStatus m_saved_status;
+};
+
+
+// Finds and drops the required frame and all frames above it.
+// Returns an error message or NULL.
+const char* LiveEdit::RestartFrame(JavaScriptFrame* frame, Zone* zone) {
+ SingleFrameTarget target(frame);
+
+ const char* result = DropActivationsInActiveThreadImpl(target, true, zone);
+ if (result != NULL) {
+ return result;
+ }
+ if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE) {
+ return "Function is blocked under native code";
+ }
+ return NULL;
+}
+
+
LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
FunctionLiteral* fun)
: isolate_(isolate) {
@@ -1816,7 +1894,8 @@ LiveEditFunctionTracker::~LiveEditFunctionTracker() {
void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
+ Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
+ Zone* zone) {
}
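
The thread of this refactoring is that both call sites now funnel through the duck-typed template DropActivationsInActiveThreadImpl: MultipleFunctionTarget and SingleFrameTarget each just supply MatchActivation() and GetNotFoundMessage(). A standalone sketch of the pattern, with hypothetical Frame and SingleTarget types:

    #include <cstdio>

    struct Frame { int fp; };  // hypothetical stand-in for a stack frame

    // A target supplies MatchActivation() and GetNotFoundMessage(),
    // mirroring SingleFrameTarget above.
    struct SingleTarget {
      int wanted_fp;
      bool MatchActivation(const Frame& frame) { return frame.fp == wanted_fp; }
      const char* GetNotFoundMessage() const {
        return "Failed to find the requested frame";
      }
    };

    // Mirrors the shape of DropActivationsInActiveThreadImpl: scan the
    // frames, return NULL on success and a target-specific message otherwise.
    template <typename TARGET>
    const char* ScanFrames(const Frame* frames, int count, TARGET& target) {
      for (int i = 0; i < count; i++) {
        if (target.MatchActivation(frames[i])) return NULL;
      }
      return target.GetNotFoundMessage();
    }

    int main() {
      Frame frames[] = { { 0x1000 }, { 0x2000 } };
      SingleTarget target = { 0x2000 };
      const char* message = ScanFrames(frames, 2, target);
      std::printf("%s\n", message == NULL ? "frame found" : message);
      return 0;
    }

A MultipleFunctionTarget-style type differs only in what MatchActivation checks and in returning NULL from GetNotFoundMessage, since finding no frames is not an error in that case.
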
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
index 424c24e351..5b12854d8c 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/liveedit.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -123,6 +123,10 @@ class LiveEdit : AllStatic {
static Handle<JSArray> CheckAndDropActivations(
Handle<JSArray> shared_info_array, bool do_drop, Zone* zone);
+ // Restarts the call frame and completely drops all frames above it.
+ // Returns an error message or NULL.
+ static const char* RestartFrame(JavaScriptFrame* frame, Zone* zone);
+
// A copy of this is in liveedit-debugger.js.
enum FunctionPatchabilityStatus {
FUNCTION_AVAILABLE_FOR_PATCH = 1,
diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc
index 1aabc59814..6b89cf6839 100644
--- a/deps/v8/src/liveobjectlist.cc
+++ b/deps/v8/src/liveobjectlist.cc
@@ -74,7 +74,7 @@ typedef int (*RawComparer)(const void*, const void*);
v(SeqAsciiString, "unexpected: SeqAsciiString") \
v(SeqString, "unexpected: SeqString") \
v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \
- v(GlobalContext, "unexpected: GlobalContext") \
+ v(NativeContext, "unexpected: NativeContext") \
v(MapCache, "unexpected: MapCache") \
v(CodeCacheHashTable, "unexpected: CodeCacheHashTable") \
v(CompilationCacheTable, "unexpected: CompilationCacheTable") \
@@ -1951,7 +1951,7 @@ MaybeObject* LiveObjectList::GetObjRetainers(int obj_id,
// Get the constructor function for context extension and arguments array.
JSObject* arguments_boilerplate =
- isolate->context()->global_context()->arguments_boilerplate();
+ isolate->context()->native_context()->arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index d93a9d82b1..b049ffe4eb 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -145,7 +145,7 @@ class Profiler: public Thread {
//
// StackTracer implementation
//
-void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
+DISABLE_ASAN void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
ASSERT(isolate->IsInitialized());
// Avoid collecting traces while doing GC.
@@ -526,6 +526,7 @@ Logger::Logger()
name_buffer_(new NameBuffer),
address_to_name_map_(NULL),
is_initialized_(false),
+ code_event_handler_(NULL),
last_address_(NULL),
prev_sp_(NULL),
prev_function_(NULL),
@@ -541,6 +542,52 @@ Logger::~Logger() {
}
+void Logger::IssueCodeAddedEvent(Code* code,
+ const char* name,
+ size_t name_len) {
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_ADDED;
+ event.code_start = code->instruction_start();
+ event.code_len = code->instruction_size();
+ event.name.str = name;
+ event.name.len = name_len;
+
+ code_event_handler_(&event);
+}
+
+
+void Logger::IssueCodeMovedEvent(Address from, Address to) {
+ Code* from_code = Code::cast(HeapObject::FromAddress(from));
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_MOVED;
+ event.code_start = from_code->instruction_start();
+ event.code_len = from_code->instruction_size();
+
+ // Calculate the header size.
+ const size_t header_size =
+ from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
+
+ // Calculate the new start address of the instructions.
+ event.new_code_start =
+ reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
+
+ code_event_handler_(&event);
+}
+
+
+void Logger::IssueCodeRemovedEvent(Address from) {
+ Code* from_code = Code::cast(HeapObject::FromAddress(from));
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_REMOVED;
+ event.code_start = from_code->instruction_start();
+ event.code_len = from_code->instruction_size();
+
+ code_event_handler_(&event);
+}
+
+
#define DECLARE_EVENT(ignore1, name) name,
static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
@@ -864,13 +911,17 @@ void Logger::SetterCallbackEvent(String* name, Address entry_point) {
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
name_buffer_->AppendBytes(comment);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -899,13 +950,17 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
String* name) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
name_buffer_->AppendString(name);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -940,14 +995,18 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
name_buffer_->AppendBytes(ComputeMarker(code));
name_buffer_->AppendString(name);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -981,8 +1040,8 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
@@ -993,6 +1052,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
name_buffer_->AppendByte(':');
name_buffer_->AppendInt(line);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -1022,13 +1085,17 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
name_buffer_->AppendInt(args_count);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -1055,13 +1122,17 @@ void Logger::CodeMovingGCEvent() {
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
name_buffer_->AppendByte(':');
name_buffer_->AppendString(source);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -1083,6 +1154,7 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
void Logger::CodeMoveEvent(Address from, Address to) {
+ if (code_event_handler_ != NULL) IssueCodeMovedEvent(from, to);
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
@@ -1093,6 +1165,7 @@ void Logger::CodeMoveEvent(Address from, Address to) {
void Logger::CodeDeleteEvent(Address from) {
+ if (code_event_handler_ != NULL) IssueCodeRemovedEvent(from);
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
@@ -1392,7 +1465,7 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
void Logger::LogCodeObject(Object* object) {
- if (FLAG_log_code || FLAG_ll_prof) {
+ if (FLAG_log_code || FLAG_ll_prof || is_logging_code_events()) {
Code* code_object = Code::cast(object);
LogEventsAndTags tag = Logger::STUB_TAG;
const char* description = "Unknown code from the snapshot";
@@ -1676,6 +1749,18 @@ bool Logger::SetUp() {
}
+void Logger::SetCodeEventHandler(uint32_t options,
+ JitCodeEventHandler event_handler) {
+ code_event_handler_ = event_handler;
+
+ if (code_event_handler_ != NULL && (options & kJitCodeEventEnumExisting)) {
+ HandleScope scope;
+ LogCodeObjects();
+ LogCompiledFunctions();
+ }
+}
+
+
Sampler* Logger::sampler() {
return ticker_;
}
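
The IssueCode*Event plumbing above feeds the embedder-facing JIT code event API that this upgrade adds to include/v8.h. A sketch of how an embedder such as a profiler might attach a handler; the printf payloads are illustrative, but the v8::JitCodeEvent type and the v8::V8::SetJitCodeEventHandler entry point are the public API:

    #include <cstdio>
    #include <v8.h>

    // Receives a notification for each piece of JIT-generated code. Note
    // that event->name.str is not NUL-terminated; always honor name.len.
    static void HandleJitCode(const v8::JitCodeEvent* event) {
      switch (event->type) {
        case v8::JitCodeEvent::CODE_ADDED:
          std::printf("added %p len %u name %.*s\n",
                      event->code_start,
                      static_cast<unsigned>(event->code_len),
                      static_cast<int>(event->name.len), event->name.str);
          break;
        case v8::JitCodeEvent::CODE_MOVED:
          std::printf("moved %p -> %p\n", event->code_start,
                      event->new_code_start);
          break;
        case v8::JitCodeEvent::CODE_REMOVED:
          std::printf("removed %p\n", event->code_start);
          break;
        default:
          break;
      }
    }

    int main() {
      // kJitCodeEventEnumExisting makes SetCodeEventHandler (above) replay
      // CODE_ADDED events for code that already exists, via LogCodeObjects()
      // and LogCompiledFunctions().
      v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
                                     HandleJitCode);
      // ... compile and run scripts; the handler fires as code is created,
      // moved by the GC, or collected ...
      v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);  // detach
      return 0;
    }
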
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 03c7b3b670..33f359a7f9 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -86,6 +86,15 @@ class Ticker;
logger->Call; \
} while (false)
+#define LOG_CODE_EVENT(isolate, Call) \
+ do { \
+ v8::internal::Logger* logger = \
+ (isolate)->logger(); \
+ if (logger->is_logging_code_events()) \
+ logger->Call; \
+ } while (false)
+
+
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation") \
V(CODE_MOVE_EVENT, "code-move") \
@@ -151,6 +160,10 @@ class Logger {
// Acquires resources for logging if the right flags are set.
bool SetUp();
+ // Sets the current code event handler.
+ void SetCodeEventHandler(uint32_t options,
+ JitCodeEventHandler event_handler);
+
void EnsureTickerStarted();
void EnsureTickerStopped();
@@ -274,6 +287,10 @@ class Logger {
return logging_nesting_ > 0;
}
+ bool is_logging_code_events() {
+ return is_logging() || code_event_handler_ != NULL;
+ }
+
// Pause/Resume collection of profiling data.
// When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
@@ -312,6 +329,11 @@ class Logger {
Logger();
~Logger();
+ // Issue code notifications.
+ void IssueCodeAddedEvent(Code* code, const char* name, size_t name_len);
+ void IssueCodeMovedEvent(Address from, Address to);
+ void IssueCodeRemovedEvent(Address from);
+
// Emits the profiler's first message.
void ProfilerBeginEvent();
@@ -413,6 +435,9 @@ class Logger {
// 'true' between SetUp() and TearDown().
bool is_initialized_;
+ // The code event handler, if any.
+ JitCodeEventHandler code_event_handler_;
+
// Support for 'incremental addresses' in compressed logs:
// LogMessageBuilder::AppendAddress(Address addr)
Address last_address_;
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 6f2b559637..df4739ea04 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -64,7 +64,6 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
abort_incremental_marking_(false),
compacting_(false),
was_marked_incrementally_(false),
- flush_monomorphic_ics_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
@@ -223,6 +222,99 @@ static void VerifyEvacuation(Heap* heap) {
VerifyEvacuationVisitor visitor;
heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
+
+
+class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
+ public:
+ VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ if (object->IsString()) continue;
+ switch (object->map()->instance_type()) {
+ case JS_FUNCTION_TYPE:
+ CheckContext(JSFunction::cast(object)->context());
+ break;
+ case JS_GLOBAL_PROXY_TYPE:
+ CheckContext(JSGlobalProxy::cast(object)->native_context());
+ break;
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ CheckContext(GlobalObject::cast(object)->native_context());
+ break;
+ case JS_ARRAY_TYPE:
+ case JS_DATE_TYPE:
+ case JS_OBJECT_TYPE:
+ case JS_REGEXP_TYPE:
+ VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
+ break;
+ case MAP_TYPE:
+ VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
+ VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
+ break;
+ case FIXED_ARRAY_TYPE:
+ if (object->IsContext()) {
+ CheckContext(object);
+ } else {
+ FixedArray* array = FixedArray::cast(object);
+ int length = array->length();
+ // Set the array length to zero to prevent cycles while iterating
+ // over array bodies; this is easier than intrusive marking.
+ array->set_length(0);
+ array->IterateBody(
+ FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
+ array->set_length(length);
+ }
+ break;
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ case JS_PROXY_TYPE:
+ case JS_VALUE_TYPE:
+ case TYPE_FEEDBACK_INFO_TYPE:
+ object->Iterate(this);
+ break;
+ case ACCESSOR_INFO_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case CALL_HANDLER_INFO_TYPE:
+ case CODE_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ case HEAP_NUMBER_TYPE:
+ case INTERCEPTOR_INFO_TYPE:
+ case ODDBALL_TYPE:
+ case SCRIPT_TYPE:
+ case SHARED_FUNCTION_INFO_TYPE:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+
+ private:
+ void CheckContext(Object* context) {
+ if (!context->IsContext()) return;
+ Context* native_context = Context::cast(context)->native_context();
+ if (current_native_context_ == NULL) {
+ current_native_context_ = native_context;
+ } else {
+ CHECK_EQ(current_native_context_, native_context);
+ }
+ }
+
+ Context* current_native_context_;
+};
+
+
+static void VerifyNativeContextSeparation(Heap* heap) {
+ HeapObjectIterator it(heap->code_space());
+
+ for (Object* object = it.Next(); object != NULL; object = it.Next()) {
+ VerifyNativeContextSeparationVisitor visitor;
+ Code::cast(object)->CodeIterateBody(&visitor);
+ }
+}
#endif
@@ -296,6 +388,12 @@ void MarkCompactCollector::CollectGarbage() {
if (!FLAG_collect_maps) ReattachInitialMaps();
+#ifdef DEBUG
+ if (FLAG_verify_native_context_separation) {
+ VerifyNativeContextSeparation(heap_);
+ }
+#endif
+
Finish();
tracer_ = NULL;
@@ -503,7 +601,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
int max_evacuation_candidates =
- static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1);
+ static_cast<int>(sqrt(number_of_pages / 2.0) + 1);
if (FLAG_stress_compaction || FLAG_always_compact) {
max_evacuation_candidates = kMaxMaxEvacuationCandidates;
@@ -533,29 +631,28 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
intptr_t over_reserved = reserved - space->SizeOfObjects();
static const intptr_t kFreenessThreshold = 50;
- if (over_reserved >= 2 * space->AreaSize()) {
+ if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
// If reduction of memory footprint was requested, we are aggressive
// about choosing pages to free. We expect that half-empty pages
// are easier to compact so slightly bump the limit.
- if (reduce_memory_footprint_) {
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates += 2;
- }
+ mode = REDUCE_MEMORY_FOOTPRINT;
+ max_evacuation_candidates += 2;
+ }
+
+ if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
// If over-usage is very high (more than a third of the space), we
// try to free all mostly empty pages. We expect that almost empty
// pages are even easier to compact so bump the limit even more.
- if (over_reserved > reserved / 3) {
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates *= 2;
- }
+ mode = REDUCE_MEMORY_FOOTPRINT;
+ max_evacuation_candidates *= 2;
+ }
- if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
- PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
- static_cast<double>(over_reserved) / MB,
- static_cast<double>(reserved) / MB,
- static_cast<int>(kFreenessThreshold));
- }
+ if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+ PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
+ static_cast<double>(over_reserved) / MB,
+ static_cast<double>(reserved) / MB,
+ static_cast<int>(kFreenessThreshold));
}
intptr_t estimated_release = 0;
@@ -577,7 +674,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
p->ClearEvacuationCandidate();
if (FLAG_stress_compaction) {
- int counter = space->heap()->ms_count();
+ unsigned int counter = space->heap()->ms_count();
uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
} else if (mode == REDUCE_MEMORY_FOOTPRINT) {
@@ -669,12 +766,6 @@ void MarkCompactCollector::AbortCompaction() {
void MarkCompactCollector::Prepare(GCTracer* tracer) {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
- // Monomorphic ICs are preserved when possible, but need to be flushed
- // when they might be keeping a Context alive, or when the heap is about
- // to be serialized.
- flush_monomorphic_ics_ =
- heap()->isolate()->context_exit_happened() || Serializer::enabled();
-
// Rather than passing the tracer around we stash it in a static member
// variable.
tracer_ = tracer;
@@ -938,81 +1029,24 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
}
-class StaticMarkingVisitor : public StaticVisitorBase {
+class MarkCompactMarkingVisitor
+ : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
- static inline void IterateBody(Map* map, HeapObject* obj) {
- table_.GetVisitor(map)(map, obj);
- }
-
- static void Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticMarkingVisitor,
- ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticMarkingVisitor,
- ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticMarkingVisitor,
- SlicedString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticMarkingVisitor,
- FixedArray::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitGlobalContext, &VisitGlobalContext);
-
- table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
-
- table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
- table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);
-
- table_.Register(kVisitOddball,
- &FixedBodyVisitor<StaticMarkingVisitor,
- Oddball::BodyDescriptor,
- void>::Visit);
- table_.Register(kVisitMap,
- &FixedBodyVisitor<StaticMarkingVisitor,
- Map::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitCode, &VisitCode);
-
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoAndFlushCode);
+ static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
+ Map* map, HeapObject* obj);
- table_.Register(kVisitJSFunction,
- &VisitJSFunctionAndFlushCode);
+ static void ObjectStatsCountFixedArray(
+ FixedArrayBase* fixed_array,
+ FixedArraySubInstanceType fast_type,
+ FixedArraySubInstanceType dictionary_type);
- table_.Register(kVisitJSRegExp,
- &VisitRegExpAndFlushCode);
-
- table_.Register(kVisitPropertyCell,
- &FixedBodyVisitor<StaticMarkingVisitor,
- JSGlobalPropertyCell::BodyDescriptor,
- void>::Visit);
-
- table_.RegisterSpecializations<DataObjectVisitor,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
-
- table_.RegisterSpecializations<JSObjectVisitor,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
+ template<MarkCompactMarkingVisitor::VisitorId id>
+ class ObjectStatsTracker {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj);
+ };
- table_.RegisterSpecializations<StructObjectVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
- }
+ static void Initialize();
INLINE(static void VisitPointer(Heap* heap, Object** p)) {
MarkObjectByPointer(heap->mark_compact_collector(), p, p);
@@ -1031,50 +1065,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
}
- static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(rinfo->target_cell());
- MarkBit mark = Marking::MarkBitFrom(cell);
- heap->mark_compact_collector()->MarkObject(cell, mark);
- }
-
- static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- // TODO(mstarzinger): We do not short-circuit cons strings here, verify
- // that there can be no such embedded pointers and add assertion here.
- HeapObject* object = HeapObject::cast(rinfo->target_object());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
MarkBit mark = Marking::MarkBitFrom(object);
heap->mark_compact_collector()->MarkObject(object, mark);
}
- static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
- && (target->ic_state() == MEGAMORPHIC ||
- heap->mark_compact_collector()->flush_monomorphic_ics_ ||
- target->ic_age() != heap->global_ic_age())) {
- IC::Clear(rinfo->pc());
- target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- }
- MarkBit code_mark = Marking::MarkBitFrom(target);
- heap->mark_compact_collector()->MarkObject(target, code_mark);
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- }
-
- static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- MarkBit code_mark = Marking::MarkBitFrom(target);
- heap->mark_compact_collector()->MarkObject(target, code_mark);
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- }
-
// Mark object pointed to by p.
INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
Object** anchor_slot,
@@ -1127,29 +1122,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
return true;
}
- static inline void VisitExternalReference(Address* p) { }
- static inline void VisitExternalReference(RelocInfo* rinfo) { }
- static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
-
- private:
- class DataObjectVisitor {
- public:
- template<int size>
- static void VisitSpecialized(Map* map, HeapObject* object) {
- }
-
- static void Visit(Map* map, HeapObject* object) {
- }
- };
-
- typedef FlexibleBodyVisitor<StaticMarkingVisitor,
- JSObject::BodyDescriptor,
- void> JSObjectVisitor;
-
- typedef FlexibleBodyVisitor<StaticMarkingVisitor,
- StructBodyDescriptor,
- void> StructObjectVisitor;
-
static void VisitJSWeakMap(Map* map, HeapObject* object) {
MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
@@ -1162,12 +1134,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Skip visiting the backing hash table containing the mappings.
int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
- BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
+ BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
map->GetHeap(),
object,
JSWeakMap::BodyDescriptor::kStartOffset,
JSWeakMap::kTableOffset);
- BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
+ BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
map->GetHeap(),
object,
JSWeakMap::kTableOffset + kPointerSize,
@@ -1187,14 +1159,9 @@ class StaticMarkingVisitor : public StaticVisitorBase {
ASSERT(MarkCompactCollector::IsMarked(table->map()));
}
- static void VisitCode(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- Code* code = reinterpret_cast<Code*>(object);
- if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackCells(heap);
- }
- code->CodeIterateBody<StaticMarkingVisitor>(heap);
- }
+ private:
+ template<int id>
+ static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
// Code flushing support.
@@ -1302,16 +1269,14 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static inline bool IsValidNotBuiltinContext(Object* ctx) {
return ctx->IsContext() &&
- !Context::cast(ctx)->global()->IsJSBuiltinsObject();
+ !Context::cast(ctx)->global_object()->IsJSBuiltinsObject();
}
static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-
- if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+ SharedFunctionInfo::cast(object)->BeforeVisitingPointers();
- FixedBodyVisitor<StaticMarkingVisitor,
+ FixedBodyVisitor<MarkCompactMarkingVisitor,
SharedFunctionInfo::BodyDescriptor,
void>::Visit(map, object);
}
@@ -1379,7 +1344,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Heap* heap = map->GetHeap();
MarkCompactCollector* collector = heap->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
- VisitJSRegExpFields(map, object);
+ VisitJSRegExp(map, object);
return;
}
JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
@@ -1387,7 +1352,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
UpdateRegExpCodeAgeAndFlush(heap, re, true);
UpdateRegExpCodeAgeAndFlush(heap, re, false);
// Visit the fields of the RegExp, including the updated FixedArray.
- VisitJSRegExpFields(map, object);
+ VisitJSRegExp(map, object);
}
@@ -1413,7 +1378,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Heap* heap = map->GetHeap();
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
- if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+ shared->BeforeVisitingPointers();
if (!known_flush_code_candidate) {
known_flush_code_candidate = IsFlushable(heap, shared);
@@ -1426,29 +1391,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
- static void VisitCodeEntry(Heap* heap, Address entry_address) {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- MarkBit mark = Marking::MarkBitFrom(code);
- heap->mark_compact_collector()->MarkObject(code, mark);
- heap->mark_compact_collector()->
- RecordCodeEntrySlot(entry_address, code);
- }
-
- static void VisitGlobalContext(Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticMarkingVisitor,
- Context::MarkCompactBodyDescriptor,
- void>::Visit(map, object);
-
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT;
- idx < Context::GLOBAL_CONTEXT_SLOTS;
- ++idx) {
- Object** slot =
- HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
- collector->RecordSlot(slot, slot, *slot);
- }
- }
-
static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
MarkCompactCollector* collector = heap->mark_compact_collector();
@@ -1487,10 +1429,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
-
-
static inline void VisitJSFunctionFields(Map* map,
JSFunction* object,
bool flush_code_candidate) {
@@ -1526,44 +1464,186 @@ class StaticMarkingVisitor : public StaticVisitorBase {
heap,
HeapObject::RawField(object,
JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(object,
- JSFunction::kNonWeakFieldsEndOffset));
- }
-
- static inline void VisitJSRegExpFields(Map* map,
- HeapObject* object) {
- int last_property_offset =
- JSRegExp::kSize + kPointerSize * map->inobject_properties();
- VisitPointers(map->GetHeap(),
- SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
- SLOT_ADDR(object, last_property_offset));
+ HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
}
static void VisitSharedFunctionInfoFields(Heap* heap,
HeapObject* object,
bool flush_code_candidate) {
- VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
+ VisitPointer(heap,
+ HeapObject::RawField(object, SharedFunctionInfo::kNameOffset));
if (!flush_code_candidate) {
- VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
+ VisitPointer(heap,
+ HeapObject::RawField(object,
+ SharedFunctionInfo::kCodeOffset));
}
- VisitPointers(heap,
- SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
- SLOT_ADDR(object, SharedFunctionInfo::kSize));
+ VisitPointers(
+ heap,
+ HeapObject::RawField(object,
+ SharedFunctionInfo::kOptimizedCodeMapOffset),
+ HeapObject::RawField(object, SharedFunctionInfo::kSize));
+ }
+
+ static VisitorDispatchTable<Callback> non_count_table_;
+};
+
+
+void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
+ FixedArrayBase* fixed_array,
+ FixedArraySubInstanceType fast_type,
+ FixedArraySubInstanceType dictionary_type) {
+ Heap* heap = fixed_array->map()->GetHeap();
+ if (fixed_array->map() != heap->fixed_cow_array_map() &&
+ fixed_array->map() != heap->fixed_double_array_map() &&
+ fixed_array != heap->empty_fixed_array()) {
+ if (fixed_array->IsDictionary()) {
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ dictionary_type,
+ fixed_array->Size());
+ } else {
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ fast_type,
+ fixed_array->Size());
+ }
+ }
+}
+
+
+void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
+ MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ int object_size = obj->Size();
+ heap->RecordObjectStats(map->instance_type(), -1, object_size);
+ non_count_table_.GetVisitorById(id)(map, obj);
+ if (obj->IsJSObject()) {
+ JSObject* object = JSObject::cast(obj);
+ ObjectStatsCountFixedArray(object->elements(),
+ DICTIONARY_ELEMENTS_SUB_TYPE,
+ FAST_ELEMENTS_SUB_TYPE);
+ ObjectStatsCountFixedArray(object->properties(),
+ DICTIONARY_PROPERTIES_SUB_TYPE,
+ FAST_PROPERTIES_SUB_TYPE);
}
+}
+
- #undef SLOT_ADDR
+template<MarkCompactMarkingVisitor::VisitorId id>
+void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
+ Map* map, HeapObject* obj) {
+ ObjectStatsVisitBase(id, map, obj);
+}
- typedef void (*Callback)(Map* map, HeapObject* object);
- static VisitorDispatchTable<Callback> table_;
+template<>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+ MarkCompactMarkingVisitor::kVisitMap> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ Map* map_obj = Map::cast(obj);
+ ASSERT(map->instance_type() == MAP_TYPE);
+ DescriptorArray* array = map_obj->instance_descriptors();
+ if (array != heap->empty_descriptor_array()) {
+ int fixed_array_size = array->Size();
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ DESCRIPTOR_ARRAY_SUB_TYPE,
+ fixed_array_size);
+ }
+ if (map_obj->HasTransitionArray()) {
+ int fixed_array_size = map_obj->transitions()->Size();
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ TRANSITION_ARRAY_SUB_TYPE,
+ fixed_array_size);
+ }
+ if (map_obj->code_cache() != heap->empty_fixed_array()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ MAP_CODE_CACHE_SUB_TYPE,
+ FixedArray::cast(map_obj->code_cache())->Size());
+ }
+ ObjectStatsVisitBase(kVisitMap, map, obj);
+ }
};
-VisitorDispatchTable<StaticMarkingVisitor::Callback>
- StaticMarkingVisitor::table_;
+template<>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+ MarkCompactMarkingVisitor::kVisitCode> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ int object_size = obj->Size();
+ ASSERT(map->instance_type() == CODE_TYPE);
+ heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
+ ObjectStatsVisitBase(kVisitCode, map, obj);
+ }
+};
+
+
+template<>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+ MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ if (sfi->scope_info() != heap->empty_fixed_array()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ SCOPE_INFO_SUB_TYPE,
+ FixedArray::cast(sfi->scope_info())->Size());
+ }
+ ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
+ }
+};
+
+
+template<>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+ MarkCompactMarkingVisitor::kVisitFixedArray> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ FixedArray* fixed_array = FixedArray::cast(obj);
+ if (fixed_array == heap->symbol_table()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TABLE_SUB_TYPE,
+ fixed_array->Size());
+ }
+ ObjectStatsVisitBase(kVisitFixedArray, map, obj);
+ }
+};
+
+
+void MarkCompactMarkingVisitor::Initialize() {
+ StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
+
+ table_.Register(kVisitSharedFunctionInfo,
+ &VisitSharedFunctionInfoAndFlushCode);
+
+ table_.Register(kVisitJSFunction,
+ &VisitJSFunctionAndFlushCode);
+
+ table_.Register(kVisitJSRegExp,
+ &VisitRegExpAndFlushCode);
+
+ if (FLAG_track_gc_object_stats) {
+ // Copy the visitor table to make call-through possible.
+ non_count_table_.CopyFrom(&table_);
+#define VISITOR_ID_COUNT_FUNCTION(id) \
+ table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
+ VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
+#undef VISITOR_ID_COUNT_FUNCTION
+ }
+}
+
+
+VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
+ MarkCompactMarkingVisitor::non_count_table_;
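
The object-stats hook above rests on one trick: Initialize() first copies the fully populated dispatch table into non_count_table_, then re-registers every visitor id with a counting wrapper that records stats and calls through to the saved original. A condensed standalone sketch of that pattern (illustrative types, not V8's VisitorDispatchTable):

    #include <cstdio>

    struct Obj { int size; };             // hypothetical heap object
    typedef void (*Callback)(Obj* obj);

    static Callback table_[2];            // active dispatch table
    static Callback non_count_table_[2];  // saved originals for call-through
    static int visit_counts[2];

    static void VisitA(Obj* obj) { std::printf("A size=%d\n", obj->size); }
    static void VisitB(Obj* obj) { std::printf("B size=%d\n", obj->size); }

    // Counting wrapper: record the visit, then call through to the original
    // visitor, as ObjectStatsVisitBase does via non_count_table_.
    template <int id>
    static void CountingVisit(Obj* obj) {
      visit_counts[id]++;
      non_count_table_[id](obj);
    }

    int main() {
      table_[0] = &VisitA;
      table_[1] = &VisitB;
      // Copy the table, then swap in the wrappers (the
      // VISITOR_ID_COUNT_FUNCTION loop above does this for every id).
      non_count_table_[0] = table_[0];
      non_count_table_[1] = table_[1];
      table_[0] = &CountingVisit<0>;
      table_[1] = &CountingVisit<1>;

      Obj obj = { 16 };
      table_[0](&obj);
      table_[1](&obj);
      std::printf("counts: %d %d\n", visit_counts[0], visit_counts[1]);
      return 0;
    }
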
class MarkingVisitor : public ObjectVisitor {
@@ -1571,11 +1651,11 @@ class MarkingVisitor : public ObjectVisitor {
explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
void VisitPointer(Object** p) {
- StaticMarkingVisitor::VisitPointer(heap_, p);
+ MarkCompactMarkingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
- StaticMarkingVisitor::VisitPointers(heap_, start, end);
+ MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
}
private:
@@ -1734,7 +1814,7 @@ class RootMarkingVisitor : public ObjectVisitor {
// Mark the map pointer and body, and push them on the marking stack.
MarkBit map_mark = Marking::MarkBitFrom(map);
collector_->MarkObject(map, map_mark);
- StaticMarkingVisitor::IterateBody(map, object);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
@@ -1825,45 +1905,27 @@ template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
template <class T>
void Marker<T>::MarkMapContents(Map* map) {
- // Mark prototype transitions array but don't push it into marking stack.
- // This will make references from it weak. We will clean dead prototype
- // transitions in ClearNonLiveTransitions.
- Object** proto_trans_slot =
- HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
- HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
- if (prototype_transitions->IsFixedArray()) {
- mark_compact_collector()->RecordSlot(proto_trans_slot,
- proto_trans_slot,
- prototype_transitions);
- MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
- if (!mark.Get()) {
- mark.Set();
- MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
- prototype_transitions->Size());
- }
- }
-
// Make sure that the back pointer stored either in the map itself or inside
- // its prototype transitions array is marked. Treat pointers in the descriptor
- // array as weak and also mark that array to prevent visiting it later.
+ // its transitions array is marked. Treat pointers in the transitions array as
+ // weak and also mark that array to prevent visiting it later.
base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
- Object** descriptor_array_slot =
- HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
- Object* descriptor_array = *descriptor_array_slot;
- if (!descriptor_array->IsSmi()) {
- MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
- }
-
- // Mark the Object* fields of the Map. Since the descriptor array has been
- // marked already, it is fine that one of these fields contains a pointer
- // to it. But make sure to skip back pointer and prototype transitions.
- STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
- Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
- Object** start_slot = HeapObject::RawField(
- map, Map::kPointerFieldsBeginOffset);
- Object** end_slot = HeapObject::RawField(
- map, Map::kPrototypeTransitionsOrBackPointerOffset);
+ Object** transitions_slot =
+ HeapObject::RawField(map, Map::kTransitionsOrBackPointerOffset);
+ Object* transitions = *transitions_slot;
+ if (transitions->IsTransitionArray()) {
+ MarkTransitionArray(reinterpret_cast<TransitionArray*>(transitions));
+ } else {
+ // Already marked by marking map->GetBackPointer().
+ ASSERT(transitions->IsMap() || transitions->IsUndefined());
+ }
+
+ // Mark the Object* fields of the Map. Since the transitions array has been
+ // marked already, it is fine that one of these fields contains a pointer to
+ // it.
+ Object** start_slot =
+ HeapObject::RawField(map, Map::kPointerFieldsBeginOffset);
+ Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
for (Object** slot = start_slot; slot < end_slot; slot++) {
Object* obj = *slot;
if (!obj->NonFailureIsHeapObject()) continue;
@@ -1874,94 +1936,37 @@ void Marker<T>::MarkMapContents(Map* map) {
template <class T>
-void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
- // Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
-
- if (!base_marker()->MarkObjectWithoutPush(descriptors)) return;
- Object** descriptor_start = descriptors->data_start();
-
- // Since the descriptor array itself is not pushed for scanning, all fields
- // that point to objects manually have to be pushed, marked, and their slots
- // recorded.
- if (descriptors->HasEnumCache()) {
- Object** enum_cache_slot = descriptors->GetEnumCacheSlot();
- Object* enum_cache = *enum_cache_slot;
- base_marker()->MarkObjectAndPush(
- reinterpret_cast<HeapObject*>(enum_cache));
- mark_compact_collector()->RecordSlot(descriptor_start,
- enum_cache_slot,
- enum_cache);
- }
-
- // TODO(verwaest) Make sure we free unused transitions.
- if (descriptors->elements_transition_map() != NULL) {
- Object** transitions_slot = descriptors->GetTransitionsSlot();
- Object* transitions = *transitions_slot;
- base_marker()->MarkObjectAndPush(
- reinterpret_cast<HeapObject*>(transitions));
- mark_compact_collector()->RecordSlot(descriptor_start,
- transitions_slot,
- transitions);
- }
-
- // If the descriptor contains a transition (value is a Map), we don't mark the
- // value as live. It might be set to the NULL_DESCRIPTOR in
- // ClearNonLiveTransitions later.
- for (int i = 0; i < descriptors->number_of_descriptors(); ++i) {
- Object** key_slot = descriptors->GetKeySlot(i);
+void Marker<T>::MarkTransitionArray(TransitionArray* transitions) {
+ if (!base_marker()->MarkObjectWithoutPush(transitions)) return;
+ Object** transitions_start = transitions->data_start();
+
+ DescriptorArray* descriptors = transitions->descriptors();
+ base_marker()->MarkObjectAndPush(descriptors);
+ mark_compact_collector()->RecordSlot(
+ transitions_start, transitions->GetDescriptorsSlot(), descriptors);
+
+ if (transitions->HasPrototypeTransitions()) {
+ // Mark prototype transitions array but don't push it into marking stack.
+ // This will make references from it weak. We will clean dead prototype
+ // transitions in ClearNonLiveTransitions.
+ Object** proto_trans_slot = transitions->GetPrototypeTransitionsSlot();
+ HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
+ base_marker()->MarkObjectWithoutPush(prototype_transitions);
+ mark_compact_collector()->RecordSlot(
+ transitions_start, proto_trans_slot, prototype_transitions);
+ }
+
+ for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+ Object** key_slot = transitions->GetKeySlot(i);
Object* key = *key_slot;
if (key->IsHeapObject()) {
- base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(key));
- mark_compact_collector()->RecordSlot(descriptor_start, key_slot, key);
- }
-
- Object** value_slot = descriptors->GetValueSlot(i);
- if (!(*value_slot)->IsHeapObject()) continue;
- HeapObject* value = HeapObject::cast(*value_slot);
-
- mark_compact_collector()->RecordSlot(descriptor_start,
- value_slot,
- value);
-
- PropertyDetails details(descriptors->GetDetails(i));
-
- switch (details.type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- base_marker()->MarkObjectAndPush(value);
- break;
- case CALLBACKS:
- if (!value->IsAccessorPair()) {
- base_marker()->MarkObjectAndPush(value);
- } else if (base_marker()->MarkObjectWithoutPush(value)) {
- AccessorPair* accessors = AccessorPair::cast(value);
- MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
- MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
- }
- break;
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- break;
+ base_marker()->MarkObjectAndPush(HeapObject::cast(key));
+ mark_compact_collector()->RecordSlot(transitions_start, key_slot, key);
}
}
}
-template <class T>
-void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
- Object** slot = HeapObject::RawField(accessors, offset);
- HeapObject* accessor = HeapObject::cast(*slot);
- if (accessor->IsMap()) return;
- mark_compact_collector()->RecordSlot(slot, slot, accessor);
- base_marker()->MarkObjectAndPush(accessor);
-}
-
-
// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
@@ -2198,7 +2203,7 @@ void MarkCompactCollector::EmptyMarkingDeque() {
MarkBit map_mark = Marking::MarkBitFrom(map);
MarkObject(map, map_mark);
- StaticMarkingVisitor::IterateBody(map, object);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
}
// Process encountered weak maps, mark objects only reachable by those
@@ -2337,7 +2342,7 @@ void MarkCompactCollector::MarkLiveObjects() {
ASSERT(cell->IsJSGlobalPropertyCell());
if (IsMarked(cell)) {
int offset = JSGlobalPropertyCell::kValueOffset;
- StaticMarkingVisitor::VisitPointer(
+ MarkCompactMarkingVisitor::VisitPointer(
heap(),
reinterpret_cast<Object**>(cell->address() + offset));
}
@@ -2410,11 +2415,15 @@ void MarkCompactCollector::AfterMarking() {
// Clean up dead objects from the runtime profiler.
heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
}
+
+ if (FLAG_track_gc_object_stats) {
+ heap()->CheckpointObjectStats();
+ }
}
void MarkCompactCollector::ProcessMapCaches() {
- Object* raw_context = heap()->global_contexts_list_;
+ Object* raw_context = heap()->native_contexts_list_;
while (raw_context != heap()->undefined_value()) {
Context* context = reinterpret_cast<Context*>(raw_context);
if (IsMarked(context)) {
@@ -2514,7 +2523,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
int number_of_transitions = map->NumberOfProtoTransitions();
- FixedArray* prototype_transitions = map->prototype_transitions();
+ FixedArray* prototype_transitions = map->GetPrototypeTransitions();
int new_number_of_transitions = 0;
const int header = Map::kProtoTransitionHeaderSize;
@@ -2592,7 +2601,8 @@ void MarkCompactCollector::ProcessWeakMaps() {
Object** value_slot =
HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
ObjectHashTable::EntryToValueIndex(i)));
- StaticMarkingVisitor::MarkObjectByPointer(this, anchor, value_slot);
+ MarkCompactMarkingVisitor::MarkObjectByPointer(
+ this, anchor, value_slot);
}
}
weak_map_obj = weak_map->next();
@@ -3254,6 +3264,8 @@ void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+ Heap::RelocationLock relocation_lock(heap());
+
bool code_slots_filtering_required;
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
code_slots_filtering_required = MarkInvalidatedCode();
@@ -3392,8 +3404,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- // Update pointer from the global contexts list.
- updating_visitor.VisitPointer(heap_->global_contexts_list_address());
+ // Update pointer from the native contexts list.
+ updating_visitor.VisitPointer(heap_->native_contexts_list_address());
heap_->symbol_table()->Iterate(&updating_visitor);
@@ -3991,7 +4003,8 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
void MarkCompactCollector::Initialize() {
- StaticMarkingVisitor::Initialize();
+ MarkCompactMarkingVisitor::Initialize();
+ IncrementalMarking::Initialize();
}
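
A recurring idea in the hunks above is marking an object without pushing it onto the marking deque: the object itself survives the collection, but because it is never scanned, anything reachable only through it stays unmarked and can be cleared later. This is how MarkTransitionArray keeps prototype transitions weak. A self-contained sketch of the distinction, using simplified stand-in types rather than V8's:

#include <cstdio>
#include <vector>

// Toy heap object: a mark bit plus outgoing references.
struct Obj {
  bool marked;
  std::vector<Obj*> refs;
  Obj() : marked(false) {}
};

static std::vector<Obj*> marking_deque;

// Strong treatment: mark and queue for scanning, so children get marked too.
void MarkObjectAndPush(Obj* o) {
  if (o->marked) return;
  o->marked = true;
  marking_deque.push_back(o);
}

// Weak treatment: the object itself survives, but it is never scanned, so
// anything reachable only through it stays unmarked and can be cleared later.
void MarkObjectWithoutPush(Obj* o) {
  o->marked = true;
}

void ProcessMarkingDeque() {
  while (!marking_deque.empty()) {
    Obj* o = marking_deque.back();
    marking_deque.pop_back();
    for (size_t i = 0; i < o->refs.size(); i++) MarkObjectAndPush(o->refs[i]);
  }
}

int main() {
  Obj map, descriptors, proto_transitions, dead_map;
  map.refs.push_back(&descriptors);             // ordinary strong field
  proto_transitions.refs.push_back(&dead_map);  // reference meant to be weak

  MarkObjectAndPush(&map);                    // map and descriptors become live
  MarkObjectWithoutPush(&proto_transitions);  // marked, but never scanned
  ProcessMarkingDeque();

  // descriptors is marked (1); dead_map is not (0), so it can be collected.
  std::printf("descriptors=%d dead_map=%d\n", descriptors.marked, dead_map.marked);
  return 0;
}
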
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index dbc28697f0..0154380629 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -394,8 +394,7 @@ template<class BaseMarker> class Marker {
// Mark pointers in a Map and its DescriptorArray together, possibly
   // treating transitions or back pointers as weak.
void MarkMapContents(Map* map);
- void MarkDescriptorArray(DescriptorArray* descriptors);
- void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
+ void MarkTransitionArray(TransitionArray* transitions);
private:
BaseMarker* base_marker() {
@@ -612,8 +611,6 @@ class MarkCompactCollector {
bool was_marked_incrementally_;
- bool flush_monomorphic_ics_;
-
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
GCTracer* tracer_;
@@ -636,7 +633,7 @@ class MarkCompactCollector {
friend class RootMarkingVisitor;
friend class MarkingVisitor;
- friend class StaticMarkingVisitor;
+ friend class MarkCompactMarkingVisitor;
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
friend class Marker<IncrementalMarking>;
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 2a00ba8469..b819724a10 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -50,7 +50,12 @@ function FormatString(format, message) {
try {
str = ToDetailString(args[arg_num]);
} catch (e) {
- str = "#<error>";
+ if (%IsJSModule(args[arg_num]))
+ str = "module";
+ else if (IS_SPEC_OBJECT(args[arg_num]))
+ str = "object";
+ else
+ str = "#<error>";
}
}
}
@@ -197,6 +202,7 @@ function FormatMessage(message) {
"proxy_non_object_prop_names", ["Trap '", "%1", "' returned non-object ", "%0"],
"proxy_repeated_prop_name", ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
"invalid_weakmap_key", ["Invalid value used as weak map key"],
+ "not_date_object", ["this is not a Date object."],
// RangeError
"invalid_array_length", ["Invalid array length"],
"stack_overflow", ["Maximum call stack size exceeded"],
@@ -251,6 +257,7 @@ function FormatMessage(message) {
"harmony_const_assign", ["Assignment to constant variable."],
"invalid_module_path", ["Module does not export '", "%0", "', or export is not itself a module"],
"module_type_error", ["Module '", "%0", "' used improperly"],
+ "module_export_undefined", ["Export '", "%0", "' is not defined in module"],
];
var messages = { __proto__ : null };
for (var i = 0; i < messagesDictionary.length; i += 2) {
@@ -760,18 +767,18 @@ function DefineOneShotAccessor(obj, name, fun) {
// Note that the accessors consistently operate on 'obj', not 'this'.
// Since the object may occur in someone else's prototype chain we
// can't rely on 'this' being the same as 'obj'.
- var hasBeenSet = false;
var value;
+ var value_factory = fun;
var getter = function() {
- if (hasBeenSet) {
+ if (value_factory == null) {
return value;
}
- hasBeenSet = true;
- value = fun(obj);
+ value = value_factory(obj);
+ value_factory = null;
return value;
};
var setter = function(v) {
- hasBeenSet = true;
+ value_factory = null;
value = v;
};
%DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM);
@@ -853,9 +860,9 @@ function CallSiteGetMethodName() {
}
var name = null;
for (var prop in this.receiver) {
- if (this.receiver.__lookupGetter__(prop) === this.fun ||
- this.receiver.__lookupSetter__(prop) === this.fun ||
- (!this.receiver.__lookupGetter__(prop) &&
+ if (%_CallFunction(this.receiver, prop, ObjectLookupGetter) === this.fun ||
+ %_CallFunction(this.receiver, prop, ObjectLookupSetter) === this.fun ||
+ (!%_CallFunction(this.receiver, prop, ObjectLookupGetter) &&
this.receiver[prop] === this.fun)) {
// If we find more than one match bail out to avoid confusion.
if (name) {
@@ -921,17 +928,25 @@ function CallSiteToString() {
var fileLocation = "";
if (this.isNative()) {
fileLocation = "native";
- } else if (this.isEval()) {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName) {
- fileLocation = this.getEvalOrigin();
- }
} else {
- fileName = this.getFileName();
- }
+ if (this.isEval()) {
+ fileName = this.getScriptNameOrSourceURL();
+ if (!fileName) {
+ fileLocation = this.getEvalOrigin();
+ fileLocation += ", "; // Expecting source position to follow.
+ }
+ } else {
+ fileName = this.getFileName();
+ }
- if (fileName) {
- fileLocation += fileName;
+ if (fileName) {
+ fileLocation += fileName;
+ } else {
+ // Source code does not originate from a file and is not native, but we
+ // can still get the source position inside the source string, e.g. in
+ // an eval string.
+ fileLocation += "<anonymous>";
+ }
var lineNumber = this.getLineNumber();
if (lineNumber != null) {
fileLocation += ":" + lineNumber;
@@ -942,9 +957,6 @@ function CallSiteToString() {
}
}
- if (!fileLocation) {
- fileLocation = "unknown source";
- }
var line = "";
var functionName = this.getFunctionName();
var addSuffix = true;
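
The DefineOneShotAccessor rewrite above collapses two pieces of state (hasBeenSet plus value) into one by letting the factory reference double as the "not yet computed" flag: once the value is produced or stored, the factory is nulled out. A rough C++ analogue of the same pattern (OneShot is an illustrative name, not part of V8):

#include <cstdio>
#include <functional>

// Lazy one-shot value: the factory pointer itself records the state, where
// nullptr means "value is already valid", mirroring the messages.js change
// in which 'value_factory == null' replaces the separate 'hasBeenSet' flag.
class OneShot {
 public:
  explicit OneShot(std::function<int()> factory) : factory_(factory), value_(0) {}

  int get() {
    if (factory_ == nullptr) return value_;  // computed or explicitly set
    value_ = factory_();
    factory_ = nullptr;  // drop the factory so it runs at most once
    return value_;
  }

  void set(int v) {
    factory_ = nullptr;  // a store also invalidates the factory
    value_ = v;
  }

 private:
  std::function<int()> factory_;
  int value_;
};

int main() {
  OneShot lazy([] { std::printf("computing...\n"); return 42; });
  std::printf("%d\n", lazy.get());  // runs the factory once
  std::printf("%d\n", lazy.get());  // served from the cached value
  return 0;
}

A side benefit of nulling the factory is that the closure (and whatever it captures) becomes collectable after first use, which the old boolean flag did not allow.
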
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 2ff4710eff..522bc78687 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -208,10 +208,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}
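
The new JSGlobalPropertyCell::FromValueAddress above replaces open-coded pointer arithmetic: given the address of a cell's value field, subtract that field's offset to recover the enclosing object. A tiny sketch of the idiom under assumed, non-V8 types:

#include <cstddef>
#include <cstdio>

// Assumed toy layout: given the address of the value field, subtracting its
// offset recovers the enclosing cell (the container_of idiom).
struct Cell {
  int header;
  int value;
};

Cell* CellFromValueAddress(int* value_address) {
  char* raw = reinterpret_cast<char*>(value_address) - offsetof(Cell, value);
  return reinterpret_cast<Cell*>(raw);
}

int main() {
  Cell cell = {7, 99};
  Cell* recovered = CellFromValueAddress(&cell.value);
  std::printf("header=%d value=%d\n", recovered->header, recovered->value);
  return 0;
}
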
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index f347fdc576..801ca2c152 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -273,6 +273,7 @@ static const int kMinimalBufferSize = 4 * KB;
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
+ recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
@@ -2046,7 +2047,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+ RelocInfo reloc_info_with_ast_id(pc_,
+ rmode,
+ RecordedAstId().ToInt(),
+ NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 84714e507e..7163770178 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -525,6 +525,9 @@ class Assembler : public AssemblerBase {
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+  // Dummy for cross-platform compatibility.
+ void set_predictable_code_size(bool value) { }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -909,17 +912,17 @@ class Assembler : public AssemblerBase {
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
- void SetRecordedAstId(unsigned ast_id) {
- ASSERT(recorded_ast_id_ == kNoASTId);
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
- unsigned RecordedAstId() {
- ASSERT(recorded_ast_id_ != kNoASTId);
+ TypeFeedbackId RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
- void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
@@ -1016,7 +1019,7 @@ class Assembler : public AssemblerBase {
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
- unsigned recorded_ast_id_;
+ TypeFeedbackId recorded_ast_id_;
bool emit_debug_code() const { return emit_debug_code_; }
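
The recorded_ast_id_ change above swaps a raw unsigned plus a kNoASTId sentinel for a dedicated TypeFeedbackId type, so AST ids can no longer be mixed up with ordinary integers and the "none" state is explicit. A minimal sketch of such a typed-id wrapper (the field names and -1 sentinel are assumptions, not V8's actual definition):

#include <cassert>

// Illustrative typed id: a distinct wrapper type with an explicit None()
// sentinel instead of a bare unsigned plus a magic kNoASTId constant.
class TypeFeedbackId {
 public:
  static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
  explicit TypeFeedbackId(int id) : id_(id) {}
  bool IsNone() const { return id_ == kNoneId; }
  int ToInt() const {
    assert(!IsNone());  // callers must check, as RecordedAstId() does
    return id_;
  }
 private:
  static const int kNoneId = -1;
  int id_;
};

int main() {
  TypeFeedbackId recorded = TypeFeedbackId::None();
  assert(recorded.IsNone());
  recorded = TypeFeedbackId(17);
  assert(recorded.ToInt() == 17);
  return 0;
}

The explicit constructor is what buys the safety: an unadorned integer no longer converts silently into an id.
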
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 5a2074e652..0342e6505d 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -79,12 +79,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the global context.
+ // Load the native context.
- __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ lw(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the InternalArray function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
__ lw(result,
MemOperand(result,
Context::SlotOffset(
@@ -94,12 +95,13 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
+ // Load the native context.
- __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ lw(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the Array function from the native context.
__ lw(result,
MemOperand(result,
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
@@ -713,6 +715,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ __ push(a1); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore receiver.
+ __ pop(a1);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -1393,9 +1432,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ lw(a2, FieldMemOperand(cp, kGlobalIndex));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
__ lw(a2, FieldMemOperand(a2, kGlobalIndex));
__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
@@ -1586,9 +1625,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
__ lw(a0, FieldMemOperand(a0, kGlobalOffset));
__ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index a7c259732a..a5c80b8471 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -87,6 +87,8 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
// Pop the function info from the stack.
@@ -100,32 +102,44 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
+
int map_index = (language_mode_ == CLASSIC_MODE)
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
- __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
+ __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
+ __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
__ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
__ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
__ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
__ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ lw(a1,
+ FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ And(at, a1, a1);
+ __ Branch(&check_optimized, ne, at, Operand(zero_reg));
+ }
+ __ bind(&install_unoptimized);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
__ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -133,6 +147,72 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
__ Ret();
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
+
+  // a2 holds the native context, a1 points to a fixed array of 3-element
+  // entries (native context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first entry.
+ Label install_optimized;
+ // Speculatively move code object into t0.
+ __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
+ __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
+ __ Branch(&install_optimized, eq, a2, Operand(t1));
+
+  // Iterate through the rest of the map backwards. t0 holds an index as a Smi.
+ Label loop;
+ __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
+ __ bind(&loop);
+  // Do not double-check the first entry.
+
+ __ Branch(&install_unoptimized, eq, t0,
+ Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ Subu(t0, t0, Operand(
+ Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, t1, Operand(at));
+ __ lw(t1, MemOperand(t1));
+ __ Branch(&loop, ne, a2, Operand(t1));
+ // Hit: fetch the optimized code.
+ __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, t1, Operand(at));
+ __ Addu(t1, t1, Operand(kPointerSize));
+ __ lw(t0, MemOperand(t1));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+ 1, t2, t3);
+
+ // TODO(fschneider): Idea: store proper code pointers in the map and either
+ // unmangle them on marking or do nothing as the whole map is discarded on
+ // major GC anyway.
+ __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+ // Now link a function into a list of optimized functions.
+ __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+  // No need for a write barrier as the JSFunction (v0) is in new space.
+
+ __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Copy the JSFunction (v0) into t0 before issuing the write barrier, as
+  // the barrier clobbers all the registers passed to it.
+ __ mov(t0, v0);
+ __ RecordWriteContextSlot(
+ a2,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ t0,
+ a1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ LoadRoot(t0, Heap::kFalseValueRootIndex);
@@ -164,12 +244,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
// Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
@@ -211,9 +291,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ li(a2, Operand(Smi::FromInt(length)));
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(a3, &after_sentinel);
@@ -222,16 +302,16 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(eq, message, a3, Operand(zero_reg));
}
__ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
__ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
__ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
__ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
- __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
+ __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
@@ -3453,23 +3533,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1,
1);
} else {
- if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+ ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU);
Label no_update;
Label skip_cache;
// Call C function to calculate the result and update the cache.
- // Register a0 holds precalculated cache entry address; preserve
- // it on the stack and pop it into register cache_entry after the
- // call.
- __ Push(cache_entry, a2, a3);
+ // a0: precalculated cache entry address.
+ // a2 and a3: parts of the double value.
+    // Store a0, a2 and a3 on the stack before calling the C function.
+ __ Push(a3, a2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ Pop(cache_entry, a2, a3);
+ __ Pop(a3, a2, cache_entry);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -4566,14 +4646,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// v0 = address of new object(s) (tagged)
// a2 = argument count (tagged)
- // Get the arguments boilerplate from the current (global) context into t0.
+ // Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
@@ -4761,9 +4841,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT |
SIZE_IN_WORDS));
- // Get the arguments boilerplate from the current (global) context.
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(t0, Context::SlotOffset(
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
@@ -4897,7 +4977,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ Addu(a2, a2, Operand(2)); // a2 was a smi.
// Check that the static offsets vector buffer is large enough.
- __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+ __ Branch(
+ &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
// a2: Number of capture registers
// regexp_data: RegExp data (FixedArray)
@@ -5296,10 +5377,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ Addu(a3, v0, Operand(JSRegExpResult::kSize));
__ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
__ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
__ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
__ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
@@ -5408,7 +5489,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&call, ne, t0, Operand(at));
// Patch the receiver on the stack with the global receiver object.
- __ lw(a3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a3,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
__ sw(a3, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
@@ -7380,6 +7462,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
{ REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
+ // FastNewClosureStub::Generate
+ { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7684,6 +7768,66 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
}
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ __ push(ra);
+ __ CallStub(&stub);
+ __ pop(ra);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push ra" instruction, followed by a call.
+  // Note: on MIPS a "push" takes two instructions.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+
+ // Save live volatile registers.
+ __ Push(ra, t1, a1);
+ const int32_t kNumSavedRegs = 3;
+
+ // Compute the function's address for the first argument.
+ __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(t1, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ And(sp, sp, Operand(-frame_alignment));
+ }
+
+#if defined(V8_HOST_ARCH_MIPS)
+ __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+ __ lw(at, MemOperand(at));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ Address trampoline_address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(EntryHookTrampoline));
+ ApiFunction dispatcher(trampoline_address);
+ __ li(at, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+#endif
+ __ Call(at);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, t1);
+ }
+
+ __ Pop(ra, t1, a1);
+ __ Ret();
+}
+
+
#undef __
} } // namespace v8::internal
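
The check_optimized path in FastNewClosureStub above is a hand-written backwards scan of the shared function info's optimized code map: fixed-size entries of (native context, code, literals), searched from the end until the current context matches, falling back to the unoptimized code on a miss. The same lookup in plain C++, with an illustrative flat-array layout standing in for the FixedArray:

#include <cstdio>

const int kEntryLength = 3;  // per entry: native context, code object, literals

// Backwards scan over a flat array of fixed-size entries; returns the cached
// code for the matching context, or -1 on a miss.
int FindOptimizedCode(const int* map, int map_length, int native_context) {
  for (int i = map_length - kEntryLength; i >= 0; i -= kEntryLength) {
    if (map[i] == native_context) {
      return map[i + 1];  // hit: install this code object
    }
  }
  return -1;  // miss: fall back to the unoptimized shared code
}

int main() {
  // Two entries: context 1 -> code 100, context 2 -> code 200.
  int map[] = {1, 100, 11, 2, 200, 22};
  std::printf("%d\n", FindOptimizedCode(map, 6, 2));  // 200
  std::printf("%d\n", FindOptimizedCode(map, 6, 3));  // -1
  return 0;
}
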
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 62f3155eb3..371d120887 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -48,6 +48,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
+ // The optimized code is going to be patched, so we cannot use it
+ // any more. Play safe and reset the whole cache.
+ function->shared()->ClearOptimizedCodeMap();
+
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
@@ -96,8 +100,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
+ // Iterate over all the functions which share the same code object
+ // and make them use unoptimized version.
+ Context* context = function->context()->native_context();
+ Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+ SharedFunctionInfo* shared = function->shared();
+ while (!element->IsUndefined()) {
+ JSFunction* func = JSFunction::cast(element);
+ // Grab element before code replacement as ReplaceCode alters the list.
+ element = func->next_function_link();
+ if (func->code() == code) {
+ func->ReplaceCode(shared->code());
+ }
+ }
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -186,11 +201,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@@ -209,7 +224,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
- int bailout_id = LookupBailoutId(data, ast_id);
+ int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@@ -229,9 +244,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
+ int closure_id = iterator.Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@@ -342,8 +357,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@@ -567,19 +582,145 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (FLAG_trace_deopt) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 5 stack entries from StackFrame::INTERNAL (ra, fp, cp, frame type,
+ // code object, see MacroAssembler::EnterFrame). For a setter stub frame we
+ // need one additional entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+  // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+  // The context can be taken from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
+
// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
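
DeoptimizeFunction above no longer patches just the one function: since several closures can share an optimized code object, it walks the context's OPTIMIZED_FUNCTIONS_LIST and reverts every function still pointing at the invalidated code, grabbing the next link before ReplaceCode can alter the list. A simplified sketch of that traversal (Function and its fields are stand-ins, not V8 types):

#include <cstdio>

// Stand-in for JSFunction: a code object id plus the next_function_link.
struct Function {
  int code;
  Function* next;
};

void ReplaceSharedCode(Function* list_head, int dead_code, int shared_code) {
  for (Function* f = list_head; f != NULL; ) {
    Function* next = f->next;  // grab the link first; replacement may alter it
    if (f->code == dead_code) f->code = shared_code;
    f = next;
  }
}

int main() {
  Function c = {5, NULL}, b = {9, &c}, a = {9, &b};
  ReplaceSharedCode(&a, /*dead_code=*/9, /*shared_code=*/1);
  std::printf("%d %d %d\n", a.code, b.code, c.code);  // prints: 1 1 5
  return 0;
}
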
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 263656ea01..bfa24252b9 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -143,6 +143,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -190,10 +192,13 @@ void FullCodeGenerator::Generate() {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in a1.
+ Comment cmnt(masm_, "[ Allocate context");
+ // Argument to NewContext is the function, which is still in a1.
__ push(a1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
@@ -270,7 +275,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@@ -285,7 +290,7 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
@@ -332,7 +337,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
- reset_value = 10;
+ reset_value = FLAG_interrupt_budget >> 4;
}
__ li(a2, Operand(profiling_counter_));
__ li(a3, Operand(Smi::FromInt(reset_value)));
@@ -340,10 +345,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
@@ -360,7 +361,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
@@ -413,7 +414,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -793,7 +794,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kWithContextMapRootIndex);
@@ -848,10 +849,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
__ li(a2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
- PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
- ? READ_ONLY : NONE;
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ li(a1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -1141,25 +1141,32 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
- __ mov(a2, v0);
- __ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- __ Branch(&fixed_array, ne, a1, Operand(at));
+ __ Branch(&fixed_array, ne, a2, Operand(at));
// We got a map in register v0. Get the enumeration cache from it.
+ Label no_descriptors;
__ bind(&use_cache);
- __ LoadInstanceDescriptors(v0, a1);
- __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
- __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ __ EnumLength(a1, v0);
+ __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
+
+ __ LoadInstanceDescriptors(v0, a2, t0);
+ __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
+ __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(v0); // Map.
- __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(a2, a1, a0);
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ jmp(&exit);
+
// We got a fixed array in register v0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
@@ -1168,7 +1175,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(a1, cell);
__ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ sw(a2, FieldMemOperand(a1, JSGlobalPropertyCell::kValueOffset));
@@ -1324,9 +1331,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ Move(next, current);
}
__ bind(&loop);
- // Terminate at global context.
+ // Terminate at native context.
__ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kGlobalContextMapRootIndex);
+ __ LoadRoot(t0, Heap::kNativeContextMapRootIndex);
__ Branch(&fast, eq, temp, Operand(t0));
// Check that extension is NULL.
__ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
@@ -1614,7 +1621,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1641,7 +1648,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1852,11 +1859,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1914,7 +1921,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1923,7 +1930,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1951,7 +1958,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2034,7 +2042,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -2166,7 +2175,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, a1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -2225,7 +2234,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2277,7 +2286,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2301,6 +2310,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(v0);
} else {
VisitForStackValue(expr->obj());
@@ -2314,9 +2324,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId id) {
ic_total_count_++;
- __ Call(code, rmode, ast_id);
+ __ Call(code, rmode, id);
}
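
[Editor's note: the hunks above replace raw `unsigned ast_id` arguments with a `TypeFeedbackId`. A minimal sketch of why a typed wrapper helps, in plain C++; the class body here is illustrative, not V8's actual definition:]

#include <cassert>

class TypeFeedbackId {
 public:
  explicit TypeFeedbackId(int id) : id_(id) {}
  int ToInt() const { return id_; }
  bool IsNone() const { return id_ == kNoneId; }
  static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
 private:
  static const int kNoneId = -1;
  int id_;
};

int main() {
  TypeFeedbackId id(42);
  assert(!id.IsNone());
  assert(TypeFeedbackId::None().IsNone());
  // CallIC(code, mode, 42);   // would no longer compile: int != TypeFeedbackId
  // CallIC(code, mode, id);   // call sites now say which kind of id they pass
}

[With distinct accessors per site (LiteralFeedbackId, CallFeedbackId, ...), mixing up an AST id and a feedback id becomes a compile error rather than a silent bug.]
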
@@ -2337,7 +2347,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2370,7 +2380,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2390,16 +2400,14 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code, but not in the snapshot.
- if (!Serializer::enabled()) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ li(a2, Operand(cell));
- }
+ // Record call targets in unoptimized code.
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ li(a2, Operand(cell));
CallFunctionStub stub(arg_count, flags);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
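
[Editor's note: the hunk above now records call targets unconditionally, seeding a property cell with an "uninitialized" sentinel. A hedged sketch of the feedback protocol the stub implements; names and states are illustrative:]

#include <cassert>
#include <cstdint>

enum class CellState { kUninitialized, kMonomorphic, kMegamorphic };

struct FeedbackCell {
  CellState state = CellState::kUninitialized;
  uintptr_t target = 0;  // the single recorded call target

  void Record(uintptr_t callee) {
    switch (state) {
      case CellState::kUninitialized:
        state = CellState::kMonomorphic;
        target = callee;
        break;
      case CellState::kMonomorphic:
        if (target != callee) state = CellState::kMegamorphic;
        break;
      case CellState::kMegamorphic:
        break;  // nothing more to learn
    }
  }
};

int main() {
  FeedbackCell cell;
  cell.Record(0x1000);
  assert(cell.state == CellState::kMonomorphic);
  cell.Record(0x1000);   // same target: stays monomorphic
  cell.Record(0x2000);   // different target
  assert(cell.state == CellState::kMegamorphic);
}
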
@@ -2588,21 +2596,15 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ li(a0, Operand(arg_count));
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ li(a2, Operand(cell));
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
- }
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ li(a2, Operand(cell));
- CallConstructStub stub(flags);
+ CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(v0);
@@ -2743,7 +2745,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(v0);
+ if (generate_debug_code_) __ AbortIfSmi(v0);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
@@ -2759,7 +2761,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Look for valueOf symbol in the descriptor array, and indicate false if
// found. The type is not checked, so if it is a transition it is a false
// negative.
- __ LoadInstanceDescriptors(a1, t0);
+ __ LoadInstanceDescriptors(a1, t0, a3);
__ lw(a3, FieldMemOperand(t0, FixedArray::kLengthOffset));
// t0: descriptor array
// a3: length of descriptor array
@@ -2774,8 +2776,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Calculate location of the first key name.
__ Addu(t0,
t0,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
+ Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
Label entry, loop;
@@ -2786,7 +2787,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
__ Branch(if_false, eq, a3, Operand(t2));
- __ Addu(t0, t0, Operand(kPointerSize));
+ __ Addu(t0, t0, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ Branch(&loop, ne, t0, Operand(a2));
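
[Editor's note: the stride change above reflects descriptors now being stored as fixed-size multi-word records, so the valueOf scan advances by the record size rather than one word. A sketch of that loop shape; kDescriptorSize = 3 is an assumption mirroring the (key, details, value) layout this diff targets:]

#include <cassert>
#include <cstring>

const int kDescriptorSize = 3;  // words per descriptor record

bool ContainsKey(const char* const* flat, int num_descriptors, const char* key) {
  const char* const* end = flat + num_descriptors * kDescriptorSize;
  for (const char* const* p = flat; p != end; p += kDescriptorSize) {
    if (std::strcmp(*p, key) == 0) return true;  // *p is the record's key slot
  }
  return false;
}

int main() {
  const char* descriptors[] = {
      "toString", "details0", "value0",
      "valueOf",  "details1", "value1",
  };
  assert(ContainsKey(descriptors, 2, "valueOf"));
  assert(!ContainsKey(descriptors, 2, "hasOwnProperty"));
}
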
@@ -2795,8 +2796,8 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
__ JumpIfSmi(a2, if_false);
__ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ lw(a3, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+ __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
__ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ Branch(if_false, ne, a2, Operand(a3));
@@ -3076,8 +3077,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(FPU)) {
__ PrepareCallCFunction(1, a0);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(FPU);
@@ -3094,8 +3095,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
} else {
__ PrepareCallCFunction(2, a0);
__ mov(a0, s0);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalContextOffset));
+ __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
@@ -3159,21 +3160,19 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = v0;
Register result = v0;
Register scratch0 = t5;
Register scratch1 = a1;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ JumpIfSmi(object, &not_date_object);
__ GetObjectType(object, scratch1, scratch1);
- __ Assert(eq, "Trying to get date field from non-date.",
- scratch1, Operand(JS_DATE_TYPE));
-#endif
+ __ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3190,9 +3189,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ li(a1, Operand(index));
__ Move(a0, object);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
+ __ jmp(&done);
}
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(v0);
}
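
[Editor's note: the EmitDateField hunk above turns a DEBUG-only assertion into an unconditional check that routes bad inputs to an error path in release builds too. The control-flow shape, sketched in portable C++; ThrowNotDateError stands in for the Runtime::kThrowNotDateError call:]

#include <cassert>
#include <stdexcept>

struct Value { bool is_smi; int type; };
const int JS_DATE_TYPE = 1;

[[noreturn]] void ThrowNotDateError() {
  throw std::runtime_error("not a date object");
}

int DateField(const Value& v, int cached_field) {
  if (v.is_smi || v.type != JS_DATE_TYPE) ThrowNotDateError();  // was: ASSERT
  return cached_field;  // fast path: read the cached field
}

int main() {
  assert(DateField({false, JS_DATE_TYPE}, 7) == 7);
  try {
    DateField({true, 0}, 7);
    assert(false);
  } catch (const std::runtime_error&) { /* expected in release builds too */ }
}
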
@@ -3467,10 +3469,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(v0, &runtime);
__ GetObjectType(v0, a1, a1);
- __ Branch(&proxy, eq, a1, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
// InvokeFunction requires the function in a1. Move it in there.
__ mov(a1, result_register());
@@ -3480,7 +3483,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(v0);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3509,7 +3512,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
@@ -3521,8 +3524,8 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = v0;
Register cache = a1;
- __ lw(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ lw(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ lw(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
__ lw(cache,
ContextOperand(
cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
@@ -3618,9 +3621,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(v0);
- }
+ __ AbortIfNotString(v0);
__ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
__ IndexFromHash(v0, v0);
@@ -3694,7 +3695,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Accumulated sum of string lengths (smi).
// element: Current array element.
// elements_end: Array end.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
array_length, Operand(zero_reg));
}
@@ -3897,7 +3898,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -4053,7 +4054,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
context()->Plug(v0);
}
@@ -4111,7 +4113,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@@ -4164,7 +4166,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4197,7 +4199,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4215,7 +4217,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4419,7 +4421,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -4500,7 +4502,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4545,6 +4547,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_has_pending_message(isolate());
__ li(at, Operand(has_pending_message));
__ lw(a1, MemOperand(at));
+ __ SmiTag(a1);
__ push(a1);
ExternalReference pending_message_script =
@@ -4565,6 +4568,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
__ sw(a1, MemOperand(at));
__ pop(a1);
+ __ SmiUntag(a1);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ li(at, Operand(has_pending_message));
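
[Editor's note: the new SmiTag/SmiUntag pair above exists because anything pushed on the JS stack must be GC-safe, and a raw flag word is not. A 32-bit smi is the integer shifted left one bit with a 0 tag bit; a minimal sketch of that encoding:]

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;
const int kSmiTag = 0;

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }
bool IsSmi(int32_t word) { return (word & 1) == kSmiTag; }

int main() {
  int32_t has_pending_message = 1;  // a raw flag word
  int32_t pushed = SmiTag(has_pending_message);
  assert(IsSmi(pushed));            // GC sees a valid smi, not a stray pointer
  assert(SmiUntag(pushed) == has_pending_message);
}
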
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 5d530d0e9b..3f2ecb88a5 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -398,7 +398,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
- NORMAL,
+ Code::NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 68f8a3dd7d..db9748a4ae 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -89,17 +89,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -125,6 +116,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -259,7 +252,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
// TODO(plind): not clear that this will have advantage for MIPS.
// Skipping it for now. Raised issue #100 for this.
- Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
+ Abort("Unimplemented: GenerateDeoptJumpTable");
return false;
}
@@ -292,7 +285,8 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -330,7 +324,8 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -354,9 +349,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ return constant->handle();
}
@@ -366,33 +361,33 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+ ASSERT(constant->HasInteger32Value());
+ return constant->Integer32Value();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Operand(static_cast<int32_t>(literal->Number()));
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
Abort("ToOperand Unsupported double immediate.");
}
ASSERT(r.IsTagged());
- return Operand(literal);
+ return Operand(constant->handle());
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
@@ -446,7 +441,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ int closure_id = *info()->closure() != *environment->closure()
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -454,11 +452,19 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@@ -470,7 +476,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i));
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -478,18 +485,23 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
+ false,
false);
}
}
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i));
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged) {
+ bool is_tagged,
+ bool is_uint32) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
@@ -498,6 +510,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
@@ -511,6 +525,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
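
[Editor's note: StoreUint32Register/StoreUint32StackSlot above exist because the deoptimizer materializes an int32 slot as a signed number, which is wrong for values in [2^31, 2^32). The two interpretations of the same 32 bits, as a worked example:]

#include <cassert>
#include <cstdint>

double MaterializeInt32(uint32_t bits) { return static_cast<int32_t>(bits); }
double MaterializeUint32(uint32_t bits) { return bits; }

int main() {
  uint32_t bits = 0x80000000u;                      // 2147483648
  assert(MaterializeInt32(bits) == -2147483648.0);  // signed view: wrong value
  assert(MaterializeUint32(bits) == 2147483648.0);  // unsigned view: correct
}
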
@@ -518,8 +534,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
DoubleRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -657,13 +673,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -1223,6 +1239,13 @@ void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
}
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->InputAt(0));
+ __ EnumLength(result, map);
+}
+
+
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
@@ -1267,12 +1290,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ And(at, object, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- __ Assert(eq, "Trying to get date field from non-date.",
- scratch, Operand(JS_DATE_TYPE));
-#endif
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1353,6 +1374,68 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register left_reg = ToRegister(left);
+ Operand right_op = (right->IsRegister() || right->IsConstantOperand())
+ ? ToOperand(right)
+ : Operand(EmitLoadRegister(right, at));
+ Register result_reg = ToRegister(instr->result());
+ Label return_right, done;
+ if (!result_reg.is(left_reg)) {
+ __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
+ __ mov(result_reg, left_reg);
+ __ Branch(&done);
+ }
+ __ Branch(&done, condition, left_reg, right_op);
+ __ bind(&return_right);
+ __ Addu(result_reg, zero_reg, right_op);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ FPURegister left_reg = ToDoubleRegister(left);
+ FPURegister right_reg = ToDoubleRegister(right);
+ FPURegister result_reg = ToDoubleRegister(instr->result());
+ Label check_nan_left, check_zero, return_left, return_right, done;
+ __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
+ __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
+ __ Branch(&return_right);
+
+ __ bind(&check_zero);
+ // left == right != 0.
+ __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ __ neg_d(left_reg, left_reg);
+ __ sub_d(result_reg, left_reg, right_reg);
+ __ neg_d(result_reg, result_reg);
+ } else {
+ __ add_d(result_reg, left_reg, right_reg);
+ }
+ __ Branch(&done);
+
+ __ bind(&check_nan_left);
+ // left == NaN.
+ __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
+ __ bind(&return_right);
+ if (!right_reg.is(result_reg)) {
+ __ mov_d(result_reg, right_reg);
+ }
+ __ Branch(&done);
+
+ __ bind(&return_left);
+ if (!left_reg.is(result_reg)) {
+ __ mov_d(result_reg, left_reg);
+ }
+ __ bind(&done);
+ }
+}
+
+
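
[Editor's note: the ±0 trick in DoMathMinMax above, checked in portable C++. On the path where left and right compared equal (so both are signed zeros), min is computed as -((-l) - r) and max as l + r, which pick the correctly signed zero under IEEE-754 round-to-nearest; NaN on the left selects the right operand. A sketch, valid only under that equal-compare precondition:]

#include <cassert>
#include <cmath>

// Precondition mirrored from the diff: l and r already compared equal,
// so both are +0.0 or -0.0 here.
double MinOfZeros(double l, double r) { return -((-l) - r); }
double MaxOfZeros(double l, double r) { return l + r; }

int main() {
  double pz = 0.0, nz = -0.0;
  assert(std::signbit(MinOfZeros(pz, nz)));   // min(+0, -0) == -0
  assert(std::signbit(MinOfZeros(nz, nz)));   // min(-0, -0) == -0
  assert(!std::signbit(MaxOfZeros(pz, nz)));  // max(+0, -0) == +0
  assert(std::signbit(MaxOfZeros(nz, nz)));   // max(-0, -0) == -0
}
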
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
@@ -1888,9 +1971,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AbortIfNotString(input);
__ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -2318,9 +2399,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
+ type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsFound() && lookup.type() == FIELD) {
+ if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2332,7 +2413,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
+ } else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
} else {
@@ -2518,16 +2599,32 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register elements = ToRegister(instr->elements());
- Register key = EmitLoadRegister(instr->key(), scratch0());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
- // Load the result.
- __ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
- __ addu(scratch, elements, scratch);
- uint32_t offset = FixedArray::kHeaderSize +
- (instr->additional_index() << kPointerSizeLog2);
- __ lw(result, FieldMemOperand(scratch, offset));
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ Register key = EmitLoadRegister(instr->key(), scratch);
+ // Even though the HLoadKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ lw(result, FieldMemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2550,8 +2647,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
DoubleRegister result = ToDoubleRegister(instr->result());
Register scratch = scratch0();
- int shift_size =
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
int constant_key = 0;
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
@@ -2564,14 +2662,15 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
if (key_is_constant) {
__ Addu(elements, elements,
- Operand(((constant_key + instr->additional_index()) << shift_size) +
+ Operand(((constant_key + instr->additional_index()) <<
+ element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
__ Addu(elements, elements, Operand(scratch));
__ Addu(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << shift_size)));
+ (instr->additional_index() << element_size_shift)));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2583,6 +2682,50 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset) {
+ if (additional_index != 0 && !key_is_constant) {
+ additional_index *= 1 << (element_size - shift_size);
+ __ Addu(scratch0(), key, Operand(additional_index));
+ }
+
+ if (key_is_constant) {
+ return MemOperand(base,
+ (constant_key << element_size) + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (shift_size >= 0) {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ __ srl(scratch0(), key, 1);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ }
+ }
+
+ if (shift_size >= 0) {
+ __ sll(scratch0(), scratch0(), shift_size);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ __ srl(scratch0(), scratch0(), 1);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ }
+}
+
+
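
[Editor's note: the address arithmetic PrepareKeyedOperand encapsulates, restated as plain integers. The element address is base + (key << shift) + offset, where a smi-tagged key uses shift = element_size_shift - 1 (the tag bit supplies one factor of two), and byte-sized elements with a smi key need shift -1, i.e. an untagging right shift:]

#include <cassert>
#include <cstdint>

uintptr_t ElementAddress(uintptr_t base, intptr_t key, bool key_is_smi,
                         int element_size_shift, int offset) {
  int shift = key_is_smi ? element_size_shift - 1 : element_size_shift;
  intptr_t scaled = (shift >= 0) ? (key << shift) : (key >> 1);  // shift == -1
  return base + scaled + offset;
}

int main() {
  // An untagged key 3 into a 4-byte-element array at base 0x1000:
  assert(ElementAddress(0x1000, 3, false, 2, 0) == 0x100C);
  // The same logical index as a smi (3 << 1 == 6) lands on the same element:
  assert(ElementAddress(0x1000, 6, true, 2, 0) == 0x100C);
  // Byte elements with a smi key: shift -1 untags by shifting right:
  assert(ElementAddress(0x1000, 6, true, 0, 0) == 0x1003);
}
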
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
@@ -2598,14 +2741,16 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << shift_size);
+ __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
@@ -2619,24 +2764,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
}
} else {
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ Addu(scratch, key, instr->additional_index());
- }
- MemOperand mem_operand(zero_reg);
- if (key_is_constant) {
- mem_operand =
- MemOperand(external_pointer,
- (constant_key << shift_size) + additional_offset);
- } else {
- if (instr->additional_index() == 0) {
- __ sll(scratch, key, shift_size);
- } else {
- __ sll(scratch, scratch, shift_size);
- }
- __ Addu(scratch, scratch, external_pointer);
- mem_operand = MemOperand(scratch);
- }
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ lb(result, mem_operand);
@@ -2656,11 +2787,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ lw(result, mem_operand);
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ }
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
@@ -2846,7 +2976,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -2875,7 +3005,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
}
@@ -2902,14 +3032,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(a1, function);
}
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- }
+ // Change context.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
@@ -3251,11 +3375,11 @@ void LCodeGen::DoRandom(LRandom* instr) {
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
- // a2: FixedArray of the global context's random seeds
+ // a2: FixedArray of the native context's random seeds
// Load state[0].
__ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
@@ -3544,11 +3668,47 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand) {
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
+ if (operand->IsRegister()) {
+ __ And(at, ToRegister(operand), Operand(kSmiTagMask));
+ DeoptimizeIf(ne, environment, at, Operand(zero_reg));
+ } else {
+ __ li(at, ToOperand(operand));
+ __ And(at, at, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, environment, at, Operand(zero_reg));
+ }
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptimizeIf(hs,
- instr->environment(),
- ToRegister(instr->index()),
- Operand(ToRegister(instr->length())));
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->length(),
+ instr->length());
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->index(),
+ instr->index());
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ li(at, Operand(Smi::FromInt(constant_index)));
+ } else {
+ __ li(at, Operand(constant_index));
+ }
+ DeoptimizeIf(hs,
+ instr->environment(),
+ at,
+ Operand(ToRegister(instr->length())));
+ } else {
+ DeoptimizeIf(hs,
+ instr->environment(),
+ ToRegister(instr->index()),
+ Operand(ToRegister(instr->length())));
+ }
}
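
[Editor's note: the subtlety the DoBoundsCheck hunk handles is that after bounds-check elimination the index or length may still be a tagged smi, and the unsigned compare is only meaningful if both sides use the same encoding — hence Smi::FromInt for the constant when the length is tagged. A sketch:]

#include <cassert>
#include <cstdint>

bool InBounds(uint32_t index, uint32_t length) { return index < length; }

int main() {
  uint32_t index = 5, length = 10;
  assert(InBounds(index, length));
  // A tagged index (5 << 1 == 10) compared against an untagged length 10
  // would spuriously fail, so the encodings must be made consistent:
  uint32_t tagged_index = index << 1, tagged_length = length << 1;
  assert(!InBounds(tagged_index, length));        // mixed encodings: wrong
  assert(InBounds(tagged_index, tagged_length));  // consistent: correct
}
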
@@ -3557,32 +3717,38 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset =
- (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
- + FixedArray::kHeaderSize;
- __ sw(value, FieldMemOperand(elements, offset));
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
} else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- if (instr->additional_index() != 0) {
- __ Addu(scratch,
- scratch,
- instr->additional_index() << kPointerSizeLog2);
+ // Even though the HLoadKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
}
- __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
+ __ sw(value, FieldMemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
__ RecordWrite(elements,
key,
value,
@@ -3614,9 +3780,11 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << shift_size) +
+ __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
@@ -3637,7 +3805,8 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
__ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
+ __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
}
@@ -3657,14 +3826,17 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << shift_size);
+ __ Addu(scratch0(), external_pointer, constant_key <<
+ element_size_shift);
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
@@ -3678,24 +3850,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
} else {
Register value(ToRegister(instr->value()));
- Register scratch = scratch0();
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ Addu(scratch, key, instr->additional_index());
- }
- MemOperand mem_operand(zero_reg);
- if (key_is_constant) {
- mem_operand = MemOperand(external_pointer,
- ((constant_key + instr->additional_index())
- << shift_size));
- } else {
- if (instr->additional_index() == 0) {
- __ sll(scratch, key, shift_size);
- } else {
- __ sll(scratch, scratch, shift_size);
- }
- __ Addu(scratch, scratch, external_pointer);
- mem_operand = MemOperand(scratch);
- }
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3918,12 +4076,26 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->InputAt(0);
+ LOperand* output = instr->result();
+
+ FPURegister dbl_scratch = double_scratch0();
+ __ mtc1(ToRegister(input), dbl_scratch);
+ __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
+}
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->InputAt(0),
+ SIGNED_INT32);
+ }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
@@ -3940,25 +4112,59 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->InputAt(0),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
+ __ SmiTag(reg, reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness) {
Label slow;
- Register src = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
FPURegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
Label done;
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ Xor(src, src, Operand(0x80000000));
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ Xor(src, src, Operand(0x80000000));
+ }
+ __ mtc1(src, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
+ } else {
+ __ mtc1(src, dbl_scratch);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
}
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
+
if (FLAG_inline_new) {
__ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t1, a3, t0, t2, &slow);
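
[Editor's note: when DoNumberTagU above takes the deferred path — a 32-bit unsigned value only fits in a smi if it is at most Smi::kMaxValue, which is 2^30 - 1 on 32-bit MIPS since one bit goes to the tag and one to the sign. A sketch of that range check:]

#include <cassert>
#include <cstdint>

const uint32_t kSmiMaxValue = (1u << 30) - 1;  // 1073741823

bool FitsInSmi(uint32_t value) { return value <= kSmiMaxValue; }

int main() {
  assert(FitsInSmi(1073741823u));   // tagged inline: value << 1
  assert(!FitsInSmi(1073741824u));  // deferred: allocate a heap number
  assert(!FitsInSmi(0x80000000u));  // not representable as an int32 either
}
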
@@ -4519,7 +4725,7 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Heap* heap = isolate()->heap();
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
@@ -4540,12 +4746,13 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
a2,
Operand(boilerplate_elements_kind));
}
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+
+ // Set up the parameters to the stub/runtime call.
+ __ LoadHeapObject(a3, literals);
__ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
- __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
+ __ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
__ Push(a3, a2, a1);
// Pick the right runtime function or stub to call.
@@ -4640,8 +4847,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
// We only support little endian mode...
- int32_t value_low = value & 0xFFFFFFFF;
- int32_t value_high = value >> 32;
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
+ int32_t value_high = static_cast<int32_t>(value >> 32);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ li(a2, Operand(value_low));
@@ -4756,15 +4963,13 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
- // a3 = JS function.
// t3 = literals array.
// a1 = regexp literal.
// a0 = regexp literal clone.
// a2 and t0-t2 are used as temporaries.
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(t3, instr->hydrogen()->literals());
__ lw(a1, FieldMemOperand(t3, literal_offset));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&materialized, ne, a1, Operand(at));
@@ -5163,12 +5368,22 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
- __ LoadInstanceDescriptors(map, result);
+ Register scratch = ToRegister(instr->scratch());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+ __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ jmp(&done);
+
+ __ bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result, scratch);
__ lw(result,
- FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+
+ __ bind(&done);
}
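
[Editor's note: the control flow added to DoForInCacheArray above, sketched: when the map's EnumLength is zero there is nothing to enumerate, so the code short-circuits to the canonical empty fixed array instead of touching the descriptor array. Container types here are illustrative stand-ins:]

#include <cassert>
#include <string>
#include <vector>

static const std::vector<std::string> kEmptyFixedArray;

const std::vector<std::string>& EnumCache(
    int enum_length, const std::vector<std::string>& cache) {
  if (enum_length == 0) return kEmptyFixedArray;  // skip the descriptor load
  return cache;
}

int main() {
  std::vector<std::string> cache = {"x", "y"};
  assert(EnumCache(2, cache).size() == 2);
  assert(EnumCache(0, cache).empty());
}
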
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 32a696bc30..aeafbcd74b 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -43,26 +43,25 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
- Zone* zone)
- : chunk_(chunk),
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : zone_(info->zone()),
+ chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4, zone),
- deopt_jump_table_(4, zone),
- deoptimization_literals_(8, zone),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- translations_(zone),
- deferred_(8, zone),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
- safepoints_(zone),
- zone_(zone),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -111,7 +110,12 @@ class LCodeGen BASE_EMBEDDED {
void FinishCode(Handle<Code> code);
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -129,6 +133,15 @@ class LCodeGen BASE_EMBEDDED {
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
+ MemOperand PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset);
+
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -154,7 +167,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -175,7 +188,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
void Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -244,7 +257,8 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -299,6 +313,10 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
+ void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -354,7 +372,8 @@ class LCodeGen BASE_EMBEDDED {
void EnsureSpaceForLazyDeopt();
- LChunk* const chunk_;
+ Zone* zone_;
+ LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -376,8 +395,6 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
- Zone* zone_;
-
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 842001ddf2..958bbc491a 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -407,24 +407,14 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32, graph->zone()),
- pointer_maps_(8, graph->zone()),
- inlined_closures_(1, graph->zone()) {
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if for a double-width slot.
if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index, zone());
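
[Editor's note: GetNextSpillIndex's double handling above, in miniature — a double needs two consecutive word-sized stack slots, so the allocator burns one extra index and returns the second, keeping the pair contiguous:]

#include <cassert>

struct SpillAllocator {
  int spill_slot_count = 0;
  int GetNextSpillIndex(bool is_double) {
    if (is_double) spill_slot_count++;  // reserve the partner slot
    return spill_slot_count++;
  }
};

int main() {
  SpillAllocator a;
  assert(a.GetNextSpillIndex(false) == 0);  // single word slot
  assert(a.GetNextSpillIndex(true) == 2);   // double: occupies slots 1 and 2
  assert(a.spill_slot_count == 3);
}
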
@@ -434,120 +424,9 @@ LOperand* LChunk::GetNextSpillSlot(bool is_double) {
}
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap, zone());
- index = instructions_.length();
- instructions_.Add(instr, zone());
- } else {
- index = instructions_.length();
- instructions_.Add(instr, zone());
- instructions_.Add(gap, zone());
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map(), zone());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id(), zone());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
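
The helpers removed here (they move into the shared `LChunk` base) encode parameters as negative indexes so they can never collide with spill slots, which count up from zero, while `ParameterAt` converts an argument index into a frame-pointer-relative byte offset (the `ebp` in the deleted comment is a leftover from the ia32 port; MIPS uses `fp`). A standalone sketch of both computations, assuming a 4-byte pointer:

```cpp
#include <cassert>
#include <cstdio>

const int kPointerSize = 4;  // 32-bit MIPS

// Parameters get indexes below zero so they are distinguishable from
// spill slots, which are numbered 0, 1, 2, ...
int GetParameterStackSlot(int index, int num_parameters) {
  int result = index - num_parameters - 1;
  assert(result < 0);
  return result;
}

// Byte offset of a parameter relative to the frame pointer in the
// arguments stub; index -1 is the receiver.
int ParameterAt(int index, int num_parameters) {
  assert(-1 <= index);
  return (1 + num_parameters - index) * kPointerSize;
}

int main() {
  const int params = 3;
  for (int i = 0; i <= params; ++i)
    printf("slot(%d) = %d\n", i, GetParameterStackSlot(i, params));
  printf("receiver offset = %d\n", ParameterAt(-1, params));  // 20
  printf("param 0 offset  = %d\n", ParameterAt(0, params));   // 16
}
```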
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(from, to, zone());
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
+LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -562,17 +441,8 @@ LChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -741,7 +611,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@@ -834,15 +704,20 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
right = UseRegisterAtStart(right_value);
}
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ if (may_deopt) {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
}
}
}
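
The old rule flags a possible deopt only for `SHR` by a constant 0: that is the one shift whose result can be a uint32 above `INT32_MAX`, which the untagged int32 representation cannot hold unless every use truncates back to int32. Under `FLAG_opt_safe_uint32_operations` the new code instead keeps such values in a uint32 representation and only deopts when the instruction is not marked `kUint32`. The arithmetic at issue, in plain C++:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -1;
  // JS `x >>> 0`: logical shift of the uint32 bit pattern by zero.
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;   // 4294967295
  // The result no longer fits the int32 representation optimized code
  // would otherwise keep it in:
  printf("uint32 result: %u\n", shifted);
  printf("fits int32?    %s\n",
         shifted <= static_cast<uint32_t>(INT32_MAX) ? "yes" : "no");
  // Truncating uses (e.g. `(x >>> 0) | 0`) are harmless, which is why
  // the builder only deopts when a use is not kTruncatingToInt32.
  printf("truncated:     %d\n", static_cast<int32_t>(shifted));  // -1
}
```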
@@ -975,8 +850,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result = new(zone()) LEnvironment(
@@ -1001,7 +876,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
- result->AddValue(op, value->representation());
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@@ -1417,6 +1294,25 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ right = UseOrConstantAtStart(instr->MostConstantOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@@ -1582,6 +1478,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@@ -1599,12 +1501,12 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), a0);
LDateField* result =
new(zone()) LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr);
+ return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterAtStart(instr->index());
+ LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
}
@@ -1688,7 +1590,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ LNumberTagU* result = new(zone()) LNumberTagU(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ } else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
@@ -1696,8 +1601,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else {
ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(Use(instr->value())));
+ }
}
}
UNREACHABLE();
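
`DoChange` now routes uint32-flagged values through the new `LNumberTagU`: on 32-bit targets a uint32 only fits a Smi when it is below 2^30, so tagging may have to allocate a heap number, which is why the instruction gets an environment and a pointer map. A sketch of the representable ranges (the constant follows the 32-bit Smi layout; the function is illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// 32-bit V8 Smi payload is 31 bits, so the largest Smi is 2^30 - 1.
const uint32_t kMaxSmiValueU = (1u << 30) - 1;

const char* TagStrategy(uint32_t value) {
  return value <= kMaxSmiValueU
             ? "SmiTag (cheap, no allocation)"
             : "NumberTagU (boxes into a heap number, may GC)";
}

int main() {
  printf("%u -> %s\n", 42u, TagStrategy(42u));
  printf("%u -> %s\n", 0x7FFFFFFFu, TagStrategy(0x7FFFFFFFu));
  printf("%u -> %s\n", 0xFFFFFFFFu, TagStrategy(0xFFFFFFFFu));
}
```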
@@ -1894,9 +1804,10 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
@@ -1906,7 +1817,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
@@ -1926,7 +1838,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
(representation.IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
@@ -1954,7 +1867,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
@@ -1971,7 +1885,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
@@ -1993,7 +1908,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
@@ -2249,7 +2165,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
@@ -2275,7 +2191,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
- instr->is_construct());
+ instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
@@ -2321,8 +2237,9 @@ LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
+ LOperand* scratch = TempRegister();
return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
+ new(zone()) LForInCacheArray(map, scratch)));
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index e21c921eec..367cf2a90c 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -108,6 +108,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -115,7 +116,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -132,10 +132,13 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
+ V(MathMinMax) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
+ V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@@ -162,6 +165,7 @@ class LCodeGen;
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -255,8 +259,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
@@ -268,6 +270,11 @@ class LInstruction: public ZoneObject {
#endif
private:
+ // Iterator interface.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -288,7 +295,6 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- int InputCount() { return I; }
LOperand* InputAt(int i) { return inputs_[i]; }
int TempCount() { return T; }
@@ -298,6 +304,9 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ virtual int InputCount() { return I; }
};
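
The hunks above demote `InputCount`/`InputAt` to private virtuals reachable only through a `friend class InputIterator`, so callers walk an instruction's inputs through one sanctioned interface instead of poking the virtuals directly. A minimal sketch of that pattern (everything except the two virtuals is illustrative):

```cpp
#include <cstdio>

class Instruction {
 public:
  virtual ~Instruction() {}
 private:
  // Only the iterator may touch the raw accessors.
  friend class InputIterator;
  virtual int InputCount() = 0;
  virtual int InputAt(int i) = 0;
};

class InputIterator {
 public:
  explicit InputIterator(Instruction* instr) : instr_(instr), current_(0) {}
  bool Done() const { return current_ >= instr_->InputCount(); }
  int Current() const { return instr_->InputAt(current_); }
  void Advance() { ++current_; }
 private:
  Instruction* instr_;
  int current_;
};

class AddInstruction : public Instruction {
 private:
  virtual int InputCount() { return 2; }
  virtual int InputAt(int i) { return i == 0 ? 10 : 32; }
};

int main() {
  AddInstruction add;
  for (InputIterator it(&add); !it.Done(); it.Advance())
    printf("input: %d\n", it.Current());
}
```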
@@ -843,6 +852,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -977,6 +987,16 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
};
+class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
@@ -1048,6 +1068,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
+class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@@ -1557,6 +1589,16 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
+class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -1567,6 +1609,16 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
+class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberTagU(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -2143,13 +2195,15 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LForInCacheArray(LOperand* map) {
+ explicit LForInCacheArray(LOperand* map, LOperand* scratch) {
inputs_[0] = map;
+ temps_[0] = scratch;
}
LOperand* map() { return inputs_[0]; }
+ LOperand* scratch() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
@@ -2188,65 +2242,13 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LChunk: public ZoneObject {
+class LPlatformChunk: public LChunk {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph);
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure, zone());
- }
-
- Zone* zone() const { return graph_->zone(); }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2265,10 +2267,10 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
@@ -2283,7 +2285,7 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@@ -2293,7 +2295,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2384,7 +2386,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@@ -2396,7 +2398,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
+ BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
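
Replacing the raw `int pending_deoptimization_ast_id_` with `BailoutId` turns "no id" from a magic constant (`AstNode::kNoNumber`) into an explicit `BailoutId::None()` and keeps bailout ids from being confused with ordinary integers. A stripped-down version of such a wrapper (a sketch in the spirit of V8's class, not its actual definition):

```cpp
#include <cassert>
#include <cstdio>

// Typed-id wrapper: an int with a distinguished None value and no
// implicit conversions.
class BailoutId {
 public:
  explicit BailoutId(int id) : id_(id) {}
  static BailoutId None() { return BailoutId(kNoneId); }
  bool IsNone() const { return id_ == kNoneId; }
  int ToInt() const { assert(!IsNone()); return id_; }
  bool operator==(const BailoutId& other) const { return id_ == other.id_; }
 private:
  static const int kNoneId = -1;
  int id_;
};

int main() {
  BailoutId pending = BailoutId::None();
  printf("pending.IsNone() = %d\n", pending.IsNone());
  pending = BailoutId(7);            // set when a Simulate is encountered
  printf("pending id = %d\n", pending.ToInt());
  // pending = 7;  // would not compile: constructor is explicit
}
```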
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 51b3a3823f..7ded494999 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -361,28 +361,29 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
scratch, Operand(zero_reg));
#endif
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
+ // Read the first word and compare to the native_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kGlobalContextMapRootIndex);
- Check(eq, "JSGlobalObject::global_context should be a global context.",
+ LoadRoot(at, Heap::kNativeContextMapRootIndex);
+ Check(eq, "JSGlobalObject::native_context should be a native context.",
holder_reg, Operand(at));
pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
Branch(&same_contexts, eq, scratch, Operand(at));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
@@ -392,13 +393,13 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
holder_reg, Operand(at));
lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kGlobalContextMapRootIndex);
- Check(eq, "JSGlobalObject::global_context should be a global context.",
+ LoadRoot(at, Heap::kNativeContextMapRootIndex);
+ Check(eq, "JSGlobalObject::native_context should be a native context.",
holder_reg, Operand(at));
  // Restoring at is not needed; at is reloaded below.
pop(holder_reg); // Restore holder.
// Restore at to holder's context.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
}
// Check that the security token in the calling global object is
@@ -2559,7 +2560,7 @@ void MacroAssembler::Call(Address target,
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond,
Register rs,
const Operand& rt,
@@ -2571,7 +2572,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond,
Register rs,
const Operand& rt,
@@ -2580,7 +2581,7 @@ void MacroAssembler::Call(Handle<Code> code,
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
@@ -3911,7 +3912,8 @@ void MacroAssembler::CallStub(CodeStub* stub,
const Operand& r2,
BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(),
+ cond, r1, r2, bd);
}
@@ -4281,7 +4283,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
lw(target, FieldMemOperand(target,
@@ -4450,8 +4452,9 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ lw(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
lw(scratch,
@@ -4459,7 +4462,8 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- Branch(no_map_match, ne, map_in_out, Operand(scratch));
+ lw(at, FieldMemOperand(scratch, offset));
+ Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
@@ -4495,11 +4499,12 @@ void MacroAssembler::LoadInitialArrayMap(
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ lw(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
lw(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
lw(function, MemOperand(function, Context::SlotOffset(index)));
}
@@ -5289,55 +5294,63 @@ void MacroAssembler::EnsureNotWhite(
void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- lw(descriptors,
- FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi);
+ Register descriptors,
+ Register scratch) {
+ Register temp = descriptors;
+ lw(temp, FieldMemOperand(map, Map::kTransitionsOrBackPointerOffset));
+
+ Label ok, fail;
+ CheckMap(temp,
+ scratch,
+ isolate()->factory()->fixed_array_map(),
+ &fail,
+ DONT_DO_SMI_CHECK);
+ lw(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset));
+ jmp(&ok);
+ bind(&fail);
LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
- bind(&not_smi);
+ bind(&ok);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
}
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
- // Preload a couple of values used in the loop.
Register empty_fixed_array_value = t2;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = t3;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- mov(a1, a0);
- bind(&next);
+ Label next, start;
+ mov(a2, a0);
- // Check that there are no elements. Register a1 contains the
- // current JS object we've reached through the prototype chain.
- lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
- Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+
+ EnumLength(a3, a1);
+ Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in a2 for the subsequent
- // prototype load.
- lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(a3, call_runtime);
+ jmp(&start);
- // Check that there is an enum cache in the non-empty instance
- // descriptors (a3). This is the case if the next enumeration
- // index field does not contain a smi.
- lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(a3, call_runtime);
+ bind(&next);
+ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- Branch(&check_prototype, eq, a1, Operand(a0));
- lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
+ EnumLength(a3, a1);
+ Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
+
+ bind(&start);
+
+  // Check that there are no elements. Register a2 contains the current JS
+  // object we've reached through the prototype chain.
+ lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
+ Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
- lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
- Branch(&next, ne, a1, Operand(null_value));
+ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ Branch(&next, ne, a2, Operand(null_value));
}
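
The new `EnumLength` extracts the cached enumerable-property count from the low bits of the map's bit field 3 (the `STATIC_ASSERT` guarantees no shift is required), and `CheckEnumCache` walks the prototype chain, falling back to the runtime if any map's cache is invalid or, for objects other than the receiver, non-empty. The bit extraction, modeled standalone; note the real code keeps the value Smi-tagged, and the field width and invalid marker below are illustrative, not V8's actual layout:

```cpp
#include <cstdio>

// Hypothetical layout: enum length in the low 11 bits of bit_field3,
// with all-ones meaning "invalid / not yet computed".
const unsigned kEnumLengthShift = 0;  // what the STATIC_ASSERT pins down
const unsigned kEnumLengthMask = (1u << 11) - 1;
const unsigned kInvalidEnumCache = kEnumLengthMask;

unsigned EnumLength(unsigned bit_field3) {
  return (bit_field3 >> kEnumLengthShift) & kEnumLengthMask;
}

int main() {
  unsigned bf3 = 0xABCD0000u | 5;  // upper bits: unrelated map flags
  printf("enum length = %u\n", EnumLength(bf3));                      // 5
  printf("invalid? %d\n", EnumLength(bf3) == kInvalidEnumCache);      // 0
  printf("invalid? %d\n", EnumLength(0xFFFFFFFFu) == kInvalidEnumCache);
}
```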
@@ -5378,7 +5391,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// In 0-255 range, round and truncate.
bind(&in_bounds);
- round_w_d(temp_double_reg, input_reg);
+ cvt_w_d(temp_double_reg, input_reg);
mfc1(result_reg, temp_double_reg);
bind(&done);
}
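
`ClampDoubleToUint8` saturates a double into [0, 255]; the switch from `round_w_d` to `cvt_w_d` means the in-range conversion now follows the FCSR rounding mode (round-to-nearest-even by default) rather than an explicit round instruction. The saturation logic in portable form, as a sketch; the real code does this with branches and FPU instructions:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Portable model of ClampDoubleToUint8: saturate to [0, 255] and round
// in-range values under the current FP rounding mode (ties-to-even by
// default, matching the FPU).
uint8_t ClampDoubleToUint8(double input) {
  if (!(input > 0.0)) return 0;      // negative, zero, or NaN
  if (input >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(input));
}

int main() {
  const double samples[] = {-3.5, 0.0, 0.5, 1.5, 127.7, 254.5, 300.0};
  for (double d : samples)
    printf("clamp(%.1f) = %u\n", d, ClampDoubleToUint8(d));
}
```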
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index bb3dc01e39..2a77d6ce23 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -108,7 +108,7 @@ inline MemOperand ContextOperand(Register context, int index) {
inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
+ return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
@@ -182,11 +182,11 @@ class MacroAssembler: public Assembler {
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
static int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -806,8 +806,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -1396,7 +1396,10 @@ class MacroAssembler: public Assembler {
DoubleRegister temp_double_reg);
- void LoadInstanceDescriptors(Register map, Register descriptors);
+ void LoadInstanceDescriptors(Register map,
+ Register descriptors,
+ Register scratch);
+ void EnumLength(Register dst, Register map);
// Activation support.
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
index d3fff0db2b..5446f52439 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -38,13 +38,7 @@
namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerMIPS();
- virtual ~RegExpMacroAssemblerMIPS();
-};
-#else // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 66d0da71fa..cf87f93602 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -2068,10 +2068,15 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// Rounding modes are not yet supported.
ASSERT((FCSR_ & 3) == 0);
// In rounding mode 0 it should behave like ROUND.
- case ROUND_W_D: // Round double to word.
+ case ROUND_W_D: // Round double to word (round half to even).
{
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ double rounded = floor(fs + 0.5);
int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPUInvalidResult);
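
The simulator fix makes `ROUND_W_D` match the hardware's round-half-to-even: `floor(fs + 0.5)` alone rounds every tie upward, so when the result is odd and the input was exactly halfway, the code steps back down to the even neighbor. The same logic as a standalone function, with the tie cases spelled out:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Round-half-to-even, written the way the simulator's ROUND_W_D path does it.
int32_t RoundWD(double fs) {
  double rounded = std::floor(fs + 0.5);       // rounds ties up
  int32_t result = static_cast<int32_t>(rounded);
  if ((result & 1) != 0 && result - fs == 0.5) {
    // Halfway between two integers and we landed on the odd one:
    // move to the even neighbor below.
    result--;
  }
  return result;
}

int main() {
  const double samples[] = {0.5, 1.5, 2.5, 3.5, -0.5, -1.5, 2.4, 2.6};
  for (double d : samples)
    printf("round(%+.1f) = %d\n", d, RoundWD(d));
  // 0.5 -> 0, 1.5 -> 2, 2.5 -> 2, 3.5 -> 4, -0.5 -> 0, -1.5 -> -2
}
```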
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 967ce4a605..391f8e072b 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -270,11 +270,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
__ lw(prototype,
- FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ __ lw(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
__ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ lw(prototype,
@@ -291,13 +292,14 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(prototype,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ASSERT(!prototype.is(at));
- __ li(at, isolate->global());
+ __ li(at, isolate->global_object());
__ Branch(miss, ne, prototype, Operand(at));
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(isolate->global_context()->get(index)));
+ JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ li(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -1234,6 +1236,44 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch1));
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch1;
+ __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3;
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lw(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ Branch(miss, ne, scratch2, Operand(callback));
+}
+
+
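
`GenerateDictionaryLoadCallback` re-probes the property dictionary and then verifies that the stored value really is the expected `AccessorInfo`: after `GeneratePositiveLookup`, `scratch3` points into the found entry, and the value word sits one pointer past the entry's key, which is how `kValueOffset` is composed above. The offset arithmetic, modeled on a flat array; the entry shape (key, value, details) follows the stub, the header size here is illustrative:

```cpp
#include <cstdio>

// Model of a StringDictionary as a flat array of pointer-sized words:
// [header words][ (key, value, details) * capacity ].
const int kPointerSize = 4;
const int kHeaderWords = 3;   // illustrative header size
const int kEntrySize = 3;     // key, value, details

// Byte offset of entry i's value word, mirroring
// kElementsStartOffset + kPointerSize in the stub.
int ValueOffset(int entry) {
  int elements_start = (kHeaderWords + entry * kEntrySize) * kPointerSize;
  return elements_start + kPointerSize;  // value follows the key
}

int main() {
  for (int entry = 0; entry < 3; ++entry)
    printf("entry %d: value at byte offset %d\n", entry, ValueOffset(entry));
}
```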
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1241,6 +1281,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -1251,6 +1292,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ }
+
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
__ push(receiver);
@@ -1318,7 +1364,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
@@ -1391,7 +1437,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), v0, holder_reg,
@@ -1531,7 +1577,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2078,7 +2124,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2212,7 +2258,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2313,7 +2359,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2534,7 +2580,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2593,7 +2639,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2623,14 +2669,17 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -2638,19 +2687,13 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- ra : return address
// -----------------------------------
Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(a1, a3, Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(a1, a3, &miss);
- }
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(a1, &miss, a3);
+ CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
// Stub never generated for non-global objects that require access
// checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(a1); // Receiver.
__ li(a3, Operand(callback)); // Callback info.
@@ -2668,38 +2711,41 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<JSObject> receiver,
- Handle<JSFunction> setter,
- Handle<String> name) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
// -- a2 : name
// -- ra : return address
// -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
-
{
- FrameScope scope(masm(), StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(a0);
- // Call the JavaScript getter with the receiver and the value on the stack.
- __ push(a1);
- __ push(a0);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ push(a1);
+ __ push(a0);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
// We have to return the passed value, not the return value of the setter.
__ pop(v0);
@@ -2708,13 +2754,38 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(a1, &miss);
+ CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
+
+ GenerateStoreViaSetter(masm(), setter);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2759,7 +2830,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2804,7 +2875,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2838,7 +2909,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2860,7 +2931,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2875,16 +2946,53 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -- ra : return address
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
+ GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, t1, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(a0);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
}
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
Handle<String> name,
Handle<JSObject> receiver,
@@ -2901,25 +3009,13 @@ Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
__ JumpIfSmi(a0, &miss);
CheckPrototypes(receiver, a0, holder, a3, t0, a1, name, &miss);
- {
- FrameScope scope(masm(), StackFrame::INTERNAL);
-
- // Call the JavaScript getter with the receiver on the stack.
- __ push(a0);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
+ GenerateLoadViaGetter(masm(), getter);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2939,7 +3035,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2962,7 +3058,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -3003,7 +3099,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -3025,7 +3121,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -3044,12 +3140,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
// Check the key is the cached one.
__ Branch(&miss, ne, a0, Operand(name));
- GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
- &miss);
+ GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, t1, callback,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3073,7 +3169,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -3098,7 +3194,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -3118,7 +3214,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3143,7 +3239,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3167,7 +3263,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
__ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3187,7 +3283,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3214,7 +3310,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3253,7 +3349,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -3277,7 +3375,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3315,7 +3413,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index c7f0dccb7b..a5331a014d 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -154,6 +154,7 @@ var FUNCTION_TYPE = 'function';
var REGEXP_TYPE = 'regexp';
var ERROR_TYPE = 'error';
var PROPERTY_TYPE = 'property';
+var INTERNAL_PROPERTY_TYPE = 'internalProperty';
var FRAME_TYPE = 'frame';
var SCRIPT_TYPE = 'script';
var CONTEXT_TYPE = 'context';
@@ -176,10 +177,8 @@ PropertyType.ConstantFunction = 2;
PropertyType.Callbacks = 3;
PropertyType.Handler = 4;
PropertyType.Interceptor = 5;
-PropertyType.MapTransition = 6;
-PropertyType.ExternalArrayTransition = 7;
-PropertyType.ConstantTransition = 8;
-PropertyType.NullDescriptor = 9;
+PropertyType.Transition = 6;
+PropertyType.Nonexistent = 7;
// Different attributes for a property.
@@ -214,6 +213,7 @@ var ScopeType = { Global: 0,
// - RegExpMirror
// - ErrorMirror
// - PropertyMirror
+// - InternalPropertyMirror
// - FrameMirror
// - ScriptMirror
@@ -360,6 +360,15 @@ Mirror.prototype.isProperty = function() {
/**
+ * Check whether the mirror reflects an internal property.
+ * @returns {boolean} True if the mirror reflects an internal property
+ */
+Mirror.prototype.isInternalProperty = function() {
+ return this instanceof InternalPropertyMirror;
+};
+
+
+/**
* Check whether the mirror reflects a stack frame.
* @returns {boolean} True if the mirror reflects a stack frame
*/
@@ -596,23 +605,6 @@ ObjectMirror.prototype.protoObject = function() {
};
-/**
- * Return the primitive value if this is object of Boolean, Number or String
- * type (but not Date). Otherwise return undefined.
- */
-ObjectMirror.prototype.primitiveValue = function() {
- if (!IS_STRING_WRAPPER(this.value_) && !IS_NUMBER_WRAPPER(this.value_) &&
- !IS_BOOLEAN_WRAPPER(this.value_)) {
- return void 0;
- }
- var primitiveValue = %_ValueOf(this.value_);
- if (IS_UNDEFINED(primitiveValue)) {
- return void 0;
- }
- return MakeMirror(primitiveValue);
-};
-
-
ObjectMirror.prototype.hasNamedInterceptor = function() {
// Get information on interceptors for this object.
var x = %GetInterceptorInfo(this.value_);
@@ -703,7 +695,7 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
* Return the properties for this object as an array of PropertyMirror objects.
* @param {number} kind Indicate whether named, indexed or both kinds of
* properties are requested
- * @param {number} limit Limit the number of properties returend to the
+ * @param {number} limit Limit the number of properties returned to the
+ * @param {number} limit Limit the number of properties returned to the
specified value
* @return {Array} Property mirrors for this object
*/
@@ -718,6 +710,16 @@ ObjectMirror.prototype.properties = function(kind, limit) {
};
+/**
+ * Return the internal properties for this object as an array of
+ * InternalPropertyMirror objects.
+ * @return {Array} Property mirrors for this object
+ */
+ObjectMirror.prototype.internalProperties = function() {
+ return ObjectMirror.GetInternalProperties(this.value_);
+};
+
+
ObjectMirror.prototype.property = function(name) {
var details = %DebugGetPropertyDetails(this.value_, %ToString(name));
if (details) {
@@ -792,6 +794,37 @@ ObjectMirror.prototype.toText = function() {
/**
+ * Return the internal properties of the value, such as [[PrimitiveValue]] of
+ * scalar wrapper objects and the binding properties of a bound function.
+ * This method is static so that it can be used from the Debug API with bare
+ * values, without creating mirrors first.
+ * @return {Array} array (possibly empty) of InternalProperty instances
+ */
+ObjectMirror.GetInternalProperties = function(value) {
+ if (IS_STRING_WRAPPER(value) || IS_NUMBER_WRAPPER(value) ||
+ IS_BOOLEAN_WRAPPER(value)) {
+ var primitiveValue = %_ValueOf(value);
+ return [new InternalPropertyMirror("[[PrimitiveValue]]", primitiveValue)];
+ } else if (IS_FUNCTION(value)) {
+ var bindings = %BoundFunctionGetBindings(value);
+ var result = [];
+ if (bindings && IS_ARRAY(bindings)) {
+ result.push(new InternalPropertyMirror("[[TargetFunction]]",
+ bindings[0]));
+ result.push(new InternalPropertyMirror("[[BoundThis]]", bindings[1]));
+ var boundArgs = [];
+ for (var i = 2; i < bindings.length; i++) {
+ boundArgs.push(bindings[i]);
+ }
+ result.push(new InternalPropertyMirror("[[BoundArgs]]", boundArgs));
+ }
+ return result;
+ }
+ return [];
+};
+
+
+/**
* Mirror object for functions.
* @param {function} value The function object reflected by this mirror.
* @constructor
@@ -1270,6 +1303,33 @@ PropertyMirror.prototype.isNative = function() {
};
+/**
+ * Mirror object for internal properties. An internal property reflects a
+ * property that is not accessible from user code, such as [[BoundThis]] of a
+ * bound function. Their names are merely symbolic.
+ * @param {string} name The name of the property
+ * @param {Object} value The value of the property
+ * @constructor
+ * @extends Mirror
+ */
+function InternalPropertyMirror(name, value) {
+ %_CallFunction(this, INTERNAL_PROPERTY_TYPE, Mirror);
+ this.name_ = name;
+ this.value_ = value;
+}
+inherits(InternalPropertyMirror, Mirror);
+
+
+InternalPropertyMirror.prototype.name = function() {
+ return this.name_;
+};
+
+
+InternalPropertyMirror.prototype.value = function() {
+ return MakeMirror(this.value_, false);
+};
+
+
var kFrameDetailsFrameIdIndex = 0;
var kFrameDetailsReceiverIndex = 1;
var kFrameDetailsFunctionIndex = 2;
@@ -1750,6 +1810,15 @@ FrameMirror.prototype.localsText = function() {
};
+FrameMirror.prototype.restart = function() {
+ var result = %LiveEditRestartFrame(this.break_id_, this.index_);
+ if (IS_UNDEFINED(result)) {
+ result = "Failed to find requested frame";
+ }
+ return result;
+};
+
+
FrameMirror.prototype.toText = function(opt_locals) {
var result = '';
result += '#' + (this.index() <= 9 ? '0' : '') + this.index();
@@ -2195,7 +2264,8 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
break;
case PROPERTY_TYPE:
- throw new Error('PropertyMirror cannot be serialized independeltly');
+ case INTERNAL_PROPERTY_TYPE:
+ throw new Error('PropertyMirror cannot be serialized independently');
break;
case FRAME_TYPE:
@@ -2271,7 +2341,8 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
* "prototypeObject":{"ref":<number>},
* "namedInterceptor":<boolean>,
* "indexedInterceptor":<boolean>,
- * "properties":[<properties>]}
+ * "properties":[<properties>],
+ * "internalProperties":[<internal properties>]}
*/
JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
details) {
@@ -2282,11 +2353,6 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
content.protoObject = this.serializeReference(mirror.protoObject());
content.prototypeObject = this.serializeReference(mirror.prototypeObject());
- var primitiveValue = mirror.primitiveValue();
- if (!IS_UNDEFINED(primitiveValue)) {
- content.primitiveValue = this.serializeReference(primitiveValue);
- }
-
// Add flags to indicate whether there are interceptors.
if (mirror.hasNamedInterceptor()) {
content.namedInterceptor = true;
@@ -2348,6 +2414,15 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
}
content.properties = p;
+
+ var internalProperties = mirror.internalProperties();
+ if (internalProperties.length > 0) {
+ var ip = [];
+ for (var i = 0; i < internalProperties.length; i++) {
+ ip.push(this.serializeInternalProperty_(internalProperties[i]));
+ }
+ content.internalProperties = ip;
+ }
};
@@ -2415,6 +2490,33 @@ JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
};
+/**
+ * Serialize internal property information to the following JSON format for
+ * building the array of properties.
+ *
+ * {"name":"<property name>",
+ *  "ref":<number>}
+ *
+ * For example:
+ *
+ * {"name":"[[BoundThis]]","ref":117}
+ *
+ * @param {InternalPropertyMirror} propertyMirror The property to serialize.
+ * @returns {Object} Protocol object representing the property.
+ */
+JSONProtocolSerializer.prototype.serializeInternalProperty_ =
+ function(propertyMirror) {
+ var result = {};
+
+ result.name = propertyMirror.name();
+ var propertyValue = propertyMirror.value();
+ if (this.inlineRefs_() && propertyValue.isValue()) {
+ result.value = this.serializeReferenceWithDisplayData_(propertyValue);
+ } else {
+ result.ref = propertyValue.handle();
+ }
+ return result;
+};
+
+
JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
content.index = mirror.index();
content.receiver = this.serializeReference(mirror.receiver());
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index e426a58092..275c8acc83 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <errno.h>
+#include <stdio.h>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include <bzlib.h>
#endif
@@ -33,6 +35,7 @@
#include "v8.h"
#include "bootstrapper.h"
+#include "flags.h"
#include "natives.h"
#include "platform.h"
#include "serialize.h"
@@ -308,6 +311,62 @@ int main(int argc, char** argv) {
"\nException thrown while compiling natives - see above.\n\n");
exit(1);
}
+ if (i::FLAG_extra_code != NULL) {
+ context->Enter();
+    // Capture up to 100 stack frames if an exception is thrown.
+ V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
+ HandleScope scope;
+ const char* name = i::FLAG_extra_code;
+ FILE* file = i::OS::FOpen(name, "rb");
+ if (file == NULL) {
+ fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
+ exit(1);
+ }
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size;) {
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
+      if (read <= 0) {
+ fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno);
+ exit(1);
+ }
+ i += read;
+ }
+ fclose(file);
+ Local<String> source = String::New(chars);
+ TryCatch try_catch;
+ Local<Script> script = Script::Compile(source);
+ if (try_catch.HasCaught()) {
+ fprintf(stderr, "Failure compiling '%s' (see above)\n", name);
+ exit(1);
+ }
+ script->Run();
+ if (try_catch.HasCaught()) {
+ fprintf(stderr, "Failure running '%s'\n", name);
+ Local<Message> message = try_catch.Message();
+ Local<String> message_string = message->Get();
+ Local<String> message_line = message->GetSourceLine();
+ int len = 2 + message_string->Utf8Length() + message_line->Utf8Length();
+      char* buf = new char[len];
+ message_string->WriteUtf8(buf);
+ fprintf(stderr, "%s at line %d\n", buf, message->GetLineNumber());
+ message_line->WriteUtf8(buf);
+ fprintf(stderr, "%s\n", buf);
+ int from = message->GetStartColumn();
+ int to = message->GetEndColumn();
+ int i;
+ for (i = 0; i < from; i++) fprintf(stderr, " ");
+ for ( ; i <= to; i++) fprintf(stderr, "^");
+ fprintf(stderr, "\n");
+ exit(1);
+ }
+ context->Exit();
+ }
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
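
The extra-code path above compiles and runs one additional script while the snapshot heap is being built (selected, judging by i::FLAG_extra_code, via a matching command-line flag). A hypothetical extra-code file is plain JavaScript whose side effects end up baked into the startup snapshot:

    // Hypothetical extra-code file: evaluated once at snapshot time, so the
    // resulting objects live in the heap image every isolate starts from.
    this.precomputedSquares = (function() {
      var squares = [];
      for (var i = 0; i < 10; i++) squares.push(i * i);
      return squares;
    })();
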
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 5aac50319d..9761ed160b 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -302,8 +302,15 @@ void Map::MapVerify() {
instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
+ DescriptorArray* descriptors = instance_descriptors();
+ for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
+ ASSERT_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
+ }
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
- SLOW_ASSERT(instance_descriptors()->IsConsistentWithBackPointers(this));
+ if (HasTransitionArray()) {
+ SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
+ SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
+ }
}
@@ -334,8 +341,8 @@ void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
- VerifyObjectField(kIcTotalCountOffset);
- VerifyObjectField(kIcWithTypeinfoCountOffset);
+ VerifyObjectField(kStorage1Offset);
+ VerifyObjectField(kStorage2Offset);
VerifyHeapPointer(type_feedback_cells());
}
@@ -371,11 +378,10 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
void JSModule::JSModuleVerify() {
- Object* v = context();
- if (v->IsHeapObject()) {
- VerifyHeapPointer(v);
- }
- CHECK(v->IsUndefined() || v->IsModuleContext());
+ VerifyObjectField(kContextOffset);
+ VerifyObjectField(kScopeInfoOffset);
+ CHECK(context()->IsUndefined() ||
+ Context::cast(context())->IsModuleContext());
}
@@ -502,6 +508,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
CHECK(IsSharedFunctionInfo());
VerifyObjectField(kNameOffset);
VerifyObjectField(kCodeOffset);
+ VerifyObjectField(kOptimizedCodeMapOffset);
VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kFunctionDataOffset);
@@ -513,7 +520,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
void JSGlobalProxy::JSGlobalProxyVerify() {
CHECK(IsJSGlobalProxy());
JSObjectVerify();
- VerifyObjectField(JSGlobalProxy::kContextOffset);
+ VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
CHECK(HasFastObjectElements());
@@ -898,13 +905,13 @@ bool DescriptorArray::IsSortedNoDuplicates() {
String* current_key = NULL;
uint32_t current = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
- String* key = GetKey(i);
+ String* key = GetSortedKey(i);
if (key == current_key) {
PrintDescriptors();
return false;
}
current_key = key;
- uint32_t hash = GetKey(i)->Hash();
+ uint32_t hash = GetSortedKey(i)->Hash();
if (hash < current) {
PrintDescriptors();
return false;
@@ -915,41 +922,39 @@ bool DescriptorArray::IsSortedNoDuplicates() {
}
+bool TransitionArray::IsSortedNoDuplicates() {
+ String* current_key = NULL;
+ uint32_t current = 0;
+ for (int i = 0; i < number_of_transitions(); i++) {
+ String* key = GetSortedKey(i);
+ if (key == current_key) {
+ PrintTransitions();
+ return false;
+ }
+ current_key = key;
+ uint32_t hash = GetSortedKey(i)->Hash();
+ if (hash < current) {
+ PrintTransitions();
+ return false;
+ }
+ current = hash;
+ }
+ return true;
+}
+
+
static bool CheckOneBackPointer(Map* current_map, Object* target) {
return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
}
-bool DescriptorArray::IsConsistentWithBackPointers(Map* current_map) {
- for (int i = 0; i < number_of_descriptors(); ++i) {
- switch (GetType(i)) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- if (!CheckOneBackPointer(current_map, GetValue(i))) {
- return false;
- }
- break;
- case CALLBACKS: {
- Object* object = GetValue(i);
- if (object->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(object);
- if (!CheckOneBackPointer(current_map, accessors->getter())) {
- return false;
- }
- if (!CheckOneBackPointer(current_map, accessors->setter())) {
- return false;
- }
- }
- break;
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- break;
- }
+bool TransitionArray::IsConsistentWithBackPointers(Map* current_map) {
+ if (HasElementsTransition() &&
+ !CheckOneBackPointer(current_map, elements_transition())) {
+ return false;
+ }
+ for (int i = 0; i < number_of_transitions(); ++i) {
+ if (!CheckOneBackPointer(current_map, GetTarget(i))) return false;
}
return true;
}
@@ -996,17 +1001,16 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
}
-void Map::ZapInstanceDescriptors() {
- DescriptorArray* descriptors = instance_descriptors();
- if (descriptors == GetHeap()->empty_descriptor_array()) return;
- MemsetPointer(descriptors->data_start(),
+void Map::ZapTransitions() {
+ TransitionArray* transition_array = transitions();
+ MemsetPointer(transition_array->data_start(),
GetHeap()->the_hole_value(),
- descriptors->length());
+ transition_array->length());
}
void Map::ZapPrototypeTransitions() {
- FixedArray* proto_transitions = prototype_transitions();
+ FixedArray* proto_transitions = GetPrototypeTransitions();
MemsetPointer(proto_transitions->data_start(),
GetHeap()->the_hole_value(),
proto_transitions->length());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 4f66af28aa..3b9bb0a137 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -47,6 +47,7 @@
#include "v8memory.h"
#include "factory.h"
#include "incremental-marking.h"
+#include "transitions-inl.h"
namespace v8 {
namespace internal {
@@ -524,6 +525,11 @@ bool Object::IsDescriptorArray() {
}
+bool Object::IsTransitionArray() {
+ return IsFixedArray();
+}
+
+
bool Object::IsDeoptimizationInputData() {
// Must be a fixed array.
if (!IsFixedArray()) return false;
@@ -562,31 +568,23 @@ bool Object::IsTypeFeedbackCells() {
bool Object::IsContext() {
- if (Object::IsHeapObject()) {
- Map* map = HeapObject::cast(this)->map();
- Heap* heap = map->GetHeap();
- return (map == heap->function_context_map() ||
- map == heap->catch_context_map() ||
- map == heap->with_context_map() ||
- map == heap->global_context_map() ||
- map == heap->block_context_map() ||
- map == heap->module_context_map());
- }
- return false;
-}
-
-
-bool Object::IsGlobalContext() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->global_context_map();
+ if (!Object::IsHeapObject()) return false;
+ Map* map = HeapObject::cast(this)->map();
+ Heap* heap = map->GetHeap();
+ return (map == heap->function_context_map() ||
+ map == heap->catch_context_map() ||
+ map == heap->with_context_map() ||
+ map == heap->native_context_map() ||
+ map == heap->block_context_map() ||
+ map == heap->module_context_map() ||
+ map == heap->global_context_map());
}
-bool Object::IsModuleContext() {
+bool Object::IsNativeContext() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->module_context_map();
+ HeapObject::cast(this)->GetHeap()->native_context_map();
}
@@ -1333,8 +1331,8 @@ MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
ElementsKind from_kind = current_map->elements_kind();
if (from_kind == to_kind) return current_map;
- Context* global_context = isolate->context()->global_context();
- Object* maybe_array_maps = global_context->js_array_maps();
+ Context* native_context = isolate->context()->native_context();
+ Object* maybe_array_maps = native_context->js_array_maps();
if (maybe_array_maps->IsFixedArray()) {
FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
if (array_maps->get(from_kind) == current_map) {
@@ -1602,6 +1600,7 @@ void JSObject::InitializeBody(Map* map,
bool JSObject::HasFastProperties() {
+ ASSERT(properties()->IsDictionary() == map()->is_dictionary_map());
return !properties()->IsDictionary();
}
@@ -1665,6 +1664,23 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
}
+
+void Object::VerifyApiCallResultType() {
+#if ENABLE_EXTRA_CHECKS
+ if (!(IsSmi() ||
+ IsString() ||
+ IsSpecObject() ||
+ IsHeapNumber() ||
+ IsUndefined() ||
+ IsTrue() ||
+ IsFalse() ||
+ IsNull())) {
+ FATAL("API call returned invalid object");
+ }
+#endif // ENABLE_EXTRA_CHECKS
+}
+
+
FixedArrayBase* FixedArrayBase::cast(Object* object) {
ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
return reinterpret_cast<FixedArrayBase*>(object);
@@ -1863,7 +1879,7 @@ void FixedArray::set_unchecked(Heap* heap,
void FixedArray::set_null_unchecked(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!HEAP->InNewSpace(heap->null_value()));
+ ASSERT(!heap->InNewSpace(heap->null_value()));
WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
}
@@ -1874,87 +1890,119 @@ Object** FixedArray::data_start() {
bool DescriptorArray::IsEmpty() {
- ASSERT(this->IsSmi() ||
- this->MayContainTransitions() ||
+ ASSERT(length() >= kFirstIndex ||
this == HEAP->empty_descriptor_array());
- return this->IsSmi() || length() < kFirstIndex;
+ return length() < kFirstIndex;
}
-bool DescriptorArray::MayContainTransitions() {
- return length() >= kTransitionsIndex;
-}
+// Perform a binary search in a fixed array. Low and high are entry indices.
+// If there are three entries in this array, it should be called with low=0
+// and high=2.
+template<typename T>
+int BinarySearch(T* array, String* name, int low, int high) {
+ uint32_t hash = name->Hash();
+ int limit = high;
+ ASSERT(low <= high);
-int DescriptorArray::bit_field3_storage() {
- Object* storage = READ_FIELD(this, kBitField3StorageOffset);
- return Smi::cast(storage)->value();
-}
+ while (low != high) {
+ int mid = (low + high) / 2;
+ String* mid_name = array->GetSortedKey(mid);
+ uint32_t mid_hash = mid_name->Hash();
-void DescriptorArray::set_bit_field3_storage(int value) {
- ASSERT(this->MayContainTransitions());
- WRITE_FIELD(this, kBitField3StorageOffset, Smi::FromInt(value));
-}
+ if (mid_hash >= hash) {
+ high = mid;
+ } else {
+ low = mid + 1;
+ }
+ }
+ for (; low <= limit; ++low) {
+ int sort_index = array->GetSortedKeyIndex(low);
+ String* entry = array->GetKey(sort_index);
+ if (entry->Hash() != hash) break;
+ if (entry->Equals(name)) return sort_index;
+ }
-void DescriptorArray::NoIncrementalWriteBarrierSwap(FixedArray* array,
- int first,
- int second) {
- Object* tmp = array->get(first);
- NoIncrementalWriteBarrierSet(array, first, array->get(second));
- NoIncrementalWriteBarrierSet(array, second, tmp);
+ return T::kNotFound;
}
+// Perform a linear search in this fixed array. len is the number of valid
+// entries.
+template<typename T>
+int LinearSearch(T* array, String* name, int len) {
+ uint32_t hash = name->Hash();
+ for (int number = 0; number < len; number++) {
+ int sorted_index = array->GetSortedKeyIndex(number);
+ String* entry = array->GetKey(sorted_index);
+ uint32_t current_hash = entry->Hash();
+ if (current_hash > hash) break;
+ if (current_hash == hash && entry->Equals(name)) return sorted_index;
+ }
+ return T::kNotFound;
+}
-int DescriptorArray::Search(String* name) {
- SLOW_ASSERT(IsSortedNoDuplicates());
- // Check for empty descriptor array.
- int nof = number_of_descriptors();
- if (nof == 0) return kNotFound;
+template<typename T>
+int Search(T* array, String* name) {
+ SLOW_ASSERT(array->IsSortedNoDuplicates());
+
+ int nof = array->number_of_entries();
+ if (nof == 0) return T::kNotFound;
// Fast case: do linear search for small arrays.
const int kMaxElementsForLinearSearch = 8;
- if (StringShape(name).IsSymbol() && nof < kMaxElementsForLinearSearch) {
- return LinearSearch(EXPECT_SORTED, name, nof);
+ if (nof < kMaxElementsForLinearSearch) {
+ return LinearSearch(array, name, nof);
}
// Slow case: perform binary search.
- return BinarySearch(name, 0, nof - 1);
+ return BinarySearch(array, name, 0, nof - 1);
+}
+
+
+int DescriptorArray::Search(String* name) {
+ return internal::Search(this, name);
}
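
A rough JavaScript sketch of the hybrid strategy above, assuming entries carry a precomputed hash and sortedIndex lists entry indices in hash order (both names hypothetical):

    // Linear scan for small arrays, binary search on the hash plus a
    // collision scan for larger ones; returns the entry index or -1.
    function search(entries, sortedIndex, name, nameHash) {
      var n = sortedIndex.length;
      if (n < 8) {  // mirrors kMaxElementsForLinearSearch
        for (var i = 0; i < n; i++) {
          var e = entries[sortedIndex[i]];
          if (e.hash > nameHash) break;
          if (e.hash === nameHash && e.name === name) return sortedIndex[i];
        }
        return -1;
      }
      var low = 0, high = n - 1;
      while (low !== high) {  // find first entry with hash >= nameHash
        var mid = (low + high) >> 1;
        if (entries[sortedIndex[mid]].hash >= nameHash) high = mid;
        else low = mid + 1;
      }
      for (; low < n; low++) {
        var c = entries[sortedIndex[low]];
        if (c.hash !== nameHash) break;
        if (c.name === name) return sortedIndex[low];
      }
      return -1;
    }
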
int DescriptorArray::SearchWithCache(String* name) {
- int number = GetIsolate()->descriptor_lookup_cache()->Lookup(this, name);
+ if (number_of_descriptors() == 0) return kNotFound;
+
+ DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
+ int number = cache->Lookup(this, name);
+
if (number == DescriptorLookupCache::kAbsent) {
number = Search(name);
- GetIsolate()->descriptor_lookup_cache()->Update(this, name, number);
+ cache->Update(this, name, number);
}
+
return number;
}
-Map* DescriptorArray::elements_transition_map() {
- if (!this->MayContainTransitions()) {
- return NULL;
- }
- Object* transition_map = get(kTransitionsIndex);
- if (transition_map == Smi::FromInt(0)) {
- return NULL;
- } else {
- return Map::cast(transition_map);
- }
+void Map::LookupDescriptor(JSObject* holder,
+ String* name,
+ LookupResult* result) {
+ DescriptorArray* descriptors = this->instance_descriptors();
+ int number = descriptors->SearchWithCache(name);
+ if (number == DescriptorArray::kNotFound) return result->NotFound();
+ result->DescriptorResult(holder, descriptors->GetDetails(number), number);
}
-void DescriptorArray::set_elements_transition_map(
- Map* transition_map, WriteBarrierMode mode) {
- ASSERT(this->length() > kTransitionsIndex);
- Heap* heap = GetHeap();
- WRITE_FIELD(this, kTransitionsOffset, transition_map);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kTransitionsOffset, transition_map, mode);
- ASSERT(DescriptorArray::cast(this));
+void Map::LookupTransition(JSObject* holder,
+ String* name,
+ LookupResult* result) {
+ if (HasTransitionArray()) {
+ TransitionArray* transition_array = transitions();
+ int number = transition_array->Search(name);
+ if (number != TransitionArray::kNotFound) {
+ return result->TransitionResult(holder, number);
+ }
+ }
+ result->NotFound();
}
@@ -1972,6 +2020,23 @@ String* DescriptorArray::GetKey(int descriptor_number) {
}
+int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
+ return GetDetails(descriptor_number).pointer();
+}
+
+
+String* DescriptorArray::GetSortedKey(int descriptor_number) {
+ return GetKey(GetSortedKeyIndex(descriptor_number));
+}
+
+
+void DescriptorArray::SetSortedKey(int pointer, int descriptor_number) {
+ int details_index = ToDetailsIndex(pointer);
+ PropertyDetails details = PropertyDetails(Smi::cast(get(details_index)));
+ set_unchecked(details_index, details.set_pointer(descriptor_number).AsSmi());
+}
+
+
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return HeapObject::RawField(
@@ -1986,12 +2051,6 @@ Object* DescriptorArray::GetValue(int descriptor_number) {
}
-void DescriptorArray::SetNullValueUnchecked(int descriptor_number, Heap* heap) {
- ASSERT(descriptor_number < number_of_descriptors());
- set_null_unchecked(heap, ToValueIndex(descriptor_number));
-}
-
-
PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
Object* details = get(ToDetailsIndex(descriptor_number));
@@ -1999,12 +2058,6 @@ PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
}
-void DescriptorArray::SetDetailsUnchecked(int descriptor_number, Smi* value) {
- ASSERT(descriptor_number < number_of_descriptors());
- set_unchecked(ToDetailsIndex(descriptor_number), value);
-}
-
-
PropertyType DescriptorArray::GetType(int descriptor_number) {
return GetDetails(descriptor_number).type();
}
@@ -2033,41 +2086,6 @@ AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
}
-bool DescriptorArray::IsProperty(int descriptor_number) {
- Entry entry(this, descriptor_number);
- return IsPropertyDescriptor(&entry);
-}
-
-
-bool DescriptorArray::IsTransitionOnly(int descriptor_number) {
- switch (GetType(descriptor_number)) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- return true;
- case CALLBACKS: {
- Object* value = GetValue(descriptor_number);
- if (!value->IsAccessorPair()) return false;
- AccessorPair* accessors = AccessorPair::cast(value);
- return accessors->getter()->IsMap() && accessors->setter()->IsMap();
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- return false;
- }
- UNREACHABLE(); // Keep the compiler happy.
- return false;
-}
-
-
-bool DescriptorArray::IsNullDescriptor(int descriptor_number) {
- return GetType(descriptor_number) == NULL_DESCRIPTOR;
-}
-
-
void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
desc->Init(GetKey(descriptor_number),
GetValue(descriptor_number),
@@ -2080,6 +2098,9 @@ void DescriptorArray::Set(int descriptor_number,
const WhitenessWitness&) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
+ ASSERT(desc->GetDetails().descriptor_index() <=
+ number_of_descriptors());
+ ASSERT(desc->GetDetails().descriptor_index() > 0);
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
@@ -2093,28 +2114,42 @@ void DescriptorArray::Set(int descriptor_number,
}
-void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
- int first, int second) {
- NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
- NoIncrementalWriteBarrierSwap(this,
- ToValueIndex(first),
- ToValueIndex(second));
- NoIncrementalWriteBarrierSwap(this,
- ToDetailsIndex(first),
- ToDetailsIndex(second));
+void DescriptorArray::Append(Descriptor* desc,
+ const WhitenessWitness& witness,
+ int number_of_set_descriptors) {
+ int enumeration_index = number_of_set_descriptors + 1;
+ desc->SetEnumerationIndex(enumeration_index);
+ Set(number_of_set_descriptors, desc, witness);
+
+ uint32_t hash = desc->GetKey()->Hash();
+
+ int insertion;
+
+ for (insertion = number_of_set_descriptors; insertion > 0; --insertion) {
+ String* key = GetSortedKey(insertion - 1);
+ if (key->Hash() <= hash) break;
+ SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
+ }
+
+ SetSortedKey(insertion, number_of_set_descriptors);
+}
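
A rough sketch of the invariant Append maintains, assuming a hypothetical hashOf helper: entries stay in insertion order while a parallel index array is kept hash-sorted by shifting larger-hash indices up, exactly like the loop above:

    function append(entries, sorted, key, value, hashOf) {
      entries.push({ key: key, value: value });
      var hash = hashOf(key);
      var insertion = sorted.length;
      sorted.push(0);  // grow, then shift larger-hash indices up one slot
      while (insertion > 0 &&
             hashOf(entries[sorted[insertion - 1]].key) > hash) {
        sorted[insertion] = sorted[insertion - 1];
        insertion--;
      }
      sorted[insertion] = entries.length - 1;
    }
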
+
+
+void DescriptorArray::SwapSortedKeys(int first, int second) {
+ int first_key = GetSortedKeyIndex(first);
+ SetSortedKey(first, GetSortedKeyIndex(second));
+ SetSortedKey(second, first_key);
}
-DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
+FixedArray::WhitenessWitness::WhitenessWitness(FixedArray* array)
: marking_(array->GetHeap()->incremental_marking()) {
marking_->EnterNoMarkingScope();
- if (array->number_of_descriptors() > 0) {
- ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
- }
+ ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
}
-DescriptorArray::WhitenessWitness::~WhitenessWitness() {
+FixedArray::WhitenessWitness::~WhitenessWitness() {
marking_->LeaveNoMarkingScope();
}
@@ -2409,9 +2444,10 @@ String* SlicedString::parent() {
}
-void SlicedString::set_parent(String* parent) {
+void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
ASSERT(parent->IsSeqString() || parent->IsExternalString());
WRITE_FIELD(this, kParentOffset, parent);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
}
@@ -2915,16 +2951,12 @@ bool Map::has_non_instance_prototype() {
void Map::set_function_with_prototype(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kFunctionWithPrototype));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kFunctionWithPrototype));
- }
+ set_bit_field3(FunctionWithPrototype::update(bit_field3(), value));
}
bool Map::function_with_prototype() {
- return ((1 << kFunctionWithPrototype) & bit_field3()) != 0;
+ return FunctionWithPrototype::decode(bit_field3());
}
@@ -2969,15 +3001,22 @@ bool Map::attached_to_shared_function_info() {
void Map::set_is_shared(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kIsShared));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kIsShared));
- }
+ set_bit_field3(IsShared::update(bit_field3(), value));
}
+
bool Map::is_shared() {
- return ((1 << kIsShared) & bit_field3()) != 0;
+ return IsShared::decode(bit_field3());
+}
+
+
+void Map::set_dictionary_map(bool value) {
+ set_bit_field3(DictionaryMap::update(bit_field3(), value));
+}
+
+
+bool Map::is_dictionary_map() {
+ return DictionaryMap::decode(bit_field3());
}
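
The accessors above, and the Code accessors that follow, all use the same BitField pattern: several flags and small integers packed into one 32-bit word, each owning a fixed bit range. A minimal sketch of that pattern, with made-up bit positions:

    function makeBitField(shift, size) {
      var mask = ((1 << size) - 1) << shift;
      return {
        decode: function(word) { return (word & mask) >>> shift; },
        update: function(word, value) {
          return (word & ~mask) | ((value << shift) & mask);
        }
      };
    }
    var IsSharedBit = makeBitField(1, 1);  // position is illustrative only
    var bits = IsSharedBit.update(0, 1);   // set the flag
    IsSharedBit.decode(bits);              // => 1
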
@@ -3025,7 +3064,7 @@ Code::ExtraICState Code::extra_ic_state() {
}
-PropertyType Code::type() {
+Code::StubType Code::type() {
return ExtractTypeFromFlags(flags());
}
@@ -3042,7 +3081,8 @@ int Code::major_key() {
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
kind() == TO_BOOLEAN_IC);
- return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
+ return StubMajorKeyField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
@@ -3053,7 +3093,9 @@ void Code::set_major_key(int major) {
kind() == COMPARE_IC ||
kind() == TO_BOOLEAN_IC);
ASSERT(0 <= major && major < 256);
- WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = StubMajorKeyField::update(previous, major);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
@@ -3155,39 +3197,50 @@ void Code::set_profiler_ticks(int ticks) {
unsigned Code::stack_slots() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kStackSlotsOffset);
+ return StackSlotsField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_stack_slots(unsigned slots) {
+  CHECK(slots < (1 << kStackSlotsBitCount));
ASSERT(kind() == OPTIMIZED_FUNCTION);
- WRITE_UINT32_FIELD(this, kStackSlotsOffset, slots);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = StackSlotsField::update(previous, slots);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
unsigned Code::safepoint_table_offset() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
+ return SafepointTableOffsetField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
void Code::set_safepoint_table_offset(unsigned offset) {
+  CHECK(offset < (1 << kSafepointTableOffsetBitCount));
ASSERT(kind() == OPTIMIZED_FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = SafepointTableOffsetField::update(previous, offset);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
unsigned Code::stack_check_table_offset() {
ASSERT_EQ(FUNCTION, kind());
- return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
+ return StackCheckTableOffsetField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
void Code::set_stack_check_table_offset(unsigned offset) {
ASSERT_EQ(FUNCTION, kind());
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = StackCheckTableOffsetField::update(previous, offset);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
@@ -3206,85 +3259,106 @@ void Code::set_check_type(CheckType value) {
byte Code::unary_op_type() {
ASSERT(is_unary_op_stub());
- return READ_BYTE_FIELD(this, kUnaryOpTypeOffset);
+ return UnaryOpTypeField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_unary_op_type(byte value) {
ASSERT(is_unary_op_stub());
- WRITE_BYTE_FIELD(this, kUnaryOpTypeOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = UnaryOpTypeField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::binary_op_type() {
ASSERT(is_binary_op_stub());
- return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
+ return BinaryOpTypeField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_binary_op_type(byte value) {
ASSERT(is_binary_op_stub());
- WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = BinaryOpTypeField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::binary_op_result_type() {
ASSERT(is_binary_op_stub());
- return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
+ return BinaryOpResultTypeField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_binary_op_result_type(byte value) {
ASSERT(is_binary_op_stub());
- WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = BinaryOpResultTypeField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::compare_state() {
ASSERT(is_compare_ic_stub());
- return READ_BYTE_FIELD(this, kCompareStateOffset);
+ return CompareStateField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_compare_state(byte value) {
ASSERT(is_compare_ic_stub());
- WRITE_BYTE_FIELD(this, kCompareStateOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = CompareStateField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::compare_operation() {
ASSERT(is_compare_ic_stub());
- return READ_BYTE_FIELD(this, kCompareOperationOffset);
+ return CompareOperationField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_compare_operation(byte value) {
ASSERT(is_compare_ic_stub());
- WRITE_BYTE_FIELD(this, kCompareOperationOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = CompareOperationField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::to_boolean_state() {
ASSERT(is_to_boolean_ic_stub());
- return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
+ return ToBooleanStateField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_to_boolean_state(byte value) {
ASSERT(is_to_boolean_ic_stub());
- WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = ToBooleanStateField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
bool Code::has_function_cache() {
ASSERT(kind() == STUB);
- return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
+ return HasFunctionCacheField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_has_function_cache(bool flag) {
ASSERT(kind() == STUB);
- WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = HasFunctionCacheField::update(previous, flag);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
@@ -3297,7 +3371,7 @@ bool Code::is_inline_cache_stub() {
Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
- PropertyType type,
+ StubType type,
int argc,
InlineCacheHolderFlag holder) {
// Extra IC state is only allowed for call IC stubs or for store IC
@@ -3318,7 +3392,7 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
- PropertyType type,
+ StubType type,
ExtraICState extra_ic_state,
InlineCacheHolderFlag holder,
int argc) {
@@ -3341,7 +3415,7 @@ Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
}
-PropertyType Code::ExtractTypeFromFlags(Flags flags) {
+Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
@@ -3392,159 +3466,234 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
DescriptorArray* Map::instance_descriptors() {
- Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
- if (object->IsSmi()) {
- return GetHeap()->empty_descriptor_array();
- } else {
- return DescriptorArray::cast(object);
- }
+ if (!HasTransitionArray()) return GetHeap()->empty_descriptor_array();
+ return transitions()->descriptors();
}
-void Map::init_instance_descriptors() {
- WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, Smi::FromInt(0));
+// If the map does not yet have a transition array, install a new empty one
+// with room for an elements transition.
+static MaybeObject* EnsureHasTransitionArray(Map* map) {
+ if (map->HasTransitionArray()) return map;
+
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions = TransitionArray::Allocate(0);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ map->set_transitions(transitions);
+ return transitions;
}
-void Map::clear_instance_descriptors() {
- Object* object = READ_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset);
- if (!object->IsSmi()) {
-#ifdef DEBUG
- ZapInstanceDescriptors();
-#endif
- WRITE_FIELD(
- this,
- kInstanceDescriptorsOrBitField3Offset,
- Smi::FromInt(DescriptorArray::cast(object)->bit_field3_storage()));
- }
+MaybeObject* Map::SetDescriptors(DescriptorArray* value,
+ WriteBarrierMode mode) {
+ ASSERT(!is_shared());
+ MaybeObject* maybe_failure = EnsureHasTransitionArray(this);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
+ transitions()->set_descriptors(value, mode);
+ return this;
}
-void Map::set_instance_descriptors(DescriptorArray* value,
- WriteBarrierMode mode) {
- Object* object = READ_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset);
- Heap* heap = GetHeap();
- if (value == heap->empty_descriptor_array()) {
- clear_instance_descriptors();
- return;
- } else {
- if (object->IsSmi()) {
- value->set_bit_field3_storage(Smi::cast(object)->value());
- } else {
- value->set_bit_field3_storage(
- DescriptorArray::cast(object)->bit_field3_storage());
- }
- }
- ASSERT(!is_shared());
+MaybeObject* Map::InitializeDescriptors(DescriptorArray* descriptors) {
#ifdef DEBUG
- if (value != instance_descriptors()) {
- ZapInstanceDescriptors();
+ int len = descriptors->number_of_descriptors();
+ ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
+ SLOW_ASSERT(descriptors->IsSortedNoDuplicates());
+
+ bool used_indices[DescriptorArray::kMaxNumberOfDescriptors];
+ for (int i = 0; i < len; ++i) used_indices[i] = false;
+
+ // Ensure that all enumeration indexes between 1 and length occur uniquely in
+ // the descriptor array.
+ for (int i = 0; i < len; ++i) {
+ int enum_index = descriptors->GetDetails(i).descriptor_index() -
+ PropertyDetails::kInitialIndex;
+ ASSERT(0 <= enum_index && enum_index < len);
+ ASSERT(!used_indices[enum_index]);
+ used_indices[enum_index] = true;
}
#endif
- WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
+
+ MaybeObject* maybe_failure = SetDescriptors(descriptors);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
+ SetNumberOfOwnDescriptors(descriptors->number_of_descriptors());
+
+ return this;
}
-int Map::bit_field3() {
- Object* object = READ_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset);
- if (object->IsSmi()) {
- return Smi::cast(object)->value();
+SMI_ACCESSORS(Map, bit_field3, kBitField3Offset)
+
+
+void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) {
+ Object* back_pointer = GetBackPointer();
+#ifdef DEBUG
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ if (object->IsTransitionArray()) {
+ ZapTransitions();
} else {
- return DescriptorArray::cast(object)->bit_field3_storage();
+ ASSERT(object->IsMap() || object->IsUndefined());
}
+#endif
+ WRITE_FIELD(this, kTransitionsOrBackPointerOffset, back_pointer);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kTransitionsOrBackPointerOffset, back_pointer, mode);
}
-void Map::set_bit_field3(int value) {
- ASSERT(Smi::IsValid(value));
- Object* object = READ_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset);
- if (object->IsSmi()) {
- WRITE_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset,
- Smi::FromInt(value));
- } else {
- DescriptorArray::cast(object)->set_bit_field3_storage(value);
- }
+void Map::AppendDescriptor(Descriptor* desc,
+ const DescriptorArray::WhitenessWitness& witness) {
+ DescriptorArray* descriptors = instance_descriptors();
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ ASSERT(number_of_own_descriptors < descriptors->number_of_descriptors());
+ descriptors->Append(desc, witness, number_of_own_descriptors);
+ SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
}
Object* Map::GetBackPointer() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- return FixedArray::cast(object)->get(kProtoTransitionBackPointerOffset);
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ if (object->IsDescriptorArray()) {
+ return TransitionArray::cast(object)->back_pointer_storage();
} else {
+ ASSERT(object->IsMap() || object->IsUndefined());
return object;
}
}
+bool Map::HasElementsTransition() {
+ return HasTransitionArray() && transitions()->HasElementsTransition();
+}
+
+
+bool Map::HasTransitionArray() {
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ return object->IsTransitionArray();
+}
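
A rough sketch of the overloaded slot these accessors manage: a single field holds either the back pointer itself or a transition array that stores the back pointer on the map's behalf (field and helper names here are hypothetical):

    function getBackPointer(map) {
      var slot = map.transitionsOrBackPointer;
      return isTransitionArray(slot) ? slot.backPointerStorage : slot;
    }
    function setBackPointer(map, value) {
      var slot = map.transitionsOrBackPointer;
      if (isTransitionArray(slot)) slot.backPointerStorage = value;
      else map.transitionsOrBackPointer = value;
    }
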
+
+
Map* Map::elements_transition_map() {
- return instance_descriptors()->elements_transition_map();
+ return transitions()->elements_transition();
}
-void Map::set_elements_transition_map(Map* transitioned_map) {
- return instance_descriptors()->set_elements_transition_map(transitioned_map);
+bool Map::CanHaveMoreTransitions() {
+ if (!HasTransitionArray()) return true;
+ return FixedArray::SizeFor(transitions()->length() +
+ TransitionArray::kTransitionSize)
+ <= Page::kMaxNonCodeHeapObjectSize;
}
-void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
- Heap* heap = GetHeap();
- ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
- (value->IsMap() && GetBackPointer()->IsUndefined()));
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- FixedArray::cast(object)->set(
- kProtoTransitionBackPointerOffset, value, mode);
- } else {
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
- }
+MaybeObject* Map::AddTransition(String* key, Map* target) {
+ if (HasTransitionArray()) return transitions()->CopyInsert(key, target);
+ return TransitionArray::NewWith(key, target);
}
-FixedArray* Map::prototype_transitions() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- return FixedArray::cast(object);
- } else {
+void Map::SetTransition(int transition_index, Map* target) {
+ transitions()->SetTarget(transition_index, target);
+}
+
+
+MaybeObject* Map::set_elements_transition_map(Map* transitioned_map) {
+ MaybeObject* allow_elements = EnsureHasTransitionArray(this);
+ if (allow_elements->IsFailure()) return allow_elements;
+ transitions()->set_elements_transition(transitioned_map);
+ return this;
+}
+
+
+FixedArray* Map::GetPrototypeTransitions() {
+ if (!HasTransitionArray()) return GetHeap()->empty_fixed_array();
+ if (!transitions()->HasPrototypeTransitions()) {
return GetHeap()->empty_fixed_array();
}
+ return transitions()->GetPrototypeTransitions();
}
-void Map::set_prototype_transitions(FixedArray* value, WriteBarrierMode mode) {
- Heap* heap = GetHeap();
- ASSERT(value != heap->empty_fixed_array());
- value->set(kProtoTransitionBackPointerOffset, GetBackPointer());
+MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
+ MaybeObject* allow_prototype = EnsureHasTransitionArray(this);
+ if (allow_prototype->IsFailure()) return allow_prototype;
#ifdef DEBUG
- if (value != prototype_transitions()) {
+ if (HasPrototypeTransitions()) {
+ ASSERT(GetPrototypeTransitions() != proto_transitions);
ZapPrototypeTransitions();
}
#endif
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
+ transitions()->SetPrototypeTransitions(proto_transitions);
+ return this;
+}
+
+
+bool Map::HasPrototypeTransitions() {
+ return HasTransitionArray() && transitions()->HasPrototypeTransitions();
+}
+
+
+TransitionArray* Map::transitions() {
+ ASSERT(HasTransitionArray());
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ return TransitionArray::cast(object);
+}
+
+
+void Map::set_transitions(TransitionArray* transition_array,
+ WriteBarrierMode mode) {
+ transition_array->set_descriptors(instance_descriptors());
+ transition_array->set_back_pointer_storage(GetBackPointer());
+#ifdef DEBUG
+ if (HasTransitionArray()) {
+ ASSERT(transitions() != transition_array);
+ ZapTransitions();
+ }
+#endif
+
+ WRITE_FIELD(this, kTransitionsOrBackPointerOffset, transition_array);
CONDITIONAL_WRITE_BARRIER(
- heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
+ GetHeap(), this, kTransitionsOrBackPointerOffset, transition_array, mode);
}
-void Map::init_prototype_transitions(Object* undefined) {
+void Map::init_back_pointer(Object* undefined) {
ASSERT(undefined->IsUndefined());
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, undefined);
+ WRITE_FIELD(this, kTransitionsOrBackPointerOffset, undefined);
}
-HeapObject* Map::unchecked_prototype_transitions() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- return reinterpret_cast<HeapObject*>(object);
+void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
+ ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
+ ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
+ (value->IsMap() && GetBackPointer()->IsUndefined()));
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ if (object->IsTransitionArray()) {
+ TransitionArray::cast(object)->set_back_pointer_storage(value);
+ } else {
+ WRITE_FIELD(this, kTransitionsOrBackPointerOffset, value);
+ CONDITIONAL_WRITE_BARRIER(
+ GetHeap(), this, kTransitionsOrBackPointerOffset, value, mode);
+ }
+}
+
+
+// The slot can hold a Smi (no transitions), a normal transition array, or a
+// transition array whose header has been overwritten with a Smi (as happens
+// while it is being iterated).
+TransitionArray* Map::unchecked_transition_array() {
+ Object* object = *HeapObject::RawField(this,
+ Map::kTransitionsOrBackPointerOffset);
+ TransitionArray* transition_array = static_cast<TransitionArray*>(object);
+ return transition_array;
+}
+
+
+HeapObject* Map::UncheckedPrototypeTransitions() {
+ ASSERT(HasTransitionArray());
+ ASSERT(unchecked_transition_array()->HasPrototypeTransitions());
+ return unchecked_transition_array()->UncheckedPrototypeTransitions();
}
@@ -3556,10 +3705,11 @@ ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
+ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
-ACCESSORS(JSGlobalProxy, context, Object, kContextOffset)
+ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
@@ -3648,6 +3798,8 @@ ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
+ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
+ kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
@@ -3682,6 +3834,10 @@ BOOL_ACCESSORS(SharedFunctionInfo,
kAllowLazyCompilation)
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
+ allows_lazy_compilation_without_context,
+ kAllowLazyCompilationWithoutContext)
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
uses_arguments,
kUsesArguments)
BOOL_ACCESSORS(SharedFunctionInfo,
@@ -3858,6 +4014,18 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
kDontOptimize)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
+
+void SharedFunctionInfo::BeforeVisitingPointers() {
+ if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
+
+ // Flush optimized code map on major GC.
+  // Note: we may experiment with rebuilding the map or with retaining entries
+  // that should survive, since we iterate through optimized functions anyway.
+ set_optimized_code_map(Smi::FromInt(0));
+}
+
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -4025,7 +4193,7 @@ void SharedFunctionInfo::TryReenableOptimization() {
bool JSFunction::IsBuiltin() {
- return context()->global()->IsJSBuiltinsObject();
+ return context()->global_object()->IsJSBuiltinsObject();
}
@@ -4050,6 +4218,18 @@ bool JSFunction::IsMarkedForLazyRecompilation() {
}
+bool JSFunction::IsMarkedForParallelRecompilation() {
+ return code() ==
+ GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile);
+}
+
+
+bool JSFunction::IsInRecompileQueue() {
+ return code() == GetIsolate()->builtins()->builtin(
+ Builtins::kInRecompileQueue);
+}
+
+
Code* JSFunction::code() {
return Code::cast(unchecked_code());
}
@@ -4081,10 +4261,10 @@ void JSFunction::ReplaceCode(Code* code) {
// Add/remove the function from the list of optimized functions for this
// context based on the state change.
if (!was_optimized && is_optimized) {
- context()->global_context()->AddOptimizedFunction(this);
+ context()->native_context()->AddOptimizedFunction(this);
}
if (was_optimized && !is_optimized) {
- context()->global_context()->RemoveOptimizedFunction(this);
+ context()->native_context()->RemoveOptimizedFunction(this);
}
}
@@ -4127,12 +4307,12 @@ void JSFunction::set_initial_map(Map* value) {
MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
Map* initial_map) {
- Context* global_context = context()->global_context();
+ Context* native_context = context()->native_context();
Object* array_function =
- global_context->get(Context::ARRAY_FUNCTION_INDEX);
+ native_context->get(Context::ARRAY_FUNCTION_INDEX);
if (array_function->IsJSFunction() &&
this == JSFunction::cast(array_function)) {
- // Replace all of the cached initial array maps in the global context with
+ // Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
Heap* heap = GetHeap();
MaybeObject* maybe_maps =
@@ -4149,12 +4329,12 @@ MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
Map* new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
MaybeObject* maybe_new_map =
- current_map->CreateNextElementsTransition(next_kind);
+ current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
maps->set(next_kind, new_map);
current_map = new_map;
}
- global_context->set_js_array_maps(maps);
+ native_context->set_js_array_maps(maps);
}
set_initial_map(initial_map);
return this;
@@ -4297,6 +4477,7 @@ void Foreign::set_foreign_address(Address value) {
ACCESSORS(JSModule, context, Object, kContextOffset)
+ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
JSModule* JSModule::cast(Object* obj) {
@@ -4740,7 +4921,12 @@ bool String::AsArrayIndex(uint32_t* index) {
Object* JSReceiver::GetPrototype() {
- return HeapObject::cast(this)->map()->prototype();
+ return map()->prototype();
+}
+
+
+Object* JSReceiver::GetConstructor() {
+ return map()->constructor();
}
@@ -4852,7 +5038,9 @@ void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
Object* value,
PropertyDetails details) {
- ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0);
+ ASSERT(!key->IsString() ||
+ details.IsDeleted() ||
+ details.dictionary_index() > 0);
int index = HashTable<Shape, Key>::EntryToIndex(entry);
AssertNoAllocation no_gc;
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
@@ -5014,13 +5202,13 @@ MaybeObject* FixedDoubleArray::Copy() {
}
-void TypeFeedbackCells::SetAstId(int index, Smi* id) {
- set(1 + index * 2, id);
+void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
+ set(1 + index * 2, Smi::FromInt(id.ToInt()));
}
-Smi* TypeFeedbackCells::AstId(int index) {
- return Smi::cast(get(1 + index * 2));
+TypeFeedbackId TypeFeedbackCells::AstId(int index) {
+ return TypeFeedbackId(Smi::cast(get(1 + index * 2))->value());
}
@@ -5049,9 +5237,84 @@ Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
}
-SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
-SMI_ACCESSORS(TypeFeedbackInfo, ic_with_type_info_count,
- kIcWithTypeinfoCountOffset)
+int TypeFeedbackInfo::ic_total_count() {
+ int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ return ICTotalCountField::decode(current);
+}
+
+
+void TypeFeedbackInfo::set_ic_total_count(int count) {
+ int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ value = ICTotalCountField::update(value,
+ ICTotalCountField::decode(count));
+ WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
+}
+
+
+int TypeFeedbackInfo::ic_with_type_info_count() {
+ int current = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ return ICsWithTypeInfoCountField::decode(current);
+}
+
+
+void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
+ int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int new_count = ICsWithTypeInfoCountField::decode(value) + delta;
+  // We can get a negative count here when the type-feedback info is
+  // shared between two code objects. This can only happen when
+  // the debugger makes a shallow copy of a code object (see Heap::CopyCode).
+ // Since we do not optimize when the debugger is active, we can skip
+ // this counter update.
+ if (new_count >= 0) {
+ new_count &= ICsWithTypeInfoCountField::kMask;
+ value = ICsWithTypeInfoCountField::update(value, new_count);
+ WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
+ }
+}
+
+
+void TypeFeedbackInfo::initialize_storage() {
+ WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(0));
+ WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(0));
+}
+
+
+void TypeFeedbackInfo::change_own_type_change_checksum() {
+ int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ int checksum = OwnTypeChangeChecksum::decode(value);
+ checksum = (checksum + 1) % (1 << kTypeChangeChecksumBits);
+ value = OwnTypeChangeChecksum::update(value, checksum);
+ // Ensure packed bit field is in Smi range.
+ if (value > Smi::kMaxValue) value |= Smi::kMinValue;
+ if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
+ WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
+}
+
+
+void TypeFeedbackInfo::set_inlined_type_change_checksum(int checksum) {
+ int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int mask = (1 << kTypeChangeChecksumBits) - 1;
+ value = InlinedTypeChangeChecksum::update(value, checksum & mask);
+ // Ensure packed bit field is in Smi range.
+ if (value > Smi::kMaxValue) value |= Smi::kMinValue;
+ if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
+ WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
+}
+
+
+int TypeFeedbackInfo::own_type_change_checksum() {
+ int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ return OwnTypeChangeChecksum::decode(value);
+}
+
+
+bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
+ int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int mask = (1 << kTypeChangeChecksumBits) - 1;
+ return InlinedTypeChangeChecksum::decode(value) == (checksum & mask);
+}
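
The Smi-range fixup used twice above ("if (value > Smi::kMaxValue) value |= Smi::kMinValue" and its negative counterpart) amounts, under 32-bit two's complement arithmetic, to sign-extending bit 30 into bit 31, which leaves bits 0 through 30 (the packed fields) untouched. A minimal sketch:

    function clampToSmiRange(value) {
      return (value << 1) >> 1;  // sign-extend bit 30 into bit 31
    }
    clampToSmiRange(0x40000000);  // => -0x40000000, now a valid 31-bit Smi
    clampToSmiRange(0x3fffffff);  // => 0x3fffffff, unchanged
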
+
+
ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
kTypeFeedbackCellsOffset)
@@ -5121,14 +5384,13 @@ void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
template<int start_offset, int end_offset, int size>
void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
HeapObject* obj,
ObjectVisitor* v) {
- v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, end_offset));
+ v->VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
}
@@ -5136,10 +5398,10 @@ template<int start_offset>
void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
int object_size,
ObjectVisitor* v) {
- v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, object_size));
+ v->VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, object_size));
}
-#undef SLOT_ADDR
#undef TYPE_CHECKER
#undef CAST_ACCESSOR
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index b886168991..1ba0bb0d09 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -273,18 +273,12 @@ void JSObject::PrintProperties(FILE* out) {
descs->GetCallbacksObject(i)->ShortPrint(out);
PrintF(out, " (callback)\n");
break;
- case MAP_TRANSITION:
- PrintF(out, "(map transition)\n");
- break;
- case CONSTANT_TRANSITION:
- PrintF(out, "(constant transition)\n");
- break;
- case NULL_DESCRIPTOR:
- PrintF(out, "(null descriptor)\n");
- break;
case NORMAL: // only in slow mode
case HANDLER: // only in lookup results, not in descriptors
case INTERCEPTOR: // only in lookup results, not in descriptors
+ // There are no transitions in the descriptor array.
+ case TRANSITION:
+ case NONEXISTENT:
UNREACHABLE();
break;
}
@@ -410,6 +404,37 @@ void JSObject::PrintElements(FILE* out) {
}
+void JSObject::PrintTransitions(FILE* out) {
+ if (!map()->HasTransitionArray()) return;
+ TransitionArray* transitions = map()->transitions();
+ for (int i = 0; i < transitions->number_of_transitions(); i++) {
+ PrintF(out, " ");
+ transitions->GetKey(i)->StringPrint(out);
+ PrintF(out, ": ");
+ switch (transitions->GetTargetDetails(i).type()) {
+ case FIELD: {
+ PrintF(out, " (transition to field)\n");
+ break;
+ }
+ case CONSTANT_FUNCTION:
+ PrintF(out, " (transition to constant function)\n");
+ break;
+ case CALLBACKS:
+ PrintF(out, " (transition to callback)\n");
+ break;
+ // Values below are never in the target descriptor array.
+ case NORMAL:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
void JSObject::JSObjectPrint(FILE* out) {
PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
@@ -419,11 +444,9 @@ void JSObject::JSObjectPrint(FILE* out) {
PrintF(out,
"]\n - prototype = %p\n",
reinterpret_cast<void*>(GetPrototype()));
- PrintF(out,
- " - elements transition to = %p\n",
- reinterpret_cast<void*>(map()->elements_transition_map()));
PrintF(out, " {\n");
PrintProperties(out);
+ PrintTransitions(out);
PrintElements(out);
PrintF(out, " }\n");
}
@@ -434,6 +457,8 @@ void JSModule::JSModulePrint(FILE* out) {
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - context = ");
context()->Print(out);
+ PrintF(out, " - scope_info = ");
+ scope_info()->ShortPrint(out);
PrintElementsKind(out, this->map()->elements_kind());
PrintF(out, " {\n");
PrintProperties(out);
@@ -539,6 +564,10 @@ void Map::MapPrint(FILE* out) {
}
PrintF(out, " - instance descriptors: ");
instance_descriptors()->ShortPrint(out);
+ if (HasTransitionArray()) {
+ PrintF(out, "\n - transitions: ");
+ transitions()->ShortPrint(out);
+ }
PrintF(out, "\n - prototype: ");
prototype()->ShortPrint(out);
PrintF(out, "\n - constructor: ");
@@ -567,9 +596,9 @@ void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
- PrintF(out, "\n - ic_total_count: %d, ic_with_type_info_count: %d",
+ PrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
ic_total_count(), ic_with_type_info_count());
- PrintF(out, "\n - type_feedback_cells: ");
+ PrintF(out, " - type_feedback_cells: ");
type_feedback_cells()->FixedArrayPrint(out);
}
@@ -741,6 +770,8 @@ void JSFunction::JSFunctionPrint(FILE* out) {
shared()->name()->Print(out);
PrintF(out, "\n - context = ");
unchecked_context()->ShortPrint(out);
+ PrintF(out, "\n - literals = ");
+ literals()->ShortPrint(out);
PrintF(out, "\n - code = ");
code()->ShortPrint(out);
PrintF(out, "\n");
@@ -761,8 +792,17 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
instance_class_name()->Print(out);
PrintF(out, "\n - code = ");
code()->ShortPrint(out);
- PrintF(out, "\n - source code = ");
- GetSourceCode()->ShortPrint(out);
+ if (HasSourceCode()) {
+ PrintF(out, "\n - source code = ");
+ String* source = String::cast(Script::cast(script())->source());
+ int start = start_position();
+ int length = end_position() - start;
+ SmartArrayPointer<char> source_string =
+ source->ToCString(DISALLOW_NULLS,
+ FAST_STRING_TRAVERSAL,
+ start, length, NULL);
+ PrintF(out, "%s", *source_string);
+ }
// Script files are often large, hard to read.
// PrintF(out, "\n - script =");
// script()->Print(out);
@@ -782,10 +822,10 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) {
- PrintF(out, "global_proxy");
+ PrintF(out, "global_proxy ");
JSObjectPrint(out);
- PrintF(out, "context : ");
- context()->ShortPrint(out);
+ PrintF(out, "native context : ");
+ native_context()->ShortPrint(out);
PrintF(out, "\n");
}
@@ -793,8 +833,8 @@ void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) {
void JSGlobalObject::JSGlobalObjectPrint(FILE* out) {
PrintF(out, "global ");
JSObjectPrint(out);
- PrintF(out, "global context : ");
- global_context()->ShortPrint(out);
+ PrintF(out, "native context : ");
+ native_context()->ShortPrint(out);
PrintF(out, "\n");
}
@@ -1022,6 +1062,37 @@ void DescriptorArray::PrintDescriptors(FILE* out) {
}
+void TransitionArray::PrintTransitions(FILE* out) {
+ PrintF(out, "Transition array %d\n", number_of_transitions());
+ for (int i = 0; i < number_of_transitions(); i++) {
+ PrintF(out, " %d: ", i);
+ GetKey(i)->StringPrint(out);
+ PrintF(out, ": ");
+ switch (GetTargetDetails(i).type()) {
+ case FIELD: {
+ PrintF(out, " (transition to field)\n");
+ break;
+ }
+ case CONSTANT_FUNCTION:
+ PrintF(out, " (transition to constant function)\n");
+ break;
+ case CALLBACKS:
+ PrintF(out, " (transition to callback)\n");
+ break;
+ // Values below are never in the target descriptor array.
+ case NORMAL:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
+ }
+ }
+ PrintF(out, "\n");
+}
+
+
#endif // OBJECT_PRINT
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 8ba92f70c9..856ae06b7b 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -56,7 +56,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
- table_.Register(kVisitGlobalContext,
+ table_.Register(kVisitNativeContext,
&FixedBodyVisitor<StaticVisitor,
Context::ScavengeBodyDescriptor,
int>::Visit);
@@ -93,6 +93,182 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
}
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::Initialize() {
+ table_.Register(kVisitShortcutCandidate,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitConsString,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitSlicedString,
+ &FixedBodyVisitor<StaticVisitor,
+ SlicedString::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitFixedArray,
+ &FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitNativeContext, &VisitNativeContext);
+
+ table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitJSWeakMap);
+
+ table_.Register(kVisitOddball,
+ &FixedBodyVisitor<StaticVisitor,
+ Oddball::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitMap,
+ &FixedBodyVisitor<StaticVisitor,
+ Map::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitCode, &StaticVisitor::VisitCode);
+
+ // Registration for kVisitSharedFunctionInfo is done by StaticVisitor.
+
+ // Registration for kVisitJSFunction is done by StaticVisitor.
+
+ // Registration for kVisitJSRegExp is done by StaticVisitor.
+
+ table_.Register(kVisitPropertyCell,
+ &FixedBodyVisitor<StaticVisitor,
+ JSGlobalPropertyCell::BodyDescriptor,
+ void>::Visit);
+
+ table_.template RegisterSpecializations<DataObjectVisitor,
+ kVisitDataObject,
+ kVisitDataObjectGeneric>();
+
+ table_.template RegisterSpecializations<JSObjectVisitor,
+ kVisitJSObject,
+ kVisitJSObjectGeneric>();
+
+ table_.template RegisterSpecializations<StructObjectVisitor,
+ kVisitStruct,
+ kVisitStructGeneric>();
+}
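// A sketch (simplified types, not from the V8 sources) of the dispatch
// table behind the Register() calls above: an object's map caches a
// visitor id, and visiting is a single indexed call through the table.
struct SketchMap { int visitor_id; };

typedef void (*SketchCallback)(SketchMap* map, void* object);

template <typename Callback>
class SketchDispatchTable {
 public:
  void Register(int id, Callback callback) { callbacks_[id] = callback; }
  Callback GetVisitor(SketchMap* map) { return callbacks_[map->visitor_id]; }

 private:
  Callback callbacks_[64];  // stands in for kVisitorIdCount slots
};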
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
+ Heap* heap, Address entry_address) {
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
+ StaticVisitor::MarkObject(heap, code);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ ASSERT(!rinfo->target_object()->IsConsString());
+ HeapObject* object = HeapObject::cast(rinfo->target_object());
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ StaticVisitor::MarkObject(heap, object);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitGlobalPropertyCell(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+ JSGlobalPropertyCell* cell = rinfo->target_cell();
+ StaticVisitor::MarkObject(heap, cell);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ StaticVisitor::MarkObject(heap, target);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ // Monomorphic ICs are preserved when possible, but need to be flushed
+ // when they might be keeping a Context alive, or when the heap is about
+ // to be serialized.
+ if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
+ && (target->ic_state() == MEGAMORPHIC || Serializer::enabled() ||
+ heap->isolate()->context_exit_happened() ||
+ target->ic_age() != heap->global_ic_age())) {
+ IC::Clear(rinfo->pc());
+ target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ }
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ StaticVisitor::MarkObject(heap, target);
+}
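// The flush condition above, restated as a standalone predicate (flag
// names here are hypothetical): an inline cache is cleared only when
// cache cleanup is enabled and at least one staleness signal fires.
static bool SketchShouldFlushIC(bool cleanup_enabled, bool is_ic_stub,
                                bool megamorphic, bool serializing,
                                bool context_exited, bool ic_age_stale) {
  return cleanup_enabled && is_ic_stub &&
         (megamorphic || serializing || context_exited || ic_age_stale);
}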
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
+ Map* map, HeapObject* object) {
+ FixedBodyVisitor<StaticVisitor,
+ Context::MarkCompactBodyDescriptor,
+ void>::Visit(map, object);
+
+ MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+ for (int idx = Context::FIRST_WEAK_SLOT;
+ idx < Context::NATIVE_CONTEXT_SLOTS;
+ ++idx) {
+ Object** slot =
+ HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+ collector->RecordSlot(slot, slot, *slot);
+ }
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCode(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ Code* code = Code::cast(object);
+ if (FLAG_cleanup_code_caches_at_gc) {
+ code->ClearTypeFeedbackCells(heap);
+ }
+ code->CodeIterateBody<StaticVisitor>(heap);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
+ Map* map, HeapObject* object) {
+ int last_property_offset =
+ JSRegExp::kSize + kPointerSize * map->inobject_properties();
+ StaticVisitor::VisitPointers(map->GetHeap(),
+ HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
+ HeapObject::RawField(object, last_property_offset));
+}
+
+
void Code::CodeIterateBody(ObjectVisitor* v) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index b476dfef2e..76a0f74e86 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -46,71 +46,70 @@ namespace internal {
// Base class for all static visitors.
class StaticVisitorBase : public AllStatic {
public:
+#define VISITOR_ID_LIST(V) \
+ V(SeqAsciiString) \
+ V(SeqTwoByteString) \
+ V(ShortcutCandidate) \
+ V(ByteArray) \
+ V(FreeSpace) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(NativeContext) \
+ V(DataObject2) \
+ V(DataObject3) \
+ V(DataObject4) \
+ V(DataObject5) \
+ V(DataObject6) \
+ V(DataObject7) \
+ V(DataObject8) \
+ V(DataObject9) \
+ V(DataObjectGeneric) \
+ V(JSObject2) \
+ V(JSObject3) \
+ V(JSObject4) \
+ V(JSObject5) \
+ V(JSObject6) \
+ V(JSObject7) \
+ V(JSObject8) \
+ V(JSObject9) \
+ V(JSObjectGeneric) \
+ V(Struct2) \
+ V(Struct3) \
+ V(Struct4) \
+ V(Struct5) \
+ V(Struct6) \
+ V(Struct7) \
+ V(Struct8) \
+ V(Struct9) \
+ V(StructGeneric) \
+ V(ConsString) \
+ V(SlicedString) \
+ V(Oddball) \
+ V(Code) \
+ V(Map) \
+ V(PropertyCell) \
+ V(SharedFunctionInfo) \
+ V(JSFunction) \
+ V(JSWeakMap) \
+ V(JSRegExp)
+
+ // For data objects, JS objects and structs we provide visitors specialized
+ // by object size in words, along with a generic visitor that can visit an
+ // object of any size.
+ // Ids of the specialized visitors are declared in linear order (without
+ // holes), starting from the id of the visitor specialized for 2-word
+ // objects (the base visitor id) and ending with the id of the generic
+ // visitor.
+ // The method GetVisitorIdForSize relies on this ordering to calculate the
+ // id of a specialized visitor from a given instance size, the base visitor
+ // id and the generic visitor's id.
enum VisitorId {
- kVisitSeqAsciiString = 0,
- kVisitSeqTwoByteString,
- kVisitShortcutCandidate,
- kVisitByteArray,
- kVisitFreeSpace,
- kVisitFixedArray,
- kVisitFixedDoubleArray,
- kVisitGlobalContext,
-
- // For data objects, JS objects and structs along with generic visitor which
- // can visit object of any size we provide visitors specialized by
- // object size in words.
- // Ids of specialized visitors are declared in a linear order (without
- // holes) starting from the id of visitor specialized for 2 words objects
- // (base visitor id) and ending with the id of generic visitor.
- // Method GetVisitorIdForSize depends on this ordering to calculate visitor
- // id of specialized visitor from given instance size, base visitor id and
- // generic visitor's id.
-
- kVisitDataObject,
- kVisitDataObject2 = kVisitDataObject,
- kVisitDataObject3,
- kVisitDataObject4,
- kVisitDataObject5,
- kVisitDataObject6,
- kVisitDataObject7,
- kVisitDataObject8,
- kVisitDataObject9,
- kVisitDataObjectGeneric,
-
- kVisitJSObject,
- kVisitJSObject2 = kVisitJSObject,
- kVisitJSObject3,
- kVisitJSObject4,
- kVisitJSObject5,
- kVisitJSObject6,
- kVisitJSObject7,
- kVisitJSObject8,
- kVisitJSObject9,
- kVisitJSObjectGeneric,
-
- kVisitStruct,
- kVisitStruct2 = kVisitStruct,
- kVisitStruct3,
- kVisitStruct4,
- kVisitStruct5,
- kVisitStruct6,
- kVisitStruct7,
- kVisitStruct8,
- kVisitStruct9,
- kVisitStructGeneric,
-
- kVisitConsString,
- kVisitSlicedString,
- kVisitOddball,
- kVisitCode,
- kVisitMap,
- kVisitPropertyCell,
- kVisitSharedFunctionInfo,
- kVisitJSFunction,
- kVisitJSWeakMap,
- kVisitJSRegExp,
-
+#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
+ VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+#undef VISITOR_ID_ENUM_DECL
kVisitorIdCount,
+ kVisitDataObject = kVisitDataObject2,
+ kVisitJSObject = kVisitJSObject2,
+ kVisitStruct = kVisitStruct2,
kMinObjectSizeInWords = 2
};
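// A sketch (hypothetical list, not from the V8 sources) of the X-macro
// idiom used above: one list macro expands into the enum, and the
// hole-free linear ordering lets a size-specialized id be computed
// rather than looked up. The clamping arithmetic is an assumption based
// on the comment about GetVisitorIdForSize.
#define SKETCH_ID_LIST(V) \
  V(Object2)              \
  V(Object3)              \
  V(ObjectGeneric)

enum SketchId {
#define SKETCH_ID_ENUM_DECL(id) kSketchVisit##id,
  SKETCH_ID_LIST(SKETCH_ID_ENUM_DECL)
#undef SKETCH_ID_ENUM_DECL
  kSketchIdCount
};

inline SketchId SketchIdForSize(int size_in_words) {
  int offset = size_in_words - 2;  // kMinObjectSizeInWords == 2
  int generic = kSketchVisitObjectGeneric - kSketchVisitObject2;
  if (offset > generic) offset = generic;
  return static_cast<SketchId>(kSketchVisitObject2 + offset);
}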
@@ -361,7 +360,74 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
template<typename StaticVisitor>
VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
- StaticNewSpaceVisitor<StaticVisitor>::table_;
+ StaticNewSpaceVisitor<StaticVisitor>::table_;
+
+
+// Base class for visitors used to transitively mark the entire heap.
+// Unlike StaticNewSpaceVisitor above, IterateBody returns nothing: the
+// body visitors are instantiated with a void return type.
+// Certain types of objects might not be handled by this base class, and
+// no visitor function is registered for them by the generic
+// initialization. A specialized visitor function needs to be provided by
+// the inheriting class itself for those cases.
+//
+// This class is intended to be used in the following way:
+//
+// class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
+// ...
+// }
+//
+// This is an example of the Curiously Recurring Template Pattern.
+template<typename StaticVisitor>
+class StaticMarkingVisitor : public StaticVisitorBase {
+ public:
+ static void Initialize();
+
+ static inline void IterateBody(Map* map, HeapObject* obj) {
+ table_.GetVisitor(map)(map, obj);
+ }
+
+ static inline void VisitCodeEntry(Heap* heap, Address entry_address);
+ static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitExternalReference(RelocInfo* rinfo) { }
+ static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
+
+ // TODO(mstarzinger): This should be made protected once refactoring is done.
+ static inline void VisitNativeContext(Map* map, HeapObject* object);
+
+ protected:
+ static inline void VisitCode(Map* map, HeapObject* object);
+ static inline void VisitJSRegExp(Map* map, HeapObject* object);
+
+ class DataObjectVisitor {
+ public:
+ template<int size>
+ static inline void VisitSpecialized(Map* map, HeapObject* object) {
+ }
+
+ static inline void Visit(Map* map, HeapObject* object) {
+ }
+ };
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ JSObject::BodyDescriptor,
+ void> JSObjectVisitor;
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ StructBodyDescriptor,
+ void> StructObjectVisitor;
+
+ typedef void (*Callback)(Map* map, HeapObject* object);
+
+ static VisitorDispatchTable<Callback> table_;
+};
+
+
+template<typename StaticVisitor>
+VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
+ StaticMarkingVisitor<StaticVisitor>::table_;
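// A sketch (not from the V8 sources) of the curiously recurring template
// pattern used by StaticMarkingVisitor above: the base class calls into
// its template parameter, so dispatch is resolved at compile time with
// no vtable.
template <typename StaticVisitor>
class SketchStaticVisitor {
 public:
  static void IterateBody(int* object) {
    StaticVisitor::VisitPointer(object);  // statically bound call
  }
};

class SketchSomeVisitor : public SketchStaticVisitor<SketchSomeVisitor> {
 public:
  static void VisitPointer(int* object) { ++*object; }
};

// Usage: SketchSomeVisitor::IterateBody(&slot) ends up in
// SketchSomeVisitor::VisitPointer without any virtual call.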
} } // namespace v8::internal
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 6b2f64ac2e..45d108b1ca 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -69,13 +69,13 @@ MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
}
-MaybeObject* Object::ToObject(Context* global_context) {
+MaybeObject* Object::ToObject(Context* native_context) {
if (IsNumber()) {
- return CreateJSValue(global_context->number_function(), this);
+ return CreateJSValue(native_context->number_function(), this);
} else if (IsBoolean()) {
- return CreateJSValue(global_context->boolean_function(), this);
+ return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
- return CreateJSValue(global_context->string_function(), this);
+ return CreateJSValue(native_context->string_function(), this);
}
ASSERT(IsJSObject());
return this;
@@ -87,16 +87,16 @@ MaybeObject* Object::ToObject() {
return this;
} else if (IsNumber()) {
Isolate* isolate = Isolate::Current();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->number_function(), this);
+ Context* native_context = isolate->context()->native_context();
+ return CreateJSValue(native_context->number_function(), this);
} else if (IsBoolean()) {
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->boolean_function(), this);
+ Context* native_context = isolate->context()->native_context();
+ return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->string_function(), this);
+ Context* native_context = isolate->context()->native_context();
+ return CreateJSValue(native_context->string_function(), this);
}
// Throw a type error.
@@ -134,13 +134,16 @@ void Object::Lookup(String* name, LookupResult* result) {
if (IsJSReceiver()) {
holder = this;
} else {
- Context* global_context = Isolate::Current()->context()->global_context();
+ Context* native_context = Isolate::Current()->context()->native_context();
if (IsNumber()) {
- holder = global_context->number_function()->instance_prototype();
+ holder = native_context->number_function()->instance_prototype();
} else if (IsString()) {
- holder = global_context->string_function()->instance_prototype();
+ holder = native_context->string_function()->instance_prototype();
} else if (IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
+ holder = native_context->boolean_function()->instance_prototype();
+ } else {
+ Isolate::Current()->PushStackTraceAndDie(
+ 0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
}
}
ASSERT(holder != NULL); // Cannot handle null or undefined.
@@ -190,6 +193,7 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
}
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
JSObject* self = JSObject::cast(receiver);
Handle<String> key(name);
@@ -206,7 +210,9 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
if (result.IsEmpty()) {
return isolate->heap()->undefined_value();
}
- return *v8::Utils::OpenHandle(*result);
+ Object* return_value = *v8::Utils::OpenHandle(*result);
+ return_value->VerifyApiCallResultType();
+ return return_value;
}
// __defineGetter__ callback
@@ -406,16 +412,16 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
} else {
result->holder()->LocalLookupRealNamedProperty(name, &r);
}
- if (r.IsProperty()) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
- }
- break;
+ if (!r.IsFound()) break;
+ return GetPropertyAttributeWithFailedAccessCheck(receiver,
+ &r,
+ name,
+ continue_search);
}
- default:
+ case HANDLER:
+ case TRANSITION:
+ case NONEXISTENT:
UNREACHABLE();
}
}
@@ -481,10 +487,21 @@ MaybeObject* JSObject::SetNormalizedProperty(String* name,
set_properties(StringDictionary::cast(dict));
return value;
}
- // Preserve enumeration index.
- details = PropertyDetails(details.attributes(),
- details.type(),
- property_dictionary()->DetailsAt(entry).index());
+
+ PropertyDetails original_details = property_dictionary()->DetailsAt(entry);
+ int enumeration_index;
+ // Preserve the enumeration index unless the property was deleted.
+ if (original_details.IsDeleted()) {
+ enumeration_index = property_dictionary()->NextEnumerationIndex();
+ property_dictionary()->SetNextEnumerationIndex(enumeration_index + 1);
+ } else {
+ enumeration_index = original_details.dictionary_index();
+ ASSERT(enumeration_index > 0);
+ }
+
+ details = PropertyDetails(
+ details.attributes(), details.type(), enumeration_index);
+
if (IsGlobalObject()) {
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
@@ -512,11 +529,12 @@ MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
// map change to invalidate any ICs that think they can load
// from the DontDelete cell without checking if it contains
// the hole value.
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
- set_map(Map::cast(new_map));
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ ASSERT(new_map->is_dictionary_map());
+ set_map(new_map);
}
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
@@ -638,9 +656,9 @@ MaybeObject* Object::GetProperty(Object* receiver,
return result->holder()->GetPropertyWithInterceptor(
recvr, name, attributes);
}
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
UNREACHABLE();
@@ -661,13 +679,13 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
holder = holder->GetPrototype()) {
if (!holder->IsJSObject()) {
Isolate* isolate = heap->isolate();
- Context* global_context = isolate->context()->global_context();
+ Context* native_context = isolate->context()->native_context();
if (holder->IsNumber()) {
- holder = global_context->number_function()->instance_prototype();
+ holder = native_context->number_function()->instance_prototype();
} else if (holder->IsString()) {
- holder = global_context->string_function()->instance_prototype();
+ holder = native_context->string_function()->instance_prototype();
} else if (holder->IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
+ holder = native_context->boolean_function()->instance_prototype();
} else if (holder->IsJSProxy()) {
return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
} else {
@@ -709,7 +727,7 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
Object* Object::GetPrototype() {
if (IsSmi()) {
Heap* heap = Isolate::Current()->heap();
- Context* context = heap->isolate()->context()->global_context();
+ Context* context = heap->isolate()->context()->native_context();
return context->number_function()->instance_prototype();
}
@@ -721,7 +739,7 @@ Object* Object::GetPrototype() {
return heap_object->map()->prototype();
}
Heap* heap = heap_object->GetHeap();
- Context* context = heap->isolate()->context()->global_context();
+ Context* context = heap->isolate()->context()->native_context();
if (heap_object->IsHeapNumber()) {
return context->number_function()->instance_prototype();
@@ -763,7 +781,6 @@ MaybeObject* Object::GetHash(CreationFlag flag) {
bool Object::SameValue(Object* other) {
if (other == this) return true;
- if (!IsHeapObject() || !other->IsHeapObject()) return false;
// The object is either a number, a string, an odd-ball,
// a real JS object, or a Harmony proxy.
@@ -1399,8 +1416,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case EXTERNAL_DOUBLE_ARRAY_TYPE:
break;
case SHARED_FUNCTION_INFO_TYPE: {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
- shared->SharedFunctionInfoIterateBody(v);
+ SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
break;
}
@@ -1488,20 +1504,19 @@ String* JSReceiver::constructor_name() {
MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
String* name,
- Object* value) {
- int index = new_map->PropertyIndexFor(name);
+ Object* value,
+ int field_index) {
if (map()->unused_property_fields() == 0) {
- ASSERT(map()->unused_property_fields() == 0);
int new_unused = new_map->unused_property_fields();
- Object* values;
+ FixedArray* values;
{ MaybeObject* maybe_values =
properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->ToObject(&values)) return maybe_values;
+ if (!maybe_values->To(&values)) return maybe_values;
}
- set_properties(FixedArray::cast(values));
+ set_properties(values);
}
set_map(new_map);
- return FastPropertyAtPut(index, value);
+ return FastPropertyAtPut(field_index, value);
}
@@ -1526,93 +1541,62 @@ MaybeObject* JSObject::AddFastProperty(String* name,
PropertyAttributes attributes,
StoreFromKeyed store_mode) {
ASSERT(!IsJSGlobalProxy());
+ ASSERT(map()->instance_descriptors()->Search(name) ==
+ DescriptorArray::kNotFound);
// Normalize the object if the name is an actual string (not the
// hidden symbols) and is not a real identifier.
+ // Normalize the object if it will have too many fast properties.
Isolate* isolate = GetHeap()->isolate();
StringInputBuffer buffer(name);
- if (!IsIdentifier(isolate->unicode_cache(), &buffer)
- && name != isolate->heap()->hidden_symbol()) {
+ if ((!IsIdentifier(isolate->unicode_cache(), &buffer)
+ && name != isolate->heap()->hidden_symbol()) ||
+ (map()->unused_property_fields() == 0 &&
+ TooManyFastProperties(properties()->length(), store_mode))) {
Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_obj =
+ NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+
return AddSlowProperty(name, value, attributes);
}
- DescriptorArray* old_descriptors = map()->instance_descriptors();
// Compute the new index for new field.
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(name, index, attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors =
- old_descriptors->CopyInsert(&new_field, REMOVE_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- }
-
- // Only allow map transition if the object isn't the global object and there
- // is not a transition for the name, or there's a transition for the name but
- // it's unrelated to properties.
- int descriptor_index = old_descriptors->Search(name);
-
- // Element transitions are stored in the descriptor for property "", which is
- // not a identifier and should have forced a switch to slow properties above.
- bool can_insert_transition = descriptor_index == DescriptorArray::kNotFound;
- bool allow_map_transition =
- can_insert_transition &&
- (isolate->context()->global_context()->object_function()->map() != map());
+ FieldDescriptor new_field(name, index, attributes, 0);
ASSERT(index < map()->inobject_properties() ||
(index - map()->inobject_properties()) < properties()->length() ||
map()->unused_property_fields() == 0);
- // Allocate a new map for the object.
- Object* r;
- { MaybeObject* maybe_r = map()->CopyDropDescriptors();
- if (!maybe_r->ToObject(&r)) return maybe_r;
- }
- Map* new_map = Map::cast(r);
- if (allow_map_transition) {
- // Allocate new instance descriptors for the old map with map transition.
- MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
- Object* r;
- { MaybeObject* maybe_r = old_descriptors->CopyInsert(&d, KEEP_TRANSITIONS);
- if (!maybe_r->ToObject(&r)) return maybe_r;
- }
- old_descriptors = DescriptorArray::cast(r);
- }
+
+ FixedArray* values = NULL;
if (map()->unused_property_fields() == 0) {
- if (TooManyFastProperties(properties()->length(), store_mode)) {
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return AddSlowProperty(name, value, attributes);
- }
// Make room for the new value
- Object* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_values->ToObject(&values)) return maybe_values;
- }
- set_properties(FixedArray::cast(values));
+ MaybeObject* maybe_values =
+ properties()->CopySize(properties()->length() + kFieldsAdded);
+ if (!maybe_values->To(&values)) return maybe_values;
+ }
+
+ // Only allow map transition if the object isn't the global object.
+ TransitionFlag flag = isolate->empty_object_map() != map()
+ ? INSERT_TRANSITION
+ : OMIT_TRANSITION;
+
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ if (map()->unused_property_fields() == 0) {
+ ASSERT(values != NULL);
+ set_properties(values);
new_map->set_unused_property_fields(kFieldsAdded - 1);
} else {
new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
}
- // We have now allocated all the necessary objects.
- // All the changes can be applied at once, so they are atomic.
- if (allow_map_transition) {
- map()->set_instance_descriptors(old_descriptors);
- }
- new_map->SetBackPointer(map());
- new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+
set_map(new_map);
return FastPropertyAtPut(index, value);
}
@@ -1623,57 +1607,26 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
JSFunction* function,
PropertyAttributes attributes) {
// Allocate new instance descriptors with (name, function) added
- ConstantFunctionDescriptor d(name, function, attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors =
- map()->instance_descriptors()->CopyInsert(&d, REMOVE_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- }
-
- // Allocate a new map for the object.
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
+ ConstantFunctionDescriptor d(name, function, attributes, 0);
- DescriptorArray* descriptors = DescriptorArray::cast(new_descriptors);
- Map::cast(new_map)->set_instance_descriptors(descriptors);
- Map* old_map = map();
- set_map(Map::cast(new_map));
-
- // If the old map is the global object map (from new Object()),
- // then transitions are not added to it, so we are done.
Heap* heap = GetHeap();
- if (old_map == heap->isolate()->context()->global_context()->
- object_function()->map()) {
- return function;
- }
+ TransitionFlag flag =
+ // Do not add transitions to the empty object map (map of "new Object()"),
+ // nor to global objects.
+ (map() == heap->isolate()->empty_object_map() || IsGlobalObject() ||
+ // Don't add transitions to special properties with non-trivial
+ // attributes.
+ // TODO(verwaest): Once we support attribute changes, these transitions
+ // should be kept as well.
+ attributes != NONE)
+ ? OMIT_TRANSITION
+ : INSERT_TRANSITION;
- // Do not add CONSTANT_TRANSITIONS to global objects
- if (IsGlobalObject()) {
- return function;
- }
-
- // Add a CONSTANT_TRANSITION descriptor to the old map,
- // so future assignments to this property on other objects
- // of the same type will create a normal field, not a constant function.
- // Don't do this for special properties, with non-trival attributes.
- if (attributes != NONE) {
- return function;
- }
- ConstTransitionDescriptor mark(name, Map::cast(new_map));
- { MaybeObject* maybe_new_descriptors =
- old_map->instance_descriptors()->CopyInsert(&mark, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- // We have accomplished the main goal, so return success.
- return function;
- }
- }
- old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- Map::cast(new_map)->SetBackPointer(old_map);
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ set_map(new_map);
return function;
}
@@ -1770,9 +1723,10 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
+ if (!result.IsFound()) map()->LookupTransition(this, name, &result);
if (result.IsFound()) {
- // An existing property, a map transition or a null descriptor was
- // found. Use set property to handle all these cases.
+ // An existing property or a map transition was found. Use set property to
+ // handle all these cases.
return SetProperty(&result, name, value, attributes, strict_mode);
}
bool done = false;
@@ -1794,8 +1748,7 @@ MaybeObject* JSObject::ReplaceSlowProperty(String* name,
int new_enumeration_index = 0; // 0 means "Use the next available index."
if (old_index != -1) {
// All calls to ReplaceSlowProperty have had all transitions removed.
- ASSERT(!dictionary->ContainsTransition(old_index));
- new_enumeration_index = dictionary->DetailsAt(old_index).index();
+ new_enumeration_index = dictionary->DetailsAt(old_index).dictionary_index();
}
PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
@@ -1803,38 +1756,28 @@ MaybeObject* JSObject::ReplaceSlowProperty(String* name,
}
-MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition(
+MaybeObject* JSObject::ConvertTransitionToMapTransition(
+ int transition_index,
String* name,
Object* new_value,
PropertyAttributes attributes) {
Map* old_map = map();
Object* result;
- { MaybeObject* maybe_result =
- ConvertDescriptorToField(name, new_value, attributes);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // If we get to this point we have succeeded - do not return failure
- // after this point. Later stuff is optional.
- if (!HasFastProperties()) {
- return result;
- }
- // Do not add transitions to the map of "new Object()".
- if (map() == GetIsolate()->context()->global_context()->
- object_function()->map()) {
- return result;
- }
- MapTransitionDescriptor transition(name,
- map(),
- attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors = old_map->instance_descriptors()->
- CopyInsert(&transition, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return result; // Yes, return _result_.
- }
- }
- old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+ MaybeObject* maybe_result =
+ ConvertDescriptorToField(name, new_value, attributes);
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ if (!HasFastProperties()) return result;
+
+ // This method should only be used to convert existing transitions. Objects
+ // with the map of "new Object()" cannot have transitions in the first place.
+ ASSERT(map() != GetIsolate()->empty_object_map());
+
+ // TODO(verwaest): From here on we lose existing map transitions, causing
+ // invalid back pointers. This will change once we can store multiple
+ // transitions with the same key.
+ old_map->SetTransition(transition_index, map());
map()->SetBackPointer(old_map);
return result;
}
@@ -1846,57 +1789,36 @@ MaybeObject* JSObject::ConvertDescriptorToField(String* name,
if (map()->unused_property_fields() == 0 &&
TooManyFastProperties(properties()->length(), MAY_BE_STORE_FROM_KEYED)) {
Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
return ReplaceSlowProperty(name, new_value, attributes);
}
int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes);
- // Make a new DescriptorArray replacing an entry with FieldDescriptor.
- Object* descriptors_unchecked;
- { MaybeObject* maybe_descriptors_unchecked = map()->instance_descriptors()->
- CopyInsert(&new_field, REMOVE_TRANSITIONS);
- if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
- return maybe_descriptors_unchecked;
- }
- }
- DescriptorArray* new_descriptors =
- DescriptorArray::cast(descriptors_unchecked);
+ FieldDescriptor new_field(name, index, attributes, 0);
// Make a new map for the object.
- Object* new_map_unchecked;
- { MaybeObject* maybe_new_map_unchecked = map()->CopyDropDescriptors();
- if (!maybe_new_map_unchecked->ToObject(&new_map_unchecked)) {
- return maybe_new_map_unchecked;
- }
- }
- Map* new_map = Map::cast(new_map_unchecked);
- new_map->set_instance_descriptors(new_descriptors);
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyInsertDescriptor(&new_field,
+ OMIT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
// Make new properties array if necessary.
- FixedArray* new_properties = 0; // Will always be NULL or a valid pointer.
+ FixedArray* new_properties = NULL;
int new_unused_property_fields = map()->unused_property_fields() - 1;
if (map()->unused_property_fields() == 0) {
new_unused_property_fields = kFieldsAdded - 1;
- Object* new_properties_object;
- { MaybeObject* maybe_new_properties_object =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_new_properties_object->ToObject(&new_properties_object)) {
- return maybe_new_properties_object;
- }
- }
- new_properties = FixedArray::cast(new_properties_object);
+ MaybeObject* maybe_new_properties =
+ properties()->CopySize(properties()->length() + kFieldsAdded);
+ if (!maybe_new_properties->To(&new_properties)) return maybe_new_properties;
}
// Update pointers to commit changes.
// Object points to the new map.
new_map->set_unused_property_fields(new_unused_property_fields);
set_map(new_map);
- if (new_properties) {
- set_properties(FixedArray::cast(new_properties));
+ if (new_properties != NULL) {
+ set_properties(new_properties);
}
return FastPropertyAtPut(index, new_value);
}
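// A simplified sketch (not V8's real MaybeObject) of the To()-based
// allocation-propagation idiom that this patch converts many call sites
// to: To() yields false on failure, and the failure sentinel itself is
// returned up the stack unchanged.
struct SketchMaybe {
  bool failed;
  int value;
  bool To(int* out) {
    if (failed) return false;
    *out = value;
    return true;
  }
};

static SketchMaybe SketchAllocate(bool ok) {
  SketchMaybe result = { !ok, 7 };
  return result;
}

static SketchMaybe SketchCaller() {
  int value;
  SketchMaybe maybe = SketchAllocate(true);
  if (!maybe.To(&value)) return maybe;  // propagate the failure unchanged
  return SketchAllocate(value > 0);
}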
@@ -1964,6 +1886,9 @@ MaybeObject* JSReceiver::SetProperty(String* name,
JSReceiver::StoreFromKeyed store_mode) {
LookupResult result(GetIsolate());
LocalLookup(name, &result);
+ if (!result.IsFound()) {
+ map()->LookupTransition(JSObject::cast(this), name, &result);
+ }
return SetProperty(&result, name, value, attributes, strict_mode, store_mode);
}
@@ -2152,9 +2077,9 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
return result.proxy()->SetPropertyViaPrototypesWithHandler(
this, name, value, attributes, strict_mode, done);
}
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
}
@@ -2171,32 +2096,120 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
}
-void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
- DescriptorArray* descriptors = map()->instance_descriptors();
- int number = descriptors->SearchWithCache(name);
- if (number != DescriptorArray::kNotFound) {
- result->DescriptorResult(this, descriptors->GetDetails(number), number);
- } else {
- result->NotFound();
+enum RightTrimMode { FROM_GC, FROM_MUTATOR };
+
+
+static void ZapEndOfFixedArray(Address new_end, int to_trim) {
+ // If we are doing a big trim in old space then we zap the space.
+ Object** zap = reinterpret_cast<Object**>(new_end);
+ for (int i = 1; i < to_trim; i++) {
+ *zap++ = Smi::FromInt(0);
}
}
+template<RightTrimMode trim_mode>
+static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
+ ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ // For now this trick is only applied to fixed arrays in new and paged space.
+ ASSERT(!HEAP->lo_space()->Contains(elms));
-void Map::LookupInDescriptors(JSObject* holder,
- String* name,
- LookupResult* result) {
- DescriptorArray* descriptors = instance_descriptors();
- DescriptorLookupCache* cache =
- GetHeap()->isolate()->descriptor_lookup_cache();
- int number = cache->Lookup(descriptors, name);
- if (number == DescriptorLookupCache::kAbsent) {
- number = descriptors->Search(name);
- cache->Update(descriptors, name, number);
- }
- if (number != DescriptorArray::kNotFound) {
- result->DescriptorResult(holder, descriptors->GetDetails(number), number);
+ const int len = elms->length();
+
+ ASSERT(to_trim < len);
+
+ Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
+
+ if (trim_mode == FROM_GC) {
+#ifdef DEBUG
+ ZapEndOfFixedArray(new_end, to_trim);
+#endif
} else {
- result->NotFound();
+ ZapEndOfFixedArray(new_end, to_trim);
+ }
+
+ int size_delta = to_trim * kPointerSize;
+
+ // Technically, in new space this write might be omitted (except in
+ // debug mode, which iterates through the heap), but to play it safe
+ // we still do it.
+ heap->CreateFillerObjectAt(new_end, size_delta);
+
+ elms->set_length(len - to_trim);
+
+ // Maintain marking consistency for IncrementalMarking.
+ if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
+ if (trim_mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
+ } else {
+ MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
+ }
+ }
+}
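// A conceptual sketch (plain C++, no GC; not from the V8 sources) of the
// in-place right-trim above: the length field is lowered and the tail is
// zapped, so the array shrinks without being reallocated or moved.
#include <cassert>
#include <cstddef>

struct SketchFixedArray {
  size_t length;
  long elements[16];
};

static void SketchRightTrim(SketchFixedArray* array, size_t to_trim) {
  assert(to_trim < array->length);  // mirrors ASSERT(to_trim < len)
  // Zap the trimmed tail; stands in for ZapEndOfFixedArray plus the
  // filler object written by CreateFillerObjectAt.
  for (size_t i = array->length - to_trim; i < array->length; ++i) {
    array->elements[i] = 0;
  }
  array->length -= to_trim;  // commit the new, shorter length
}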
+
+
+void Map::CopyAppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors) {
+ Isolate* isolate = map->GetIsolate();
+ Handle<DescriptorArray> array(map->instance_descriptors());
+ v8::NeanderArray callbacks(descriptors);
+ int nof_callbacks = callbacks.length();
+ int descriptor_count = array->number_of_descriptors();
+
+ // Ensure the keys are symbols before writing them into the instance
+ // descriptor. Since it may cause a GC, it has to be done before we
+ // temporarily put the heap in an invalid state while appending descriptors.
+ for (int i = 0; i < nof_callbacks; ++i) {
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i)));
+ Handle<String> key =
+ isolate->factory()->SymbolFromString(
+ Handle<String>(String::cast(entry->name())));
+ entry->set_name(*key);
+ }
+
+ Handle<DescriptorArray> result =
+ isolate->factory()->NewDescriptorArray(descriptor_count + nof_callbacks);
+
+ // Ensure that marking will not progress and change color of objects.
+ DescriptorArray::WhitenessWitness witness(*result);
+
+ // Copy the descriptors from the array.
+ if (0 < descriptor_count) {
+ for (int i = 0; i < descriptor_count; i++) {
+ result->CopyFrom(i, *array, i, witness);
+ }
+ }
+
+ // After this point the GC is not allowed to run anymore until the map is in a
+ // consistent state again, i.e., all the descriptors are appended and the
+ // descriptor array is trimmed to the right size.
+ Map::SetDescriptors(map, result);
+
+ // Fill in new callback descriptors. Process the callbacks from
+ // back to front so that the last callback with a given name takes
+ // precedence over previously added callbacks with that name.
+ for (int i = nof_callbacks - 1; i >= 0; i--) {
+ AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i));
+ String* key = String::cast(entry->name());
+ // Check if a descriptor with this name already exists before writing.
+ if (LinearSearch(*result, key, map->NumberOfOwnDescriptors()) ==
+ DescriptorArray::kNotFound) {
+ CallbacksDescriptor desc(key, entry, entry->property_attributes());
+ map->AppendDescriptor(&desc, witness);
+ }
+ }
+
+ int new_number_of_descriptors = map->NumberOfOwnDescriptors();
+ // Reinstall the original descriptor array if no new elements were added.
+ if (new_number_of_descriptors == descriptor_count) {
+ Map::SetDescriptors(map, array);
+ return;
+ }
+
+ // If duplicates were detected, trim the descriptor array to the right size.
+ int new_array_size = DescriptorArray::LengthFor(new_number_of_descriptors);
+ if (new_array_size < result->length()) {
+ RightTrimFixedArray<FROM_MUTATOR>(
+ isolate->heap(), *result, result->length() - new_array_size);
}
}
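// A sketch (std::vector stand-ins, not from the V8 sources) of the
// back-to-front precedence rule above: walking the callbacks in reverse
// and skipping names that are already present keeps only the last
// registration for each name.
#include <string>
#include <vector>

struct SketchAccessor { std::string name; int payload; };

static void SketchAppendWithPrecedence(
    std::vector<SketchAccessor>* existing,
    const std::vector<SketchAccessor>& callbacks) {
  for (int i = static_cast<int>(callbacks.size()) - 1; i >= 0; i--) {
    bool seen = false;
    for (size_t j = 0; j < existing->size(); j++) {
      if ((*existing)[j].name == callbacks[i].name) { seen = true; break; }
    }
    if (!seen) existing->push_back(callbacks[i]);  // the last one wins
  }
}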
@@ -2250,21 +2263,16 @@ static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
ASSERT(index <= to_index);
for (; index < to_index; ++index) {
- Map* next_map = current_map->elements_transition_map();
- if (next_map == NULL) {
- return current_map;
- }
- current_map = next_map;
+ if (!current_map->HasElementsTransition()) return current_map;
+ current_map = current_map->elements_transition_map();
}
- if (!IsFastElementsKind(to_kind)) {
+ if (!IsFastElementsKind(to_kind) && current_map->HasElementsTransition()) {
Map* next_map = current_map->elements_transition_map();
- if (next_map != NULL && next_map->elements_kind() == to_kind) {
- return next_map;
- }
- ASSERT(current_map->elements_kind() == TERMINAL_FAST_ELEMENTS_KIND);
- } else {
- ASSERT(current_map->elements_kind() == to_kind);
+ if (next_map->elements_kind() == to_kind) return next_map;
}
+ ASSERT(IsFastElementsKind(to_kind)
+ ? current_map->elements_kind() == to_kind
+ : current_map->elements_kind() == TERMINAL_FAST_ELEMENTS_KIND);
return current_map;
}
@@ -2276,29 +2284,6 @@ Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
}
-MaybeObject* Map::CreateNextElementsTransition(ElementsKind next_kind) {
- ASSERT(elements_transition_map() == NULL ||
- ((elements_transition_map()->elements_kind() == DICTIONARY_ELEMENTS ||
- IsExternalArrayElementsKind(
- elements_transition_map()->elements_kind())) &&
- (next_kind == DICTIONARY_ELEMENTS ||
- IsExternalArrayElementsKind(next_kind))));
- ASSERT(!IsFastElementsKind(next_kind) ||
- IsMoreGeneralElementsKindTransition(elements_kind(), next_kind));
- ASSERT(next_kind != elements_kind());
-
- Map* next_map;
- MaybeObject* maybe_next_map =
- this->CopyDropTransitions(DescriptorArray::CANNOT_BE_SHARED);
- if (!maybe_next_map->To(&next_map)) return maybe_next_map;
-
- next_map->set_elements_kind(next_kind);
- next_map->SetBackPointer(this);
- this->set_elements_transition_map(next_map);
- return next_map;
-}
-
-
static MaybeObject* AddMissingElementsTransitions(Map* map,
ElementsKind to_kind) {
ASSERT(IsFastElementsKind(map->elements_kind()));
@@ -2312,18 +2297,18 @@ static MaybeObject* AddMissingElementsTransitions(Map* map,
Map* current_map = map;
for (; index < to_index; ++index) {
- ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(index + 1);
- MaybeObject* maybe_next_map =
- current_map->CreateNextElementsTransition(next_kind);
- if (!maybe_next_map->To(&current_map)) return maybe_next_map;
+ ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(index + 1);
+ MaybeObject* maybe_next_map =
+ current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ if (!maybe_next_map->To(&current_map)) return maybe_next_map;
}
// In case we are exiting the fast elements kind system, just add the map in
// the end.
if (!IsFastElementsKind(to_kind)) {
- MaybeObject* maybe_next_map =
- current_map->CreateNextElementsTransition(to_kind);
- if (!maybe_next_map->To(&current_map)) return maybe_next_map;
+ MaybeObject* maybe_next_map =
+ current_map->CopyAsElementsKind(to_kind, INSERT_TRANSITION);
+ if (!maybe_next_map->To(&current_map)) return maybe_next_map;
}
ASSERT(current_map->elements_kind() == to_kind);
@@ -2340,22 +2325,6 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
}
-// If the map is using the empty descriptor array, install a new empty
-// descriptor array that will contain an element transition.
-// TODO(verwaest) Goes away once the descriptor array is immutable.
-static MaybeObject* EnsureMayContainTransitions(Map* map) {
- if (map->instance_descriptors()->MayContainTransitions()) return map;
- DescriptorArray* descriptor_array;
- MaybeObject* maybe_descriptor_array =
- DescriptorArray::Allocate(0, DescriptorArray::CANNOT_BE_SHARED);
- if (!maybe_descriptor_array->To(&descriptor_array)) {
- return maybe_descriptor_array;
- }
- map->set_instance_descriptors(descriptor_array);
- return map;
-}
-
-
MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
Map* start_map = map();
ElementsKind from_kind = start_map->elements_kind();
@@ -2364,12 +2333,11 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
return start_map;
}
- Context* global_context = GetIsolate()->context()->global_context();
bool allow_store_transition =
// Only remember the map transition if the object's map is NOT equal to
// the global object_function's map and there is not an already existing
// non-matching element transition.
- (global_context->object_function()->map() != map()) &&
+ (GetIsolate()->empty_object_map() != map()) &&
!start_map->IsUndefined() && !start_map->is_shared() &&
IsFastElementsKind(from_kind);
@@ -2381,16 +2349,9 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
}
if (!allow_store_transition) {
- // Create a new free-floating map only if we are not allowed to store it.
- Map* new_map = NULL;
- MaybeObject* maybe_new_map =
- start_map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- new_map->set_elements_kind(to_kind);
- return new_map;
+ return start_map->CopyAsElementsKind(to_kind, OMIT_TRANSITION);
}
- EnsureMayContainTransitions(start_map);
Map* closest_map = FindClosestElementsTransition(start_map, to_kind);
if (closest_map->elements_kind() == to_kind) {
@@ -2412,47 +2373,47 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
}
if (HasFastProperties()) {
- LookupInDescriptor(name, result);
- if (result->IsFound()) {
- // A property, a map transition or a null descriptor was found.
- // We return all of these result types because
- // LocalLookupRealNamedProperty is used when setting properties
- // where map transitions and null descriptors are handled.
- ASSERT(result->holder() == this && result->type() != NORMAL);
- // Disallow caching for uninitialized constants. These can only
- // occur as fields.
- if (result->IsReadOnly() && result->type() == FIELD &&
- FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
- result->DisallowCaching();
- }
- return;
+ map()->LookupDescriptor(this, name, result);
+ // A property or a map transition was found. We return all of these result
+ // types because LocalLookupRealNamedProperty is used when setting
+ // properties where map transitions are handled.
+ ASSERT(!result->IsFound() ||
+ (result->holder() == this && result->IsFastPropertyType()));
+ // Disallow caching for uninitialized constants. These can only
+ // occur as fields.
+ if (result->IsField() &&
+ result->IsReadOnly() &&
+ FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
+ result->DisallowCaching();
}
- } else {
- int entry = property_dictionary()->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- Object* value = property_dictionary()->ValueAt(entry);
- if (IsGlobalObject()) {
- PropertyDetails d = property_dictionary()->DetailsAt(entry);
- if (d.IsDeleted()) {
- result->NotFound();
- return;
- }
- value = JSGlobalPropertyCell::cast(value)->value();
+ return;
+ }
+
+ int entry = property_dictionary()->FindEntry(name);
+ if (entry != StringDictionary::kNotFound) {
+ Object* value = property_dictionary()->ValueAt(entry);
+ if (IsGlobalObject()) {
+ PropertyDetails d = property_dictionary()->DetailsAt(entry);
+ if (d.IsDeleted()) {
+ result->NotFound();
+ return;
}
- // Make sure to disallow caching for uninitialized constants
- // found in the dictionary-mode objects.
- if (value->IsTheHole()) result->DisallowCaching();
- result->DictionaryResult(this, entry);
- return;
+ value = JSGlobalPropertyCell::cast(value)->value();
}
+ // Make sure to disallow caching for uninitialized constants
+ // found in the dictionary-mode objects.
+ if (value->IsTheHole()) result->DisallowCaching();
+ result->DictionaryResult(this, entry);
+ return;
}
+
result->NotFound();
}
void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty()) return;
+ if (result->IsFound()) return;
LookupRealNamedPropertyInPrototypes(name, result);
}
@@ -2468,8 +2429,8 @@ void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
return result->HandlerResult(JSProxy::cast(pt));
}
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- ASSERT(!(result->IsProperty() && result->type() == INTERCEPTOR));
- if (result->IsProperty()) return;
+ ASSERT(!(result->IsFound() && result->type() == INTERCEPTOR));
+ if (result->IsFound()) return;
}
result->NotFound();
}
@@ -2538,7 +2499,7 @@ MaybeObject* JSReceiver::SetProperty(LookupResult* result,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
JSReceiver::StoreFromKeyed store_mode) {
- if (result->IsFound() && result->type() == HANDLER) {
+ if (result->IsHandler()) {
return result->proxy()->SetPropertyWithHandler(
this, key, value, attributes, strict_mode);
} else {
@@ -2676,7 +2637,7 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
String* name_raw, DeleteMode mode) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<Object> receiver(this);
+ Handle<JSProxy> receiver(this);
Handle<Object> name(name_raw);
Handle<Object> args[] = { name };
@@ -2686,8 +2647,9 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
Object* bool_result = result->ToBoolean();
if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
+ Handle<Object> handler(receiver->handler());
Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
- Handle<Object> args[] = { Handle<Object>(handler()), trap_name };
+ Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_failed", HandleVector(args, ARRAY_SIZE(args)));
isolate->Throw(*error);
@@ -2790,7 +2752,7 @@ void JSProxy::Fix() {
Object* hash;
if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
Handle<JSObject> new_self(JSObject::cast(*self));
- isolate->factory()->SetIdentityHash(new_self, hash);
+ isolate->factory()->SetIdentityHash(new_self, Smi::cast(hash));
}
}
@@ -2823,8 +2785,8 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
- String* name,
- Object* value,
+ String* name_raw,
+ Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
StoreFromKeyed store_mode) {
@@ -2836,114 +2798,127 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. We make these short keys into symbols to avoid constantly
// reallocating them.
- if (!name->IsSymbol() && name->length() <= 2) {
+ if (!name_raw->IsSymbol() && name_raw->length() <= 2) {
Object* symbol_version;
- { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name);
+ { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name_raw);
if (maybe_symbol_version->ToObject(&symbol_version)) {
- name = String::cast(symbol_version);
+ name_raw = String::cast(symbol_version);
}
}
}
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ if (!heap->isolate()->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(
- result, name, value, true, strict_mode);
+ result, name_raw, value_raw, true, strict_mode);
}
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return value;
+ if (proto->IsNull()) return value_raw;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetPropertyForResult(
- result, name, value, attributes, strict_mode, store_mode);
+ result, name_raw, value_raw, attributes, strict_mode, store_mode);
}
- if (!result->IsProperty() && !IsJSContextExtensionObject()) {
+ // From this point on everything needs to be handlified, because
+ // SetPropertyViaPrototypes might call back into JavaScript.
+ HandleScope scope(GetIsolate());
+ Handle<JSObject> self(this);
+ Handle<String> name(name_raw);
+ Handle<Object> value(value_raw);
+
+ if (!result->IsProperty() && !self->IsJSContextExtensionObject()) {
bool done = false;
- MaybeObject* result_object =
- SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
+ MaybeObject* result_object = self->SetPropertyViaPrototypes(
+ *name, *value, attributes, strict_mode, &done);
if (done) return result_object;
}
if (!result->IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name, value, attributes, strict_mode, store_mode);
+ return self->AddProperty(
+ *name, *value, attributes, strict_mode, store_mode);
}
- if (result->IsReadOnly() && result->IsProperty()) {
+ if (result->IsProperty() && result->IsReadOnly()) {
if (strict_mode == kStrictMode) {
- Handle<JSObject> self(this);
- Handle<String> hname(name);
- Handle<Object> args[] = { hname, self };
+ Handle<Object> args[] = { name, self };
return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
} else {
- return value;
+ return *value;
}
}
+
// This is a real property that is not read-only, or it is a
// transition and there are no setters in the prototypes.
switch (result->type()) {
case NORMAL:
- return SetNormalizedProperty(result, value);
+ return self->SetNormalizedProperty(result, *value);
case FIELD:
- return FastPropertyAtPut(result->GetFieldIndex(), value);
- case MAP_TRANSITION:
- if (attributes == result->GetAttributes()) {
- // Only use map transition if the attributes match.
- return AddFastPropertyUsingMap(result->GetTransitionMap(),
- name,
- value);
- }
- return ConvertDescriptorToField(name, value, attributes);
+ return self->FastPropertyAtPut(result->GetFieldIndex(), *value);
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
- if (value == result->GetConstantFunction()) return value;
+ if (*value == result->GetConstantFunction()) return *value;
// Preserve the attributes of this existing property.
attributes = result->GetAttributes();
- return ConvertDescriptorToField(name, value, attributes);
+ return self->ConvertDescriptorToField(*name, *value, attributes);
case CALLBACKS: {
Object* callback_object = result->GetCallbackObject();
- if (callback_object->IsAccessorPair() &&
- !AccessorPair::cast(callback_object)->ContainsAccessor()) {
- return ConvertDescriptorToField(name, value, attributes);
- }
- return SetPropertyWithCallback(callback_object,
- name,
- value,
- result->holder(),
- strict_mode);
+ return self->SetPropertyWithCallback(callback_object,
+ *name,
+ *value,
+ result->holder(),
+ strict_mode);
}
case INTERCEPTOR:
- return SetPropertyWithInterceptor(name, value, attributes, strict_mode);
- case CONSTANT_TRANSITION: {
+ return self->SetPropertyWithInterceptor(*name,
+ *value,
+ attributes,
+ strict_mode);
+ case TRANSITION: {
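+ // TRANSITION subsumes the old MAP_TRANSITION, CONSTANT_TRANSITION and
+ // NULL_DESCRIPTOR cases; the target map's last-added descriptor tells us
+ // what kind of property the transition leads to.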
+ Map* transition_map = result->GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
+
+ DescriptorArray* descriptors = transition_map->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ if (details.type() == FIELD) {
+ if (attributes == details.attributes()) {
+ int field_index = descriptors->GetFieldIndex(descriptor);
+ return self->AddFastPropertyUsingMap(transition_map,
+ *name,
+ *value,
+ field_index);
+ }
+ return self->ConvertDescriptorToField(*name, *value, attributes);
+ } else if (details.type() == CALLBACKS) {
+ return ConvertDescriptorToField(*name, *value, attributes);
+ }
+
+ ASSERT(details.type() == CONSTANT_FUNCTION);
+
+ Object* constant_function = descriptors->GetValue(descriptor);
// If the same constant function is being added we can simply
// transition to the target map.
- Map* target_map = result->GetTransitionMap();
- DescriptorArray* target_descriptors = target_map->instance_descriptors();
- int number = target_descriptors->SearchWithCache(name);
- ASSERT(number != DescriptorArray::kNotFound);
- ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
- JSFunction* function =
- JSFunction::cast(target_descriptors->GetValue(number));
- if (value == function) {
- set_map(target_map);
- return value;
+ if (constant_function == *value) {
+ self->set_map(transition_map);
+ return constant_function;
}
- // Otherwise, replace with a MAP_TRANSITION to a new map with a
- // FIELD, even if the value is a constant function.
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+ // Otherwise, replace with a map transition to a new map with a FIELD,
+ // even if the value is a constant function.
+ return ConvertTransitionToMapTransition(
+ result->GetTransitionIndex(), *name, *value, attributes);
}
- case NULL_DESCRIPTOR:
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case HANDLER:
+ case NONEXISTENT:
UNREACHABLE();
- return value;
+ return *value;
}
UNREACHABLE(); // keep the compiler happy
- return value;
+ return *value;
}
@@ -2978,6 +2953,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
Isolate* isolate = GetIsolate();
LookupResult result(isolate);
LocalLookup(name, &result);
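+ // With transitions now stored separately from descriptors, a failed local
+ // lookup must explicitly check for a matching transition.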
+ if (!result.IsFound()) map()->LookupTransition(this, name, &result);
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
@@ -3005,22 +2981,14 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
return AddProperty(name, value, attributes, kNonStrictMode);
}
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
-
// Note: the IsReadOnly check was removed from this cloned code path.
switch (result.type()) {
- case NORMAL:
+ case NORMAL: {
+ PropertyDetails details = PropertyDetails(attributes, NORMAL);
return SetNormalizedProperty(name, value, details);
+ }
case FIELD:
return FastPropertyAtPut(result.GetFieldIndex(), value);
- case MAP_TRANSITION:
- if (attributes == result.GetAttributes()) {
- // Only use map transition if the attributes match.
- return AddFastPropertyUsingMap(result.GetTransitionMap(),
- name,
- value);
- }
- return ConvertDescriptorToField(name, value, attributes);
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
if (value == result.GetConstantFunction()) return value;
@@ -3031,12 +2999,35 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
case INTERCEPTOR:
// Override callback in clone
return ConvertDescriptorToField(name, value, attributes);
- case CONSTANT_TRANSITION:
- // Replace with a MAP_TRANSITION to a new map with a FIELD, even
- // if the value is a function.
- case NULL_DESCRIPTOR:
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+ case TRANSITION: {
+ Map* transition_map = result.GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
+
+ DescriptorArray* descriptors = transition_map->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ if (details.type() == FIELD) {
+ if (attributes == details.attributes()) {
+ int field_index = descriptors->GetFieldIndex(descriptor);
+ return AddFastPropertyUsingMap(transition_map,
+ name,
+ value,
+ field_index);
+ }
+ return ConvertDescriptorToField(name, value, attributes);
+ } else if (details.type() == CALLBACKS) {
+ return ConvertDescriptorToField(name, value, attributes);
+ }
+
+ ASSERT(details.type() == CONSTANT_FUNCTION);
+
+ // Replace a transition to a CONSTANT_FUNCTION with a map transition to a
+ // new map with a FIELD, even if the value is a function.
+ return ConvertTransitionToMapTransition(
+ result.GetTransitionIndex(), name, value, attributes);
+ }
case HANDLER:
+ case NONEXISTENT:
UNREACHABLE();
}
UNREACHABLE(); // keep the compiler happy
@@ -3051,7 +3042,7 @@ PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) return result.GetAttributes();
+ if (result.IsFound()) return result.GetAttributes();
if (continue_search) {
// Continue searching via the prototype chain.
@@ -3144,7 +3135,7 @@ PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
receiver, result, name, continue_search);
}
}
- if (result->IsProperty()) {
+ if (result->IsFound()) {
switch (result->type()) {
case NORMAL: // fall through
case FIELD:
@@ -3158,7 +3149,8 @@ PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
case INTERCEPTOR:
return result->holder()->GetPropertyAttributeWithInterceptor(
JSObject::cast(receiver), name, continue_search);
- default:
+ case TRANSITION:
+ case NONEXISTENT:
UNREACHABLE();
}
}
@@ -3197,17 +3189,16 @@ MaybeObject* NormalizedMapCache::Get(JSObject* obj,
// except for the code cache, which can contain some ics which can be
// applied to the shared map.
Object* fresh;
- { MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kCodeCacheOffset) == 0);
- int offset = Map::kCodeCacheOffset + kPointerSize;
- ASSERT(memcmp(Map::cast(fresh)->address() + offset,
- Map::cast(result)->address() + offset,
- Map::kSize - offset) == 0);
- }
+ MaybeObject* maybe_fresh =
+ fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (maybe_fresh->ToObject(&fresh)) {
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kCodeCacheOffset) == 0);
+ int offset = Map::kCodeCacheOffset + kPointerSize;
+ ASSERT(memcmp(Map::cast(fresh)->address() + offset,
+ Map::cast(result)->address() + offset,
+ Map::kSize - offset) == 0);
}
}
#endif
@@ -3218,6 +3209,7 @@ MaybeObject* NormalizedMapCache::Get(JSObject* obj,
fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
+ ASSERT(Map::cast(result)->is_dictionary_map());
set(index, result);
isolate->counters()->normalized_maps()->Increment();
@@ -3297,8 +3289,9 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case CONSTANT_FUNCTION: {
- PropertyDetails d =
- PropertyDetails(details.attributes(), NORMAL, details.index());
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ NORMAL,
+ details.descriptor_index());
Object* value = descs->GetConstantFunction(i);
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
@@ -3306,8 +3299,9 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
break;
}
case FIELD: {
- PropertyDetails d =
- PropertyDetails(details.attributes(), NORMAL, details.index());
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ NORMAL,
+ details.descriptor_index());
Object* value = FastPropertyAt(descs->GetFieldIndex(i));
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
@@ -3315,25 +3309,19 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
break;
}
case CALLBACKS: {
- if (!descs->IsProperty(i)) break;
Object* value = descs->GetCallbacksObject(i);
- if (value->IsAccessorPair()) {
- MaybeObject* maybe_copy =
- AccessorPair::cast(value)->CopyWithoutTransitions();
- if (!maybe_copy->To(&value)) return maybe_copy;
- }
+ details = details.set_pointer(0);
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, details);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
break;
}
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
case INTERCEPTOR:
break;
case HANDLER:
case NORMAL:
+ case TRANSITION:
+ case NONEXISTENT:
UNREACHABLE();
break;
}
@@ -3346,11 +3334,11 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
dictionary->SetNextEnumerationIndex(index);
Map* new_map;
- { MaybeObject* maybe_map =
- current_heap->isolate()->context()->global_context()->
- normalized_map_cache()->Get(this, mode);
- if (!maybe_map->To(&new_map)) return maybe_map;
- }
+ MaybeObject* maybe_map =
+ current_heap->isolate()->context()->native_context()->
+ normalized_map_cache()->Get(this, mode);
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ ASSERT(new_map->is_dictionary_map());
// We have now successfully allocated all the necessary objects.
// Changes can now be made with the guarantee that all of them take effect.
@@ -3366,9 +3354,7 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
-instance_size_delta);
}
-
set_map(new_map);
- new_map->clear_instance_descriptors();
set_properties(dictionary);
@@ -3515,7 +3501,7 @@ Smi* JSReceiver::GenerateIdentityHash() {
}
-MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) {
+MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) {
MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
hash);
if (maybe->IsFailure()) return maybe;
@@ -3561,6 +3547,7 @@ MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
Object* JSObject::GetHiddenProperty(String* key) {
+ ASSERT(key->IsSymbol());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3570,22 +3557,32 @@ Object* JSObject::GetHiddenProperty(String* key) {
return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
}
ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+ MaybeObject* hidden_lookup =
+ GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
ASSERT(!hidden_lookup->IsFailure()); // Cannot fail with ONLY_RETURN_INLINE_VALUE.
- if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) {
- return GetHeap()->undefined_value();
+ Object* inline_value = hidden_lookup->ToObjectUnchecked();
+
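+ // inline_value is undefined (no hidden properties), a Smi (just the
+ // identity hash, stored inline) or an ObjectHashTable with all entries.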
+ if (inline_value->IsSmi()) {
+ // Handle inline-stored identity hash.
+ if (key == GetHeap()->identity_hash_symbol()) {
+ return inline_value;
+ } else {
+ return GetHeap()->undefined_value();
+ }
}
- StringDictionary* dictionary =
- StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
- int entry = dictionary->FindEntry(key);
- if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value();
- return dictionary->ValueAt(entry);
+
+ if (inline_value->IsUndefined()) return GetHeap()->undefined_value();
+
+ ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
+ Object* entry = hashtable->Lookup(key);
+ if (entry->IsTheHole()) return GetHeap()->undefined_value();
+ return entry;
}
Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
- Handle<Object> value) {
+ Handle<String> key,
+ Handle<Object> value) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->SetHiddenProperty(*key, *value),
Object);
@@ -3593,6 +3590,7 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
+ ASSERT(key->IsSymbol());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3602,27 +3600,31 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
}
ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true);
- StringDictionary* dictionary;
- if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup;
- // If it was found, check if the key is already in the dictionary.
- int entry = dictionary->FindEntry(key);
- if (entry != StringDictionary::kNotFound) {
- // If key was found, just update the value.
- dictionary->ValueAtPut(entry, value);
- return this;
+ // If there is no backing store yet, store the identity hash inline.
+ MaybeObject* hidden_lookup =
+ GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
+ ASSERT(!hidden_lookup->IsFailure());
+ Object* inline_value = hidden_lookup->ToObjectUnchecked();
+
+ if (value->IsSmi() &&
+ key == GetHeap()->identity_hash_symbol() &&
+ (inline_value->IsUndefined() || inline_value->IsSmi())) {
+ return SetHiddenPropertiesHashTable(value);
}
- // Key was not already in the dictionary, so add the entry.
- MaybeObject* insert_result = dictionary->Add(key,
- value,
- PropertyDetails(NONE, NORMAL));
- StringDictionary* new_dict;
- if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result;
- if (new_dict != dictionary) {
+
+ hidden_lookup = GetHiddenPropertiesHashTable(CREATE_NEW_IF_ABSENT);
+ ObjectHashTable* hashtable;
+ if (!hidden_lookup->To(&hashtable)) return hidden_lookup;
+
+ // Add the key/value pair; Put overwrites the value if the key already exists.
+ MaybeObject* insert_result = hashtable->Put(key, value);
+ ObjectHashTable* new_table;
+ if (!insert_result->To(&new_table)) return insert_result;
+ if (new_table != hashtable) {
// If adding the key expanded the hash table (i.e., Put returned a new
// table), store it back to the object.
- MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict);
+ MaybeObject* store_result = SetHiddenPropertiesHashTable(new_table);
if (store_result->IsFailure()) return store_result;
}
// Return this to mark success.
@@ -3631,6 +3633,7 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
void JSObject::DeleteHiddenProperty(String* key) {
+ ASSERT(key->IsSymbol());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3640,18 +3643,18 @@ void JSObject::DeleteHiddenProperty(String* key) {
JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
return;
}
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+ MaybeObject* hidden_lookup =
+ GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
ASSERT(!hidden_lookup->IsFailure()); // Cannot fail with ONLY_RETURN_INLINE_VALUE.
if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
- StringDictionary* dictionary =
- StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
- int entry = dictionary->FindEntry(key);
- if (entry == StringDictionary::kNotFound) {
- // Key wasn't in dictionary. Deletion is a success.
- return;
- }
- // Key was in the dictionary. Remove it.
- dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION);
+ // We never delete (inline-stored) identity hashes.
+ ASSERT(!hidden_lookup->ToObjectUnchecked()->IsSmi());
+
+ ObjectHashTable* hashtable =
+ ObjectHashTable::cast(hidden_lookup->ToObjectUnchecked());
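+ // Deletion is done by storing the hole, which never grows the table and
+ // therefore cannot allocate (see the ASSERT below).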
+ MaybeObject* delete_result = hashtable->Put(key, GetHeap()->the_hole_value());
+ USE(delete_result);
+ ASSERT(!delete_result->IsFailure()); // Delete does not cause GC.
}
@@ -3662,78 +3665,97 @@ bool JSObject::HasHiddenProperties() {
}
-MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) {
+MaybeObject* JSObject::GetHiddenPropertiesHashTable(
+ InitializeHiddenProperties init_option) {
ASSERT(!IsJSGlobalProxy());
+ Object* inline_value;
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden symbol. Since the
// hidden symbol's hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = this->map()->instance_descriptors();
- if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == GetHeap()->hidden_symbol())) {
- if (descriptors->GetType(0) == FIELD) {
- Object* hidden_store =
- this->FastPropertyAt(descriptors->GetFieldIndex(0));
- return StringDictionary::cast(hidden_store);
+ if (descriptors->number_of_descriptors() > 0) {
+ int sorted_index = descriptors->GetSortedKeyIndex(0);
+ if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol()) {
+ ASSERT(descriptors->GetType(sorted_index) == FIELD);
+ inline_value = this->FastPropertyAt(
+ descriptors->GetFieldIndex(sorted_index));
} else {
- ASSERT(descriptors->GetType(0) == NULL_DESCRIPTOR ||
- descriptors->GetType(0) == MAP_TRANSITION);
+ inline_value = GetHeap()->undefined_value();
}
+ } else {
+ inline_value = GetHeap()->undefined_value();
}
} else {
PropertyAttributes attributes;
// You can't install a getter on a property indexed by the hidden symbol,
// so we can be sure that GetLocalPropertyPostInterceptor returns a real
// object.
- Object* lookup =
+ inline_value =
GetLocalPropertyPostInterceptor(this,
GetHeap()->hidden_symbol(),
&attributes)->ToObjectUnchecked();
- if (!lookup->IsUndefined()) {
- return StringDictionary::cast(lookup);
- }
}
- if (!create_if_absent) return GetHeap()->undefined_value();
- const int kInitialSize = 5;
- MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
- StringDictionary* dictionary;
- if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
+
+ if (init_option == ONLY_RETURN_INLINE_VALUE ||
+ inline_value->IsHashTable()) {
+ return inline_value;
+ }
+
+ ObjectHashTable* hashtable;
+ static const int kInitialCapacity = 4;
+ MaybeObject* maybe_obj =
+ ObjectHashTable::Allocate(kInitialCapacity,
+ ObjectHashTable::USE_CUSTOM_MINIMUM_CAPACITY);
+ if (!maybe_obj->To<ObjectHashTable>(&hashtable)) return maybe_obj;
+
+ if (inline_value->IsSmi()) {
+ // We were storing the identity hash inline and now allocated an actual
+ // dictionary. Put the identity hash into the new dictionary.
+ MaybeObject* insert_result =
+ hashtable->Put(GetHeap()->identity_hash_symbol(), inline_value);
+ ObjectHashTable* new_table;
+ if (!insert_result->To(&new_table)) return insert_result;
+ // We expect no resizing for the first insert.
+ ASSERT_EQ(hashtable, new_table);
+ }
+
MaybeObject* store_result =
SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- dictionary,
+ hashtable,
DONT_ENUM,
kNonStrictMode,
OMIT_EXTENSIBILITY_CHECK);
if (store_result->IsFailure()) return store_result;
- return dictionary;
+ return hashtable;
}
-MaybeObject* JSObject::SetHiddenPropertiesDictionary(
- StringDictionary* dictionary) {
+MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
ASSERT(!IsJSGlobalProxy());
- ASSERT(HasHiddenProperties());
+ // We can store the identity hash inline iff there is no backing store
+ // for hidden properties yet.
+ ASSERT(HasHiddenProperties() != value->IsSmi());
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden symbol. Since the
// hidden symbol's hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = this->map()->instance_descriptors();
- if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == GetHeap()->hidden_symbol())) {
- if (descriptors->GetType(0) == FIELD) {
- this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary);
+ if (descriptors->number_of_descriptors() > 0) {
+ int sorted_index = descriptors->GetSortedKeyIndex(0);
+ if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol()) {
+ ASSERT(descriptors->GetType(sorted_index) == FIELD);
+ this->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
+ value);
return this;
- } else {
- ASSERT(descriptors->GetType(0) == NULL_DESCRIPTOR ||
- descriptors->GetType(0) == MAP_TRANSITION);
}
}
}
MaybeObject* store_result =
SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- dictionary,
+ value,
DONT_ENUM,
kNonStrictMode,
OMIT_EXTENSIBILITY_CHECK);
@@ -3747,7 +3769,7 @@ MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
- if (!result.IsProperty()) return GetHeap()->true_value();
+ if (!result.IsFound()) return GetHeap()->true_value();
// Normalize object if needed.
Object* obj;
@@ -3781,7 +3803,9 @@ MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) {
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
}
MaybeObject* raw_result =
@@ -3816,7 +3840,9 @@ MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
MaybeObject* raw_result = this_handle->GetElementsAccessor()->Delete(
*this_handle,
@@ -3896,7 +3922,7 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
} else {
LookupResult result(isolate);
LocalLookup(name, &result);
- if (!result.IsProperty()) return isolate->heap()->true_value();
+ if (!result.IsFound()) return isolate->heap()->true_value();
// Ignore attributes if forcing a deletion.
if (result.IsDontDelete() && mode != FORCE_DELETION) {
if (mode == STRICT_DELETION) {
@@ -3909,7 +3935,7 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
return isolate->heap()->false_value();
}
// Check for interceptor.
- if (result.type() == INTERCEPTOR) {
+ if (result.IsInterceptor()) {
// Skip interceptor if forcing a deletion.
if (mode == FORCE_DELETION) {
return DeletePropertyPostInterceptor(name, mode);
@@ -4036,15 +4062,15 @@ bool JSObject::ReferencesObject(Object* obj) {
if (IsJSFunction()) {
// Get the constructor function for arguments array.
JSObject* arguments_boilerplate =
- heap->isolate()->context()->global_context()->
+ heap->isolate()->context()->native_context()->
arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
- // Get the context and don't check if it is the global context.
+ // Get the context and don't check if it is the native context.
JSFunction* f = JSFunction::cast(this);
Context* context = f->context();
- if (context->IsGlobalContext()) {
+ if (context->IsNativeContext()) {
return false;
}
@@ -4120,10 +4146,9 @@ MaybeObject* JSObject::PreventExtensions() {
// Do a map transition, other objects with this map may still
// be extensible.
Map* new_map;
- { MaybeObject* maybe =
- map()->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
- if (!maybe->To<Map>(&new_map)) return maybe;
- }
+ MaybeObject* maybe = map()->Copy();
+ if (!maybe->To(&new_map)) return maybe;
+
new_map->set_is_extensible(false);
set_map(new_map);
ASSERT(!map()->is_extensible());
@@ -4143,27 +4168,31 @@ bool JSReceiver::IsSimpleEnum() {
o = JSObject::cast(o)->GetPrototype()) {
if (!o->IsJSObject()) return false;
JSObject* curr = JSObject::cast(o);
- if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
+ int enum_length = curr->map()->EnumLength();
+ if (enum_length == Map::kInvalidEnumCache) return false;
ASSERT(!curr->HasNamedInterceptor());
ASSERT(!curr->HasIndexedInterceptor());
ASSERT(!curr->IsAccessCheckNeeded());
if (curr->NumberOfEnumElements() > 0) return false;
- if (curr != this) {
- FixedArray* curr_fixed_array =
- FixedArray::cast(curr->map()->instance_descriptors()->GetEnumCache());
- if (curr_fixed_array->length() > 0) return false;
- }
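+ // Prototypes (unlike the receiver itself) must not contribute any
+ // enumerable own properties.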
+ if (curr != this && enum_length != 0) return false;
}
return true;
}
+void Map::SetDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors) {
+ Isolate* isolate = map->GetIsolate();
+ CALL_HEAP_FUNCTION_VOID(isolate, map->SetDescriptors(*descriptors));
+}
+
+
int Map::NumberOfDescribedProperties(PropertyAttributes filter) {
int result = 0;
DescriptorArray* descs = instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
- if (descs->IsProperty(i) && (details.attributes() & filter) == 0) {
+ if ((details.attributes() & filter) == 0) {
result++;
}
}
@@ -4174,9 +4203,7 @@ int Map::NumberOfDescribedProperties(PropertyAttributes filter) {
int Map::PropertyIndexFor(String* name) {
DescriptorArray* descs = instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (name->Equals(descs->GetKey(i)) && !descs->IsNullDescriptor(i)) {
- return descs->GetFieldIndex(i);
- }
+ if (name->Equals(descs->GetKey(i))) return descs->GetFieldIndex(i);
}
return -1;
}
@@ -4255,20 +4282,20 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
JSReceiver::cast(current)->LocalLookup(name, result);
- if (result->IsProperty()) return;
+ if (result->IsFound()) return;
}
result->NotFound();
}
// Search object and its prototype chain for callback properties.
-void JSObject::LookupCallback(String* name, LookupResult* result) {
+void JSObject::LookupCallbackProperty(String* name, LookupResult* result) {
Heap* heap = GetHeap();
for (Object* current = this;
current != heap->null_value() && current->IsJSObject();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
- if (result->IsFound() && result->type() == CALLBACKS) return;
+ if (result->IsPropertyCallbacks()) return;
}
result->NotFound();
}
@@ -4371,7 +4398,7 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
MaybeObject* JSObject::CreateAccessorPairFor(String* name) {
LookupResult result(GetHeap()->isolate());
LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty() && result.type() == CALLBACKS) {
+ if (result.IsPropertyCallbacks()) {
// Note that the result can actually have IsDontDelete() == true when we
// e.g. have to fall back to the slow case while adding a setter after
// successfully reusing a map transition for a getter. Nevertheless, this is
@@ -4380,7 +4407,7 @@ MaybeObject* JSObject::CreateAccessorPairFor(String* name) {
// DefinePropertyAccessor below.
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->CopyWithoutTransitions();
+ return AccessorPair::cast(obj)->Copy();
}
}
return GetHeap()->AllocateAccessorPair();
@@ -4414,9 +4441,9 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name,
}
AccessorPair* accessors;
- { MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
- }
+ MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
+ if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+
accessors->SetComponents(getter, setter);
return SetPropertyCallback(name, accessors, attributes);
}
@@ -4433,8 +4460,8 @@ bool JSObject::CanSetCallback(String* name) {
// to be overwritten because allowing overwriting could potentially
// cause security problems.
LookupResult callback_result(GetIsolate());
- LookupCallback(name, &callback_result);
- if (callback_result.IsProperty()) {
+ LookupCallbackProperty(name, &callback_result);
+ if (callback_result.IsFound()) {
Object* obj = callback_result.GetCallbackObject();
if (obj->IsAccessorInfo() &&
AccessorInfo::cast(obj)->prohibits_overwriting()) {
@@ -4488,17 +4515,17 @@ MaybeObject* JSObject::SetPropertyCallback(String* name,
Object* structure,
PropertyAttributes attributes) {
// Normalize object to make this operation simple.
- { MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
+ MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (maybe_ok->IsFailure()) return maybe_ok;
// For the global object allocate a new map to invalidate the global inline
// caches which have a global property cell reference directly in the code.
if (IsGlobalObject()) {
Map* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- }
+ MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ ASSERT(new_map->is_dictionary_map());
+
set_map(new_map);
// When running crankshaft, changing the map is not enough. We
// need to deoptimize all functions that rely on this global
@@ -4508,9 +4535,8 @@ MaybeObject* JSObject::SetPropertyCallback(String* name,
// Update the dictionary with the new CALLBACKS property.
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
- { MaybeObject* maybe_ok = SetNormalizedProperty(name, structure, details);
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
+ maybe_ok = SetNormalizedProperty(name, structure, details);
+ if (maybe_ok->IsFailure()) return maybe_ok;
return GetHeap()->undefined_value();
}
@@ -4562,111 +4588,32 @@ MaybeObject* JSObject::DefineAccessor(String* name,
}
-static MaybeObject* CreateFreshAccessor(JSObject* obj,
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
- // step 1: create a new getter/setter pair with only the accessor in it
- Heap* heap = obj->GetHeap();
- AccessorPair* accessors2;
- { MaybeObject* maybe_accessors2 = heap->AllocateAccessorPair();
- if (!maybe_accessors2->To(&accessors2)) return maybe_accessors2;
- }
- accessors2->set(component, accessor);
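+// Tries to follow an existing accessor transition: if the target map's
+// descriptor holds the same accessor with the same attributes, the object
+// can simply switch maps. Returning null_value() requests the slow case.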
+static MaybeObject* TryAccessorTransition(JSObject* self,
+ Map* transitioned_map,
+ int target_descriptor,
+ AccessorComponent component,
+ Object* accessor,
+ PropertyAttributes attributes) {
+ DescriptorArray* descs = transitioned_map->instance_descriptors();
+ PropertyDetails details = descs->GetDetails(target_descriptor);
- // step 2: create a copy of the descriptors, incl. the new getter/setter pair
- Map* map1 = obj->map();
- CallbacksDescriptor callbacks_descr2(name, accessors2, attributes);
- DescriptorArray* descriptors2;
- { MaybeObject* maybe_descriptors2 =
- map1->instance_descriptors()->CopyInsert(&callbacks_descr2,
- REMOVE_TRANSITIONS);
- if (!maybe_descriptors2->To(&descriptors2)) return maybe_descriptors2;
- }
+ // If the transition target was not callbacks, fall back to the slow case.
+ if (details.type() != CALLBACKS) return self->GetHeap()->null_value();
+ Object* descriptor = descs->GetCallbacksObject(target_descriptor);
+ if (!descriptor->IsAccessorPair()) return self->GetHeap()->null_value();
- // step 3: create a new map with the new descriptors
- Map* map2;
- { MaybeObject* maybe_map2 = map1->CopyDropDescriptors();
- if (!maybe_map2->To(&map2)) return maybe_map2;
- }
- map2->set_instance_descriptors(descriptors2);
+ Object* target_accessor = AccessorPair::cast(descriptor)->get(component);
+ PropertyAttributes target_attributes = details.attributes();
- // step 4: create a new getter/setter pair with a transition to the new map
- AccessorPair* accessors1;
- { MaybeObject* maybe_accessors1 = heap->AllocateAccessorPair();
- if (!maybe_accessors1->To(&accessors1)) return maybe_accessors1;
+ // Reuse transition if adding same accessor with same attributes.
+ if (target_accessor == accessor && target_attributes == attributes) {
+ self->set_map(transitioned_map);
+ return self;
}
- accessors1->set(component, map2);
- // step 5: create a copy of the descriptors, incl. the new getter/setter pair
- // with the transition
- CallbacksDescriptor callbacks_descr1(name, accessors1, attributes);
- DescriptorArray* descriptors1;
- { MaybeObject* maybe_descriptors1 =
- map1->instance_descriptors()->CopyInsert(&callbacks_descr1,
- KEEP_TRANSITIONS);
- if (!maybe_descriptors1->To(&descriptors1)) return maybe_descriptors1;
- }
-
- // step 6: everything went well so far, so we make our changes visible
- obj->set_map(map2);
- map1->set_instance_descriptors(descriptors1);
- map2->SetBackPointer(map1);
- return obj;
-}
-
-
-static bool TransitionToSameAccessor(Object* map,
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes ) {
- DescriptorArray* descs = Map::cast(map)->instance_descriptors();
- int number = descs->SearchWithCache(name);
- ASSERT(number != DescriptorArray::kNotFound);
- Object* target_accessor =
- AccessorPair::cast(descs->GetCallbacksObject(number))->get(component);
- PropertyAttributes target_attributes = descs->GetDetails(number).attributes();
- return target_accessor == accessor && target_attributes == attributes;
-}
-
-
-static MaybeObject* NewCallbackTransition(JSObject* obj,
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes,
- AccessorPair* accessors2) {
- // step 1: copy the old getter/setter pair and set the new accessor
- AccessorPair* accessors3;
- { MaybeObject* maybe_accessors3 = accessors2->CopyWithoutTransitions();
- if (!maybe_accessors3->To(&accessors3)) return maybe_accessors3;
- }
- accessors3->set(component, accessor);
-
- // step 2: create a copy of the descriptors, incl. the new getter/setter pair
- Map* map2 = obj->map();
- CallbacksDescriptor callbacks_descr3(name, accessors3, attributes);
- DescriptorArray* descriptors3;
- { MaybeObject* maybe_descriptors3 =
- map2->instance_descriptors()->CopyInsert(&callbacks_descr3,
- REMOVE_TRANSITIONS);
- if (!maybe_descriptors3->To(&descriptors3)) return maybe_descriptors3;
- }
-
- // step 3: create a new map with the new descriptors
- Map* map3;
- { MaybeObject* maybe_map3 = map2->CopyDropDescriptors();
- if (!maybe_map3->To(&map3)) return maybe_map3;
- }
- map3->set_instance_descriptors(descriptors3);
-
- // step 4: everything went well so far, so we make our changes visible
- obj->set_map(map3);
- accessors2->set(component, map3);
- map3->SetBackPointer(map2);
- return obj;
+ // The accessor or the attributes differ, so fall back to the slow case.
+ return self->GetHeap()->null_value();
}
@@ -4678,40 +4625,74 @@ MaybeObject* JSObject::DefineFastAccessor(String* name,
LookupResult result(GetIsolate());
LocalLookup(name, &result);
- // If we have a new property, create a fresh accessor plus a transition to it.
- if (!result.IsFound()) {
- return CreateFreshAccessor(this, name, component, accessor, attributes);
- }
+ if (result.IsFound()
+ && !result.IsPropertyCallbacks()
+ && !result.IsTransition()) return GetHeap()->null_value();
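+ // Returning null_value(), here and below, tells the caller to fall back
+ // to the slow path.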
+
+ // Return success if the same accessor with the same attributes already exists.
+ AccessorPair* source_accessors = NULL;
+ if (result.IsPropertyCallbacks()) {
+ Object* callback_value = result.GetCallbackObject();
+ if (callback_value->IsAccessorPair()) {
+ source_accessors = AccessorPair::cast(callback_value);
+ Object* entry = source_accessors->get(component);
+ if (entry == accessor && result.GetAttributes() == attributes) {
+ return this;
+ }
+ } else {
+ return GetHeap()->null_value();
+ }
- // If the property is not a JavaScript accessor, fall back to the slow case.
- if (result.type() != CALLBACKS) return GetHeap()->null_value();
- Object* callback_value = result.GetCallbackObject();
- if (!callback_value->IsAccessorPair()) return GetHeap()->null_value();
- AccessorPair* accessors = AccessorPair::cast(callback_value);
+ int descriptor_number = result.GetDescriptorIndex();
- // Follow a callback transition, if there is a fitting one.
- Object* entry = accessors->get(component);
- if (entry->IsMap() &&
- TransitionToSameAccessor(entry, name, component, accessor, attributes)) {
- set_map(Map::cast(entry));
- return this;
+ map()->LookupTransition(this, name, &result);
+
+ if (result.IsFound()) {
+ Map* target = result.GetTransitionTarget();
+ ASSERT(target->instance_descriptors()->number_of_descriptors() ==
+ map()->instance_descriptors()->number_of_descriptors());
+ ASSERT(target->instance_descriptors()->GetKey(descriptor_number) == name);
+ return TryAccessorTransition(
+ this, target, descriptor_number, component, accessor, attributes);
+ }
+ } else {
+ // If not, lookup a transition.
+ map()->LookupTransition(this, name, &result);
+
+ // If there is a transition, try to follow it.
+ if (result.IsFound()) {
+ Map* target = result.GetTransitionTarget();
+ int descriptor_number = target->LastAdded();
+ ASSERT(target->instance_descriptors()->GetKey(descriptor_number) == name);
+ return TryAccessorTransition(
+ this, target, descriptor_number, component, accessor, attributes);
+ }
}
- // When we re-add the same accessor again, there is nothing to do.
- if (entry == accessor && result.GetAttributes() == attributes) return this;
+ // If there is no transition yet, add a transition to a new accessor pair
+ // containing the accessor.
+ AccessorPair* accessors;
+ MaybeObject* maybe_accessors;
- // Only the other accessor has been set so far, create a new transition.
- if (entry->IsTheHole()) {
- return NewCallbackTransition(this,
- name,
- component,
- accessor,
- attributes,
- accessors);
+ // Allocate a new pair if there were no source accessors. Otherwise, copy the
+ // pair and modify the accessor.
+ if (source_accessors != NULL) {
+ maybe_accessors = source_accessors->Copy();
+ } else {
+ maybe_accessors = GetHeap()->AllocateAccessorPair();
}
+ if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+ accessors->set(component, accessor);
- // Nothing from the above worked, so we have to fall back to the slow case.
- return GetHeap()->null_value();
+ CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
+
+ Map* new_map;
+ MaybeObject* maybe_new_map =
+ map()->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ set_map(new_map);
+ return this;
}
@@ -4739,9 +4720,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// Try to flatten before operating on the string.
name->TryFlatten();
- if (!CanSetCallback(name)) {
- return isolate->heap()->undefined_value();
- }
+ if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
@@ -4777,23 +4756,22 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
break;
}
- { MaybeObject* maybe_ok =
- SetElementCallback(index, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
+ MaybeObject* maybe_ok =
+ SetElementCallback(index, info, info->property_attributes());
+ if (maybe_ok->IsFailure()) return maybe_ok;
} else {
// Lookup the name.
LookupResult result(isolate);
LocalLookup(name, &result);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
- if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
+ if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
return isolate->heap()->undefined_value();
}
- { MaybeObject* maybe_ok =
- SetPropertyCallback(name, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
+
+ MaybeObject* maybe_ok =
+ SetPropertyCallback(name, info, info->property_attributes());
+ if (maybe_ok->IsFailure()) return maybe_ok;
}
return this;
@@ -4819,9 +4797,9 @@ Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
obj != heap->null_value();
- obj = JSObject::cast(obj)->GetPrototype()) {
- JSObject* js_object = JSObject::cast(obj);
- if (js_object->HasDictionaryElements()) {
+ obj = JSReceiver::cast(obj)->GetPrototype()) {
+ if (obj->IsJSObject() && JSObject::cast(obj)->HasDictionaryElements()) {
+ JSObject* js_object = JSObject::cast(obj);
SeededNumberDictionary* dictionary = js_object->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
@@ -4836,12 +4814,12 @@ Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
} else {
for (Object* obj = this;
obj != heap->null_value();
- obj = JSObject::cast(obj)->GetPrototype()) {
+ obj = JSReceiver::cast(obj)->GetPrototype()) {
LookupResult result(heap->isolate());
- JSObject::cast(obj)->LocalLookup(name, &result);
- if (result.IsProperty()) {
+ JSReceiver::cast(obj)->LocalLookup(name, &result);
+ if (result.IsFound()) {
if (result.IsReadOnly()) return heap->undefined_value();
- if (result.type() == CALLBACKS) {
+ if (result.IsPropertyCallbacks()) {
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
return AccessorPair::cast(obj)->GetComponent(component);
@@ -4875,46 +4853,19 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
-MaybeObject* Map::CopyDropDescriptors() {
- Heap* heap = GetHeap();
- Object* result;
- { MaybeObject* maybe_result =
- heap->AllocateMap(instance_type(), instance_size());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Map::cast(result)->set_prototype(prototype());
- Map::cast(result)->set_constructor(constructor());
- // Don't copy descriptors, so map transitions always remain a forest.
- // If we retained the same descriptors we would have two maps
- // pointing to the same transition which is bad because the garbage
- // collector relies on being able to reverse pointers from transitions
- // to maps. If properties need to be retained use CopyDropTransitions.
- Map::cast(result)->clear_instance_descriptors();
- // Please note instance_type and instance_size are set when allocated.
- Map::cast(result)->set_inobject_properties(inobject_properties());
- Map::cast(result)->set_unused_property_fields(unused_property_fields());
-
- // If the map has pre-allocated properties always start out with a descriptor
- // array describing these properties.
- if (pre_allocated_property_fields() > 0) {
- ASSERT(constructor()->IsJSFunction());
- JSFunction* ctor = JSFunction::cast(constructor());
- Object* descriptors;
- { MaybeObject* maybe_descriptors =
- ctor->initial_map()->instance_descriptors()->RemoveTransitions(
- DescriptorArray::MAY_BE_SHARED);
- if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors;
- }
- Map::cast(result)->set_instance_descriptors(
- DescriptorArray::cast(descriptors));
- Map::cast(result)->set_pre_allocated_property_fields(
- pre_allocated_property_fields());
- }
- Map::cast(result)->set_bit_field(bit_field());
- Map::cast(result)->set_bit_field2(bit_field2());
- Map::cast(result)->set_bit_field3(bit_field3());
- Map::cast(result)->set_is_shared(false);
- Map::cast(result)->ClearCodeCache(heap);
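+// Shared copying core: duplicates prototype, constructor and bit fields,
+// but starts out with no own descriptors and an invalid enum cache.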
+MaybeObject* Map::RawCopy(int instance_size) {
+ Map* result;
+ MaybeObject* maybe_result =
+ GetHeap()->AllocateMap(instance_type(), instance_size);
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ result->set_prototype(prototype());
+ result->set_constructor(constructor());
+ result->set_bit_field(bit_field());
+ result->set_bit_field2(bit_field2());
+ result->set_bit_field3(bit_field3());
+ result->SetNumberOfOwnDescriptors(0);
+ result->SetEnumLength(kInvalidEnumCache);
return result;
}
@@ -4926,29 +4877,21 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
new_instance_size -= inobject_properties() * kPointerSize;
}
- Object* result;
- { MaybeObject* maybe_result =
- GetHeap()->AllocateMap(instance_type(), new_instance_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Map* result;
+ MaybeObject* maybe_result = RawCopy(new_instance_size);
+ if (!maybe_result->To(&result)) return maybe_result;
if (mode != CLEAR_INOBJECT_PROPERTIES) {
- Map::cast(result)->set_inobject_properties(inobject_properties());
+ result->set_inobject_properties(inobject_properties());
}
- Map::cast(result)->set_prototype(prototype());
- Map::cast(result)->set_constructor(constructor());
-
- Map::cast(result)->set_bit_field(bit_field());
- Map::cast(result)->set_bit_field2(bit_field2());
- Map::cast(result)->set_bit_field3(bit_field3());
- Map::cast(result)->set_code_cache(code_cache());
-
- Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+ result->set_code_cache(code_cache());
+ result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+ result->set_dictionary_map(true);
#ifdef DEBUG
- if (FLAG_verify_heap && Map::cast(result)->is_shared()) {
- Map::cast(result)->SharedMapVerify();
+ if (FLAG_verify_heap && result->is_shared()) {
+ result->SharedMapVerify();
}
#endif
@@ -4956,22 +4899,184 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
}
-MaybeObject* Map::CopyDropTransitions(
- DescriptorArray::SharedMode shared_mode) {
- Object* new_map;
- { MaybeObject* maybe_new_map = CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+MaybeObject* Map::CopyDropDescriptors() {
+ Map* result;
+ MaybeObject* maybe_result = RawCopy(instance_size());
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ // Please note instance_type and instance_size are set when allocated.
+ result->set_inobject_properties(inobject_properties());
+ result->set_unused_property_fields(unused_property_fields());
+
+ result->set_pre_allocated_property_fields(pre_allocated_property_fields());
+ result->set_is_shared(false);
+ result->ClearCodeCache(GetHeap());
+ return result;
+}
+
+
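+// Copies the map and installs the given descriptor array; with
+// INSERT_TRANSITION (and room for more transitions) it also registers a
+// transition from this map to the copy under the given name.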
+MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
+ String* name,
+ TransitionFlag flag) {
+ Map* result;
+ MaybeObject* maybe_result = CopyDropDescriptors();
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ if (descriptors->number_of_descriptors() != 0) {
+ MaybeObject* maybe_failure = result->SetDescriptors(descriptors);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ result->SetNumberOfOwnDescriptors(descriptors->number_of_descriptors());
}
- Object* descriptors;
- { MaybeObject* maybe_descriptors =
- instance_descriptors()->RemoveTransitions(shared_mode);
- if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors;
+
+ if (flag == INSERT_TRANSITION && CanHaveMoreTransitions()) {
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions = AddTransition(name, result);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+
+ set_transitions(transitions);
+ result->SetBackPointer(this);
+ }
+
+ return result;
+}
+
+
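+// Copies the map with a new elements kind; with INSERT_TRANSITION the copy
+// also becomes this map's elements transition target.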
+MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
+ // Create a new free-floating map only if we are not allowed to store it.
+ Map* new_map = NULL;
+ MaybeObject* maybe_new_map = Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ new_map->set_elements_kind(kind);
+
+ if (flag == INSERT_TRANSITION) {
+ ASSERT(!HasElementsTransition() ||
+ ((elements_transition_map()->elements_kind() == DICTIONARY_ELEMENTS ||
+ IsExternalArrayElementsKind(
+ elements_transition_map()->elements_kind())) &&
+ (kind == DICTIONARY_ELEMENTS ||
+ IsExternalArrayElementsKind(kind))));
+ ASSERT(!IsFastElementsKind(kind) ||
+ IsMoreGeneralElementsKindTransition(elements_kind(), kind));
+ ASSERT(kind != elements_kind());
+
+ MaybeObject* added_elements = set_elements_transition_map(new_map);
+ if (added_elements->IsFailure()) return added_elements;
+
+ new_map->SetBackPointer(this);
}
- cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
+
return new_map;
}
+MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
+ if (pre_allocated_property_fields() == 0) return CopyDropDescriptors();
+
+ // If the map has pre-allocated properties always start out with a descriptor
+ // array describing these properties.
+ ASSERT(constructor()->IsJSFunction());
+ JSFunction* ctor = JSFunction::cast(constructor());
+ Map* map = ctor->initial_map();
+ DescriptorArray* descriptors = map->instance_descriptors();
+
+ return CopyReplaceDescriptors(descriptors, NULL, OMIT_TRANSITION);
+}
+
+
+MaybeObject* Map::Copy() {
+ DescriptorArray* descriptors = instance_descriptors();
+ return CopyReplaceDescriptors(descriptors, NULL, OMIT_TRANSITION);
+}
+
+
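+// Allocates a descriptor array with one extra slot, copies the existing
+// descriptors and appends the new one (its key symbolized first).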
+MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
+ TransitionFlag flag) {
+ DescriptorArray* descriptors = instance_descriptors();
+
+ // Ensure the key is a symbol.
+ MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
+ String* key = descriptor->GetKey();
+ ASSERT(descriptors->Search(key) == DescriptorArray::kNotFound);
+
+ int old_size = descriptors->number_of_descriptors();
+ int new_size = old_size + 1;
+
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(new_size);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ FixedArray::WhitenessWitness witness(new_descriptors);
+
+ // Copy the descriptors, inserting a descriptor.
+ for (int i = 0; i < old_size; ++i) {
+ new_descriptors->CopyFrom(i, descriptors, i, witness);
+ }
+
+ new_descriptors->Append(descriptor, witness, old_size);
+
+ SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
+
+ return CopyReplaceDescriptors(new_descriptors, key, flag);
+}
+
+
+MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
+ TransitionFlag flag) {
+ DescriptorArray* old_descriptors = instance_descriptors();
+
+ // Ensure the key is a symbol.
+ MaybeObject* maybe_result = descriptor->KeyToSymbol();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // We replace the descriptor if the key is already present.
+ int index = old_descriptors->SearchWithCache(descriptor->GetKey());
+ if (index != DescriptorArray::kNotFound) {
+ return CopyReplaceDescriptor(descriptor, index, flag);
+ }
+ return CopyAddDescriptor(descriptor, flag);
+}
+
+
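+// Overwrites the descriptor at insertion_index in a fresh copy of the
+// descriptor array, preserving its enumeration index and sorted-key pointer.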
+MaybeObject* Map::CopyReplaceDescriptor(Descriptor* descriptor,
+ int insertion_index,
+ TransitionFlag flag) {
+ DescriptorArray* descriptors = instance_descriptors();
+ int size = descriptors->number_of_descriptors();
+ ASSERT(0 <= insertion_index && insertion_index < size);
+
+ // Ensure the key is a symbol.
+ MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
+ String* key = descriptor->GetKey();
+ ASSERT(key == descriptors->GetKey(insertion_index));
+
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(size);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ FixedArray::WhitenessWitness witness(new_descriptors);
+
+ // Copy the descriptors, replacing a descriptor.
+ for (int index = 0; index < size; ++index) {
+ if (index == insertion_index) continue;
+ new_descriptors->CopyFrom(index, descriptors, index, witness);
+ }
+
+ PropertyDetails original_details = descriptors->GetDetails(insertion_index);
+ descriptor->SetEnumerationIndex(original_details.descriptor_index());
+ descriptor->SetSortedKey(original_details.pointer());
+
+ new_descriptors->Set(insertion_index, descriptor, witness);
+
+ SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
+
+ return CopyReplaceDescriptors(new_descriptors, key, flag);
+}
+
+
void Map::UpdateCodeCache(Handle<Map> map,
Handle<String> name,
Handle<Code> code) {
@@ -4980,6 +5085,7 @@ void Map::UpdateCodeCache(Handle<Map> map,
map->UpdateCodeCache(*name, *code));
}
+
MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache());
@@ -5028,85 +5134,43 @@ void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
// field of the contents array while it is running.
class IntrusiveMapTransitionIterator {
public:
- explicit IntrusiveMapTransitionIterator(DescriptorArray* descriptor_array)
- : descriptor_array_(descriptor_array) { }
+ explicit IntrusiveMapTransitionIterator(TransitionArray* transition_array)
+ : transition_array_(transition_array) { }
void Start() {
ASSERT(!IsIterating());
- if (descriptor_array_->MayContainTransitions())
- *DescriptorArrayHeader() = Smi::FromInt(0);
+ *TransitionArrayHeader() = Smi::FromInt(0);
}
bool IsIterating() {
- return descriptor_array_->MayContainTransitions() &&
- (*DescriptorArrayHeader())->IsSmi();
+ return (*TransitionArrayHeader())->IsSmi();
}
Map* Next() {
ASSERT(IsIterating());
- // Attention, tricky index manipulation ahead: Two consecutive indices are
- // assigned to each descriptor. Most descriptors directly advance to the
- // next descriptor by adding 2 to the index. The exceptions are the
- // CALLBACKS entries: An even index means we look at its getter, and an odd
- // index means we look at its setter.
- int raw_index = Smi::cast(*DescriptorArrayHeader())->value();
- int index = raw_index / 2;
- int number_of_descriptors = descriptor_array_->number_of_descriptors();
- while (index < number_of_descriptors) {
- PropertyDetails details(descriptor_array_->GetDetails(index));
- switch (details.type()) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- // We definitely have a map transition.
- *DescriptorArrayHeader() = Smi::FromInt(raw_index + 2);
- return static_cast<Map*>(descriptor_array_->GetValue(index));
- case CALLBACKS: {
- // We might have a map transition in a getter or in a setter.
- AccessorPair* accessors =
- static_cast<AccessorPair*>(descriptor_array_->GetValue(index));
- Object* accessor;
- if ((raw_index & 1) == 0) {
- accessor = accessors->setter();
- } else {
- ++index;
- accessor = accessors->getter();
- }
- ++raw_index;
- if (accessor->IsMap()) {
- *DescriptorArrayHeader() = Smi::FromInt(raw_index);
- return static_cast<Map*>(accessor);
- }
- break;
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- // We definitely have no map transition.
- raw_index += 2;
- ++index;
- break;
- }
+ int index = Smi::cast(*TransitionArrayHeader())->value();
+ int number_of_transitions = transition_array_->number_of_transitions();
+ while (index < number_of_transitions) {
+ *TransitionArrayHeader() = Smi::FromInt(index + 1);
+ return transition_array_->GetTarget(index);
}
- if (index == descriptor_array_->number_of_descriptors()) {
- Map* elements_transition = descriptor_array_->elements_transition_map();
- if (elements_transition != NULL) {
- *DescriptorArrayHeader() = Smi::FromInt(raw_index + 2);
- return elements_transition;
- }
+
+ if (index == number_of_transitions &&
+ transition_array_->HasElementsTransition()) {
+ Map* elements_transition = transition_array_->elements_transition();
+ *TransitionArrayHeader() = Smi::FromInt(index + 1);
+ return elements_transition;
}
- *DescriptorArrayHeader() = descriptor_array_->GetHeap()->fixed_array_map();
+ *TransitionArrayHeader() = transition_array_->GetHeap()->fixed_array_map();
return NULL;
}
private:
- Object** DescriptorArrayHeader() {
- return HeapObject::RawField(descriptor_array_, DescriptorArray::kMapOffset);
+ Object** TransitionArrayHeader() {
+ return HeapObject::RawField(transition_array_, TransitionArray::kMapOffset);
}
- DescriptorArray* descriptor_array_;
+ TransitionArray* transition_array_;
};
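
The iterator above is intrusive: instead of allocating iteration state, it overwrites the transition array's map slot with a Smi cursor and restores the real map when iteration ends. A minimal stand-alone sketch of that trick, with a tagged flag modelling the map-or-Smi header (illustrative types, not V8 code):

  #include <cassert>
  #include <cstdio>

  struct FakeArray {
    bool header_is_index;  // models "header holds a Smi" vs "header holds a map"
    int header_index;      // the cursor, stored where the map pointer lives
    int targets[3];        // models the transition targets
    int length;
  };

  int* Next(FakeArray* a) {
    assert(a->header_is_index);
    int i = a->header_index;
    if (i < a->length) {
      a->header_index = i + 1;   // advance the cursor stored in the header
      return &a->targets[i];
    }
    a->header_is_index = false;  // restore the "map", ending the iteration
    return nullptr;
  }

  int main() {
    FakeArray a = {true, 0, {10, 20, 30}, 3};
    while (int* t = Next(&a)) std::printf("%d\n", *t);
  }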
@@ -5119,11 +5183,11 @@ class IntrusivePrototypeTransitionIterator {
void Start() {
ASSERT(!IsIterating());
- if (HasTransitions()) *Header() = Smi::FromInt(0);
+ *Header() = Smi::FromInt(0);
}
bool IsIterating() {
- return HasTransitions() && (*Header())->IsSmi();
+ return (*Header())->IsSmi();
}
Map* Next() {
@@ -5138,23 +5202,17 @@ class IntrusivePrototypeTransitionIterator {
}
private:
- bool HasTransitions() {
- return proto_trans_->map()->IsSmi() || proto_trans_->IsFixedArray();
- }
-
Object** Header() {
return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset);
}
int NumberOfTransitions() {
- ASSERT(HasTransitions());
FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
Object* num = proto_trans->get(Map::kProtoTransitionNumberOfEntriesOffset);
return Smi::cast(num)->value();
}
Map* GetTransition(int transitionNumber) {
- ASSERT(HasTransitions());
FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
return Map::cast(proto_trans->get(IndexFor(transitionNumber)));
}
@@ -5204,43 +5262,43 @@ class TraversableMap : public Map {
return old_parent;
}
- // Can either be Smi (no instance descriptors), or a descriptor array with the
- // header overwritten as a Smi (thus iterating).
- DescriptorArray* MutatedInstanceDescriptors() {
- Object* object =
- *HeapObject::RawField(this, kInstanceDescriptorsOrBitField3Offset);
- if (object->IsSmi()) {
- return GetHeap()->empty_descriptor_array();
- } else {
- DescriptorArray* descriptor_array =
- static_cast<DescriptorArray*>(object);
- return descriptor_array;
- }
- }
-
// Start iterating over this map's children, possibly destroying a FixedArray
// map (see explanation above).
void ChildIteratorStart() {
- IntrusiveMapTransitionIterator(instance_descriptors()).Start();
- IntrusivePrototypeTransitionIterator(
- unchecked_prototype_transitions()).Start();
+ if (HasTransitionArray()) {
+ if (HasPrototypeTransitions()) {
+ IntrusivePrototypeTransitionIterator(GetPrototypeTransitions()).Start();
+ }
+
+ IntrusiveMapTransitionIterator(transitions()).Start();
+ }
}
// If we have an unvisited child map, return that one and advance. If we have
// none, return NULL and reset any destroyed FixedArray maps.
TraversableMap* ChildIteratorNext() {
- IntrusivePrototypeTransitionIterator
- proto_iterator(unchecked_prototype_transitions());
- if (proto_iterator.IsIterating()) {
- Map* next = proto_iterator.Next();
- if (next != NULL) return static_cast<TraversableMap*>(next);
+ TransitionArray* transition_array = unchecked_transition_array();
+ if (!transition_array->map()->IsSmi() &&
+ !transition_array->IsTransitionArray()) {
+ return NULL;
+ }
+
+ if (transition_array->HasPrototypeTransitions()) {
+ HeapObject* proto_transitions =
+ transition_array->UncheckedPrototypeTransitions();
+ IntrusivePrototypeTransitionIterator proto_iterator(proto_transitions);
+ if (proto_iterator.IsIterating()) {
+ Map* next = proto_iterator.Next();
+ if (next != NULL) return static_cast<TraversableMap*>(next);
+ }
}
- IntrusiveMapTransitionIterator
- descriptor_iterator(MutatedInstanceDescriptors());
- if (descriptor_iterator.IsIterating()) {
- Map* next = descriptor_iterator.Next();
+
+ IntrusiveMapTransitionIterator transition_iterator(transition_array);
+ if (transition_iterator.IsIterating()) {
+ Map* next = transition_iterator.Next();
if (next != NULL) return static_cast<TraversableMap*>(next);
}
+
return NULL;
}
};
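
ChildIteratorStart/ChildIteratorNext support a depth-first walk of the whole transition tree without an explicit stack, using back pointers to climb out again. A minimal sketch of the same idea, assuming each node already carries a parent link (plain structs, not V8 types):

  #include <cstddef>
  #include <vector>

  struct Node {
    Node* parent = nullptr;
    std::vector<Node*> children;
    std::size_t next_child = 0;  // cursor; V8 hides this in overwritten headers
  };

  // Depth-first walk without a stack: descend while unvisited children
  // remain, otherwise climb back out through the parent link.
  void VisitAll(Node* root, void (*visit)(Node*)) {
    Node* current = root;
    while (current != nullptr) {
      if (current->next_child < current->children.size()) {
        current = current->children[current->next_child++];  // descend
      } else {
        visit(current);           // all children done: visit, then climb
        current->next_child = 0;  // restore state before leaving the node
        current = current->parent;
      }
    }
  }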
@@ -5271,7 +5329,7 @@ MaybeObject* CodeCache::Update(String* name, Code* code) {
 // The number of monomorphic stubs for normal load/store/call ICs can grow to
// a large number and therefore they need to go into a hash table. They are
// used to load global properties from cells.
- if (code->type() == NORMAL) {
+ if (code->type() == Code::NORMAL) {
// Make sure that a hash table is allocated for the normal load code cache.
if (normal_type_cache()->IsUndefined()) {
Object* result;
@@ -5362,7 +5420,7 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(String* name, Code* code) {
Object* CodeCache::Lookup(String* name, Code::Flags flags) {
- if (Code::ExtractTypeFromFlags(flags) == NORMAL) {
+ if (Code::ExtractTypeFromFlags(flags) == Code::NORMAL) {
return LookupNormalTypeCache(name, flags);
} else {
return LookupDefaultCache(name, flags);
@@ -5400,7 +5458,7 @@ Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
int CodeCache::GetIndex(Object* name, Code* code) {
- if (code->type() == NORMAL) {
+ if (code->type() == Code::NORMAL) {
if (normal_type_cache()->IsUndefined()) return -1;
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
return cache->GetIndex(String::cast(name), code->flags());
@@ -5416,7 +5474,7 @@ int CodeCache::GetIndex(Object* name, Code* code) {
void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
- if (code->type() == NORMAL) {
+ if (code->type() == Code::NORMAL) {
ASSERT(!normal_type_cache()->IsUndefined());
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
ASSERT(cache->GetIndex(String::cast(name), code->flags()) == index);
@@ -5734,7 +5792,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
MaybeObject* maybe_result =
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
- if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
+ if (!maybe_result->To(&result)) return maybe_result;
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
@@ -5790,24 +5848,17 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
#endif
-MaybeObject* DescriptorArray::Allocate(int number_of_descriptors,
- SharedMode shared_mode) {
+MaybeObject* DescriptorArray::Allocate(int number_of_descriptors) {
Heap* heap = Isolate::Current()->heap();
// Do not use DescriptorArray::cast on incomplete object.
FixedArray* result;
- if (number_of_descriptors == 0 && shared_mode == MAY_BE_SHARED) {
- return heap->empty_descriptor_array();
- }
+ if (number_of_descriptors == 0) return heap->empty_descriptor_array();
// Allocate the array of keys.
- { MaybeObject* maybe_array =
- heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors));
- if (!maybe_array->To(&result)) return maybe_array;
- }
+ MaybeObject* maybe_array =
+ heap->AllocateFixedArray(LengthFor(number_of_descriptors));
+ if (!maybe_array->To(&result)) return maybe_array;
- result->set(kBitField3StorageIndex, Smi::FromInt(0));
- result->set(kEnumerationIndexIndex,
- Smi::FromInt(PropertyDetails::kInitialIndex));
- result->set(kTransitionsIndex, Smi::FromInt(0));
+ result->set(kEnumCacheIndex, Smi::FromInt(0));
return result;
}
@@ -5818,9 +5869,9 @@ void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
if (HasEnumCache()) {
- FixedArray::cast(get(kEnumerationIndexIndex))->
+ FixedArray::cast(get(kEnumCacheIndex))->
set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(get(kEnumerationIndexIndex))->
+ FixedArray::cast(get(kEnumCacheIndex))->
set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
} else {
if (IsEmpty()) return; // Do nothing for empty descriptor array.
@@ -5828,197 +5879,49 @@ void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
set(kEnumCacheBridgeCacheIndex, new_cache);
FixedArray::cast(bridge_storage)->
set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- NoWriteBarrierSet(FixedArray::cast(bridge_storage),
- kEnumCacheBridgeEnumIndex,
- get(kEnumerationIndexIndex));
- set(kEnumerationIndexIndex, bridge_storage);
+ set(kEnumCacheIndex, bridge_storage);
}
}
-static bool InsertionPointFound(String* key1, String* key2) {
- return key1->Hash() > key2->Hash() || key1 == key2;
-}
-
-
-void DescriptorArray::CopyFrom(Handle<DescriptorArray> dst,
- int dst_index,
- Handle<DescriptorArray> src,
+void DescriptorArray::CopyFrom(int dst_index,
+ DescriptorArray* src,
int src_index,
const WhitenessWitness& witness) {
- CALL_HEAP_FUNCTION_VOID(dst->GetIsolate(),
- dst->CopyFrom(dst_index, *src, src_index, witness));
-}
-
-
-MaybeObject* DescriptorArray::CopyFrom(int dst_index,
- DescriptorArray* src,
- int src_index,
- const WhitenessWitness& witness) {
Object* value = src->GetValue(src_index);
PropertyDetails details = src->GetDetails(src_index);
- if (details.type() == CALLBACKS && value->IsAccessorPair()) {
- MaybeObject* maybe_copy =
- AccessorPair::cast(value)->CopyWithoutTransitions();
- if (!maybe_copy->To(&value)) return maybe_copy;
- }
Descriptor desc(src->GetKey(src_index), value, details);
Set(dst_index, &desc, witness);
- return this;
}
-MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
- TransitionFlag transition_flag) {
- // Transitions are only kept when inserting another transition.
- // This precondition is not required by this function's implementation, but
- // is currently required by the semantics of maps, so we check it.
- // Conversely, we filter after replacing, so replacing a transition and
- // removing all other transitions is not supported.
- bool remove_transitions = transition_flag == REMOVE_TRANSITIONS;
- ASSERT(remove_transitions == !descriptor->ContainsTransition());
- ASSERT(descriptor->GetDetails().type() != NULL_DESCRIPTOR);
-
- // Ensure the key is a symbol.
- { MaybeObject* maybe_result = descriptor->KeyToSymbol();
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- int new_size = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsNullDescriptor(i)) continue;
- if (remove_transitions && IsTransitionOnly(i)) continue;
- new_size++;
- }
-
- // If key is in descriptor, we replace it in-place when filtering.
- // Count a null descriptor for key as inserted, not replaced.
- int index = Search(descriptor->GetKey());
- const bool replacing = (index != kNotFound);
- bool keep_enumeration_index = false;
- if (!replacing) {
- ++new_size;
- } else if (!IsTransitionOnly(index)) {
- // We are replacing an existing descriptor. We keep the enumeration index
- // of a visible property.
- keep_enumeration_index = true;
- } else if (remove_transitions) {
- // Replaced descriptor has been counted as removed if it is a transition
- // that will be replaced. Adjust count in this case.
- ++new_size;
- }
-
- DescriptorArray* new_descriptors;
- { SharedMode mode = remove_transitions ? MAY_BE_SHARED : CANNOT_BE_SHARED;
- MaybeObject* maybe_result = Allocate(new_size, mode);
- if (!maybe_result->To(&new_descriptors)) return maybe_result;
- }
-
- DescriptorArray::WhitenessWitness witness(new_descriptors);
-
- // Set the enumeration index in the descriptors and set the enumeration index
- // in the result.
- int enumeration_index = NextEnumerationIndex();
- if (!descriptor->ContainsTransition()) {
- if (keep_enumeration_index) {
- descriptor->SetEnumerationIndex(GetDetails(index).index());
- } else {
- descriptor->SetEnumerationIndex(enumeration_index);
- ++enumeration_index;
- }
- }
- Map* old_elements_transition = elements_transition_map();
- if ((!remove_transitions) && (old_elements_transition != NULL)) {
- new_descriptors->set_elements_transition_map(old_elements_transition);
- }
- new_descriptors->SetNextEnumerationIndex(enumeration_index);
-
- // Copy the descriptors, filtering out transitions and null descriptors,
- // and inserting or replacing a descriptor.
- int to_index = 0;
- int insertion_index = -1;
- int from_index = 0;
- while (from_index < number_of_descriptors()) {
- if (insertion_index < 0 &&
- InsertionPointFound(GetKey(from_index), descriptor->GetKey())) {
- insertion_index = to_index++;
- if (replacing) from_index++;
- } else {
- if (!(IsNullDescriptor(from_index) ||
- (remove_transitions && IsTransitionOnly(from_index)))) {
- MaybeObject* copy_result =
- new_descriptors->CopyFrom(to_index++, this, from_index, witness);
- if (copy_result->IsFailure()) return copy_result;
- }
- from_index++;
- }
- }
- if (insertion_index < 0) insertion_index = to_index++;
-
- ASSERT(insertion_index < new_descriptors->number_of_descriptors());
- new_descriptors->Set(insertion_index, descriptor, witness);
-
- ASSERT(to_index == new_descriptors->number_of_descriptors());
- SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
-
- return new_descriptors;
-}
-
-
-MaybeObject* DescriptorArray::RemoveTransitions(SharedMode shared_mode) {
- // Allocate the new descriptor array.
- int new_number_of_descriptors = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsProperty(i)) new_number_of_descriptors++;
- }
- DescriptorArray* new_descriptors;
- { MaybeObject* maybe_result = Allocate(new_number_of_descriptors,
- shared_mode);
- if (!maybe_result->To(&new_descriptors)) return maybe_result;
- }
-
- // Copy the content.
- DescriptorArray::WhitenessWitness witness(new_descriptors);
- int next_descriptor = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsProperty(i)) {
- MaybeObject* copy_result =
- new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
- if (copy_result->IsFailure()) return copy_result;
- }
- }
- ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
- new_descriptors->SetNextEnumerationIndex(NextEnumerationIndex());
-
- return new_descriptors;
-}
-
// We need the whiteness witness since sort will reshuffle the entries in the
// descriptor array. If the descriptor array were to be black, the shuffling
// would move a slot that was already recorded as pointing into an evacuation
// candidate. This would result in missing updates upon evacuation.
-void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
+void DescriptorArray::Sort() {
// In-place heap sort.
int len = number_of_descriptors();
-
+ // Reset sorting since the descriptor array might contain invalid pointers.
+ for (int i = 0; i < len; ++i) SetSortedKey(i, i);
// Bottom-up max-heap construction.
// Index of the last node with children
const int max_parent_index = (len / 2) - 1;
for (int i = max_parent_index; i >= 0; --i) {
int parent_index = i;
- const uint32_t parent_hash = GetKey(i)->Hash();
+ const uint32_t parent_hash = GetSortedKey(i)->Hash();
while (parent_index <= max_parent_index) {
int child_index = 2 * parent_index + 1;
- uint32_t child_hash = GetKey(child_index)->Hash();
+ uint32_t child_hash = GetSortedKey(child_index)->Hash();
if (child_index + 1 < len) {
- uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
+ uint32_t right_child_hash = GetSortedKey(child_index + 1)->Hash();
if (right_child_hash > child_hash) {
child_index++;
child_hash = right_child_hash;
}
}
if (child_hash <= parent_hash) break;
- NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
+ SwapSortedKeys(parent_index, child_index);
// Now element at child_index could be < its children.
parent_index = child_index; // parent_hash remains correct.
}
@@ -6027,83 +5930,37 @@ void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
// Extract elements and create sorted array.
for (int i = len - 1; i > 0; --i) {
// Put max element at the back of the array.
- NoIncrementalWriteBarrierSwapDescriptors(0, i);
+ SwapSortedKeys(0, i);
// Shift down the new top element.
int parent_index = 0;
- const uint32_t parent_hash = GetKey(parent_index)->Hash();
+ const uint32_t parent_hash = GetSortedKey(parent_index)->Hash();
const int max_parent_index = (i / 2) - 1;
while (parent_index <= max_parent_index) {
int child_index = parent_index * 2 + 1;
- uint32_t child_hash = GetKey(child_index)->Hash();
+ uint32_t child_hash = GetSortedKey(child_index)->Hash();
if (child_index + 1 < i) {
- uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
+ uint32_t right_child_hash = GetSortedKey(child_index + 1)->Hash();
if (right_child_hash > child_hash) {
child_index++;
child_hash = right_child_hash;
}
}
if (child_hash <= parent_hash) break;
- NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
+ SwapSortedKeys(parent_index, child_index);
parent_index = child_index;
}
}
}
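
The same two-phase in-place heapsort, extracted into a stand-alone function over plain hash values (an illustrative sketch, not V8 code); the swaps correspond to SwapSortedKeys:

  #include <algorithm>
  #include <cstdint>

  // Bottom-up max-heap construction, then repeated extraction of the maximum,
  // exactly mirroring the two phases of DescriptorArray::Sort above.
  void HeapSortByHash(uint32_t* keys, int len) {
    for (int i = len / 2 - 1; i >= 0; --i) {   // build the max-heap
      int parent = i;
      while (true) {
        int child = 2 * parent + 1;
        if (child >= len) break;
        if (child + 1 < len && keys[child + 1] > keys[child]) ++child;
        if (keys[child] <= keys[parent]) break;
        std::swap(keys[parent], keys[child]);  // the SwapSortedKeys analogue
        parent = child;
      }
    }
    for (int i = len - 1; i > 0; --i) {        // extract maxima to the back
      std::swap(keys[0], keys[i]);
      int parent = 0;
      while (true) {
        int child = 2 * parent + 1;
        if (child >= i) break;
        if (child + 1 < i && keys[child + 1] > keys[child]) ++child;
        if (keys[child] <= keys[parent]) break;
        std::swap(keys[parent], keys[child]);
        parent = child;
      }
    }
  }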
-void DescriptorArray::Sort(const WhitenessWitness& witness) {
- SortUnchecked(witness);
- SLOW_ASSERT(IsSortedNoDuplicates());
-}
-
-
-int DescriptorArray::BinarySearch(String* name, int low, int high) {
- uint32_t hash = name->Hash();
- int limit = high;
-
- ASSERT(low <= high);
-
- while (low != high) {
- int mid = (low + high) / 2;
- String* mid_name = GetKey(mid);
- uint32_t mid_hash = mid_name->Hash();
-
- if (mid_hash >= hash) {
- high = mid;
- } else {
- low = mid + 1;
- }
- }
-
- for (; low <= limit && GetKey(low)->Hash() == hash; ++low) {
- if (GetKey(low)->Equals(name) && !IsNullDescriptor(low))
- return low;
- }
-
- return kNotFound;
-}
-
-
-int DescriptorArray::LinearSearch(SearchMode mode, String* name, int len) {
- uint32_t hash = name->Hash();
- for (int number = 0; number < len; number++) {
- String* entry = GetKey(number);
- if (mode == EXPECT_SORTED && entry->Hash() > hash) break;
- if (name->Equals(entry) && !IsNullDescriptor(number)) {
- return number;
- }
- }
- return kNotFound;
-}
-
-
-MaybeObject* AccessorPair::CopyWithoutTransitions() {
+MaybeObject* AccessorPair::Copy() {
Heap* heap = GetHeap();
AccessorPair* copy;
- { MaybeObject* maybe_copy = heap->AllocateAccessorPair();
- if (!maybe_copy->To(&copy)) return maybe_copy;
- }
- copy->set_getter(getter()->IsMap() ? heap->the_hole_value() : getter());
- copy->set_setter(setter()->IsMap() ? heap->the_hole_value() : setter());
+ MaybeObject* maybe_copy = heap->AllocateAccessorPair();
+ if (!maybe_copy->To(&copy)) return maybe_copy;
+
+ copy->set_getter(getter());
+ copy->set_setter(setter());
return copy;
}
@@ -7349,70 +7206,59 @@ static bool ClearBackPointer(Heap* heap, Object* target) {
}
+// TODO(mstarzinger): This method should be moved into MarkCompactCollector,
+// because it cannot be called from outside the GC and we already have methods
+// depending on the transitions layout in the GC anyway.
void Map::ClearNonLiveTransitions(Heap* heap) {
- DescriptorArray* d = DescriptorArray::cast(
- *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
- if (d->IsEmpty()) return;
- Smi* NullDescriptorDetails =
- PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
- for (int i = 0; i < d->number_of_descriptors(); ++i) {
- // If the pair (value, details) is a map transition, check if the target is
- // live. If not, null the descriptor. Also drop the back pointer for that
- // map transition, so that this map is not reached again by following a back
- // pointer from that non-live map.
- bool keep_entry = false;
- PropertyDetails details(d->GetDetails(i));
- switch (details.type()) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- keep_entry = !ClearBackPointer(heap, d->GetValue(i));
- break;
- case CALLBACKS: {
- Object* object = d->GetValue(i);
- if (object->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(object);
- Object* getter = accessors->getter();
- if (getter->IsMap()) {
- if (ClearBackPointer(heap, getter)) {
- accessors->set_getter(heap->the_hole_value());
- } else {
- keep_entry = true;
- }
- } else if (!getter->IsTheHole()) {
- keep_entry = true;
- }
- Object* setter = accessors->setter();
- if (setter->IsMap()) {
- if (ClearBackPointer(heap, setter)) {
- accessors->set_setter(heap->the_hole_value());
- } else {
- keep_entry = true;
- }
- } else if (!setter->IsTheHole()) {
- keep_entry = true;
- }
- } else {
- keep_entry = true;
- }
- break;
+ // If there are no transitions to be cleared, return.
+  // TODO(verwaest): Should be an assert; otherwise back pointers are not
+  // properly cleared.
+ if (!HasTransitionArray()) return;
+
+ TransitionArray* t = transitions();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+
+ int transition_index = 0;
+
+  // Compact all live transitions to the left.
+ for (int i = 0; i < t->number_of_transitions(); ++i) {
+ if (!ClearBackPointer(heap, t->GetTarget(i))) {
+ if (i != transition_index) {
+ String* key = t->GetKey(i);
+ t->SetKey(transition_index, key);
+ Object** key_slot = t->GetKeySlot(transition_index);
+ collector->RecordSlot(key_slot, key_slot, key);
+ // Target slots do not need to be recorded since maps are not compacted.
+ t->SetTarget(transition_index, t->GetTarget(i));
}
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- keep_entry = true;
- break;
- }
- // Make sure that an entry containing only dead transitions gets collected.
- // What we *really* want to do here is removing this entry completely, but
- // for technical reasons we can't do this, so we zero it out instead.
- if (!keep_entry) {
- d->SetDetailsUnchecked(i, NullDescriptorDetails);
- d->SetNullValueUnchecked(i, heap);
+ transition_index++;
}
}
+
+ if (t->HasElementsTransition() &&
+ ClearBackPointer(heap, t->elements_transition())) {
+ t->ClearElementsTransition();
+ } else {
+    // If no transitions were cleared, there is nothing to trim; return.
+    // TODO(verwaest): Should be an assert; otherwise back pointers are not
+    // properly cleared.
+ if (transition_index == t->number_of_transitions()) return;
+ }
+
+ // If the final transition array does not contain any live transitions, remove
+ // the transition array from the map.
+ if (transition_index == 0 &&
+ !t->HasElementsTransition() &&
+ !t->HasPrototypeTransitions() &&
+ t->descriptors()->IsEmpty()) {
+ return ClearTransitions(heap);
+ }
+
+ int trim = t->number_of_transitions() - transition_index;
+ if (trim > 0) {
+ RightTrimFixedArray<FROM_GC>(
+ heap, t, trim * TransitionArray::kTransitionSize);
+ }
}
@@ -7445,8 +7291,7 @@ bool Map::EquivalentToForNormalization(Map* other,
instance_type() == other->instance_type() &&
bit_field() == other->bit_field() &&
bit_field2() == other->bit_field2() &&
- (bit_field3() & ~(1<<Map::kIsShared)) ==
- (other->bit_field3() & ~(1<<Map::kIsShared));
+ function_with_prototype() == other->function_with_prototype();
}
@@ -7467,13 +7312,19 @@ void JSFunction::MarkForLazyRecompilation() {
ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
}
+void JSFunction::MarkForParallelRecompilation() {
+ ASSERT(is_compiled() && !IsOptimized());
+ ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
+ Builtins* builtins = GetIsolate()->builtins();
+ ReplaceCode(builtins->builtin(Builtins::kParallelRecompile));
-bool SharedFunctionInfo::EnsureCompiled(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag) {
- return shared->is_compiled() || CompileLazy(shared, flag);
+ // Unlike MarkForLazyRecompilation, after queuing a function for
+ // recompilation on the compiler thread, we actually tail-call into
+ // the full code. We reset the profiler ticks here so that the
+ // function doesn't bother the runtime profiler too much.
+ shared()->code()->set_profiler_ticks(0);
}
-
static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
@@ -7490,11 +7341,77 @@ static bool CompileLazyHelper(CompilationInfo* info,
bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) {
- CompilationInfo info(shared);
+ ASSERT(shared->allows_lazy_compilation_without_context());
+ CompilationInfoWithZone info(shared);
return CompileLazyHelper(&info, flag);
}
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
+ set_optimized_code_map(Smi::FromInt(0));
+}
+
+
+void SharedFunctionInfo::AddToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ Handle<Code> code,
+ Handle<FixedArray> literals) {
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(native_context->IsNativeContext());
+ STATIC_ASSERT(kEntryLength == 3);
+ Object* value = shared->optimized_code_map();
+ Handle<FixedArray> new_code_map;
+ if (value->IsSmi()) {
+ // No optimized code map.
+ ASSERT_EQ(0, Smi::cast(value)->value());
+    // Create 3 entries per context: {context, code, literals}.
+ new_code_map = FACTORY->NewFixedArray(kEntryLength);
+ new_code_map->set(0, *native_context);
+ new_code_map->set(1, *code);
+ new_code_map->set(2, *literals);
+ } else {
+ // Copy old map and append one new entry.
+ Handle<FixedArray> old_code_map(FixedArray::cast(value));
+ ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context));
+ int old_length = old_code_map->length();
+ int new_length = old_length + kEntryLength;
+ new_code_map = FACTORY->NewFixedArray(new_length);
+ old_code_map->CopyTo(0, *new_code_map, 0, old_length);
+ new_code_map->set(old_length, *native_context);
+ new_code_map->set(old_length + 1, *code);
+ new_code_map->set(old_length + 2, *literals);
+ }
+#ifdef DEBUG
+ for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+ ASSERT(new_code_map->get(i)->IsNativeContext());
+ ASSERT(new_code_map->get(i + 1)->IsCode());
+ ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
+ Code::OPTIMIZED_FUNCTION);
+ ASSERT(new_code_map->get(i + 2)->IsFixedArray());
+ }
+#endif
+ shared->set_optimized_code_map(*new_code_map);
+}
+
+
+void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
+ int index) {
+ ASSERT(index > 0);
+ ASSERT(optimized_code_map()->IsFixedArray());
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ if (!bound()) {
+ FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
+ ASSERT(cached_literals != NULL);
+ function->set_literals(cached_literals);
+ }
+ Code* code = Code::cast(code_map->get(index));
+ ASSERT(code != NULL);
+ ASSERT(function->context()->native_context() == code_map->get(index - 1));
+ function->ReplaceCode(code);
+}
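
The optimized code map built and consumed above is a flat FixedArray of {context, code, literals} triples. A simplified model of its lookup convention, where the returned index points at the code slot, one past the entry start (stand-in types, not V8 API):

  #include <cstddef>
  #include <vector>

  struct CodeMapEntry { void* context; void* code; void* literals; };

  // Mirrors SharedFunctionInfo::SearchOptimizedCodeMap: linear scan by
  // native context; returns the flat-array index of the code slot
  // (entry start + 1), or -1 when the context has no cached entry.
  int SearchCodeMap(const std::vector<CodeMapEntry>& map, void* native_context) {
    for (std::size_t i = 0; i < map.size(); ++i) {
      if (map[i].context == native_context) {
        return static_cast<int>(i) * 3 + 1;
      }
    }
    return -1;
  }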
+
+
bool JSFunction::CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
@@ -7502,7 +7419,8 @@ bool JSFunction::CompileLazy(Handle<JSFunction> function,
function->ReplaceCode(function->shared()->code());
function->shared()->set_code_age(0);
} else {
- CompilationInfo info(function);
+ ASSERT(function->shared()->allows_lazy_compilation());
+ CompilationInfoWithZone info(function);
result = CompileLazyHelper(&info, flag);
ASSERT(!result || function->is_compiled());
}
@@ -7511,14 +7429,20 @@ bool JSFunction::CompileLazy(Handle<JSFunction> function,
bool JSFunction::CompileOptimized(Handle<JSFunction> function,
- int osr_ast_id,
+ BailoutId osr_ast_id,
ClearExceptionFlag flag) {
- CompilationInfo info(function);
+ CompilationInfoWithZone info(function);
info.SetOptimizing(osr_ast_id);
return CompileLazyHelper(&info, flag);
}
+bool JSFunction::EnsureCompiled(Handle<JSFunction> function,
+ ClearExceptionFlag flag) {
+ return function->is_compiled() || CompileLazy(function, flag);
+}
+
+
bool JSFunction::IsInlineable() {
if (IsBuiltin()) return false;
SharedFunctionInfo* shared_info = shared();
@@ -7564,12 +7488,10 @@ MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
// If the function has allocated the initial map
// replace it with a copy containing the new prototype.
Map* new_map;
- MaybeObject* maybe_new_map =
- initial_map()->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
+ MaybeObject* maybe_new_map = initial_map()->Copy();
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
new_map->set_prototype(value);
- MaybeObject* maybe_object =
- set_initial_map_and_cache_transitions(new_map);
+ MaybeObject* maybe_object = set_initial_map_and_cache_transitions(new_map);
if (maybe_object->IsFailure()) return maybe_object;
} else {
// Put the value in the initial map field until an initial map is
@@ -7595,16 +7517,15 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
// Remove map transitions because they point to maps with a
// different prototype.
Map* new_map;
- { MaybeObject* maybe_new_map =
- map()->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- }
+ MaybeObject* maybe_new_map = map()->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
Heap* heap = new_map->GetHeap();
set_map(new_map);
new_map->set_constructor(value);
new_map->set_non_instance_prototype(true);
construct_prototype =
- heap->isolate()->context()->global_context()->
+ heap->isolate()->context()->native_context()->
initial_object_prototype();
} else {
map()->set_non_instance_prototype(false);
@@ -7614,30 +7535,25 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
}
-Object* JSFunction::RemovePrototype() {
- Context* global_context = context()->global_context();
+void JSFunction::RemovePrototype() {
+ Context* native_context = context()->native_context();
Map* no_prototype_map = shared()->is_classic_mode()
- ? global_context->function_without_prototype_map()
- : global_context->strict_mode_function_without_prototype_map();
+ ? native_context->function_without_prototype_map()
+ : native_context->strict_mode_function_without_prototype_map();
- if (map() == no_prototype_map) {
- // Be idempotent.
- return this;
- }
+ if (map() == no_prototype_map) return;
ASSERT(map() == (shared()->is_classic_mode()
- ? global_context->function_map()
- : global_context->strict_mode_function_map()));
+ ? native_context->function_map()
+ : native_context->strict_mode_function_map()));
set_map(no_prototype_map);
set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
- return this;
}
-Object* JSFunction::SetInstanceClassName(String* name) {
+void JSFunction::SetInstanceClassName(String* name) {
shared()->set_instance_class_name(name);
- return this;
}
@@ -7647,8 +7563,8 @@ void JSFunction::PrintName(FILE* out) {
}
-Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
- return Context::cast(literals->get(JSFunction::kLiteralGlobalContextIndex));
+Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
+ return Context::cast(literals->get(JSFunction::kLiteralNativeContextIndex));
}
@@ -7716,26 +7632,33 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
return false;
}
- // If the prototype is null inline constructors cause no problems.
- if (!prototype->IsJSObject()) {
- ASSERT(prototype->IsNull());
- return true;
- }
-
Heap* heap = GetHeap();
- // Traverse the proposed prototype chain looking for setters for properties of
- // the same names as are set by the inline constructor.
+ // Traverse the proposed prototype chain looking for properties of the
+ // same names as are set by the inline constructor.
for (Object* obj = prototype;
obj != heap->null_value();
obj = obj->GetPrototype()) {
- JSObject* js_object = JSObject::cast(obj);
+ JSReceiver* receiver = JSReceiver::cast(obj);
for (int i = 0; i < this_property_assignments_count(); i++) {
LookupResult result(heap->isolate());
String* name = GetThisPropertyAssignmentName(i);
- js_object->LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound() && result.type() == CALLBACKS) {
- return false;
+ receiver->LocalLookup(name, &result);
+ if (result.IsFound()) {
+ switch (result.type()) {
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ break;
+ case INTERCEPTOR:
+ case CALLBACKS:
+ case HANDLER:
+ return false;
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
+ }
}
}
}
@@ -7880,7 +7803,7 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
}
-void SharedFunctionInfo::DisableOptimization() {
+void SharedFunctionInfo::DisableOptimization(const char* reason) {
// Disable optimization for the shared function info and mark the
// code as non-optimizable. The marker on the shared function info
   // is there because we flush non-optimized code, thereby losing the
@@ -7896,13 +7819,14 @@ void SharedFunctionInfo::DisableOptimization() {
code()->set_optimizable(false);
}
if (FLAG_trace_opt) {
- PrintF("[disabled optimization for %s]\n", *DebugName()->ToCString());
+ PrintF("[disabled optimization for %s, reason: %s]\n",
+ *DebugName()->ToCString(), reason);
}
}
-bool SharedFunctionInfo::VerifyBailoutId(int id) {
- ASSERT(id != AstNode::kNoNumber);
+bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
+ ASSERT(!id.IsNone());
Code* unoptimized = code();
DeoptimizationOutputData* data =
DeoptimizationOutputData::cast(unoptimized->deoptimization_data());
@@ -7985,7 +7909,7 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
if (code()->kind() == Code::FUNCTION) {
code()->set_profiler_ticks(0);
if (optimization_disabled() &&
- opt_count() >= Compiler::kDefaultMaxOptCount) {
+ opt_count() >= FLAG_max_opt_count) {
// Re-enable optimizations if they were disabled due to opt_count limit.
set_optimization_disabled(false);
code()->set_optimizable(true);
@@ -8039,9 +7963,20 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
}
-void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
- v->VisitSharedFunctionInfo(this);
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
+ ASSERT(native_context->IsNativeContext());
+ if (!FLAG_cache_optimized_code) return -1;
+ Object* value = optimized_code_map();
+ if (!value->IsSmi()) {
+ FixedArray* optimized_code_map = FixedArray::cast(value);
+ int length = optimized_code_map->length();
+ for (int i = 0; i < length; i += 3) {
+ if (optimized_code_map->get(i) == native_context) {
+ return i + 1;
+ }
+ }
+ }
+ return -1;
}
@@ -8269,7 +8204,6 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
TypeFeedbackCells* type_feedback_cells =
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- ASSERT(type_feedback_cells->AstId(i)->IsSmi());
JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
}
@@ -8296,7 +8230,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
for (int i = 0; i < deopt_count; i++) {
PrintF(out, "%6d %6d %6d %6d",
i,
- AstId(i)->value(),
+ AstId(i).ToInt(),
ArgumentsStackHeight(i)->value(),
Pc(i)->value());
@@ -8330,11 +8264,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
case Translation::JS_FRAME: {
int ast_id = iterator.Next();
int function_id = iterator.Next();
- JSFunction* function =
- JSFunction::cast(LiteralArray()->get(function_id));
unsigned height = iterator.Next();
PrintF(out, "{ast_id=%d, function=", ast_id);
- function->PrintName(out);
+ if (function_id != Translation::kSelfLiteralId) {
+ Object* function = LiteralArray()->get(function_id);
+ JSFunction::cast(function)->PrintName(out);
+ } else {
+ PrintF(out, "<self>");
+ }
PrintF(out, ", height=%u}", height);
break;
}
@@ -8351,6 +8288,17 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME: {
+ int function_id = iterator.Next();
+ JSFunction* function =
+ JSFunction::cast(LiteralArray()->get(function_id));
+ PrintF(out, "{function=");
+ function->PrintName(out);
+ PrintF(out, "}");
+ break;
+ }
+
case Translation::DUPLICATE:
break;
@@ -8366,6 +8314,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
+ case Translation::UINT32_REGISTER: {
+ int reg_code = iterator.Next();
+ PrintF(out,
+ "{input=%s (unsigned)}",
+ converter.NameOfCPURegister(reg_code));
+ break;
+ }
+
case Translation::DOUBLE_REGISTER: {
int reg_code = iterator.Next();
PrintF(out, "{input=%s}",
@@ -8385,6 +8341,12 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
+ case Translation::UINT32_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ PrintF(out, "{input=%d (unsigned)}", input_slot_index);
+ break;
+ }
+
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator.Next();
PrintF(out, "{input=%d}", input_slot_index);
@@ -8415,7 +8377,7 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
for (int i = 0; i < this->DeoptPoints(); i++) {
int pc_and_state = this->PcAndState(i)->value();
PrintF("%6d %8d %s\n",
- this->AstId(i)->value(),
+ this->AstId(i).ToInt(),
FullCodeGenerator::PcField::decode(pc_and_state),
FullCodeGenerator::State2String(
FullCodeGenerator::StateField::decode(pc_and_state)));
@@ -8461,17 +8423,15 @@ const char* Code::ICState2String(InlineCacheState state) {
}
-const char* Code::PropertyType2String(PropertyType type) {
+const char* Code::StubType2String(StubType type) {
switch (type) {
case NORMAL: return "NORMAL";
case FIELD: return "FIELD";
case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
case CALLBACKS: return "CALLBACKS";
- case HANDLER: return "HANDLER";
case INTERCEPTOR: return "INTERCEPTOR";
case MAP_TRANSITION: return "MAP_TRANSITION";
- case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
- case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
+ case NONEXISTENT: return "NONEXISTENT";
}
UNREACHABLE(); // keep the compiler happy
return NULL;
@@ -8509,7 +8469,7 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
PrintExtraICState(out, kind(), extra_ic_state());
if (ic_state() == MONOMORPHIC) {
- PrintF(out, "type = %s\n", PropertyType2String(type()));
+ PrintF(out, "type = %s\n", StubType2String(type()));
}
if (is_call_stub() || is_keyed_call_stub()) {
PrintF(out, "argc = %d\n", arguments_count());
@@ -8565,6 +8525,8 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "\n");
}
PrintF(out, "\n");
+    // Optimized code is not expected to carry type feedback info; the
+    // assert below will flag it if that ever changes.
+ ASSERT(type_feedback_info()->IsUndefined());
} else if (kind() == FUNCTION) {
unsigned offset = stack_check_table_offset();
// If there is no stack check table, the "table start" will at or after
@@ -8581,6 +8543,12 @@ void Code::Disassemble(const char* name, FILE* out) {
}
PrintF(out, "\n");
}
+#ifdef OBJECT_PRINT
+ if (!type_feedback_info()->IsUndefined()) {
+ TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(out);
+ PrintF(out, "\n");
+ }
+#endif
}
PrintF("RelocInfo (size = %d)\n", relocation_size());
@@ -8740,7 +8708,7 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
Map* Map::GetPrototypeTransition(Object* prototype) {
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = GetPrototypeTransitions();
int number_of_transitions = NumberOfProtoTransitions();
const int proto_offset =
kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
@@ -8762,7 +8730,7 @@ MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
// Don't cache prototype transition if this map is shared.
if (is_shared() || !FLAG_cache_prototype_transitions) return this;
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = GetPrototypeTransitions();
const int step = kProtoTransitionElementsPerEntry;
const int header = kProtoTransitionHeaderSize;
@@ -8785,7 +8753,8 @@ MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
new_cache->set(i + header, cache->get(i + header));
}
cache = new_cache;
- set_prototype_transitions(cache);
+ MaybeObject* set_result = SetPrototypeTransitions(cache);
+ if (set_result->IsFailure()) return set_result;
}
int last = transitions - 1;
@@ -8864,15 +8833,12 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
Map* new_map = map->GetPrototypeTransition(value);
if (new_map == NULL) {
- { MaybeObject* maybe_new_map =
- map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- }
+ MaybeObject* maybe_new_map = map->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- { MaybeObject* maybe_new_cache =
- map->PutPrototypeTransition(value, new_map);
- if (maybe_new_cache->IsFailure()) return maybe_new_cache;
- }
+ MaybeObject* maybe_new_cache =
+ map->PutPrototypeTransition(value, new_map);
+ if (maybe_new_cache->IsFailure()) return maybe_new_cache;
new_map->set_prototype(value);
}
@@ -9152,6 +9118,7 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
Handle<AccessorInfo> data(AccessorInfo::cast(structure));
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
Handle<JSObject> self(JSObject::cast(receiver));
Handle<JSObject> holder_handle(JSObject::cast(holder));
@@ -9168,7 +9135,9 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) return isolate->heap()->undefined_value();
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
// __defineGetter__ callback
@@ -9432,7 +9401,8 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
// is read-only (a declared const that has not been initialized). If a
// value is being defined we skip attribute checks completely.
if (set_mode == DEFINE_PROPERTY) {
- details = PropertyDetails(attributes, NORMAL, details.index());
+ details = PropertyDetails(
+ attributes, NORMAL, details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
if (strict_mode == kNonStrictMode) {
@@ -9722,8 +9692,9 @@ MaybeObject* JSObject::SetElement(uint32_t index,
// Don't allow element properties to be redefined for external arrays.
if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
Isolate* isolate = GetHeap()->isolate();
+ Handle<Object> receiver(this);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { Handle<Object>(this), number };
+ Handle<Object> args[] = { receiver, number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
return isolate->Throw(*error);
@@ -9985,7 +9956,11 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
result = getter(index, info);
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ if (!result.IsEmpty()) {
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
+ }
}
Heap* heap = holder_handle->GetHeap();
@@ -10235,7 +10210,7 @@ MaybeObject* JSObject::GetPropertyPostInterceptor(
// Check local property in holder, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) {
+ if (result.IsFound()) {
return GetProperty(receiver, &result, name, attributes);
}
// Continue searching via the prototype chain.
@@ -10253,7 +10228,7 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
// Check local property in holder, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) {
+ if (result.IsFound()) {
return GetProperty(receiver, &result, name, attributes);
}
return GetHeap()->undefined_value();
@@ -10287,7 +10262,9 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
*attributes = NONE;
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
}
@@ -10312,7 +10289,7 @@ bool JSObject::HasRealNamedProperty(String* key) {
LookupResult result(isolate);
LocalLookupRealNamedProperty(key, &result);
- return result.IsProperty() && (result.type() != INTERCEPTOR);
+ return result.IsFound() && !result.IsInterceptor();
}
@@ -10392,7 +10369,7 @@ bool JSObject::HasRealNamedCallbackProperty(String* key) {
LookupResult result(isolate);
LocalLookupRealNamedProperty(key, &result);
- return result.IsFound() && (result.type() == CALLBACKS);
+ return result.IsPropertyCallbacks();
}
@@ -10523,10 +10500,10 @@ void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
ASSERT(storage->length() >= (NumberOfLocalProperties() - index));
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
+ ASSERT(storage->length() >= index + descs->number_of_descriptors());
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsProperty(i)) storage->set(index++, descs->GetKey(i));
+ storage->set(index + i, descs->GetKey(i));
}
- ASSERT(storage->length() >= index);
} else {
property_dictionary()->CopyKeysTo(storage,
index,
@@ -11096,8 +11073,12 @@ void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
+ MinimumCapacity capacity_option,
PretenureFlag pretenure) {
- int capacity = ComputeCapacity(at_least_space_for);
+ ASSERT(!capacity_option || IS_POWER_OF_TWO(at_least_space_for));
+ int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
+ ? at_least_space_for
+ : ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
return Failure::OutOfMemoryException();
}
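
The new assert requires custom capacities to be powers of two, since hash table probing relies on power-of-two sizes. One common way to derive such a capacity from a requested element count; a sketch of the idea, not V8's actual ComputeCapacity (overflow handling omitted):

  #include <cstdint>

  uint32_t NextPowerOfTwoCapacity(uint32_t at_least_space_for) {
    uint32_t target = at_least_space_for * 2;  // headroom: ~50% max load
    uint32_t capacity = 1;
    while (capacity < target) capacity <<= 1;  // round up to a power of two
    return capacity;
  }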
@@ -11154,30 +11135,6 @@ int StringDictionary::FindEntry(String* key) {
}
-bool StringDictionary::ContainsTransition(int entry) {
- switch (DetailsAt(entry).type()) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- return true;
- case CALLBACKS: {
- Object* value = ValueAt(entry);
- if (!value->IsAccessorPair()) return false;
- AccessorPair* accessors = AccessorPair::cast(value);
- return accessors->getter()->IsMap() || accessors->setter()->IsMap();
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- return false;
- }
- UNREACHABLE(); // Keep the compiler happy.
- return false;
-}
-
-
template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
ASSERT(NumberOfElements() < new_table->Capacity());
@@ -11230,7 +11187,9 @@ MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
(capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
Object* obj;
{ MaybeObject* maybe_obj =
- Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
+ Allocate(nof * 2,
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -11259,7 +11218,9 @@ MaybeObject* HashTable<Shape, Key>::Shrink(Key key) {
!GetHeap()->InNewSpace(this);
Object* obj;
{ MaybeObject* maybe_obj =
- Allocate(at_least_room_for, pretenure ? TENURED : NOT_TENURED);
+ Allocate(at_least_room_for,
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -11664,7 +11625,7 @@ Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
clamped_value = 255;
} else {
// Other doubles are rounded to the nearest integer.
- clamped_value = static_cast<uint8_t>(double_value + 0.5);
+ clamped_value = static_cast<uint8_t>(lrint(double_value));
}
} else {
// Clamp undefined to zero (default). All other types have been
@@ -12004,8 +11965,23 @@ MaybeObject* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
}
-Object* CompilationCacheTable::Lookup(String* src) {
- StringKey key(src);
+// The key for the script compilation cache is dependent on the mode flags,
+// because they change the global language mode and thus binding behaviour.
+// If flags change at some point, we must ensure that we do not hit the cache
+// for code compiled with different settings.
+static LanguageMode CurrentGlobalLanguageMode() {
+ return FLAG_use_strict
+ ? (FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE)
+ : CLASSIC_MODE;
+}
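
A minimal model of the keying rule described above: the cache key pairs the source with the global language mode, so the same source compiled under different modes cannot collide (illustrative types, not the real StringSharedKey):

  #include <map>
  #include <string>
  #include <utility>

  enum Mode { CLASSIC, STRICT, EXTENDED };  // models the three language modes

  // The value type stands in for a compiled function.
  static std::map<std::pair<std::string, Mode>, int> cache;

  int* LookupCompiled(const std::string& src, Mode mode) {
    auto it = cache.find(std::make_pair(src, mode));  // miss if mode differs
    return it == cache.end() ? nullptr : &it->second;
  }

  void PutCompiled(const std::string& src, Mode mode, int code) {
    cache[std::make_pair(src, mode)] = code;
  }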
+
+
+Object* CompilationCacheTable::Lookup(String* src, Context* context) {
+ SharedFunctionInfo* shared = context->closure()->shared();
+ StringSharedKey key(src,
+ shared,
+ CurrentGlobalLanguageMode(),
+ RelocInfo::kNoPosition);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
@@ -12035,17 +12011,24 @@ Object* CompilationCacheTable::LookupRegExp(String* src,
}
-MaybeObject* CompilationCacheTable::Put(String* src, Object* value) {
- StringKey key(src);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+MaybeObject* CompilationCacheTable::Put(String* src,
+ Context* context,
+ Object* value) {
+ SharedFunctionInfo* shared = context->closure()->shared();
+ StringSharedKey key(src,
+ shared,
+ CurrentGlobalLanguageMode(),
+ RelocInfo::kNoPosition);
+ CompilationCacheTable* cache;
+ MaybeObject* maybe_cache = EnsureCapacity(1, &key);
+ if (!maybe_cache->To(&cache)) return maybe_cache;
+
+ Object* k;
+ MaybeObject* maybe_k = key.AsObject();
+ if (!maybe_k->To(&k)) return maybe_k;
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), src);
+ cache->set(EntryToIndex(entry), k);
cache->set(EntryToIndex(entry) + 1, value);
cache->ElementAdded();
return cache;
@@ -12189,6 +12172,12 @@ MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
}
+void StringDictionary::DoGenerateNewEnumerationIndices(
+ Handle<StringDictionary> dictionary) {
+ CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
+ dictionary->GenerateNewEnumerationIndices());
+}
+
template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
Heap* heap = Dictionary<Shape, Key>::GetHeap();
@@ -12215,7 +12204,8 @@ MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
int pos = 0;
for (int i = 0; i < capacity; i++) {
if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- enumeration_order->set(pos++, Smi::FromInt(DetailsAt(i).index()));
+ enumeration_order->set(
+ pos++, Smi::FromInt(DetailsAt(i).dictionary_index()));
}
}
@@ -12314,6 +12304,8 @@ template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::Add(Key key,
Object* value,
PropertyDetails details) {
+ ASSERT(details.dictionary_index() == details.descriptor_index());
+
   // Validate that the key is absent.
SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
// Check whether the dictionary should be extended.
@@ -12341,7 +12333,8 @@ MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key,
uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash);
// Insert element at empty or deleted entry
- if (!details.IsDeleted() && details.index() == 0 && Shape::kIsEnumerable) {
+ if (!details.IsDeleted() &&
+ details.dictionary_index() == 0 && Shape::kIsEnumerable) {
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = NextEnumerationIndex();
@@ -12432,7 +12425,7 @@ MaybeObject* SeededNumberDictionary::Set(uint32_t key,
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
- DetailsAt(entry).index());
+ DetailsAt(entry).dictionary_index());
MaybeObject* maybe_object_key = SeededNumberDictionaryShape::AsObject(key);
Object* object_key;
if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
@@ -12503,23 +12496,45 @@ void Dictionary<Shape, Key>::CopyKeysTo(
}
-void StringDictionary::CopyEnumKeysTo(FixedArray* storage,
- FixedArray* sort_array) {
- ASSERT(storage->length() >= NumberOfEnumElements());
+FixedArray* StringDictionary::CopyEnumKeysTo(FixedArray* storage) {
+ int length = storage->length();
+ ASSERT(length >= NumberOfEnumElements());
+ Heap* heap = GetHeap();
+ Object* undefined_value = heap->undefined_value();
int capacity = Capacity();
- int index = 0;
+ int properties = 0;
+
+ // Fill in the enumeration array by assigning enumerable keys at their
+ // enumeration index. This will leave holes in the array if there are keys
+ // that are deleted or not enumerable.
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
if (IsKey(k)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted() || details.IsDontEnum()) continue;
- storage->set(index, k);
- sort_array->set(index, Smi::FromInt(details.index()));
- index++;
+ properties++;
+ storage->set(details.dictionary_index() - 1, k);
+ if (properties == length) break;
}
}
- storage->SortPairs(sort_array, sort_array->length());
- ASSERT(storage->length() >= index);
+
+  // There are holes in the enumeration array if fewer properties were assigned
+  // than the length of the array. If so, crunch all the existing properties
+  // together by shifting them to the left (maintaining the enumeration order)
+  // and trimming off the right side of the array.
+ if (properties < length) {
+ if (properties == 0) return heap->empty_fixed_array();
+ properties = 0;
+ for (int i = 0; i < length; ++i) {
+ Object* value = storage->get(i);
+ if (value != undefined_value) {
+ storage->set(properties, value);
+ ++properties;
+ }
+ }
+ RightTrimFixedArray<FROM_MUTATOR>(heap, storage, length - properties);
+ }
+ return storage;
}
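
The fill-then-crunch scheme above, as a stand-alone sketch: keys are placed at their 1-based enumeration index, the holes left by deleted or non-enumerable properties are squeezed out, and the array is trimmed (plain types, assuming unique indices no larger than length):

  #include <cstddef>
  #include <utility>
  #include <vector>

  std::vector<const char*> BuildEnumKeys(
      const std::vector<std::pair<const char*, int> >& live_keys,  // (key, index)
      int length) {
    std::vector<const char*> storage(length, nullptr);  // nullptr ~ undefined
    for (std::size_t i = 0; i < live_keys.size(); ++i) {
      storage[live_keys[i].second - 1] = live_keys[i].first;  // fill at index
    }
    int out = 0;
    for (int i = 0; i < length; ++i) {  // crunch holes, preserving order
      if (storage[i] != nullptr) storage[out++] = storage[i];
    }
    storage.resize(out);  // trim, like RightTrimFixedArray
    return storage;
  }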
@@ -12569,18 +12584,12 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
JSObject* obj, int unused_property_fields) {
// Make sure we preserve dictionary representation if there are too many
// descriptors.
- if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
-
- // Figure out if it is necessary to generate new enumeration indices.
- int max_enumeration_index =
- NextEnumerationIndex() +
- (DescriptorArray::kMaxNumberOfDescriptors -
- NumberOfElements());
- if (!PropertyDetails::IsValidIndex(max_enumeration_index)) {
- Object* result;
- { MaybeObject* maybe_result = GenerateNewEnumerationIndices();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ int number_of_elements = NumberOfElements();
+ if (number_of_elements > DescriptorArray::kMaxNumberOfDescriptors) return obj;
+
+ if (number_of_elements != NextEnumerationIndex()) {
+ MaybeObject* maybe_result = GenerateNewEnumerationIndices();
+ if (maybe_result->IsFailure()) return maybe_result;
}
int instance_descriptor_length = 0;
@@ -12604,19 +12613,35 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
}
+ int inobject_props = obj->map()->inobject_properties();
+
+ // Allocate new map.
+ Map* new_map;
+ MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ new_map->set_dictionary_map(false);
+
+ if (instance_descriptor_length == 0) {
+ ASSERT_LE(unused_property_fields, inobject_props);
+ // Transform the object.
+ new_map->set_unused_property_fields(inobject_props);
+ obj->set_map(new_map);
+ obj->set_properties(heap->empty_fixed_array());
+ // Check that it really works.
+ ASSERT(obj->HasFastProperties());
+ return obj;
+ }
+
// Allocate the instance descriptor.
DescriptorArray* descriptors;
- { MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(instance_descriptor_length,
- DescriptorArray::MAY_BE_SHARED);
- if (!maybe_descriptors->To<DescriptorArray>(&descriptors)) {
- return maybe_descriptors;
- }
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(instance_descriptor_length);
+ if (!maybe_descriptors->To(&descriptors)) {
+ return maybe_descriptors;
}
- DescriptorArray::WhitenessWitness witness(descriptors);
+ FixedArray::WhitenessWitness witness(descriptors);
- int inobject_props = obj->map()->inobject_properties();
int number_of_allocated_fields =
number_of_fields + unused_property_fields - inobject_props;
if (number_of_allocated_fields < 0) {
@@ -12626,33 +12651,33 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
// Allocate the fixed array for the fields.
- Object* fields;
- { MaybeObject* maybe_fields =
- heap->AllocateFixedArray(number_of_allocated_fields);
- if (!maybe_fields->ToObject(&fields)) return maybe_fields;
- }
+ FixedArray* fields;
+ MaybeObject* maybe_fields =
+ heap->AllocateFixedArray(number_of_allocated_fields);
+ if (!maybe_fields->To(&fields)) return maybe_fields;
// Fill in the instance descriptor and the fields.
- int next_descriptor = 0;
int current_offset = 0;
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
if (IsKey(k)) {
Object* value = ValueAt(i);
// Ensure the key is a symbol before writing into the instance descriptor.
- Object* key;
- { MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
- if (!maybe_key->ToObject(&key)) return maybe_key;
- }
+ String* key;
+ MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
+ if (!maybe_key->To(&key)) return maybe_key;
+
PropertyDetails details = DetailsAt(i);
+ ASSERT(details.descriptor_index() == details.dictionary_index());
+ int enumeration_index = details.descriptor_index();
PropertyType type = details.type();
if (value->IsJSFunction() && !heap->InNewSpace(value)) {
- ConstantFunctionDescriptor d(String::cast(key),
+ ConstantFunctionDescriptor d(key,
JSFunction::cast(value),
details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d, witness);
+ enumeration_index);
+ descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == NORMAL) {
if (current_offset < inobject_props) {
obj->InObjectPropertyAtPut(current_offset,
@@ -12660,24 +12685,19 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
UPDATE_WRITE_BARRIER);
} else {
int offset = current_offset - inobject_props;
- FixedArray::cast(fields)->set(offset, value);
+ fields->set(offset, value);
}
- FieldDescriptor d(String::cast(key),
+ FieldDescriptor d(key,
current_offset++,
details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d, witness);
+ enumeration_index);
+ descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == CALLBACKS) {
- if (value->IsAccessorPair()) {
- MaybeObject* maybe_copy =
- AccessorPair::cast(value)->CopyWithoutTransitions();
- if (!maybe_copy->To(&value)) return maybe_copy;
- }
- CallbacksDescriptor d(String::cast(key),
+ CallbacksDescriptor d(key,
value,
details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d, witness);
+ enumeration_index);
+ descriptors->Set(enumeration_index - 1, &d, witness);
} else {
UNREACHABLE();
}
@@ -12685,22 +12705,18 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
ASSERT(current_offset == number_of_fields);
- descriptors->Sort(witness);
- // Allocate new map.
- Object* new_map;
- { MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
+ descriptors->Sort();
+
+ MaybeObject* maybe_failure = new_map->InitializeDescriptors(descriptors);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ new_map->set_unused_property_fields(unused_property_fields);
// Transform the object.
- obj->set_map(Map::cast(new_map));
- obj->map()->set_instance_descriptors(descriptors);
- obj->map()->set_unused_property_fields(unused_property_fields);
+ obj->set_map(new_map);
- obj->set_properties(FixedArray::cast(fields));
+ obj->set_properties(fields);
ASSERT(obj->IsJSObject());
- descriptors->SetNextEnumerationIndex(NextEnumerationIndex());
// Check that it really works.
ASSERT(obj->HasFastProperties());
@@ -12771,11 +12787,11 @@ Object* ObjectHashTable::Lookup(Object* key) {
// If the object does not have an identity hash, it was never used as a key.
{ MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
- return GetHeap()->undefined_value();
+ return GetHeap()->the_hole_value();
}
}
int entry = FindEntry(key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
+ if (entry == kNotFound) return GetHeap()->the_hole_value();
return get(EntryToIndex(entry) + 1);
}
@@ -12792,7 +12808,7 @@ MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
int entry = FindEntry(key);
// Check whether to perform removal operation.
- if (value->IsUndefined()) {
+ if (value->IsTheHole()) {
if (entry == kNotFound) return this;
RemoveEntry(entry);
return Shrink(key);
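
// The hole-as-sentinel contract above can be mimicked with a plain map; a
// hedged sketch with illustrative names, not V8 API. One distinguished value
// doubles as the not-found result of Lookup and the removal request for Put.
#include <string>
#include <unordered_map>

static const std::string kSketchHole = "<the_hole>";

struct SketchObjectHashTable {
  std::unordered_map<std::string, std::string> table;

  std::string Lookup(const std::string& key) const {
    auto it = table.find(key);
    return it == table.end() ? kSketchHole : it->second;
  }

  void Put(const std::string& key, const std::string& value) {
    if (value == kSketchHole) {  // Mapping a key to the hole removes it.
      table.erase(key);
      return;
    }
    table[key] = value;
  }
};
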
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 88e3243fc2..45a2ac0d8f 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -33,7 +33,7 @@
#include "elements-kind.h"
#include "list.h"
#include "property-details.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
@@ -84,6 +84,7 @@
// - Context
// - JSFunctionResultCache
// - ScopeInfo
+// - TransitionArray
// - FixedDoubleArray
// - ExternalArray
// - ExternalPixelArray
@@ -170,17 +171,18 @@ enum CreationFlag {
};
-// Indicates whether the search function should expect a sorted or an unsorted
-// array as input.
-enum SearchMode {
- EXPECT_SORTED,
- EXPECT_UNSORTED
+// Indicates whether transitions can be added to a source map or not.
+enum TransitionFlag {
+ INSERT_TRANSITION,
+ OMIT_TRANSITION
};
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
+const int kStubMajorKeyBits = 6;
+const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// All Maps have a field instance_type containing an InstanceType.
// It describes the type of the instances.
@@ -653,6 +655,25 @@ STATIC_CHECK(ODDBALL_TYPE == Internals::kOddballType);
STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
+ V(FAST_ELEMENTS_SUB_TYPE) \
+ V(DICTIONARY_ELEMENTS_SUB_TYPE) \
+ V(FAST_PROPERTIES_SUB_TYPE) \
+ V(DICTIONARY_PROPERTIES_SUB_TYPE) \
+ V(MAP_CODE_CACHE_SUB_TYPE) \
+ V(SCOPE_INFO_SUB_TYPE) \
+ V(SYMBOL_TABLE_SUB_TYPE) \
+ V(DESCRIPTOR_ARRAY_SUB_TYPE) \
+ V(TRANSITION_ARRAY_SUB_TYPE)
+
+enum FixedArraySubInstanceType {
+#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
+#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
+ LAST_FIXED_ARRAY_SUB_TYPE = TRANSITION_ARRAY_SUB_TYPE
+};
+
+
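
// The list macro above ("X-macro" pattern, also used by CODE_KIND_LIST
// further down) expands one authoritative list into both the enum and any
// derived tables, so the two can never drift apart. A self-contained sketch:
#define SKETCH_COLOR_LIST(V) V(RED) V(GREEN) V(BLUE)

enum SketchColor {
#define DEFINE_SKETCH_COLOR_ENUM(name) name,
  SKETCH_COLOR_LIST(DEFINE_SKETCH_COLOR_ENUM)
#undef DEFINE_SKETCH_COLOR_ENUM
  NUMBER_OF_SKETCH_COLORS
};

// The same list expands a second time into a parallel name table.
static const char* const kSketchColorNames[] = {
#define DEFINE_SKETCH_COLOR_NAME(name) #name,
  SKETCH_COLOR_LIST(DEFINE_SKETCH_COLOR_NAME)
#undef DEFINE_SKETCH_COLOR_NAME
};
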
enum CompareResult {
LESS = -1,
EQUAL = 0,
@@ -780,14 +801,14 @@ class MaybeObject BASE_EMBEDDED {
V(JSModule) \
V(Map) \
V(DescriptorArray) \
+ V(TransitionArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
V(Context) \
- V(GlobalContext) \
- V(ModuleContext) \
+ V(NativeContext) \
V(ScopeInfo) \
V(JSFunction) \
V(Code) \
@@ -883,8 +904,8 @@ class Object : public MaybeObject {
Object* ToBoolean(); // ECMA-262 9.2.
// Convert to a JSObject if needed.
- // global_context is used when creating wrapper object.
- MUST_USE_RESULT MaybeObject* ToObject(Context* global_context);
+ // native_context is used when creating wrapper object.
+ MUST_USE_RESULT MaybeObject* ToObject(Context* native_context);
// Converts this to a Smi if possible.
// Failure is returned otherwise.
@@ -949,6 +970,8 @@ class Object : public MaybeObject {
static void VerifyPointer(Object* p);
#endif
+ inline void VerifyApiCallResultType();
+
// Prints this object without details.
inline void ShortPrint() {
ShortPrint(stdout);
@@ -1233,9 +1256,6 @@ class HeapObject: public Object {
};
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
-
// This class describes a body of an object of a fixed size
// in which all pointer fields are located in the [start_offset, end_offset)
// interval.
@@ -1250,8 +1270,8 @@ class FixedBodyDescriptor {
template<typename StaticVisitor>
static inline void IterateBody(HeapObject* obj) {
- StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
- SLOT_ADDR(obj, end_offset));
+ StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
}
};
@@ -1270,13 +1290,11 @@ class FlexibleBodyDescriptor {
template<typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
- StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
- SLOT_ADDR(obj, object_size));
+ StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, object_size));
}
};
-#undef SLOT_ADDR
-
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer)
@@ -1435,6 +1453,9 @@ class JSReceiver: public HeapObject {
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
+ // Return the constructor function (may be Heap::null_value()).
+ inline Object* GetConstructor();
+
// Set the object's prototype (only JSReceiver and null are allowed).
MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
bool skip_hidden_prototypes);
@@ -1707,7 +1728,7 @@ class JSObject: public JSReceiver {
static int GetIdentityHash(Handle<JSObject> obj);
MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
+ MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag);
static Handle<Object> DeleteProperty(Handle<JSObject> obj,
Handle<String> name);
@@ -1870,7 +1891,7 @@ class JSObject: public JSReceiver {
void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
- void LookupCallback(String* name, LookupResult* result);
+ void LookupCallbackProperty(String* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
@@ -1898,7 +1919,8 @@ class JSObject: public JSReceiver {
// new_map.
MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* new_map,
String* name,
- Object* value);
+ Object* value,
+ int field_index);
// Add a constant function property to a fast-case object.
// This leaves a CONSTANT_TRANSITION in the old map, and
@@ -1931,19 +1953,15 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
- // Converts a descriptor of any other type to a real field,
- // backed by the properties array. Descriptors of visible
- // types, such as CONSTANT_FUNCTION, keep their enumeration order.
- // Converts the descriptor on the original object's map to a
- // map transition, and the the new field is on the object's new map.
- MUST_USE_RESULT MaybeObject* ConvertDescriptorToFieldAndMapTransition(
+ // Replaces an existing transition with a transition to a map with a FIELD.
+ MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition(
+ int transition_index,
String* name,
Object* new_value,
PropertyAttributes attributes);
- // Converts a descriptor of any other type to a real field,
- // backed by the properties array. Descriptors of visible
- // types, such as CONSTANT_FUNCTION, keep their enumeration order.
+ // Converts a descriptor of any other type to a real field, backed by the
+ // properties array.
MUST_USE_RESULT MaybeObject* ConvertDescriptorToField(
String* name,
Object* new_value,
@@ -2056,6 +2074,10 @@ class JSObject: public JSReceiver {
PrintElements(stdout);
}
void PrintElements(FILE* out);
+ inline void PrintTransitions() {
+ PrintTransitions(stdout);
+ }
+ void PrintTransitions(FILE* out);
#endif
void PrintElementsTransition(
@@ -2209,18 +2231,23 @@ class JSObject: public JSReceiver {
Object* getter,
Object* setter,
PropertyAttributes attributes);
- void LookupInDescriptor(String* name, LookupResult* result);
-
- // Returns the hidden properties backing store object, currently
- // a StringDictionary, stored on this object.
- // If no hidden properties object has been put on this object,
- // return undefined, unless create_if_absent is true, in which case
- // a new dictionary is created, added to this object, and returned.
- MUST_USE_RESULT MaybeObject* GetHiddenPropertiesDictionary(
- bool create_if_absent);
- // Updates the existing hidden properties dictionary.
- MUST_USE_RESULT MaybeObject* SetHiddenPropertiesDictionary(
- StringDictionary* dictionary);
+
+
+ enum InitializeHiddenProperties {
+ CREATE_NEW_IF_ABSENT,
+ ONLY_RETURN_INLINE_VALUE
+ };
+
+ // If init_option is CREATE_NEW_IF_ABSENT, return the hash table backing
+ // store for hidden properties; if there is no backing store, allocate one.
+ // If init_option is ONLY_RETURN_INLINE_VALUE, return the hash table backing
+ // store or the inline-stored identity hash, whichever is found.
+ MUST_USE_RESULT MaybeObject* GetHiddenPropertiesHashTable(
+ InitializeHiddenProperties init_option);
+ // Set the hidden property backing store to either a hash table or
+ // the inline-stored identity hash.
+ MUST_USE_RESULT MaybeObject* SetHiddenPropertiesHashTable(
+ Object* value);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2244,6 +2271,8 @@ class FixedArrayBase: public HeapObject {
class FixedDoubleArray;
+class IncrementalMarking;
+
// FixedArray describes fixed-sized arrays with element type Object*.
class FixedArray: public FixedArrayBase {
@@ -2339,6 +2368,23 @@ class FixedArray: public FixedArrayBase {
}
};
+ // WhitenessWitness is used to prove that a descriptor array is white
+ // (unmarked), so incremental write barriers can be skipped because the
+ // marking invariant cannot be broken and slots pointing into evacuation
+ // candidates will be discovered when the object is scanned. A witness is
+ // always stack-allocated right after creating an array. Allocating a
+ // witness globally disables incremental marking. The witness is then
+ // passed along wherever needed to statically prove that the array is known to
+ // be white.
+ class WhitenessWitness {
+ public:
+ inline explicit WhitenessWitness(FixedArray* array);
+ inline ~WhitenessWitness();
+
+ private:
+ IncrementalMarking* marking_;
+ };
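
// A hedged sketch of the RAII discipline behind WhitenessWitness; Marker is
// a stand-in for IncrementalMarking, not the real interface.
struct SketchMarker {
  void Pause() {}   // Assumed: globally disable incremental marking.
  void Resume() {}  // Assumed: re-enable it.
};

class SketchWitness {
 public:
  explicit SketchWitness(SketchMarker* marking) : marking_(marking) {
    marking_->Pause();  // A freshly allocated array stays white from here on.
  }
  ~SketchWitness() { marking_->Resume(); }

 private:
  SketchMarker* marking_;
};
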
+
protected:
// Set operation on FixedArray without using write barriers. Can
// only be used for storing old space objects or smis.
@@ -2413,31 +2459,19 @@ class FixedDoubleArray: public FixedArrayBase {
};
-class IncrementalMarking;
-
-
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of these objects is:
-// TODO(1399): It should be possible to make room for bit_field3 in the map
-// without overloading the instance descriptors field in the map
-// (and storing it in the DescriptorArray when the map has one).
-// [0]: storage for bit_field3 for Map owning this object (Smi)
-// [1]: point to a fixed array with (value, detail) pairs.
-// [2]: next enumeration index (Smi), or pointer to small fixed array:
-// [0]: next enumeration index (Smi)
-// [1]: pointer to fixed array with enum cache
-// [3]: first key
-// [length() - 1]: last key
-//
+// [0]: Either Smi(0) if uninitialized, or a pointer to small fixed array:
+// [0]: pointer to fixed array with enum cache
+// [1]: either Smi(0) or pointer to fixed array with indices
+// [1]: first key
+// [length() - kDescriptorSize]: last key
class DescriptorArray: public FixedArray {
public:
// Returns true both for the shared empty_descriptor_array and for smis,
// which the map uses to encode additional bit fields when the descriptor
// array is not yet used.
inline bool IsEmpty();
- inline bool MayContainTransitions();
-
- DECL_ACCESSORS(elements_transition_map, Map)
// Returns the number of descriptors in the array.
int number_of_descriptors() {
@@ -2446,51 +2480,25 @@ class DescriptorArray: public FixedArray {
return len <= kFirstIndex ? 0 : (len - kFirstIndex) / kDescriptorSize;
}
- int NextEnumerationIndex() {
- if (IsEmpty()) return PropertyDetails::kInitialIndex;
- Object* obj = get(kEnumerationIndexIndex);
- if (obj->IsSmi()) {
- return Smi::cast(obj)->value();
- } else {
- Object* index = FixedArray::cast(obj)->get(kEnumCacheBridgeEnumIndex);
- return Smi::cast(index)->value();
- }
- }
+ inline int number_of_entries() { return number_of_descriptors(); }
+ inline int NextEnumerationIndex() { return number_of_descriptors() + 1; }
- // Set next enumeration index and flush any enum cache.
- void SetNextEnumerationIndex(int value) {
- if (!IsEmpty()) {
- set(kEnumerationIndexIndex, Smi::FromInt(value));
- }
- }
bool HasEnumCache() {
- return !IsEmpty() && !get(kEnumerationIndexIndex)->IsSmi();
+ return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
}
Object* GetEnumCache() {
ASSERT(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumerationIndexIndex));
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
return bridge->get(kEnumCacheBridgeCacheIndex);
}
Object** GetEnumCacheSlot() {
ASSERT(HasEnumCache());
return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kEnumerationIndexOffset);
- }
-
- Object** GetTransitionsSlot() {
- ASSERT(elements_transition_map() != NULL);
- return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kTransitionsOffset);
+ kEnumCacheOffset);
}
- // TODO(1399): It should be possible to make room for bit_field3 in the map
- // without overloading the instance descriptors field in the map
- // (and storing it in the DescriptorArray when the map has one).
- inline int bit_field3_storage();
- inline void set_bit_field3_storage(int value);
-
// Initialize or change the enum cache,
// using the supplied storage for the small "bridge".
void SetEnumCache(FixedArray* bridge_storage,
@@ -2502,34 +2510,16 @@ class DescriptorArray: public FixedArray {
inline Object** GetKeySlot(int descriptor_number);
inline Object* GetValue(int descriptor_number);
inline Object** GetValueSlot(int descriptor_number);
- inline void SetNullValueUnchecked(int descriptor_number, Heap* heap);
inline PropertyDetails GetDetails(int descriptor_number);
- inline void SetDetailsUnchecked(int descriptor_number, Smi* value);
inline PropertyType GetType(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
inline JSFunction* GetConstantFunction(int descriptor_number);
inline Object* GetCallbacksObject(int descriptor_number);
inline AccessorDescriptor* GetCallbacks(int descriptor_number);
- inline bool IsProperty(int descriptor_number);
- inline bool IsTransitionOnly(int descriptor_number);
- inline bool IsNullDescriptor(int descriptor_number);
- // WhitenessWitness is used to prove that a specific descriptor array is white
- // (unmarked), so incremental write barriers can be skipped because the
- // marking invariant cannot be broken and slots pointing into evacuation
- // candidates will be discovered when the object is scanned. A witness is
- // always stack-allocated right after creating a descriptor array. By
- // allocating a witness, incremental marking is globally disabled. The witness
- // is then passed along wherever needed to statically prove that the
- // descriptor array is known to be white.
- class WhitenessWitness {
- public:
- inline explicit WhitenessWitness(DescriptorArray* array);
- inline ~WhitenessWitness();
-
- private:
- IncrementalMarking* marking_;
- };
+ inline String* GetSortedKey(int descriptor_number);
+ inline int GetSortedKeyIndex(int descriptor_number);
+ inline void SetSortedKey(int pointer, int descriptor_number);
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
@@ -2537,48 +2527,23 @@ class DescriptorArray: public FixedArray {
Descriptor* desc,
const WhitenessWitness&);
- // Transfer a complete descriptor from the src descriptor array to the dst
- // one, dropping map transitions in CALLBACKS.
- static void CopyFrom(Handle<DescriptorArray> dst,
- int dst_index,
- Handle<DescriptorArray> src,
- int src_index,
- const WhitenessWitness& witness);
+ // Append automatically sets the enumeration index. This should only be used
+ // to add descriptors in bulk at the end, followed by sorting the descriptor
+ // array.
+ inline void Append(Descriptor* desc,
+ const WhitenessWitness&,
+ int number_of_set_descriptors);
// Transfer a complete descriptor from the src descriptor array to this
- // descriptor array, dropping map transitions in CALLBACKS.
- MUST_USE_RESULT MaybeObject* CopyFrom(int dst_index,
- DescriptorArray* src,
- int src_index,
- const WhitenessWitness&);
-
- // Copy the descriptor array, insert a new descriptor and optionally
- // remove map transitions. If the descriptor is already present, it is
- // replaced. If a replaced descriptor is a real property (not a transition
- // or null), its enumeration index is kept as is.
- // If adding a real property, map transitions must be removed. If adding
- // a transition, they must not be removed. All null descriptors are removed.
- MUST_USE_RESULT MaybeObject* CopyInsert(Descriptor* descriptor,
- TransitionFlag transition_flag);
-
- // Indicates whether the search function should expect a sorted or an unsorted
- // descriptor array as input.
- enum SharedMode {
- MAY_BE_SHARED,
- CANNOT_BE_SHARED
- };
-
- // Return a copy of the array with all transitions and null descriptors
- // removed. Return a Failure object in case of an allocation failure.
- MUST_USE_RESULT MaybeObject* RemoveTransitions(SharedMode shared_mode);
+ // descriptor array.
+ void CopyFrom(int dst_index,
+ DescriptorArray* src,
+ int src_index,
+ const WhitenessWitness&);
// Sort the instance descriptors by the hash codes of their keys.
- // Does not check for duplicates.
- void SortUnchecked(const WhitenessWitness&);
-
- // Sort the instance descriptors by the hash codes of their keys.
- // Checks the result for duplicates.
- void Sort(const WhitenessWitness&);
+ void Sort();
+ inline void SwapSortedKeys(int first, int second);
// Search the instance descriptors for given name.
INLINE(int Search(String* name));
@@ -2590,21 +2555,9 @@ class DescriptorArray: public FixedArray {
// Tells whether the name is present in the array.
bool Contains(String* name) { return kNotFound != Search(name); }
- // Perform a binary search in the instance descriptors represented
- // by this fixed array. low and high are descriptor indices. If there
- // are three instance descriptors in this array it should be called
- // with low=0 and high=2.
- int BinarySearch(String* name, int low, int high);
-
- // Perform a linear search in the instance descriptors represented
- // by this fixed array. len is the number of descriptor indices that are
- // valid.
- int LinearSearch(SearchMode mode, String* name, int len);
-
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors,
- SharedMode shared_mode);
+ MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors);
// Casting.
static inline DescriptorArray* cast(Object* obj);
@@ -2612,28 +2565,20 @@ class DescriptorArray: public FixedArray {
// Constant for denoting key was not found.
static const int kNotFound = -1;
- static const int kBitField3StorageIndex = 0;
- static const int kEnumerationIndexIndex = 1;
- static const int kTransitionsIndex = 2;
- static const int kFirstIndex = 3;
+ static const int kEnumCacheIndex = 0;
+ static const int kFirstIndex = 1;
// The length of the "bridge" to the enum cache.
- static const int kEnumCacheBridgeLength = 3;
- static const int kEnumCacheBridgeEnumIndex = 0;
- static const int kEnumCacheBridgeCacheIndex = 1;
- static const int kEnumCacheBridgeIndicesCacheIndex = 2;
+ static const int kEnumCacheBridgeLength = 2;
+ static const int kEnumCacheBridgeCacheIndex = 0;
+ static const int kEnumCacheBridgeIndicesCacheIndex = 1;
// Layout description.
- static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
- static const int kEnumerationIndexOffset =
- kBitField3StorageOffset + kPointerSize;
- static const int kTransitionsOffset = kEnumerationIndexOffset + kPointerSize;
- static const int kFirstOffset = kTransitionsOffset + kPointerSize;
+ static const int kEnumCacheOffset = FixedArray::kHeaderSize;
+ static const int kFirstOffset = kEnumCacheOffset + kPointerSize;
// Layout description for the bridge array.
- static const int kEnumCacheBridgeEnumOffset = FixedArray::kHeaderSize;
- static const int kEnumCacheBridgeCacheOffset =
- kEnumCacheBridgeEnumOffset + kPointerSize;
+ static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize;
// Layout of descriptor.
static const int kDescriptorKey = 0;
@@ -2664,6 +2609,12 @@ class DescriptorArray: public FixedArray {
// fit in a page).
static const int kMaxNumberOfDescriptors = 1024 + 512;
+ // Returns the fixed array length required to hold number_of_descriptors
+ // descriptors.
+ static int LengthFor(int number_of_descriptors) {
+ return ToKeyIndex(number_of_descriptors);
+ }
+
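
// A worked example of LengthFor. kFirstIndex == 1 comes from the constants
// above; kDescriptorSize == 3 (key, details, value per entry) is an
// assumption here, not quoted from this header.
int SketchLengthFor(int number_of_descriptors) {
  const int kFirstIndex = 1;      // The enum cache slot.
  const int kDescriptorSize = 3;  // Assumed slots per descriptor.
  return kFirstIndex + number_of_descriptors * kDescriptorSize;
}
// SketchLengthFor(0) == 1 and SketchLengthFor(2) == 1 + 2 * 3 == 7.
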
private:
// An entry in a DescriptorArray, represented as an (array, index) pair.
class Entry {
@@ -2702,7 +2653,7 @@ class DescriptorArray: public FixedArray {
static inline void NoIncrementalWriteBarrierSwap(
FixedArray* array, int first, int second);
- // Swap descriptor first and second.
+ // Swap first and second descriptor.
inline void NoIncrementalWriteBarrierSwapDescriptors(
int first, int second);
@@ -2710,6 +2661,14 @@ class DescriptorArray: public FixedArray {
};
+template<typename T>
+inline int LinearSearch(T* array, String* name, int len);
+
+
+template<typename T>
+inline int Search(T* array, String* name);
+
+
// HashTable is a subclass of FixedArray that implements a hash table
// that uses open addressing and quadratic probing.
//
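
// A hedged sketch of open addressing with quadratic probing over a
// power-of-two capacity; the triangular-number step is an assumption for
// illustration, not quoted from FindEntry. Offsets 0, 1, 3, 6, 10, ...
// visit every slot exactly once when the capacity is a power of two.
#include <stdint.h>

uint32_t SketchProbe(uint32_t hash, uint32_t attempt, uint32_t capacity) {
  return (hash + attempt * (attempt + 1) / 2) & (capacity - 1);
}
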
@@ -2762,6 +2721,11 @@ class BaseShape {
template<typename Shape, typename Key>
class HashTable: public FixedArray {
public:
+ enum MinimumCapacity {
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ USE_CUSTOM_MINIMUM_CAPACITY
+ };
+
// Wrapper methods
inline uint32_t Hash(Key key) {
if (Shape::UsesSeed) {
@@ -2814,6 +2778,7 @@ class HashTable: public FixedArray {
// Returns a new HashTable object. Might return Failure.
MUST_USE_RESULT static MaybeObject* Allocate(
int at_least_space_for,
+ MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY,
PretenureFlag pretenure = NOT_TENURED);
// Computes the required capacity for a table holding the given
@@ -3091,6 +3056,7 @@ class Dictionary: public HashTable<Shape, Key> {
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
+ ASSERT(index != 0);
this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
}
@@ -3164,7 +3130,9 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
}
// Copies enumerable keys to preallocated fixed array.
- void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array);
+ FixedArray* CopyEnumKeysTo(FixedArray* storage);
+ static void DoGenerateNewEnumerationIndices(
+ Handle<StringDictionary> dictionary);
// For transforming properties of a JSObject.
MUST_USE_RESULT MaybeObject* TransformPropertiesToFastFor(
@@ -3174,8 +3142,6 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
// Find entry for key, otherwise return kNotFound. Optimized version of
// HashTable::FindEntry.
int FindEntry(String* key);
-
- bool ContainsTransition(int entry);
};
@@ -3321,12 +3287,12 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
return reinterpret_cast<ObjectHashTable*>(obj);
}
- // Looks up the value associated with the given key. The undefined value is
+ // Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
Object* Lookup(Object* key);
// Adds (or overwrites) the value associated with the given key. Mapping a
- // key to the undefined value causes removal of the whole entry.
+ // key to the hole value causes removal of the whole entry.
MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
private:
@@ -3523,7 +3489,7 @@ class ScopeInfo : public FixedArray {
FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
#undef DECL_INDEX
#undef FOR_EACH_NUMERIC_FIELD
- kVariablePartIndex
+ kVariablePartIndex
};
// The layout of the variable part of a ScopeInfo is as follows:
@@ -4024,7 +3990,7 @@ class DeoptimizationInputData: public FixedArray {
static const int kFirstDeoptEntryIndex = 5;
// Offsets of deopt entry elements relative to the start of the entry.
- static const int kAstIdOffset = 0;
+ static const int kAstIdRawOffset = 0;
static const int kTranslationIndexOffset = 1;
static const int kArgumentsStackHeightOffset = 2;
static const int kPcOffset = 3;
@@ -4056,13 +4022,21 @@ class DeoptimizationInputData: public FixedArray {
set(IndexForEntry(i) + k##name##Offset, value); \
}
- DEFINE_ENTRY_ACCESSORS(AstId, Smi)
+ DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi)
DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
DEFINE_ENTRY_ACCESSORS(Pc, Smi)
#undef DEFINE_ENTRY_ACCESSORS
+ BailoutId AstId(int i) {
+ return BailoutId(AstIdRaw(i)->value());
+ }
+
+ void SetAstId(int i, BailoutId value) {
+ SetAstIdRaw(i, Smi::FromInt(value.ToInt()));
+ }
+
int DeoptCount() {
return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
}
@@ -4097,8 +4071,15 @@ class DeoptimizationInputData: public FixedArray {
class DeoptimizationOutputData: public FixedArray {
public:
int DeoptPoints() { return length() / 2; }
- Smi* AstId(int index) { return Smi::cast(get(index * 2)); }
- void SetAstId(int index, Smi* id) { set(index * 2, id); }
+
+ BailoutId AstId(int index) {
+ return BailoutId(Smi::cast(get(index * 2))->value());
+ }
+
+ void SetAstId(int index, BailoutId id) {
+ set(index * 2, Smi::FromInt(id.ToInt()));
+ }
+
Smi* PcAndState(int index) { return Smi::cast(get(1 + index * 2)); }
void SetPcAndState(int index, Smi* offset) { set(1 + index * 2, offset); }
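
// The pair layout can be read off the accessors above: deopt point i keeps
// its AST id at slot 2 * i and its pc-and-state word at slot 2 * i + 1, so
// n points fill exactly 2 * n slots (hence DeoptPoints() == length() / 2).
int SketchAstIdSlot(int index) { return index * 2; }
int SketchPcAndStateSlot(int index) { return 1 + index * 2; }
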
@@ -4133,8 +4114,8 @@ class TypeFeedbackCells: public FixedArray {
static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
// Accessors for AST ids associated with cache values.
- inline Smi* AstId(int index);
- inline void SetAstId(int index, Smi* id);
+ inline TypeFeedbackId AstId(int index);
+ inline void SetAstId(int index, TypeFeedbackId id);
// Accessors for global property cells holding the cache values.
inline JSGlobalPropertyCell* Cell(int index);
@@ -4174,30 +4155,49 @@ class Code: public HeapObject {
FLAGS_MAX_VALUE = kMaxInt
};
+#define CODE_KIND_LIST(V) \
+ V(FUNCTION) \
+ V(OPTIMIZED_FUNCTION) \
+ V(STUB) \
+ V(BUILTIN) \
+ V(LOAD_IC) \
+ V(KEYED_LOAD_IC) \
+ V(CALL_IC) \
+ V(KEYED_CALL_IC) \
+ V(STORE_IC) \
+ V(KEYED_STORE_IC) \
+ V(UNARY_OP_IC) \
+ V(BINARY_OP_IC) \
+ V(COMPARE_IC) \
+ V(TO_BOOLEAN_IC)
+
enum Kind {
- FUNCTION,
- OPTIMIZED_FUNCTION,
- STUB,
- BUILTIN,
- LOAD_IC,
- KEYED_LOAD_IC,
- CALL_IC,
- KEYED_CALL_IC,
- STORE_IC,
- KEYED_STORE_IC,
- UNARY_OP_IC,
- BINARY_OP_IC,
- COMPARE_IC,
- TO_BOOLEAN_IC,
- // No more than 16 kinds. The value currently encoded in four bits in
- // Flags.
+#define DEFINE_CODE_KIND_ENUM(name) name,
+ CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
+#undef DEFINE_CODE_KIND_ENUM
// Pseudo-kinds.
+ LAST_CODE_KIND = TO_BOOLEAN_IC,
REGEXP = BUILTIN,
FIRST_IC_KIND = LOAD_IC,
LAST_IC_KIND = TO_BOOLEAN_IC
};
+ // No more than 16 kinds. The value is currently encoded in four bits in
+ // Flags.
+ STATIC_ASSERT(LAST_CODE_KIND < 16);
+
+ // Types of stubs.
+ enum StubType {
+ NORMAL,
+ FIELD,
+ CONSTANT_FUNCTION,
+ CALLBACKS,
+ INTERCEPTOR,
+ MAP_TRANSITION,
+ NONEXISTENT
+ };
+
enum {
NUMBER_OF_KINDS = LAST_IC_KIND + 1
};
@@ -4210,7 +4210,7 @@ class Code: public HeapObject {
// Printing
static const char* Kind2String(Kind kind);
static const char* ICState2String(InlineCacheState state);
- static const char* PropertyType2String(PropertyType type);
+ static const char* StubType2String(StubType type);
static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
inline void Disassemble(const char* name) {
Disassemble(name, stdout);
@@ -4260,7 +4260,7 @@ class Code: public HeapObject {
inline Kind kind();
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline PropertyType type(); // Only valid for monomorphic IC stubs.
+ inline StubType type(); // Only valid for monomorphic IC stubs.
inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
@@ -4403,19 +4403,19 @@ class Code: public HeapObject {
Kind kind,
InlineCacheState ic_state = UNINITIALIZED,
ExtraICState extra_ic_state = kNoExtraICState,
- PropertyType type = NORMAL,
+ StubType type = NORMAL,
int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
- PropertyType type,
+ StubType type,
ExtraICState extra_ic_state = kNoExtraICState,
InlineCacheHolderFlag holder = OWN_MAP,
int argc = -1);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
- static inline PropertyType ExtractTypeFromFlags(Flags flags);
+ static inline StubType ExtractTypeFromFlags(Flags flags);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
@@ -4510,28 +4510,20 @@ class Code: public HeapObject {
static const int kICAgeOffset =
kGCMetadataOffset + kPointerSize;
static const int kFlagsOffset = kICAgeOffset + kIntSize;
- static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
- static const int kKindSpecificFlagsSize = 2 * kIntSize;
+ static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
+ static const int kKindSpecificFlags2Offset =
+ kKindSpecificFlags1Offset + kIntSize;
- static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
- kKindSpecificFlagsSize;
+ static const int kHeaderPaddingStart = kKindSpecificFlags2Offset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
static const int kHeaderSize =
(kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
- // Byte offsets within kKindSpecificFlagsOffset.
- static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset;
- static const int kOptimizableOffset = kKindSpecificFlagsOffset;
- static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
- static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
-
- static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
- static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
- static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
- static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
- static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
+ // Byte offsets within kKindSpecificFlags1Offset.
+ static const int kOptimizableOffset = kKindSpecificFlags1Offset;
+ static const int kCheckTypeOffset = kKindSpecificFlags1Offset;
static const int kFullCodeFlags = kOptimizableOffset + 1;
class FullCodeFlagsHasDeoptimizationSupportField:
@@ -4539,26 +4531,90 @@ class Code: public HeapObject {
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
- static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
-
- static const int kCompareOperationOffset = kCompareStateOffset + 1;
-
static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
- static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
- static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
-
// Flags layout. BitField<type, shift, size>.
class ICStateField: public BitField<InlineCacheState, 0, 3> {};
- class TypeField: public BitField<PropertyType, 3, 4> {};
- class CacheHolderField: public BitField<InlineCacheHolderFlag, 7, 1> {};
- class KindField: public BitField<Kind, 8, 4> {};
- class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
- class IsPregeneratedField: public BitField<bool, 14, 1> {};
+ class TypeField: public BitField<StubType, 3, 3> {};
+ class CacheHolderField: public BitField<InlineCacheHolderFlag, 6, 1> {};
+ class KindField: public BitField<Kind, 7, 4> {};
+ class ExtraICStateField: public BitField<ExtraICState, 11, 2> {};
+ class IsPregeneratedField: public BitField<bool, 13, 1> {};
+
+ // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
+ static const int kStackSlotsFirstBit = 0;
+ static const int kStackSlotsBitCount = 24;
+ static const int kUnaryOpTypeFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kUnaryOpTypeBitCount = 3;
+ static const int kBinaryOpTypeFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kBinaryOpTypeBitCount = 3;
+ static const int kBinaryOpResultTypeFirstBit =
+ kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount;
+ static const int kBinaryOpResultTypeBitCount = 3;
+ static const int kCompareStateFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kCompareStateBitCount = 3;
+ static const int kCompareOperationFirstBit =
+ kCompareStateFirstBit + kCompareStateBitCount;
+ static const int kCompareOperationBitCount = 4;
+ static const int kToBooleanStateFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kToBooleanStateBitCount = 8;
+ static const int kHasFunctionCacheFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kHasFunctionCacheBitCount = 1;
+
+ STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
+ STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
+ STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32);
+ STATIC_ASSERT(kBinaryOpResultTypeFirstBit +
+ kBinaryOpResultTypeBitCount <= 32);
+ STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32);
+ STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32);
+ STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
+ STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
+
+ class StackSlotsField: public BitField<int,
+ kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
+ class UnaryOpTypeField: public BitField<int,
+ kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
+ class BinaryOpTypeField: public BitField<int,
+ kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {}; // NOLINT
+ class BinaryOpResultTypeField: public BitField<int,
+ kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {}; // NOLINT
+ class CompareStateField: public BitField<int,
+ kCompareStateFirstBit, kCompareStateBitCount> {}; // NOLINT
+ class CompareOperationField: public BitField<int,
+ kCompareOperationFirstBit, kCompareOperationBitCount> {}; // NOLINT
+ class ToBooleanStateField: public BitField<int,
+ kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
+ class HasFunctionCacheField: public BitField<bool,
+ kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
+
+ // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
+ static const int kStubMajorKeyFirstBit = 0;
+ static const int kSafepointTableOffsetFirstBit =
+ kStubMajorKeyFirstBit + kStubMajorKeyBits;
+ static const int kSafepointTableOffsetBitCount = 26;
+
+ STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32);
+ STATIC_ASSERT(kSafepointTableOffsetFirstBit +
+ kSafepointTableOffsetBitCount <= 32);
+
+ class SafepointTableOffsetField: public BitField<int,
+ kSafepointTableOffsetFirstBit,
+ kSafepointTableOffsetBitCount> {}; // NOLINT
+ class StubMajorKeyField: public BitField<int,
+ kStubMajorKeyFirstBit, kStubMajorKeyBits> {}; // NOLINT
+
+ // KindSpecificFlags2 layout (FUNCTION)
+ class StackCheckTableOffsetField: public BitField<int, 0, 31> {};
// Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 15;
+ static const int kArgumentsCountShift = 14;
static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
// This constant should be encodable in an ARM instruction.
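
// A hedged sketch of the BitField<type, shift, size> template these classes
// instantiate; a plausible reconstruction, not the exact V8 definition.
#include <stdint.h>

template <class T, int shift, int size>
struct SketchBitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
  static uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | encode(value);
  }
};

// E.g. stack slots in bits 0..23 and the has-function-cache bit right above
// them, both packed into one 32-bit kind-specific flags word:
typedef SketchBitField<int, 0, 24> SketchStackSlots;
typedef SketchBitField<bool, 24, 1> SketchHasFunctionCache;
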
@@ -4608,12 +4664,15 @@ class Map: public HeapObject {
inline void set_bit_field2(byte value);
// Bit field 3.
- // TODO(1399): It should be possible to make room for bit_field3 in the map
- // without overloading the instance descriptors field (and storing it in the
- // DescriptorArray when the map has one).
inline int bit_field3();
inline void set_bit_field3(int value);
+ class EnumLengthBits: public BitField<int, 0, 11> {};
+ class NumberOfOwnDescriptorsBits: public BitField<int, 11, 11> {};
+ class IsShared: public BitField<bool, 22, 1> {};
+ class FunctionWithPrototype: public BitField<bool, 23, 1> {};
+ class DictionaryMap: public BitField<bool, 24, 1> {};
+
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
// property is set to a value that is not a JSObject, the prototype
@@ -4733,8 +4792,16 @@ class Map: public HeapObject {
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
+ inline bool HasTransitionArray();
+ inline bool HasElementsTransition();
inline Map* elements_transition_map();
- inline void set_elements_transition_map(Map* transitioned_map);
+ MUST_USE_RESULT inline MaybeObject* set_elements_transition_map(
+ Map* transitioned_map);
+ inline void SetTransition(int index, Map* target);
+ MUST_USE_RESULT inline MaybeObject* AddTransition(String* key, Map* target);
+ DECL_ACCESSORS(transitions, TransitionArray)
+ inline void ClearTransitions(Heap* heap,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
@@ -4748,6 +4815,13 @@ class Map: public HeapObject {
inline void set_is_shared(bool value);
inline bool is_shared();
+ // Tells whether the map is used for JSObjects in dictionary mode (i.e.
+ // normalized objects, i.e. objects for which HasFastProperties returns false).
+ // A map can never be used for both dictionary mode and fast mode JSObjects.
+ // False by default and for HeapObjects that are not JSObjects.
+ inline void set_dictionary_map(bool value);
+ inline bool is_dictionary_map();
+
// Tells whether the instance needs security checks when accessing its
// properties.
inline void set_is_access_check_needed(bool access_check_needed);
@@ -4761,16 +4835,15 @@ class Map: public HeapObject {
inline JSFunction* unchecked_constructor();
- // Should only be called by the code that initializes map to set initial valid
- // value of the instance descriptor member.
- inline void init_instance_descriptors();
-
// [instance descriptors]: describes the object.
- DECL_ACCESSORS(instance_descriptors, DescriptorArray)
-
- // Sets the instance descriptor array for the map to be an empty descriptor
- // array.
- inline void clear_instance_descriptors();
+ inline DescriptorArray* instance_descriptors();
+ MUST_USE_RESULT inline MaybeObject* SetDescriptors(
+ DescriptorArray* descriptors,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ static void SetDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors);
+ MUST_USE_RESULT inline MaybeObject* InitializeDescriptors(
+ DescriptorArray* descriptors);
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
@@ -4782,6 +4855,7 @@ class Map: public HeapObject {
inline Object* GetBackPointer();
inline void SetBackPointer(Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void init_back_pointer(Object* undefined);
// [prototype transitions]: cache of prototype transitions.
// Prototype transition is a transition that happens
@@ -4791,27 +4865,29 @@ class Map: public HeapObject {
// 1: back pointer that overlaps with prototype transitions field.
// 2 + 2 * i: prototype
// 3 + 2 * i: target map
- DECL_ACCESSORS(prototype_transitions, FixedArray)
+ inline FixedArray* GetPrototypeTransitions();
+ MUST_USE_RESULT inline MaybeObject* SetPrototypeTransitions(
+ FixedArray* prototype_transitions);
+ inline bool HasPrototypeTransitions();
- inline void init_prototype_transitions(Object* undefined);
- inline HeapObject* unchecked_prototype_transitions();
+ inline HeapObject* UncheckedPrototypeTransitions();
+ inline TransitionArray* unchecked_transition_array();
- static const int kProtoTransitionHeaderSize = 2;
+ static const int kProtoTransitionHeaderSize = 1;
static const int kProtoTransitionNumberOfEntriesOffset = 0;
- static const int kProtoTransitionBackPointerOffset = 1;
static const int kProtoTransitionElementsPerEntry = 2;
static const int kProtoTransitionPrototypeOffset = 0;
static const int kProtoTransitionMapOffset = 1;
inline int NumberOfProtoTransitions() {
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = GetPrototypeTransitions();
if (cache->length() == 0) return 0;
return
Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
}
inline void SetNumberOfProtoTransitions(int value) {
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = GetPrototypeTransitions();
ASSERT(cache->length() != 0);
cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
Smi::FromInt(value));
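
// With the one-slot header and two slots per entry defined above, the flat
// cache indexes as in this hedged sketch (names are illustrative):
int SketchProtoTransitionIndex(int entry, int field_offset) {
  const int kHeaderSize = 1;        // Just the number-of-entries slot.
  const int kElementsPerEntry = 2;  // Prototype plus target map.
  return kHeaderSize + entry * kElementsPerEntry + field_offset;
}
// Entry 0: prototype at index 1, map at index 2; entry 1 starts at index 3.
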
@@ -4820,19 +4896,67 @@ class Map: public HeapObject {
// Lookup in the map's instance descriptors and fill out the result
// with the given holder if the name is found. The holder may be
// NULL when this function is used from the compiler.
- void LookupInDescriptors(JSObject* holder,
- String* name,
- LookupResult* result);
+ inline void LookupDescriptor(JSObject* holder,
+ String* name,
+ LookupResult* result);
+
+ inline void LookupTransition(JSObject* holder,
+ String* name,
+ LookupResult* result);
+
+ // The size of transition arrays is limited so they do not end up in large
+ // object space. Otherwise ClearNonLiveTransitions would leak memory while
+ // applying in-place right trimming.
+ inline bool CanHaveMoreTransitions();
+
+ int LastAdded() {
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ ASSERT(number_of_own_descriptors > 0);
+ return number_of_own_descriptors - 1;
+ }
+
+ int NumberOfOwnDescriptors() {
+ return NumberOfOwnDescriptorsBits::decode(bit_field3());
+ }
+ void SetNumberOfOwnDescriptors(int number) {
+ set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
+ }
+
+ int EnumLength() {
+ return EnumLengthBits::decode(bit_field3());
+ }
+
+ void SetEnumLength(int index) {
+ set_bit_field3(EnumLengthBits::update(bit_field3(), index));
+ }
+
+ MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
+ MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
+ MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
+ DescriptorArray* descriptors,
+ String* name,
+ TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
+ TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* CopyInsertDescriptor(Descriptor* descriptor,
+ TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* CopyReplaceDescriptor(Descriptor* descriptor,
+ int index,
+ TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
+ TransitionFlag flag);
MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
+ inline void AppendDescriptor(Descriptor* desc,
+ const DescriptorArray::WhitenessWitness&);
+
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
- MUST_USE_RESULT MaybeObject* CopyDropTransitions(
- DescriptorArray::SharedMode shared_mode);
+ MUST_USE_RESULT MaybeObject* Copy();
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@@ -4861,6 +4985,11 @@ class Map: public HeapObject {
Handle<Code> code);
MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
+ // Extend the descriptor array of the map with the list of descriptors.
+ // In case of duplicates, the latest descriptor is used.
+ static void CopyAppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors);
+
// Returns the found code or undefined if absent.
Object* FindInCodeCache(String* name, Code::Flags flags);
@@ -4891,10 +5020,6 @@ class Map: public HeapObject {
// allowed.
Map* LookupElementsTransitionMap(ElementsKind elements_kind);
- // Adds a new transitions for changing the elements kind to |elements_kind|.
- MUST_USE_RESULT MaybeObject* CreateNextElementsTransition(
- ElementsKind elements_kind);
-
// Returns the transitioned map for this map with the most generic
// elements_kind that's found in |candidates|, or null handle if no match is
// found at all.
@@ -4906,8 +5031,8 @@ class Map: public HeapObject {
// holding weak references when incremental marking is used, because it also
// iterates over objects that are otherwise unreachable.
#ifdef DEBUG
- void ZapInstanceDescriptors();
void ZapPrototypeTransitions();
+ void ZapTransitions();
#endif
// Dispatched behavior.
@@ -4945,35 +5070,30 @@ class Map: public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
+ // Constant for denoting that the enum cache field has not yet been used.
+ static const int kInvalidEnumCache = EnumLengthBits::kMax;
+
// Layout description.
static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
- // Storage for instance descriptors is overloaded to also contain additional
- // map flags when unused (bit_field3). When the map has instance descriptors,
- // the flags are transferred to the instance descriptor array and accessed
- // through an extra indirection.
- // TODO(1399): It should be possible to make room for bit_field3 in the map
- // without overloading the instance descriptors field, but the map is
- // currently perfectly aligned to 32 bytes and extending it at all would
- // double its size. After the increment GC work lands, this size restriction
- // could be loosened and bit_field3 moved directly back in the map.
- static const int kInstanceDescriptorsOrBitField3Offset =
+ // Storage for the transition array is overloaded to directly contain a back
+ // pointer if unused. When the map has transitions, the back pointer is
+ // transferred to the transition array and accessed through an extra
+ // indirection.
+ static const int kTransitionsOrBackPointerOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset =
- kInstanceDescriptorsOrBitField3Offset + kPointerSize;
- static const int kPrototypeTransitionsOrBackPointerOffset =
- kCodeCacheOffset + kPointerSize;
- static const int kPadStart =
- kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
+ kTransitionsOrBackPointerOffset + kPointerSize;
+ static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
+ static const int kPadStart = kBitField3Offset + kPointerSize;
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
// being contiguously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
- static const int kPointerFieldsEndOffset =
- kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
+ static const int kPointerFieldsEndOffset = kBitField3Offset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -5028,10 +5148,6 @@ class Map: public HeapObject {
static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1) <<
Map::kElementsKindShift) - 1;
- // Bit positions for bit field 3
- static const int kIsShared = 0;
- static const int kFunctionWithPrototype = 1;
-
typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
kPointerFieldsEndOffset,
kSize> BodyDescriptor;
@@ -5217,6 +5333,29 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ // [optimized_code_map]: Map from native context to optimized code
+ // and a shared literals array, or Smi 0 if none.
+ DECL_ACCESSORS(optimized_code_map, Object)
+
+ // Returns index i of the entry with the specified context. At position
+ // i - 1 is the context, position i the code, and i + 1 the literals array.
+ // Returns -1 when no matching entry is found.
+ int SearchOptimizedCodeMap(Context* native_context);
+
+ // Installs optimized code from the code map on the given closure. The
+ // index has to be consistent with a search result as defined above.
+ void InstallFromOptimizedCodeMap(JSFunction* function, int index);
+
+ // Clear optimized code map.
+ void ClearOptimizedCodeMap();
+
+ // Add a new entry to the optimized code map.
+ static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ Handle<Code> code,
+ Handle<FixedArray> literals);
+ static const int kEntryLength = 3;
+
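
// A hedged sketch of the lookup contract described above: the map is a flat
// array of (context, code, literals) triples and the returned index points
// at the code slot, so context sits at i - 1 and literals at i + 1. Types
// and names are illustrative, not V8 API.
#include <cstddef>
#include <vector>

int SketchSearchOptimizedCodeMap(const std::vector<const void*>& map,
                                 const void* native_context) {
  const int kEntryLength = 3;  // (context, code, literals) per entry.
  for (std::size_t i = 0; i + kEntryLength <= map.size(); i += kEntryLength) {
    if (map[i] == native_context) return static_cast<int>(i) + 1;
  }
  return -1;  // No entry for this native context.
}
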
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -5324,6 +5463,10 @@ class SharedFunctionInfo: public HeapObject {
// IsInobjectSlackTrackingInProgress is false after this call.
void CompleteInobjectSlackTracking();
+ // Invoked before pointers in SharedFunctionInfo are being marked.
+ // Also clears the optimized code map.
+ inline void BeforeVisitingPointers();
+
// Clears the initial_map before the GC marking phase to ensure the reference
// is weak. IsInobjectSlackTrackingInProgress is false after this call.
void DetachInitialMap();
@@ -5436,6 +5579,12 @@ class SharedFunctionInfo: public HeapObject {
// when doing GC if we expect that the function will no longer be used.
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
+ // Indicates if this function can be lazily compiled without a context.
+ // This is used to determine if we can force compilation when the function
+ // is reached not through program execution but through other means (e.g.
+ // heap iteration by the debugger).
+ DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
+
// Indicates how many full GCs this function has survived with assigned
// code object. Used to determine when it is relatively safe to flush
// this code object and replace it with lazy compilation stub.
@@ -5500,6 +5649,9 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that the function cannot be inlined.
DECL_BOOLEAN_ACCESSORS(dont_inline)
+ // Indicates that code for this function cannot be cached.
+ DECL_BOOLEAN_ACCESSORS(dont_cache)
+
// Indicates whether or not the code in the shared function supports
// deoptimization.
inline bool has_deoptimization_support();
@@ -5509,12 +5661,12 @@ class SharedFunctionInfo: public HeapObject {
// Disable (further) attempted optimization of all functions sharing this
// shared function info.
- void DisableOptimization();
+ void DisableOptimization(const char* reason);
// Lookup the bailout ID and ASSERT that it exists in the non-optimized
// code, returns whether it asserted (i.e., always true if assertions are
// disabled).
- bool VerifyBailoutId(int id);
+ bool VerifyBailoutId(BailoutId id);
// Check whether an inlined constructor can be generated with the given
// prototype.
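The switch from a raw int to BailoutId here (and in CompileOptimized further down) is the typed-wrapper pattern: wrapping the id makes it a compile error to pass an unrelated integer where a bailout id is expected. A minimal sketch of the shape of such a wrapper; the helper names below are assumptions of this sketch, not necessarily V8's exact API:

    // Type-safe id wrapper in the spirit of BailoutId.
    class BailoutId {
     public:
      explicit BailoutId(int id) : id_(id) {}  // explicit: no silent int conversion
      int ToInt() const { return id_; }
      static BailoutId None() { return BailoutId(-1); }
      bool IsNone() const { return id_ == -1; }
      bool operator==(const BailoutId& other) const { return id_ == other.id_; }
     private:
      int id_;
    };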
@@ -5582,15 +5734,12 @@ class SharedFunctionInfo: public HeapObject {
void ResetForNewContext(int new_ic_age);
- // Helpers to compile the shared code. Returns true on success, false on
- // failure (e.g., stack overflow during compilation).
- static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
+ // Helper to compile the shared code. Returns true on success, false on
+ // failure (e.g., stack overflow during compilation). This is only used by
+ // the debugger; it is not possible to compile without a context otherwise.
static bool CompileLazy(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
- void SharedFunctionInfoIterateBody(ObjectVisitor* v);
-
// Casting.
static inline SharedFunctionInfo* cast(Object* obj);
@@ -5601,7 +5750,8 @@ class SharedFunctionInfo: public HeapObject {
// Pointer fields.
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kScopeInfoOffset = kCodeOffset + kPointerSize;
+ static const int kOptimizedCodeMapOffset = kCodeOffset + kPointerSize;
+ static const int kScopeInfoOffset = kOptimizedCodeMapOffset + kPointerSize;
static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
static const int kInstanceClassNameOffset =
kConstructStubOffset + kPointerSize;
@@ -5719,6 +5869,7 @@ class SharedFunctionInfo: public HeapObject {
enum CompilerHints {
kHasOnlySimpleThisPropertyAssignments,
kAllowLazyCompilation,
+ kAllowLazyCompilationWithoutContext,
kLiveObjectsMayExist,
kCodeAgeShift,
kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
@@ -5733,6 +5884,7 @@ class SharedFunctionInfo: public HeapObject {
kIsFunction,
kDontOptimize,
kDontInline,
+ kDontCache,
kCompilerHintsCount // Pseudo entry
};
@@ -5799,6 +5951,9 @@ class JSModule: public JSObject {
// [context]: the context holding the module's locals, or undefined if none.
DECL_ACCESSORS(context, Object)
+ // [scope_info]: Scope info.
+ DECL_ACCESSORS(scope_info, ScopeInfo)
+
// Casting.
static inline JSModule* cast(Object* obj);
@@ -5815,7 +5970,8 @@ class JSModule: public JSObject {
// Layout description.
static const int kContextOffset = JSObject::kHeaderSize;
- static const int kSize = kContextOffset + kPointerSize;
+ static const int kScopeInfoOffset = kContextOffset + kPointerSize;
+ static const int kSize = kScopeInfoOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSModule);
@@ -5864,18 +6020,26 @@ class JSFunction: public JSObject {
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
+ void MarkForParallelRecompilation();
// Helpers to compile this function. Returns true on success, false on
// failure (e.g., stack overflow during compilation).
+ static bool EnsureCompiled(Handle<JSFunction> function,
+ ClearExceptionFlag flag);
static bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag);
static bool CompileOptimized(Handle<JSFunction> function,
- int osr_ast_id,
+ BailoutId osr_ast_id,
ClearExceptionFlag flag);
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForLazyRecompilation();
+ inline bool IsMarkedForParallelRecompilation();
+
+ // Tells whether or not the function is on the parallel
+ // recompilation queue.
+ inline bool IsInRecompileQueue();
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -5922,7 +6086,7 @@ class JSFunction: public JSObject {
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
- Object* RemovePrototype();
+ void RemovePrototype();
inline bool should_have_prototype();
// Accessor for this function's initial map's [[class]]
@@ -5934,7 +6098,7 @@ class JSFunction: public JSObject {
// Instances created afterwards will have a map whose [[class]] is
// set to 'value', but there is no guarantees on instances created
// before.
- Object* SetInstanceClassName(String* name);
+ void SetInstanceClassName(String* name);
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
@@ -5970,8 +6134,8 @@ class JSFunction: public JSObject {
// Returns the number of allocated literals.
inline int NumberOfLiterals();
- // Retrieve the global context from a function's literal array.
- static Context* GlobalContextFromLiterals(FixedArray* literals);
+ // Retrieve the native context from a function's literal array.
+ static Context* NativeContextFromLiterals(FixedArray* literals);
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
@@ -5988,7 +6152,7 @@ class JSFunction: public JSObject {
// Layout of the literals array.
static const int kLiteralsPrefixSize = 1;
- static const int kLiteralGlobalContextIndex = 0;
+ static const int kLiteralNativeContextIndex = 0;
// Layout of the bound-function binding array.
static const int kBoundFunctionIndex = 0;
@@ -6010,9 +6174,9 @@ class JSFunction: public JSObject {
class JSGlobalProxy : public JSObject {
public:
- // [context]: the owner global context of this global proxy object.
+ // [native_context]: the owner native context of this global proxy object.
// It is null value if this object is not used by any context.
- DECL_ACCESSORS(context, Object)
+ DECL_ACCESSORS(native_context, Object)
// Casting.
static inline JSGlobalProxy* cast(Object* obj);
@@ -6029,8 +6193,8 @@ class JSGlobalProxy : public JSObject {
#endif
// Layout description.
- static const int kContextOffset = JSObject::kHeaderSize;
- static const int kSize = kContextOffset + kPointerSize;
+ static const int kNativeContextOffset = JSObject::kHeaderSize;
+ static const int kSize = kNativeContextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
@@ -6047,7 +6211,10 @@ class GlobalObject: public JSObject {
// [builtins]: the object holding the runtime routines written in JS.
DECL_ACCESSORS(builtins, JSBuiltinsObject)
- // [global context]: the global context corresponding to this global object.
+ // [native context]: the natives corresponding to this global object.
+ DECL_ACCESSORS(native_context, Context)
+
+ // [global context]: the most recent (i.e. innermost) global context.
DECL_ACCESSORS(global_context, Context)
// [global receiver]: the global receiver object of the context
@@ -6078,7 +6245,8 @@ class GlobalObject: public JSObject {
// Layout description.
static const int kBuiltinsOffset = JSObject::kHeaderSize;
- static const int kGlobalContextOffset = kBuiltinsOffset + kPointerSize;
+ static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize;
+ static const int kGlobalContextOffset = kNativeContextOffset + kPointerSize;
static const int kGlobalReceiverOffset = kGlobalContextOffset + kPointerSize;
static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
@@ -6524,13 +6692,15 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
HashTableKey*> {
public:
// Find cached value for a string key, otherwise return null.
- Object* Lookup(String* src);
+ Object* Lookup(String* src, Context* context);
Object* LookupEval(String* src,
Context* context,
LanguageMode language_mode,
int scope_position);
Object* LookupRegExp(String* source, JSRegExp::Flags flags);
- MUST_USE_RESULT MaybeObject* Put(String* src, Object* value);
+ MUST_USE_RESULT MaybeObject* Put(String* src,
+ Context* context,
+ Object* value);
MUST_USE_RESULT MaybeObject* PutEval(String* src,
Context* context,
SharedFunctionInfo* value,
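Adding the Context* parameter to Lookup and Put means compilation results are now cached per (source, context) pair rather than per source string alone. A rough sketch of such a two-part cache key using standard containers (a simplification; V8 uses its own HashTable):

    #include <string>
    #include <unordered_map>

    struct Context;  // opaque stand-ins
    struct CodeObj;

    struct Key {
      std::string src;
      const Context* context;
      bool operator==(const Key& o) const {
        return src == o.src && context == o.context;
      }
    };

    struct KeyHash {
      size_t operator()(const Key& k) const {  // simple combine, fine for a sketch
        return std::hash<std::string>()(k.src) ^ std::hash<const void*>()(k.context);
      }
    };

    typedef std::unordered_map<Key, CodeObj*, KeyHash> CompilationCache;

    CodeObj* Lookup(const CompilationCache& cache, const Key& key) {
      CompilationCache::const_iterator it = cache.find(key);
      return it == cache.end() ? nullptr : it->second;  // null when absent
    }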
@@ -6704,7 +6874,15 @@ class TypeFeedbackInfo: public Struct {
inline void set_ic_total_count(int count);
inline int ic_with_type_info_count();
- inline void set_ic_with_type_info_count(int count);
+ inline void change_ic_with_type_info_count(int count);
+
+ inline void initialize_storage();
+
+ inline void change_own_type_change_checksum();
+ inline int own_type_change_checksum();
+
+ inline void set_inlined_type_change_checksum(int checksum);
+ inline bool matches_inlined_type_change_checksum(int checksum);
DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
@@ -6720,14 +6898,25 @@ class TypeFeedbackInfo: public Struct {
void TypeFeedbackInfoVerify();
#endif
- static const int kIcTotalCountOffset = HeapObject::kHeaderSize;
- static const int kIcWithTypeinfoCountOffset =
- kIcTotalCountOffset + kPointerSize;
- static const int kTypeFeedbackCellsOffset =
- kIcWithTypeinfoCountOffset + kPointerSize;
+ static const int kStorage1Offset = HeapObject::kHeaderSize;
+ static const int kStorage2Offset = kStorage1Offset + kPointerSize;
+ static const int kTypeFeedbackCellsOffset = kStorage2Offset + kPointerSize;
static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
private:
+ static const int kTypeChangeChecksumBits = 7;
+
+ class ICTotalCountField: public BitField<int, 0,
+ kSmiValueSize - kTypeChangeChecksumBits> {}; // NOLINT
+ class OwnTypeChangeChecksum: public BitField<int,
+ kSmiValueSize - kTypeChangeChecksumBits,
+ kTypeChangeChecksumBits> {}; // NOLINT
+ class ICsWithTypeInfoCountField: public BitField<int, 0,
+ kSmiValueSize - kTypeChangeChecksumBits> {}; // NOLINT
+ class InlinedTypeChangeChecksum: public BitField<int,
+ kSmiValueSize - kTypeChangeChecksumBits,
+ kTypeChangeChecksumBits> {}; // NOLINT
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
};
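The two storage Smis replace the separate count fields above: each word packs a count in the low kSmiValueSize - 7 bits and a 7-bit type-change checksum in the high bits via BitField. A minimal BitField of the same shape (the 31-bit Smi payload width below is an assumption of this sketch):

    #include <cassert>
    #include <cstdint>

    // Minimal BitField: a typed view of |size| bits at offset |shift|.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
      static T decode(uint32_t word) { return static_cast<T>((word & kMask) >> shift); }
      static uint32_t update(uint32_t word, T value) {
        return (word & ~kMask) | encode(value);
      }
    };

    const int kSmiValueSize = 31;            // assumed 32-bit tagged Smi
    const int kTypeChangeChecksumBits = 7;

    typedef BitField<int, 0, kSmiValueSize - kTypeChangeChecksumBits> ICTotalCount;
    typedef BitField<int, kSmiValueSize - kTypeChangeChecksumBits,
                     kTypeChangeChecksumBits> OwnTypeChangeChecksum;

    int main() {
      uint32_t storage = 0;
      storage = ICTotalCount::update(storage, 1234);
      storage = OwnTypeChangeChecksum::update(storage, 5);
      assert(ICTotalCount::decode(storage) == 1234);
      assert(OwnTypeChangeChecksum::decode(storage) == 5);
    }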
@@ -7197,7 +7386,7 @@ class String: public HeapObject {
#ifdef V8_HOST_CAN_READ_UNALIGNED
ASSERT(kMaxAsciiCharCode == 0x7F);
const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
- while (chars <= limit - sizeof(uintptr_t)) {
+ while (chars + sizeof(uintptr_t) <= limit) {
if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
return false;
}
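The rewritten loop condition sidesteps pointer underflow: when fewer than sizeof(uintptr_t) bytes remain, computing limit - sizeof(uintptr_t) can produce a pointer before the start of the buffer (undefined behavior), while chars + sizeof(uintptr_t) <= limit stays within bounds. A standalone sketch of the word-at-a-time scan with the same 0x80-per-byte mask; memcpy replaces the cast because this sketch does not assume V8_HOST_CAN_READ_UNALIGNED:

    #include <cstdint>
    #include <cstring>

    // True if every byte in [chars, limit) is 7-bit ASCII.
    bool IsAscii(const char* chars, const char* limit) {
      const uintptr_t kAllBitsSet = ~static_cast<uintptr_t>(0);
      const uintptr_t non_ascii_mask = kAllBitsSet / 0xFF * 0x80;  // 0x80 per byte
      while (chars + sizeof(uintptr_t) <= limit) {  // safe even for short ranges
        uintptr_t word;
        std::memcpy(&word, chars, sizeof(word));
        if (word & non_ascii_mask) return false;
        chars += sizeof(uintptr_t);
      }
      while (chars < limit) {                       // byte-wise tail
        if (static_cast<unsigned char>(*chars) & 0x80) return false;
        ++chars;
      }
      return true;
    }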
@@ -7456,7 +7645,8 @@ class ConsString: public String {
class SlicedString: public String {
public:
inline String* parent();
- inline void set_parent(String* parent);
+ inline void set_parent(String* parent,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline int offset();
inline void set_offset(int offset);
@@ -7759,6 +7949,14 @@ class JSGlobalPropertyCell: public HeapObject {
// Casting.
static inline JSGlobalPropertyCell* cast(Object* obj);
+ static inline JSGlobalPropertyCell* FromValueAddress(Address value) {
+ return cast(FromAddress(value - kValueOffset));
+ }
+
+ inline Address ValueAddress() {
+ return address() + kValueOffset;
+ }
+
#ifdef DEBUG
void JSGlobalPropertyCellVerify();
#endif
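FromValueAddress recovers the cell from the raw address of its value slot by subtracting the slot's offset, the same idea as C's container_of. A self-contained illustration on a plain struct:

    #include <cassert>
    #include <cstddef>

    struct Cell {
      unsigned header;
      unsigned value;  // the interior field whose address is handed out
    };

    // Given the address of cell->value, recover the Cell* itself,
    // mirroring JSGlobalPropertyCell::FromValueAddress.
    Cell* CellFromValueAddress(void* value_address) {
      return reinterpret_cast<Cell*>(
          static_cast<char*>(value_address) - offsetof(Cell, value));
    }

    int main() {
      Cell cell = {0, 42};
      assert(CellFromValueAddress(&cell.value) == &cell);
    }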
@@ -8214,7 +8412,7 @@ class AccessorPair: public Struct {
static inline AccessorPair* cast(Object* obj);
- MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
+ MUST_USE_RESULT MaybeObject* Copy();
Object* get(AccessorComponent component) {
return component == ACCESSOR_GETTER ? getter() : setter();
@@ -8702,8 +8900,6 @@ class ObjectVisitor BASE_EMBEDDED {
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
- virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {}
-
// Visits contiguous arrays of external references (references to the C++
// heap) in the half-open range [start, end). Any or all of the values
// may be modified on return.
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
new file mode 100644
index 0000000000..06018dd1a9
--- /dev/null
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -0,0 +1,127 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "optimizing-compiler-thread.h"
+
+#include "v8.h"
+
+#include "hydrogen.h"
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+
+void OptimizingCompilerThread::Run() {
+#ifdef DEBUG
+ thread_id_ = ThreadId::Current().ToInteger();
+#endif
+ Isolate::SetIsolateThreadLocals(isolate_, NULL);
+
+ int64_t epoch = 0;
+ if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
+
+ while (true) {
+ input_queue_semaphore_->Wait();
+ if (Acquire_Load(&stop_thread_)) {
+ stop_semaphore_->Signal();
+ if (FLAG_trace_parallel_recompilation) {
+ time_spent_total_ = OS::Ticks() - epoch;
+ }
+ return;
+ }
+
+ int64_t compiling_start = 0;
+ if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
+
+ Heap::RelocationLock relocation_lock(isolate_->heap());
+ OptimizingCompiler* optimizing_compiler = NULL;
+ input_queue_.Dequeue(&optimizing_compiler);
+ Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+
+ ASSERT(!optimizing_compiler->info()->closure()->IsOptimized());
+
+ OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+ ASSERT(status != OptimizingCompiler::FAILED);
+ // Prevent an unused-variable error in release mode.
+ USE(status);
+
+ output_queue_.Enqueue(optimizing_compiler);
+ isolate_->stack_guard()->RequestCodeReadyEvent();
+
+ if (FLAG_trace_parallel_recompilation) {
+ time_spent_compiling_ += OS::Ticks() - compiling_start;
+ }
+ }
+}
+
+
+void OptimizingCompilerThread::Stop() {
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ input_queue_semaphore_->Signal();
+ stop_semaphore_->Wait();
+
+ if (FLAG_trace_parallel_recompilation) {
+ double compile_time = static_cast<double>(time_spent_compiling_);
+ double total_time = static_cast<double>(time_spent_total_);
+ double percentage = (compile_time * 100) / total_time;
+ PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
+ }
+}
+
+
+void OptimizingCompilerThread::InstallOptimizedFunctions() {
+ HandleScope handle_scope(isolate_);
+ int functions_installed = 0;
+ while (!output_queue_.IsEmpty()) {
+ OptimizingCompiler* compiler = NULL;
+ output_queue_.Dequeue(&compiler);
+ Compiler::InstallOptimizedCode(compiler);
+ functions_installed++;
+ }
+ if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
+ PrintF(" ** Installed %d function(s).\n", functions_installed);
+ }
+}
+
+
+void OptimizingCompilerThread::QueueForOptimization(
+ OptimizingCompiler* optimizing_compiler) {
+ input_queue_.Enqueue(optimizing_compiler);
+ input_queue_semaphore_->Signal();
+}
+
+#ifdef DEBUG
+bool OptimizingCompilerThread::IsOptimizerThread() {
+ if (!FLAG_parallel_recompilation) return false;
+ return ThreadId::Current().ToInteger() == thread_id_;
+}
+#endif
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
new file mode 100644
index 0000000000..d5627266d0
--- /dev/null
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -0,0 +1,101 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
+#define V8_OPTIMIZING_COMPILER_THREAD_H_
+
+#include "atomicops.h"
+#include "platform.h"
+#include "flags.h"
+#include "unbound-queue.h"
+
+namespace v8 {
+namespace internal {
+
+class HGraphBuilder;
+class OptimizingCompiler;
+
+class OptimizingCompilerThread : public Thread {
+ public:
+ explicit OptimizingCompilerThread(Isolate *isolate) :
+ Thread("OptimizingCompilerThread"),
+ isolate_(isolate),
+ stop_semaphore_(OS::CreateSemaphore(0)),
+ input_queue_semaphore_(OS::CreateSemaphore(0)),
+ time_spent_compiling_(0),
+ time_spent_total_(0) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+ NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+ }
+
+ void Run();
+ void Stop();
+ void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+ void InstallOptimizedFunctions();
+
+ inline bool IsQueueAvailable() {
+ // We don't need a barrier since we have a data dependency right
+ // after.
+ Atomic32 current_length = NoBarrier_Load(&queue_length_);
+
+ // This can be queried only from the execution thread.
+ ASSERT(!IsOptimizerThread());
+ // Since only the execution thread increments queue_length_ and
+ // only one thread can run inside an Isolate at one time, a direct
+ // read doesn't introduce a race -- queue_length_ may have decreased
+ // in the meantime, but not increased.
+ return (current_length < FLAG_parallel_recompilation_queue_length);
+ }
+
+#ifdef DEBUG
+ bool IsOptimizerThread();
+#endif
+
+ ~OptimizingCompilerThread() {
+ delete input_queue_semaphore_;
+ delete stop_semaphore_;
+ }
+
+ private:
+ Isolate* isolate_;
+ Semaphore* stop_semaphore_;
+ Semaphore* input_queue_semaphore_;
+ UnboundQueue<OptimizingCompiler*> input_queue_;
+ UnboundQueue<OptimizingCompiler*> output_queue_;
+ volatile AtomicWord stop_thread_;
+ volatile Atomic32 queue_length_;
+ int64_t time_spent_compiling_;
+ int64_t time_spent_total_;
+
+#ifdef DEBUG
+ int thread_id_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_OPTIMIZING_COMPILER_THREAD_H_
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 7c51b694c8..37e903aac9 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -493,10 +493,10 @@ Parser::FunctionState::FunctionState(Parser* parser,
outer_function_state_(parser->current_function_state_),
outer_scope_(parser->top_scope_),
saved_ast_node_id_(isolate->ast_node_id()),
- factory_(isolate) {
+ factory_(isolate, parser->zone()) {
parser->top_scope_ = scope;
parser->current_function_state_ = this;
- isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
+ isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
}
@@ -532,14 +532,13 @@ Parser::FunctionState::~FunctionState() {
// ----------------------------------------------------------------------------
// Implementation of Parser
-Parser::Parser(Handle<Script> script,
+Parser::Parser(CompilationInfo* info,
int parser_flags,
v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Zone* zone)
- : isolate_(script->GetIsolate()),
- symbol_cache_(pre_data ? pre_data->symbol_count() : 0, zone),
- script_(script),
+ ScriptDataImpl* pre_data)
+ : isolate_(info->isolate()),
+ symbol_cache_(pre_data ? pre_data->symbol_count() : 0, info->zone()),
+ script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
top_scope_(NULL),
@@ -553,7 +552,9 @@ Parser::Parser(Handle<Script> script,
allow_modules_((parser_flags & kAllowModules) != 0),
stack_overflow_(false),
parenthesized_function_(false),
- zone_(zone) {
+ zone_(info->zone()),
+ info_(info) {
+ ASSERT(!script_.is_null());
isolate_->set_ast_node_id(0);
if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
scanner().SetHarmonyScoping(true);
@@ -564,16 +565,17 @@ Parser::Parser(Handle<Script> script,
}
-FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
- ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
-
+FunctionLiteral* Parser::ParseProgram() {
+ ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
+ int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
source->TryFlatten();
+ FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
// Notice that the stream is destroyed at the end of the branch block.
// The last line of the blocks can't be moved outside, even though they're
@@ -581,12 +583,27 @@ FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(info, source, &zone_scope);
+ result = DoParseProgram(info(), source, &zone_scope);
} else {
GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(info, source, &zone_scope);
+ result = DoParseProgram(info(), source, &zone_scope);
+ }
+
+ if (FLAG_trace_parse && result != NULL) {
+ double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ if (info()->is_eval()) {
+ PrintF("[parsing eval");
+ } else if (info()->script()->name()->IsString()) {
+ String* name = String::cast(info()->script()->name());
+ SmartArrayPointer<char> name_chars = name->ToCString();
+ PrintF("[parsing script: %s", *name_chars);
+ } else {
+ PrintF("[parsing script");
+ }
+ PrintF(" - took %0.3f ms]\n", ms);
}
+ return result;
}
@@ -598,27 +615,29 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (pre_data_ != NULL) pre_data_->Initialize();
// Compute the parsing mode.
- mode_ = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
+ Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
+ if (allow_natives_syntax_ || extension_ != NULL) mode = PARSE_EAGERLY;
+ ParsingModeScope parsing_mode(this, mode);
Handle<String> no_name = isolate()->factory()->empty_symbol();
FunctionLiteral* result = NULL;
{ Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
+ if (!info->context().is_null()) {
+ scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
+ }
if (info->is_eval()) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- if (!info->is_global() && (shared.is_null() || shared->is_function())) {
- scope = Scope::DeserializeScopeChain(*info->calling_context(), scope,
- zone());
- }
if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
scope = NewScope(scope, EVAL_SCOPE);
}
+ } else if (info->is_global()) {
+ scope = NewScope(scope, GLOBAL_SCOPE);
}
scope->set_start_position(0);
scope->set_end_position(source->length());
- FunctionState function_state(this, scope, isolate());
+
+ FunctionState function_state(this, scope, isolate()); // Enters 'scope'.
top_scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
@@ -645,7 +664,8 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION,
- FunctionLiteral::kGlobalOrEval);
+ FunctionLiteral::kGlobalOrEval,
+ FunctionLiteral::kNotParenthesized);
result->set_ast_properties(factory()->visitor()->ast_properties());
} else if (stack_overflow_) {
isolate()->StackOverflow();
@@ -662,36 +682,42 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
}
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
- ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
+FunctionLiteral* Parser::ParseLazy() {
+ ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
+ int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ Handle<SharedFunctionInfo> shared_info = info()->shared_info();
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
// Initialize parser state.
source->TryFlatten();
+ FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source),
shared_info->start_position(),
shared_info->end_position());
- FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
- return result;
+ result = ParseLazy(&stream, &zone_scope);
} else {
GenericStringUtf16CharacterStream stream(source,
shared_info->start_position(),
shared_info->end_position());
- FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
- return result;
+ result = ParseLazy(&stream, &zone_scope);
}
+
+ if (FLAG_trace_parse && result != NULL) {
+ double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
+ PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
+ }
+ return result;
}
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
- Utf16CharacterStream* source,
+FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
ZoneScope* zone_scope) {
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
+ Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
@@ -700,7 +726,7 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
fni_->PushEnclosingName(name);
- mode_ = PARSE_EAGERLY;
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
// Place holder for the result.
FunctionLiteral* result = NULL;
@@ -708,16 +734,16 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
{
// Parse the function literal.
Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
- info->SetGlobalScope(scope);
- if (!info->closure().is_null()) {
- scope = Scope::DeserializeScopeChain(info->closure()->context(), scope,
+ info()->SetGlobalScope(scope);
+ if (!info()->closure().is_null()) {
+ scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
FunctionState function_state(this, scope, isolate());
- ASSERT(scope->language_mode() != STRICT_MODE || !info->is_classic_mode());
+ ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
ASSERT(scope->language_mode() != EXTENDED_MODE ||
- info->is_extended_mode());
- ASSERT(info->language_mode() == shared_info->language_mode());
+ info()->is_extended_mode());
+ ASSERT(info()->language_mode() == shared_info->language_mode());
scope->SetLanguageMode(shared_info->language_mode());
FunctionLiteral::Type type = shared_info->is_expression()
? (shared_info->is_anonymous()
@@ -1246,12 +1272,10 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
}
-Block* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
+Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
// ModuleDeclaration:
// 'module' Identifier Module
- // Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true, zone());
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
@@ -1275,10 +1299,11 @@ Block* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
}
#endif
- // TODO(rossberg): Add initialization statement to block.
-
if (names) names->Add(name, zone());
- return block;
+ if (module->body() == NULL)
+ return factory()->NewEmptyStatement();
+ else
+ return module->body();
}
@@ -1314,7 +1339,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
// '{' ModuleElement '}'
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(NULL, 16, false, zone());
+ Block* body = factory()->NewBlock(NULL, 16, false);
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
@@ -1344,16 +1369,23 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
scope->set_end_position(scanner().location().end_pos);
body->set_scope(scope);
- // Instance objects have to be created ahead of time (before code generation
- // linking them) because of potentially cyclic references between them.
- // We create them here, to avoid another pass over the AST.
+ // Check that all exports are bound.
Interface* interface = scope->interface();
+ for (Interface::Iterator it = interface->iterator();
+ !it.done(); it.Advance()) {
+ if (scope->LocalLookup(it.name()) == NULL) {
+ Handle<String> name(it.name());
+ ReportMessage("module_export_undefined",
+ Vector<Handle<String> >(&name, 1));
+ *ok = false;
+ return NULL;
+ }
+ }
+
interface->MakeModule(ok);
- ASSERT(ok);
- interface->MakeSingleton(Isolate::Current()->factory()->NewJSModule(), ok);
- ASSERT(ok);
+ ASSERT(*ok);
interface->Freeze(ok);
- ASSERT(ok);
+ ASSERT(*ok);
return factory()->NewModuleLiteral(body, interface);
}
@@ -1402,8 +1434,8 @@ Module* Parser::ParseModuleVariable(bool* ok) {
PrintF("# Module variable %s ", name->ToAsciiArray());
#endif
VariableProxy* proxy = top_scope_->NewUnresolved(
- factory(), name, scanner().location().beg_pos,
- Interface::NewModule(zone()));
+ factory(), name, Interface::NewModule(zone()),
+ scanner().location().beg_pos);
return factory()->NewModuleVariable(proxy);
}
@@ -1424,10 +1456,12 @@ Module* Parser::ParseModuleUrl(bool* ok) {
Module* result = factory()->NewModuleUrl(symbol);
Interface* interface = result->interface();
- interface->MakeSingleton(Isolate::Current()->factory()->NewJSModule(), ok);
- ASSERT(ok);
interface->Freeze(ok);
- ASSERT(ok);
+ ASSERT(*ok);
+ // Create dummy scope to avoid errors as long as the feature isn't finished.
+ Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ interface->Unify(scope->interface(), zone(), ok);
+ ASSERT(*ok);
return result;
}
@@ -1468,7 +1502,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
// Generate a separate declaration for each identifier.
// TODO(ES6): once we implement destructuring, make that one declaration.
- Block* block = factory()->NewBlock(NULL, 1, true, zone());
+ Block* block = factory()->NewBlock(NULL, 1, true);
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1491,7 +1525,6 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
Declaration* declaration =
factory()->NewImportDeclaration(proxy, module, top_scope_);
Declare(declaration, true, CHECK_OK);
- // TODO(rossberg): Add initialization statement to block.
}
return block;
@@ -1683,7 +1716,7 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// one must take great care not to treat it as a
// fall-through. It is much easier just to wrap the entire
// try-statement in a statement block and put the labels there
- Block* result = factory()->NewBlock(labels, 1, false, zone());
+ Block* result = factory()->NewBlock(labels, 1, false);
Target target(&this->target_stack_, result);
TryStatement* statement = ParseTryStatement(CHECK_OK);
if (statement) {
@@ -1732,7 +1765,7 @@ VariableProxy* Parser::NewUnresolved(
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
return DeclarationScope(mode)->NewUnresolved(
- factory(), name, scanner().location().beg_pos, interface);
+ factory(), name, interface, scanner().location().beg_pos);
}
@@ -1743,7 +1776,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
Scope* declaration_scope = DeclarationScope(mode);
Variable* var = NULL;
- // If a function scope exists, then we can statically declare this
+ // If a suitable scope exists, then we can statically declare this
// variable and also set its mode. In any case, a Declaration node
// will be added to the scope so that the declaration can be added
// to the corresponding activation frame at runtime if necessary.
@@ -1751,56 +1784,58 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// to the calling function context.
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
- // Also for block scoped let/const bindings the variable can be
- // statically declared.
if (declaration_scope->is_function_scope() ||
declaration_scope->is_strict_or_extended_eval_scope() ||
declaration_scope->is_block_scope() ||
declaration_scope->is_module_scope() ||
- declaration->AsModuleDeclaration() != NULL) {
- // Declare the variable in the function scope.
- var = declaration_scope->LocalLookup(name);
+ declaration_scope->is_global_scope()) {
+ // Declare the variable in the declaration scope.
+ // For the global scope, we have to check for collisions with earlier
+ // (i.e., enclosing) global scopes, to maintain the illusion of a single
+ // global scope.
+ var = declaration_scope->is_global_scope()
+ ? declaration_scope->Lookup(name)
+ : declaration_scope->LocalLookup(name);
if (var == NULL) {
// Declare the name.
var = declaration_scope->DeclareLocal(
name, mode, declaration->initialization(), proxy->interface());
- } else {
+ } else if ((mode != VAR || var->mode() != VAR) &&
+ (!declaration_scope->is_global_scope() ||
+ IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode()))) {
// The name was declared in this scope before; check for conflicting
// re-declarations. We have a conflict if either of the declarations is
- // not a var. There is similar code in runtime.cc in the Declare
+ // not a var (in the global scope, we also have to ignore legacy const for
+ // compatibility). There is similar code in runtime.cc in the Declare
// functions. The function CheckNonConflictingScope checks for conflicting
// var and let bindings from different scopes whereas this is a check for
// conflicting declarations within the same scope. This check also covers
+ // the special case
//
// function () { let x; { var x; } }
//
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
- if ((mode != VAR) || (var->mode() != VAR)) {
- // We only have vars, consts and lets in declarations.
- ASSERT(var->mode() == VAR ||
- var->mode() == CONST ||
- var->mode() == CONST_HARMONY ||
- var->mode() == LET);
- if (is_extended_mode()) {
- // In harmony mode we treat re-declarations as early errors. See
- // ES5 16 for a definition of early errors.
- SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Variable", *c_string };
- Vector<const char*> args(elms, 2);
- ReportMessage("redeclaration", args);
- *ok = false;
- return;
- }
- const char* type = (var->mode() == VAR)
- ? "var" : var->is_const_mode() ? "const" : "let";
- Handle<String> type_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
- Expression* expression =
- NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
- type_string, name);
- declaration_scope->SetIllegalRedeclaration(expression);
+ ASSERT(IsDeclaredVariableMode(var->mode()));
+ if (is_extended_mode()) {
+ // In harmony mode we treat re-declarations as early errors. See
+ // ES5 16 for a definition of early errors.
+ SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
+ const char* elms[2] = { "Variable", *c_string };
+ Vector<const char*> args(elms, 2);
+ ReportMessage("redeclaration", args);
+ *ok = false;
+ return;
}
+ const char* type =
+ (var->mode() == VAR) ? "var" : var->is_const_mode() ? "const" : "let";
+ Handle<String> type_string =
+ isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
+ Expression* expression =
+ NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
+ type_string, name);
+ declaration_scope->SetIllegalRedeclaration(expression);
}
}
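The rewritten branch boils the conflict rule down to: a re-declaration is an error unless both bindings are plain vars, with the global scope additionally tolerating non-lexical re-declarations for legacy-const compatibility. A sketch of just that predicate, modeling the scope as a simple name-to-mode map (mode names here are illustrative):

    #include <map>
    #include <string>

    enum VariableMode { VAR, CONST_LEGACY, LET, CONST_HARMONY };

    bool IsLexicalVariableMode(VariableMode mode) {
      return mode == LET || mode == CONST_HARMONY;
    }

    // True if re-declaring |name| with |mode| conflicts with an existing
    // binding, following the condition in Parser::Declare above.
    bool Conflicts(const std::map<std::string, VariableMode>& scope,
                   bool is_global_scope,
                   const std::string& name, VariableMode mode) {
      std::map<std::string, VariableMode>::const_iterator it = scope.find(name);
      if (it == scope.end()) return false;               // first declaration
      VariableMode old_mode = it->second;
      if (mode == VAR && old_mode == VAR) return false;  // var-on-var is fine
      // In the global scope, only lexical bindings conflict.
      if (is_global_scope &&
          !IsLexicalVariableMode(mode) && !IsLexicalVariableMode(old_mode)) {
        return false;
      }
      return true;
    }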
@@ -1822,8 +1857,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// Runtime::DeclareContextSlot() calls.
declaration_scope->AddDeclaration(declaration);
- if ((mode == CONST || mode == CONST_HARMONY) &&
- declaration_scope->is_global_scope()) {
+ if (mode == CONST && declaration_scope->is_global_scope()) {
// For global const variables we bind the proxy to a variable.
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
@@ -1948,7 +1982,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
- VariableProxy* proxy = NewUnresolved(name, VAR);
+ VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, VAR, top_scope_);
Declare(declaration, true, CHECK_OK);
@@ -1974,10 +2008,13 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
FunctionLiteral::DECLARATION,
CHECK_OK);
// Even if we're not at the top-level of the global or a function
- // scope, we treat is as such and introduce the function with it's
+ // scope, we treat it as such and introduce the function with its
// initial value upon entering the corresponding scope.
- VariableMode mode = is_extended_mode() ? LET : VAR;
- VariableProxy* proxy = NewUnresolved(name, mode);
+ // In extended mode, a function behaves as a lexical binding, except in the
+ // global scope.
+ VariableMode mode =
+ is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
+ VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
Declare(declaration, true, CHECK_OK);
@@ -1996,7 +2033,7 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
// (ECMA-262, 3rd, 12.2)
//
// Construct block expecting 16 statements.
- Block* result = factory()->NewBlock(labels, 16, false, zone());
+ Block* result = factory()->NewBlock(labels, 16, false);
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
@@ -2019,7 +2056,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// '{' BlockElement* '}'
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(labels, 16, false, zone());
+ Block* body = factory()->NewBlock(labels, 16, false);
Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
@@ -2176,7 +2213,7 @@ Block* Parser::ParseVariableDeclarations(
// is inside an initializer block, it is ignored.
//
// Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true, zone());
+ Block* block = factory()->NewBlock(NULL, 1, true);
int nvars = 0; // the number of variables declared
Handle<String> name;
do {
@@ -2209,7 +2246,9 @@ Block* Parser::ParseVariableDeclarations(
// For let/const declarations in harmony mode, we can also immediately
// pre-resolve the proxy because it resides in the same scope as the
// declaration.
- VariableProxy* proxy = NewUnresolved(name, mode);
+ Interface* interface =
+ is_const ? Interface::NewConst() : Interface::NewValue();
+ VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, mode, top_scope_);
Declare(declaration, mode != VAR, CHECK_OK);
@@ -2297,7 +2336,8 @@ Block* Parser::ParseVariableDeclarations(
// declaration statement has been executed. This is important in
// browsers where the global object (window) has lots of
// properties defined in prototype objects.
- if (initialization_scope->is_global_scope()) {
+ if (initialization_scope->is_global_scope() &&
+ !IsLexicalVariableMode(mode)) {
// Compute the arguments for the runtime call.
ZoneList<Expression*>* arguments =
new(zone()) ZoneList<Expression*>(3, zone());
@@ -2370,7 +2410,7 @@ Block* Parser::ParseVariableDeclarations(
// if they are inside a 'with' statement - they may change a 'with' object
// property).
VariableProxy* proxy =
- initialization_scope->NewUnresolved(factory(), name);
+ initialization_scope->NewUnresolved(factory(), name, interface);
Assignment* assignment =
factory()->NewAssignment(init_op, proxy, value, position);
block->AddStatement(factory()->NewExpressionStatement(assignment),
@@ -2787,7 +2827,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
TryCatchStatement* statement = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block);
statement->set_escaping_targets(try_collector.targets());
- try_block = factory()->NewBlock(NULL, 1, false, zone());
+ try_block = factory()->NewBlock(NULL, 1, false);
try_block->AddStatement(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
@@ -2878,12 +2918,16 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
for_scope->set_start_position(scanner().location().beg_pos);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
+ bool is_const = peek() == Token::CONST;
Handle<String> name;
Block* variable_statement =
ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
if (peek() == Token::IN && !name.is_null()) {
- VariableProxy* each = top_scope_->NewUnresolved(factory(), name);
+ Interface* interface =
+ is_const ? Interface::NewConst() : Interface::NewValue();
+ VariableProxy* each =
+ top_scope_->NewUnresolved(factory(), name, interface);
ForInStatement* loop = factory()->NewForInStatement(labels);
Target target(&this->target_stack_, loop);
@@ -2893,7 +2937,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
loop->Initialize(each, enumerable, body);
- Block* result = factory()->NewBlock(NULL, 2, false, zone());
+ Block* result = factory()->NewBlock(NULL, 2, false);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
top_scope_ = saved_scope;
@@ -2930,7 +2974,9 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// implementing stack allocated block scoped variables.
Variable* temp = top_scope_->DeclarationScope()->NewTemporary(name);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- VariableProxy* each = top_scope_->NewUnresolved(factory(), name);
+ Interface* interface = Interface::NewValue();
+ VariableProxy* each =
+ top_scope_->NewUnresolved(factory(), name, interface);
ForInStatement* loop = factory()->NewForInStatement(labels);
Target target(&this->target_stack_, loop);
@@ -2939,7 +2985,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- Block* body_block = factory()->NewBlock(NULL, 3, false, zone());
+ Block* body_block = factory()->NewBlock(NULL, 3, false);
Assignment* assignment = factory()->NewAssignment(
Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
Statement* assignment_statement =
@@ -3028,7 +3074,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// for (; c; n) b
// }
ASSERT(init != NULL);
- Block* result = factory()->NewBlock(NULL, 2, false, zone());
+ Block* result = factory()->NewBlock(NULL, 2, false);
result->AddStatement(init, zone());
result->AddStatement(loop, zone());
result->set_scope(for_scope);
@@ -3412,6 +3458,12 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// should not point to the closing brace otherwise it will intersect
// with positions recorded for function literal and confuse debugger.
pos = scanner().peek_location().beg_pos;
+ // Also the trailing parentheses are a hint that the function will
+ // be called immediately. If we happen to have parsed a preceding
+ // function literal eagerly, we can also compile it eagerly.
+ if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+ result->AsFunctionLiteral()->set_parenthesized();
+ }
}
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
@@ -3665,7 +3717,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
#endif
Interface* interface = Interface::NewUnknown(zone());
result = top_scope_->NewUnresolved(
- factory(), name, scanner().location().beg_pos, interface);
+ factory(), name, interface, scanner().location().beg_pos);
break;
}
@@ -4356,6 +4408,7 @@ class SingletonLogger : public ParserRecorder {
int end,
const char* message,
const char* argument_opt) {
+ if (has_error_) return;
has_error_ = true;
start_ = start;
end_ = end;
@@ -4450,6 +4503,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
Handle<FixedArray> this_property_assignments;
FunctionLiteral::ParameterFlag duplicate_parameters =
FunctionLiteral::kNoDuplicateParameters;
+ FunctionLiteral::IsParenthesizedFlag parenthesized = parenthesized_function_
+ ? FunctionLiteral::kIsParenthesized
+ : FunctionLiteral::kNotParenthesized;
AstProperties ast_properties;
// Parse function body.
{ FunctionState function_state(this, scope, isolate());
@@ -4510,7 +4566,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
fvar = new(zone()) Variable(top_scope_,
function_name, fvar_mode, true /* is valid LHS */,
- Variable::NORMAL, kCreatedInitialized);
+ Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration =
factory()->NewVariableDeclaration(proxy, fvar_mode, top_scope_);
@@ -4521,7 +4577,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// The heuristics are:
// - It must not have been prohibited by the caller to Parse (some callers
// need a full AST).
- // - The outer scope must be trivial (only global variables in scope).
+ // - The outer scope must allow lazy compilation of inner functions.
// - The function mustn't be a function expression with an open parenthesis
// before; we consider that a hint that the function will be called
// immediately, and it would be a waste of time to make it lazily
@@ -4529,8 +4585,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// These are all things we can know at this point, without looking at the
// function itself.
bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- top_scope_->outer_scope()->is_global_scope() &&
- top_scope_->HasTrivialOuterContext() &&
+ top_scope_->AllowsLazyCompilation() &&
!parenthesized_function_);
parenthesized_function_ = false; // The bit was set for this function only.
@@ -4599,17 +4654,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
}
if (!is_lazily_compiled) {
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
body = new(zone()) ZoneList<Statement*>(8, zone());
if (fvar != NULL) {
- VariableProxy* fproxy =
- top_scope_->NewUnresolved(factory(), function_name);
+ VariableProxy* fproxy = top_scope_->NewUnresolved(
+ factory(), function_name, Interface::NewConst());
fproxy->BindTo(fvar);
body->Add(factory()->NewExpressionStatement(
factory()->NewAssignment(fvar_init_op,
fproxy,
factory()->NewThisFunction(),
RelocInfo::kNoPosition)),
- zone());
+ zone());
}
ParseSourceElements(body, Token::RBRACE, false, CHECK_OK);
@@ -4689,7 +4745,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
num_parameters,
duplicate_parameters,
type,
- FunctionLiteral::kIsFunction);
+ FunctionLiteral::kIsFunction,
+ parenthesized);
function_literal->set_function_token_position(function_token_position);
function_literal->set_ast_properties(&ast_properties);
@@ -4761,6 +4818,13 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
return NULL;
}
+ // Check that the function is defined if it's an inline runtime call.
+ if (function == NULL && name->Get(0) == '_') {
+ ReportMessage("not_defined", Vector<Handle<String> >(&name, 1));
+ *ok = false;
+ return NULL;
+ }
+
// We have a valid intrinsics call or a call to a builtin.
return factory()->NewCallRuntime(name, function, args);
}
@@ -5069,8 +5133,10 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
RegExpParser::RegExpParser(FlatStringReader* in,
Handle<String>* error,
- bool multiline)
+ bool multiline,
+ Zone* zone)
: isolate_(Isolate::Current()),
+ zone_(zone),
error_(error),
captures_(NULL),
in_(in),
@@ -5101,7 +5167,7 @@ void RegExpParser::Advance() {
StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
ReportError(CStrVector(Isolate::kStackOverflowMessage));
- } else if (isolate()->zone()->excess_allocation()) {
+ } else if (zone()->excess_allocation()) {
ReportError(CStrVector("Regular expression too large"));
} else {
current_ = in()->Get(next_pos_);
@@ -5964,31 +6030,6 @@ static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
}
-// Preparse, but only collect data that is immediately useful,
-// even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
- v8::Extension* extension,
- int flags) {
- bool allow_lazy = FLAG_lazy && (extension == NULL);
- if (!allow_lazy) {
- // Partial preparsing is only about lazily compiled functions.
- // If we don't allow lazy compilation, the log data will be empty.
- return NULL;
- }
- flags |= kAllowLazy;
- PartialParserRecorder recorder;
- int source_length = source->length();
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUtf16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source_length);
- return DoPreParse(&stream, flags, &recorder);
- } else {
- GenericStringUtf16CharacterStream stream(source, 0, source_length);
- return DoPreParse(&stream, flags, &recorder);
- }
-}
-
-
ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
v8::Extension* extension,
int flags) {
@@ -6003,9 +6044,10 @@ ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
bool RegExpParser::ParseRegExp(FlatStringReader* input,
bool multiline,
- RegExpCompileData* result) {
+ RegExpCompileData* result,
+ Zone* zone) {
ASSERT(result != NULL);
- RegExpParser parser(input, &result->error, multiline);
+ RegExpParser parser(input, &result->error, multiline, zone);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
ASSERT(tree == NULL);
@@ -6026,7 +6068,6 @@ bool RegExpParser::ParseRegExp(FlatStringReader* input,
bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
ASSERT(info->function() == NULL);
FunctionLiteral* result = NULL;
- Handle<Script> script = info->script();
ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
if (!info->is_native() && FLAG_harmony_scoping) {
// Harmony scoping is requested.
@@ -6041,16 +6082,15 @@ bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
}
if (info->is_lazy()) {
ASSERT(!info->is_eval());
- Parser parser(script, parsing_flags, NULL, NULL, info->isolate()->zone());
+ Parser parser(info, parsing_flags, NULL, NULL);
if (info->shared_info()->is_function()) {
- result = parser.ParseLazy(info);
+ result = parser.ParseLazy();
} else {
- result = parser.ParseProgram(info);
+ result = parser.ParseProgram();
}
} else {
ScriptDataImpl* pre_data = info->pre_parse_data();
- Parser parser(script, parsing_flags, info->extension(), pre_data,
- info->isolate()->zone());
+ Parser parser(info, parsing_flags, info->extension(), pre_data);
if (pre_data != NULL && pre_data->has_error()) {
Scanner::Location loc = pre_data->MessageLocation();
const char* message = pre_data->BuildMessage();
@@ -6063,7 +6103,7 @@ bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
DeleteArray(args.start());
ASSERT(info->isolate()->has_pending_exception());
} else {
- result = parser.ParseProgram(info);
+ result = parser.ParseProgram();
}
}
info->SetFunction(result);
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 773d59a5e2..1ab7a141be 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -175,12 +175,6 @@ class ParserApi {
static ScriptDataImpl* PreParse(Utf16CharacterStream* source,
v8::Extension* extension,
int flags);
-
- // Preparser that only does preprocessing that makes sense if only used
- // immediately after.
- static ScriptDataImpl* PartialPreParse(Handle<String> source,
- v8::Extension* extension,
- int flags);
};
// ----------------------------------------------------------------------------
@@ -306,11 +300,13 @@ class RegExpParser {
public:
RegExpParser(FlatStringReader* in,
Handle<String>* error,
- bool multiline_mode);
+ bool multiline_mode,
+ Zone* zone);
static bool ParseRegExp(FlatStringReader* input,
bool multiline,
- RegExpCompileData* result);
+ RegExpCompileData* result,
+ Zone* zone);
RegExpTree* ParsePattern();
RegExpTree* ParseDisjunction();
@@ -398,7 +394,7 @@ class RegExpParser {
};
Isolate* isolate() { return isolate_; }
- Zone* zone() const { return isolate_->zone(); }
+ Zone* zone() const { return zone_; }
uc32 current() { return current_; }
bool has_more() { return has_more_; }
@@ -408,6 +404,7 @@ class RegExpParser {
void ScanForCaptures();
Isolate* isolate_;
+ Zone* zone_;
Handle<String>* error_;
ZoneList<RegExpCapture*>* captures_;
FlatStringReader* in_;
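Threading an explicit Zone* through RegExpParser instead of reaching for isolate_->zone() is the arena-allocation pattern: every parser allocation comes from one caller-owned region, and excess_allocation() gives a cheap size cap for the "Regular expression too large" bailout seen earlier. A toy zone in that spirit (real zones carve allocations out of large contiguous segments; this sketch just tracks a running total):

    #include <cstddef>
    #include <vector>

    class Zone {
     public:
      explicit Zone(size_t limit) : allocated_(0), limit_(limit) {}
      ~Zone() {  // everything is freed at once when the zone dies
        for (size_t i = 0; i < blocks_.size(); ++i) delete[] blocks_[i];
      }

      void* New(size_t size) {
        allocated_ += size;
        char* block = new char[size];
        blocks_.push_back(block);
        return block;
      }

      // Mirrors zone()->excess_allocation() in RegExpParser::Advance.
      bool excess_allocation() const { return allocated_ > limit_; }

     private:
      std::vector<char*> blocks_;
      size_t allocated_;
      size_t limit_;
    };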
@@ -431,19 +428,18 @@ class SingletonLogger;
class Parser {
public:
- Parser(Handle<Script> script,
+ Parser(CompilationInfo* info,
int parsing_flags, // Combination of ParsingFlags
v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Zone* zone);
+ ScriptDataImpl* pre_data);
virtual ~Parser() {
delete reusable_preparser_;
reusable_preparser_ = NULL;
}
// Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram(CompilationInfo* info);
- FunctionLiteral* ParseLazy(CompilationInfo* info);
+ FunctionLiteral* ParseProgram();
+ FunctionLiteral* ParseLazy();
void ReportMessageAt(Scanner::Location loc,
const char* message,
@@ -540,15 +536,28 @@ class Parser {
AstNodeFactory<AstConstructionVisitor> factory_;
};
+ class ParsingModeScope BASE_EMBEDDED {
+ public:
+ ParsingModeScope(Parser* parser, Mode mode)
+ : parser_(parser),
+ old_mode_(parser->mode()) {
+ parser_->mode_ = mode;
+ }
+ ~ParsingModeScope() {
+ parser_->mode_ = old_mode_;
+ }
+ private:
+ Parser* parser_;
+ Mode old_mode_;
+ };
-
- FunctionLiteral* ParseLazy(CompilationInfo* info,
- Utf16CharacterStream* source,
+ FunctionLiteral* ParseLazy(Utf16CharacterStream* source,
ZoneScope* zone_scope);
Isolate* isolate() { return isolate_; }
Zone* zone() const { return zone_; }
+ CompilationInfo* info() const { return info_; }
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(CompilationInfo* info,
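ParsingModeScope above is plain RAII save-and-restore: the constructor stashes the parser's current mode and installs the new one, and the destructor restores the old value on every exit path, which is what lets DoParseProgram and ParseLazy switch modes without manual cleanup. The generic form of the idiom:

    // Generic save-and-restore scope, the pattern behind ParsingModeScope.
    template <typename T>
    class ScopedValue {
     public:
      ScopedValue(T* slot, T new_value) : slot_(slot), old_value_(*slot) {
        *slot_ = new_value;
      }
      ~ScopedValue() { *slot_ = old_value_; }  // restored even on early return

     private:
      T* slot_;
      T old_value_;
    };

    // Usage, analogous to the parser forcing eager parsing for one region:
    //   Mode mode = PARSE_LAZILY;
    //   { ScopedValue<Mode> eager(&mode, PARSE_EAGERLY);  /* ... */ }
    //   // mode is PARSE_LAZILY again here.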
@@ -570,7 +579,7 @@ class Parser {
return top_scope_->is_extended_mode();
}
Scope* DeclarationScope(VariableMode mode) {
- return (mode == LET || mode == CONST_HARMONY)
+ return IsLexicalVariableMode(mode)
? top_scope_ : top_scope_->DeclarationScope();
}
@@ -584,7 +593,7 @@ class Parser {
void* ParseSourceElements(ZoneList<Statement*>* processor,
int end_token, bool is_eval, bool* ok);
Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
- Block* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
+ Statement* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
Module* ParseModule(bool* ok);
Module* ParseModuleLiteral(bool* ok);
Module* ParseModulePath(bool* ok);
@@ -769,7 +778,7 @@ class Parser {
// Parser support
VariableProxy* NewUnresolved(Handle<String> name,
VariableMode mode,
- Interface* interface = Interface::NewValue());
+ Interface* interface);
void Declare(Declaration* declaration, bool resolve, bool* ok);
bool TargetStackContainsLabel(Handle<String> label);
@@ -837,6 +846,7 @@ class Parser {
bool parenthesized_function_;
Zone* zone_;
+ CompilationInfo* info_;
friend class BlockState;
friend class FunctionState;
};
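
The ParsingModeScope added above is a plain RAII guard: its constructor saves the parser's current mode and installs the new one, and its destructor restores the old mode, so every exit path from a parsing routine unwinds correctly. A minimal standalone sketch of the same pattern (class and member names here are illustrative, not V8's):

    #include <cassert>

    enum Mode { PARSE_LAZILY, PARSE_EAGERLY };

    class Parser {
     public:
      Parser() : mode_(PARSE_LAZILY) {}
      Mode mode() const { return mode_; }

      void ParseFunctionEagerly() {
        // The guard flips the mode for the dynamic extent of this call only.
        ModeScope scope(this, PARSE_EAGERLY);
        assert(mode_ == PARSE_EAGERLY);
        // Any return from here restores the previous mode automatically.
      }

     private:
      class ModeScope {
       public:
        ModeScope(Parser* parser, Mode mode)
            : parser_(parser), old_mode_(parser->mode_) {
          parser_->mode_ = mode;
        }
        ~ModeScope() { parser_->mode_ = old_mode_; }
       private:
        Parser* parser_;
        Mode old_mode_;
      };

      Mode mode_;
    };

    int main() {
      Parser parser;
      parser.ParseFunctionEagerly();
      assert(parser.mode() == PARSE_LAZILY);  // restored on scope exit
      return 0;
    }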
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index f6db423e42..606d10236e 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -53,6 +53,13 @@
#include <errno.h>
#include <stdarg.h>
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library's <signal.h> didn't define the type.
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+ defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h>
+#endif
+
#undef MAP_TYPE
#include "v8.h"
@@ -132,6 +139,9 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
// facility is universally available on the ARM architectures,
// so it's up to individual OSes to provide such.
switch (feature) {
+ case VFP2:
+ search_string = "vfp";
+ break;
case VFP3:
search_string = "vfpv3";
break;
@@ -161,48 +171,43 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
}
-// Simple helper function to detect whether the C code is compiled with
-// option -mfloat-abi=hard. The register d0 is loaded with 1.0 and the register
-// pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is pased to GCC then
-// calling this will return 1.0 and otherwise 0.0.
-static void ArmUsingHardFloatHelper() {
- asm("mov r0, #0":::"r0");
-#if defined(__VFP_FP__) && !defined(__SOFTFP__)
- // Load 0x3ff00000 into r1 using instructions available in both ARM
- // and Thumb mode.
- asm("mov r1, #3":::"r1");
- asm("mov r2, #255":::"r2");
- asm("lsl r1, r1, #8":::"r1");
- asm("orr r1, r1, r2":::"r1");
- asm("lsl r1, r1, #20":::"r1");
- // For vmov d0, r0, r1 use ARM mode.
-#ifdef __thumb__
- asm volatile(
- "@ Enter ARM Mode \n\t"
- " adr r3, 1f \n\t"
- " bx r3 \n\t"
- " .ALIGN 4 \n\t"
- " .ARM \n"
- "1: vmov d0, r0, r1 \n\t"
- "@ Enter THUMB Mode\n\t"
- " adr r3, 2f+1 \n\t"
- " bx r3 \n\t"
- " .THUMB \n"
- "2: \n\t":::"r3");
+bool OS::ArmUsingHardFloat() {
+ // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+ // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+ // We use these as well as a couple of other defines to statically determine
+  // what FP ABI is used.
+ // GCC versions 4.4 and below don't support hard-fp.
+ // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+ // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+ return true;
#else
- asm("vmov d0, r0, r1");
-#endif // __thumb__
-#endif // defined(__VFP_FP__) && !defined(__SOFTFP__)
- asm("mov r1, #0":::"r1");
-}
+ return false;
+#endif
+
+#elif GCC_VERSION < 40500
+ return false;
+#else
+#if defined(__ARM_PCS_VFP)
+ return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP) || !defined(__VFP_FP__)
+ return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for." \
+ "Please report it on this issue" \
+ "http://code.google.com/p/v8/issues/detail?id=2140"
-bool OS::ArmUsingHardFloat() {
- // Cast helper function from returning void to returning double.
- typedef double (*F)();
- F f = FUNCTION_CAST<F>(FUNCTION_ADDR(ArmUsingHardFloatHelper));
- return f() == 1.0;
+#endif
+#endif
+#undef GCC_VERSION
}
+
#endif // def __arm__
@@ -507,9 +512,6 @@ void OS::LogSharedLibraryAddresses() {
}
-static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
-
-
void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
@@ -520,7 +522,7 @@ void OS::SignalCodeMovingGC() {
// by the kernel and allows us to synchronize V8 code log and the
// kernel log.
int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(kGCFakeMmap, "w+");
+ FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
void* addr = mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_EXEC,
@@ -909,32 +911,30 @@ Semaphore* OS::CreateSemaphore(int count) {
}
-#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
-// Android runs a fairly new Linux kernel, so signal info is there,
-// but the C library doesn't have the structs defined.
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+// Not all versions of Android's C library provide ucontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See http://code.google.com/p/android/issues/detail?id=34784
+
+#if defined(__arm__)
-struct sigcontext {
- uint32_t trap_no;
- uint32_t error_code;
- uint32_t oldmask;
- uint32_t gregs[16];
- uint32_t arm_cpsr;
- uint32_t fault_address;
-};
-typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
+
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
- __sigset_t uc_sigmask;
+ // Other fields are not used by V8, don't define them here.
} ucontext_t;
-enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
-#elif !defined(__GLIBC__) && defined(__mips__)
+#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
-struct sigcontext {
+typedef struct {
uint32_t regmask;
uint32_t status;
uint64_t pc;
@@ -953,44 +953,44 @@ struct sigcontext {
uint32_t lo2;
uint32_t hi3;
uint32_t lo3;
-};
-typedef uint32_t __sigset_t;
-typedef struct sigcontext mcontext_t;
+} mcontext_t;
+
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
- __sigset_t uc_sigmask;
+ // Other fields are not used by V8, don't define them here.
} ucontext_t;
-#elif !defined(__GLIBC__) && defined(__i386__)
+#elif defined(__i386__)
// x86 version for Android.
-struct sigcontext {
+typedef struct {
uint32_t gregs[19];
void* fpregs;
uint32_t oldmask;
uint32_t cr2;
-};
+} mcontext_t;
-typedef uint32_t __sigset_t;
-typedef struct sigcontext mcontext_t;
+typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
- __sigset_t uc_sigmask;
+ // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
+#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
static int GetThreadID() {
- // Glibc doesn't provide a wrapper for gettid(2).
-#if defined(ANDROID)
- return syscall(__NR_gettid);
+#if defined(__ANDROID__)
+ // Android's C library provides gettid(2).
+ return gettid();
#else
+ // Glibc doesn't provide a wrapper for gettid(2).
return syscall(SYS_gettid);
#endif
}
@@ -1029,8 +1029,10 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
-// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
-#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
+ (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+ // Old GLibc ARM versions used a gregs[] array to access the register
+ // values from mcontext_t.
sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
@@ -1038,7 +1040,8 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
+ // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(mcontext.pc);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
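
ArmUsingHardFloat() above now decides the float ABI entirely at compile time: GCC's three version macros are folded into a single comparable integer, and the preprocessor branches on that number plus the __ARM_PCS/__ARM_PCS_VFP defines. The folding trick in isolation (a sketch; it assumes a GCC-compatible compiler, anything else reports 0):

    #include <cstdio>

    // Fold GCC's version triple into one integer, e.g. 4.6.3 -> 40603,
    // so release checks become plain integer comparisons.
    #if defined(__GNUC__)
    #define GCC_VERSION (__GNUC__ * 10000 \
                         + __GNUC_MINOR__ * 100 \
                         + __GNUC_PATCHLEVEL__)
    #else
    #define GCC_VERSION 0
    #endif

    int main() {
      std::printf("GCC_VERSION = %d\n", GCC_VERSION);
    #if GCC_VERSION >= 40600
      std::printf("compiled by GCC 4.6 or newer\n");
    #endif
      return 0;
    }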
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index ba33a8444e..408d4dc0f8 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -323,9 +323,6 @@ void OS::LogSharedLibraryAddresses() {
}
-static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
-
-
void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
@@ -336,7 +333,7 @@ void OS::SignalCodeMovingGC() {
// by the kernel and allows us to synchronize V8 code log and the
// kernel log.
int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(kGCFakeMmap, "w+");
+ FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
fileno(f), 0);
ASSERT(addr != MAP_FAILED);
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index d942d78a55..3bc83733ca 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -153,6 +153,11 @@ double OS::nan_value() {
}
+int OS::GetCurrentProcessId() {
+ return static_cast<int>(getpid());
+}
+
+
// ----------------------------------------------------------------------------
// POSIX date/time support.
//
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 2473949dec..49463be8e0 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -627,6 +627,11 @@ int OS::GetLastError() {
}
+int OS::GetCurrentProcessId() {
+ return static_cast<int>(::GetCurrentProcessId());
+}
+
+
// ----------------------------------------------------------------------------
// Win32 console output.
//
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index a2ddf7a625..a32fbbc028 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -71,6 +71,24 @@ int signbit(double x);
int strncasecmp(const char* s1, const char* s2, int n);
+inline int lrint(double flt) {
+ int intgr;
+#if defined(V8_TARGET_ARCH_IA32)
+ __asm {
+ fld flt
+ fistp intgr
+ };
+#else
+ intgr = static_cast<int>(flt + 0.5);
+ if ((intgr & 1) != 0 && intgr - flt == 0.5) {
+ // If the number is halfway between two integers, round to the even one.
+ intgr--;
+ }
+#endif
+ return intgr;
+}
+
+
#endif // _MSC_VER
// Random is missing on both Visual Studio and MinGW.
@@ -317,6 +335,8 @@ class OS {
static const int kMinComplexMemCopy = 256;
#endif // V8_TARGET_ARCH_IA32
+ static int GetCurrentProcessId();
+
private:
static const int msPerSecond = 1000;
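
The non-IA32 branch of the lrint() shim above hand-rolls round-half-to-even: add 0.5, truncate, then step an odd result back by one when the input was exactly halfway between integers. That branch in isolation, with the behaviour pinned down by asserts (shown here for non-negative inputs; handling negatives is out of scope for this sketch):

    #include <cassert>

    // Round a non-negative double to the nearest int, ties to even,
    // mirroring the fallback branch of the MSVC lrint() replacement.
    static int RoundHalfToEven(double flt) {
      int intgr = static_cast<int>(flt + 0.5);  // round half up, then...
      if ((intgr & 1) != 0 && intgr - flt == 0.5) {
        intgr--;  // ...pull odd results back when the input was an exact tie
      }
      return intgr;
    }

    int main() {
      assert(RoundHalfToEven(2.5) == 2);  // tie resolves to the even neighbour
      assert(RoundHalfToEven(3.5) == 4);
      assert(RoundHalfToEven(2.4) == 2);
      assert(RoundHalfToEven(2.6) == 3);
      return 0;
    }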
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 0c17eecd6a..21da4f80d4 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -602,14 +602,17 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
if (token == i::Token::CASE) {
Expect(i::Token::CASE, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
- } else if (token == i::Token::DEFAULT) {
- Expect(i::Token::DEFAULT, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
} else {
- ParseStatement(CHECK_OK);
+ Expect(i::Token::DEFAULT, CHECK_OK);
}
+ Expect(i::Token::COLON, CHECK_OK);
token = peek();
+ while (token != i::Token::CASE &&
+ token != i::Token::DEFAULT &&
+ token != i::Token::RBRACE) {
+ ParseStatement(CHECK_OK);
+ token = peek();
+ }
}
Expect(i::Token::RBRACE, ok);
return Statement::Default();
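
The preparser fix above makes each switch clause consume its shared colon exactly once and then drain statements with an inner loop that stops at the next 'case', 'default', or closing brace, instead of treating statements as a third alternative of the outer dispatch. The same loop shape over a toy token stream (tokens and helpers invented for illustration; case expressions elided):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum Token { CASE, DEFAULT, COLON, STATEMENT, RBRACE };

    // Accept a switch body: ((CASE|DEFAULT) ':' STATEMENT*)* '}'.
    static bool ParseSwitchBody(const std::vector<Token>& tokens) {
      std::size_t pos = 0;
      while (pos < tokens.size() && tokens[pos] != RBRACE) {
        if (tokens[pos] != CASE && tokens[pos] != DEFAULT) return false;
        ++pos;                                   // clause head
        if (pos == tokens.size() || tokens[pos] != COLON) return false;
        ++pos;                                   // the shared ':'
        while (pos < tokens.size() && tokens[pos] != CASE &&
               tokens[pos] != DEFAULT && tokens[pos] != RBRACE) {
          ++pos;                                 // drain clause statements
        }
      }
      return pos < tokens.size();                // must end on the '}'
    }

    int main() {
      const Token kBody[] = { CASE, COLON, STATEMENT, STATEMENT,
                              DEFAULT, COLON, STATEMENT, RBRACE };
      std::vector<Token> body(kBody, kBody + sizeof(kBody) / sizeof(kBody[0]));
      std::printf("%s\n", ParseSwitchBody(body) ? "ok" : "syntax error");
      return 0;
    }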
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 6c64350e8d..02e146f14a 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -84,6 +84,7 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
return gc_entry_;
case JS:
case COMPILER:
+ case PARALLEL_COMPILER_PROLOGUE:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index a3143bea5b..c3b7622b09 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -1711,8 +1711,8 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
name->IsString()
? collection_->names()->GetName(String::cast(name))
: "");
- } else if (object->IsGlobalContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / GlobalContext");
+ } else if (object->IsNativeContext()) {
+ return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
} else if (object->IsContext()) {
return AddEntry(object, HeapEntry::kHidden, "system / Context");
} else if (object->IsFixedArray() ||
@@ -1946,8 +1946,8 @@ void V8HeapExplorer::ExtractJSObjectReferences(
"builtins", global_obj->builtins(),
GlobalObject::kBuiltinsOffset);
SetInternalReference(global_obj, entry,
- "global_context", global_obj->global_context(),
- GlobalObject::kGlobalContextOffset);
+ "native_context", global_obj->native_context(),
+ GlobalObject::kNativeContextOffset);
SetInternalReference(global_obj, entry,
"global_receiver", global_obj->global_receiver(),
GlobalObject::kGlobalReceiverOffset);
@@ -1982,17 +1982,17 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
- EXTRACT_CONTEXT_FIELD(GLOBAL_INDEX, GlobalObject, global);
- if (context->IsGlobalContext()) {
+ EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
+ if (context->IsNativeContext()) {
TagObject(context->jsfunction_result_caches(),
"(context func. result caches)");
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->data(), "(context data)");
- GLOBAL_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+ NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
#undef EXTRACT_CONTEXT_FIELD
for (int i = Context::FIRST_WEAK_SLOT;
- i < Context::GLOBAL_CONTEXT_SLOTS;
+ i < Context::NATIVE_CONTEXT_SLOTS;
++i) {
SetWeakReference(context, entry, i, context->get(i),
FixedArray::OffsetOfElementAt(i));
@@ -2007,21 +2007,22 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
SetInternalReference(map, entry,
"constructor", map->constructor(),
Map::kConstructorOffset);
- if (!map->instance_descriptors()->IsEmpty()) {
- TagObject(map->instance_descriptors(), "(map descriptors)");
- SetInternalReference(map, entry,
- "descriptors", map->instance_descriptors(),
- Map::kInstanceDescriptorsOrBitField3Offset);
- }
- if (map->unchecked_prototype_transitions()->IsFixedArray()) {
- TagObject(map->prototype_transitions(), "(prototype transitions)");
- SetInternalReference(map, entry,
- "prototype_transitions", map->prototype_transitions(),
- Map::kPrototypeTransitionsOrBackPointerOffset);
- } else {
+ if (map->HasTransitionArray()) {
+ TransitionArray* transitions = map->transitions();
+ if (!transitions->descriptors()->IsEmpty()) {
+ DescriptorArray* descriptors = transitions->descriptors();
+ TagObject(descriptors, "(map descriptors)");
+ SetInternalReference(transitions, entry,
+ "descriptors", descriptors,
+ TransitionArray::kDescriptorsOffset);
+ IndexedReferencesExtractor refs_extractor(
+ this, transitions, entry);
+ transitions->Iterate(&refs_extractor);
+ }
+ TagObject(transitions, "(transition array)");
SetInternalReference(map, entry,
- "back_pointer", map->GetBackPointer(),
- Map::kPrototypeTransitionsOrBackPointerOffset);
+ "transitions", transitions,
+ Map::kTransitionsOrBackPointerOffset);
}
SetInternalReference(map, entry,
"code_cache", map->code_cache(),
@@ -2182,16 +2183,31 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
+
+ String* k = descs->GetKey(i);
if (index < js_obj->map()->inobject_properties()) {
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), js_obj->InObjectPropertyAt(index),
- NULL,
- js_obj->GetInObjectPropertyOffset(index));
+ Object* value = js_obj->InObjectPropertyAt(index);
+ if (k != heap_->hidden_symbol()) {
+ SetPropertyReference(
+ js_obj, entry,
+ k, value,
+ NULL,
+ js_obj->GetInObjectPropertyOffset(index));
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(
+ js_obj, entry,
+ "hidden_properties", value,
+ js_obj->GetInObjectPropertyOffset(index));
+ }
} else {
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), js_obj->FastPropertyAt(index));
+ Object* value = js_obj->FastPropertyAt(index);
+ if (k != heap_->hidden_symbol()) {
+ SetPropertyReference(js_obj, entry, k, value);
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(js_obj, entry, "hidden_properties", value);
+ }
}
break;
}
@@ -2218,9 +2234,10 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
case NORMAL: // only in slow mode
case HANDLER: // only in lookup results, not in descriptors
case INTERCEPTOR: // only in lookup results, not in descriptors
- case MAP_TRANSITION: // we do not care about transitions here...
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR: // ... and not about "holes"
+ break;
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
}
@@ -2235,7 +2252,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
Object* value = target->IsJSGlobalPropertyCell()
? JSGlobalPropertyCell::cast(target)->value()
: target;
- if (String::cast(k)->length() > 0) {
+ if (k != heap_->hidden_symbol()) {
SetPropertyReference(js_obj, entry, String::cast(k), value);
} else {
TagObject(value, "(hidden properties)");
@@ -2294,11 +2311,12 @@ String* V8HeapExplorer::GetConstructorName(JSObject* object) {
Object* constructor_prop = NULL;
LookupResult result(heap->isolate());
object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result);
- if (result.IsProperty()) {
- constructor_prop = result.GetLazyValue();
- }
+ if (!result.IsFound()) return object->constructor_name();
+
+ constructor_prop = result.GetLazyValue();
if (constructor_prop->IsJSFunction()) {
- Object* maybe_name = JSFunction::cast(constructor_prop)->shared()->name();
+ Object* maybe_name =
+ JSFunction::cast(constructor_prop)->shared()->name();
if (maybe_name->IsString()) {
String* name = String::cast(maybe_name);
if (name->length() > 0) return name;
@@ -2560,20 +2578,6 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
}
-void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
- int parent_entry,
- String* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kShortcut,
- parent_entry,
- collection_->names()->GetName(reference_name),
- child_entry);
- }
-}
-
-
void V8HeapExplorer::SetRootGcRootsReference() {
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
@@ -2654,7 +2658,7 @@ class GlobalObjectsEnumerator : public ObjectVisitor {
public:
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- if ((*p)->IsGlobalContext()) {
+ if ((*p)->IsNativeContext()) {
Context* context = Context::cast(*p);
JSObject* proxy = context->global_proxy();
if (proxy->IsJSGlobalProxy()) {
@@ -3345,9 +3349,9 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
- // The buffer needs space for 3 unsigned ints, 3 commas and \0
+ // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
static const int kBufferSize =
- MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 1; // NOLINT
+ MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT
EmbeddedVector<char, kBufferSize> buffer;
int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
|| edge->type() == HeapGraphEdge::kHidden
@@ -3362,6 +3366,7 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
}
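
The one-byte bump in SerializeEdge above is pure buffer arithmetic: the stack buffer is sized from a compile-time bound on the decimal digits of an unsigned, plus one byte per separator, and the newly emitted '\n' enlarges that bound by one. The sizing idiom in isolation (MaxDecimalDigits is a stand-in for V8's MaxDecimalDigitsIn template):

    #include <cstddef>
    #include <cstdio>

    // Worst-case decimal digits for an unsigned integer of a given byte
    // size: 4 bytes -> 4294967295 -> 10 digits; 8 bytes -> 20 digits.
    template <std::size_t kBytes> struct MaxDecimalDigits;
    template <> struct MaxDecimalDigits<4> { static const int kUnsigned = 10; };
    template <> struct MaxDecimalDigits<8> { static const int kUnsigned = 20; };

    int main() {
      // Three unsigned fields, three separators, '\n' and the trailing '\0'.
      const int kBufferSize =
          MaxDecimalDigits<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2;
      char buffer[kBufferSize];
      int written = std::snprintf(buffer, sizeof(buffer), "%u,%u,%u\n",
                                  4294967295u, 4294967295u, 4294967295u);
      std::printf("wrote %d bytes into a %d-byte buffer\n",
                  written, kBufferSize);
      return 0;
    }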
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index d56d874705..04f4a1c71d 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -925,10 +925,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Object* child,
const char* name_format_string = NULL,
int field_offset = -1);
- void SetPropertyShortcutReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child);
void SetUserGlobalReference(Object* user_global);
void SetRootGcRootsReference();
void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index a623fe9b1a..b8fbb3c92a 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -55,20 +55,18 @@ class Smi;
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-debugger.js.
enum PropertyType {
- NORMAL = 0, // only in slow mode
- FIELD = 1, // only in fast mode
- CONSTANT_FUNCTION = 2, // only in fast mode
+ // Only in slow mode.
+ NORMAL = 0,
+ // Only in fast mode.
+ FIELD = 1,
+ CONSTANT_FUNCTION = 2,
CALLBACKS = 3,
- HANDLER = 4, // only in lookup results, not in descriptors
- INTERCEPTOR = 5, // only in lookup results, not in descriptors
- // All properties before MAP_TRANSITION are real.
- MAP_TRANSITION = 6, // only in fast mode
- CONSTANT_TRANSITION = 7, // only in fast mode
- NULL_DESCRIPTOR = 8, // only in fast mode
- // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
- // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
- // nonexistent properties.
- NONEXISTENT = NULL_DESCRIPTOR
+ // Only in lookup results, not in descriptors.
+ HANDLER = 4,
+ INTERCEPTOR = 5,
+ TRANSITION = 6,
+ // Only used as a marker in LookupResult.
+ NONEXISTENT = 7
};
@@ -79,19 +77,19 @@ class PropertyDetails BASE_EMBEDDED {
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
int index = 0) {
- ASSERT(TypeField::is_valid(type));
- ASSERT(AttributesField::is_valid(attributes));
- ASSERT(StorageField::is_valid(index));
-
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
- | StorageField::encode(index);
+ | DictionaryStorageField::encode(index);
ASSERT(type == this->type());
ASSERT(attributes == this->attributes());
- ASSERT(index == this->index());
+ ASSERT(index == this->dictionary_index());
}
+ int pointer() { return DescriptorPointer::decode(value_); }
+
+ PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
+
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
inline Smi* AsSmi();
@@ -100,12 +98,18 @@ class PropertyDetails BASE_EMBEDDED {
PropertyAttributes attributes() { return AttributesField::decode(value_); }
- int index() { return StorageField::decode(value_); }
+ int dictionary_index() {
+ return DictionaryStorageField::decode(value_);
+ }
+
+ int descriptor_index() {
+ return DescriptorStorageField::decode(value_);
+ }
inline PropertyDetails AsDeleted();
static bool IsValidIndex(int index) {
- return StorageField::is_valid(index);
+ return DictionaryStorageField::is_valid(index);
}
bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
@@ -115,14 +119,20 @@ class PropertyDetails BASE_EMBEDDED {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class TypeField: public BitField<PropertyType, 0, 4> {};
- class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
- class DeletedField: public BitField<uint32_t, 7, 1> {};
- class StorageField: public BitField<uint32_t, 8, 32-8> {};
+ class TypeField: public BitField<PropertyType, 0, 3> {};
+ class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+ class DeletedField: public BitField<uint32_t, 6, 1> {};
+ class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
+ class DescriptorStorageField: public BitField<uint32_t, 7, 11> {};
+ class DescriptorPointer: public BitField<uint32_t, 18, 11> {};
static const int kInitialIndex = 1;
private:
+ PropertyDetails(int value, int pointer) {
+ value_ = DescriptorPointer::update(value, pointer);
+ }
+
uint32_t value_;
};
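
The repacking above works because each field class carries its own shift and width: TypeField shrinks to 3 bits now that the transition types are gone, which frees room for the overlapping 24-bit dictionary index and the two 11-bit descriptor fields. A minimal BitField in the same spirit (simplified; V8's real template lives in utils.h):

    #include <cassert>
    #include <cstdint>

    // A statically described bit range in a 32-bit word: encode() shifts a
    // value into place, decode() extracts it, update() rewrites it in place.
    template <class T, int kShift, int kSize>
    struct BitField {
      static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
      static uint32_t update(uint32_t word, T value) {
        return (word & ~kMask) | encode(value);
      }
    };

    // The post-patch layout, minus the 1-bit DeletedField at position 6.
    typedef BitField<unsigned, 0, 3> TypeField;
    typedef BitField<unsigned, 3, 3> AttributesField;
    typedef BitField<unsigned, 7, 11> DescriptorStorageField;
    typedef BitField<unsigned, 18, 11> DescriptorPointer;

    int main() {
      uint32_t value = TypeField::encode(5) | AttributesField::encode(2);
      value = DescriptorPointer::update(value, 42);
      assert(TypeField::decode(value) == 5);
      assert(AttributesField::decode(value) == 2);
      assert(DescriptorPointer::decode(value) == 42);
      return 0;
    }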
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 8c69541be5..05342eea95 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -55,12 +55,6 @@ void LookupResult::Print(FILE* out) {
PrintF(out, " -type = normal\n");
PrintF(out, " -entry = %d", GetDictionaryEntry());
break;
- case MAP_TRANSITION:
- PrintF(out, " -type = map transition\n");
- PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- PrintF(out, "\n");
- break;
case CONSTANT_FUNCTION:
PrintF(out, " -type = constant function\n");
PrintF(out, " -function:\n");
@@ -83,14 +77,31 @@ void LookupResult::Print(FILE* out) {
case INTERCEPTOR:
PrintF(out, " -type = lookup interceptor\n");
break;
- case CONSTANT_TRANSITION:
- PrintF(out, " -type = constant property transition\n");
- PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- PrintF(out, "\n");
- break;
- case NULL_DESCRIPTOR:
- PrintF(out, " =type = null descriptor\n");
+ case TRANSITION:
+ switch (GetTransitionDetails().type()) {
+ case FIELD:
+ PrintF(out, " -type = map transition\n");
+ PrintF(out, " -map:\n");
+ GetTransitionMap()->Print(out);
+ PrintF(out, "\n");
+ return;
+ case CONSTANT_FUNCTION:
+ PrintF(out, " -type = constant property transition\n");
+ PrintF(out, " -map:\n");
+ GetTransitionMap()->Print(out);
+ PrintF(out, "\n");
+ return;
+ case CALLBACKS:
+ PrintF(out, " -type = callbacks transition\n");
+ PrintF(out, " -callback object:\n");
+ GetCallbackObject()->Print(out);
+ return;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
}
@@ -101,34 +112,11 @@ void Descriptor::Print(FILE* out) {
GetKey()->ShortPrint(out);
PrintF(out, " @ ");
GetValue()->ShortPrint(out);
- PrintF(out, " %d\n", GetDetails().index());
+ PrintF(out, " %d\n", GetDetails().descriptor_index());
}
#endif
-bool Descriptor::ContainsTransition() {
- switch (details_.type()) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- return true;
- case CALLBACKS: {
- if (!value_->IsAccessorPair()) return false;
- AccessorPair* accessors = AccessorPair::cast(value_);
- return accessors->getter()->IsMap() || accessors->setter()->IsMap();
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- return false;
- }
- UNREACHABLE(); // Keep the compiler happy.
- return false;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index aa851f1c88..6bf52a7019 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -29,6 +29,7 @@
#define V8_PROPERTY_H_
#include "allocation.h"
+#include "transitions.h"
namespace v8 {
namespace internal {
@@ -64,11 +65,10 @@ class Descriptor BASE_EMBEDDED {
#endif
void SetEnumerationIndex(int index) {
- ASSERT(PropertyDetails::IsValidIndex(index));
details_ = PropertyDetails(details_.attributes(), details_.type(), index);
}
- bool ContainsTransition();
+ void SetSortedKey(int index) { details_ = details_.set_pointer(index); }
private:
String* key_;
@@ -93,7 +93,7 @@ class Descriptor BASE_EMBEDDED {
Object* value,
PropertyAttributes attributes,
PropertyType type,
- int index = 0)
+ int index)
: key_(key),
value_(value),
details_(attributes, type, index) { }
@@ -101,27 +101,6 @@ class Descriptor BASE_EMBEDDED {
friend class DescriptorArray;
};
-// A pointer from a map to the new map that is created by adding
-// a named property. These are key to the speed and functioning of V8.
-// The two maps should always have the same prototype, since
-// MapSpace::CreateBackPointers depends on this.
-class MapTransitionDescriptor: public Descriptor {
- public:
- MapTransitionDescriptor(String* key, Map* map, PropertyAttributes attributes)
- : Descriptor(key, map, attributes, MAP_TRANSITION) { }
-};
-
-// Marks a field name in a map so that adding the field is guaranteed
-// to create a FIELD descriptor in the new map. Used after adding
-// a constant function the first time, creating a CONSTANT_FUNCTION
-// descriptor in the new map. This avoids creating multiple maps with
-// the same CONSTANT_FUNCTION field.
-class ConstTransitionDescriptor: public Descriptor {
- public:
- explicit ConstTransitionDescriptor(String* key, Map* map)
- : Descriptor(key, map, NONE, CONSTANT_TRANSITION) { }
-};
-
class FieldDescriptor: public Descriptor {
public:
@@ -138,7 +117,7 @@ class ConstantFunctionDescriptor: public Descriptor {
ConstantFunctionDescriptor(String* key,
JSFunction* function,
PropertyAttributes attributes,
- int index = 0)
+ int index)
: Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
};
@@ -153,34 +132,6 @@ class CallbacksDescriptor: public Descriptor {
};
-template <class T>
-bool IsPropertyDescriptor(T* desc) {
- switch (desc->type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- return true;
- case CALLBACKS: {
- Object* callback_object = desc->GetCallbackObject();
- // Non-JavaScript (i.e. native) accessors are always a property, otherwise
- // either the getter or the setter must be an accessor. Put another way:
- // If we only see map transitions and holes in a pair, this is not a
- // property.
- return (!callback_object->IsAccessorPair() ||
- AccessorPair::cast(callback_object)->ContainsAccessor());
- }
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- return false;
- }
- UNREACHABLE(); // keep the compiler happy
- return false;
-}
-
-
class LookupResult BASE_EMBEDDED {
public:
explicit LookupResult(Isolate* isolate)
@@ -189,7 +140,7 @@ class LookupResult BASE_EMBEDDED {
lookup_type_(NOT_FOUND),
holder_(NULL),
cacheable_(true),
- details_(NONE, NORMAL) {
+ details_(NONE, NONEXISTENT) {
isolate->SetTopLookupResult(this);
}
@@ -205,6 +156,13 @@ class LookupResult BASE_EMBEDDED {
number_ = number;
}
+ void TransitionResult(JSObject* holder, int number) {
+ lookup_type_ = TRANSITION_TYPE;
+ details_ = PropertyDetails(NONE, TRANSITION);
+ holder_ = holder;
+ number_ = number;
+ }
+
void ConstantResult(JSObject* holder) {
lookup_type_ = CONSTANT_TYPE;
holder_ = holder;
@@ -237,6 +195,7 @@ class LookupResult BASE_EMBEDDED {
void NotFound() {
lookup_type_ = NOT_FOUND;
+ details_ = PropertyDetails(NONE, NONEXISTENT);
holder_ = NULL;
}
@@ -256,24 +215,61 @@ class LookupResult BASE_EMBEDDED {
}
PropertyAttributes GetAttributes() {
+ ASSERT(!IsTransition());
ASSERT(IsFound());
+ ASSERT(details_.type() != NONEXISTENT);
return details_.attributes();
}
PropertyDetails GetPropertyDetails() {
+ ASSERT(!IsTransition());
return details_;
}
- bool IsReadOnly() { return details_.IsReadOnly(); }
+ bool IsFastPropertyType() {
+ ASSERT(IsFound());
+ return IsTransition() || type() != NORMAL;
+ }
+
+  // Property callbacks do not include transitions to callbacks.
+ bool IsPropertyCallbacks() {
+ ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
+ return details_.type() == CALLBACKS;
+ }
+
+ bool IsReadOnly() {
+ ASSERT(IsFound());
+ ASSERT(!IsTransition());
+ ASSERT(details_.type() != NONEXISTENT);
+ return details_.IsReadOnly();
+ }
+
+ bool IsField() {
+ ASSERT(!(details_.type() == FIELD && !IsFound()));
+ return details_.type() == FIELD;
+ }
+
+ bool IsNormal() {
+ ASSERT(!(details_.type() == NORMAL && !IsFound()));
+ return details_.type() == NORMAL;
+ }
+
+ bool IsConstantFunction() {
+ ASSERT(!(details_.type() == CONSTANT_FUNCTION && !IsFound()));
+ return details_.type() == CONSTANT_FUNCTION;
+ }
+
bool IsDontDelete() { return details_.IsDontDelete(); }
bool IsDontEnum() { return details_.IsDontEnum(); }
bool IsDeleted() { return details_.IsDeleted(); }
bool IsFound() { return lookup_type_ != NOT_FOUND; }
+ bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
+ bool IsInterceptor() { return lookup_type_ == INTERCEPTOR_TYPE; }
  // Is the result a property, excluding transitions and the null descriptor?
bool IsProperty() {
- return IsFound() && IsPropertyDescriptor(this);
+ return IsFound() && !IsTransition();
}
bool IsCacheable() { return cacheable_; }
@@ -298,31 +294,55 @@ class LookupResult BASE_EMBEDDED {
}
}
+ Map* GetTransitionTarget() {
+ ASSERT(IsTransition());
+ TransitionArray* transitions = holder()->map()->transitions();
+ return transitions->GetTarget(number_);
+ }
+
+ PropertyDetails GetTransitionDetails(Map* map) {
+ ASSERT(IsTransition());
+ TransitionArray* transitions = map->transitions();
+ return transitions->GetTargetDetails(number_);
+ }
+
+ PropertyDetails GetTransitionDetails() {
+ return GetTransitionDetails(holder()->map());
+ }
+
+ bool IsTransitionToField(Map* map) {
+ return IsTransition() && GetTransitionDetails(map).type() == FIELD;
+ }
Map* GetTransitionMap() {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION ||
- type() == CONSTANT_TRANSITION);
+ ASSERT(IsTransition());
return Map::cast(GetValue());
}
Map* GetTransitionMapFromMap(Map* map) {
+ ASSERT(IsTransition());
+ return map->transitions()->GetTarget(number_);
+ }
+
+ int GetTransitionIndex() {
+ ASSERT(IsTransition());
+ return number_;
+ }
+
+ int GetDescriptorIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION);
- return Map::cast(map->instance_descriptors()->GetValue(number_));
+ return number_;
}
int GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == FIELD);
+ ASSERT(IsField());
return Descriptor::IndexFromValue(GetValue());
}
int GetLocalFieldIndexFromMap(Map* map) {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == FIELD);
- return Descriptor::IndexFromValue(
- map->instance_descriptors()->GetValue(number_)) -
+ ASSERT(IsField());
+ return Descriptor::IndexFromValue(GetValueFromMap(map)) -
map->inobject_properties();
}
@@ -337,16 +357,15 @@ class LookupResult BASE_EMBEDDED {
}
JSFunction* GetConstantFunctionFromMap(Map* map) {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
ASSERT(type() == CONSTANT_FUNCTION);
- return JSFunction::cast(map->instance_descriptors()->GetValue(number_));
+ return JSFunction::cast(GetValueFromMap(map));
}
Object* GetCallbackObject() {
if (lookup_type_ == CONSTANT_TYPE) {
- // For now we only have the __proto__ as constant type.
return HEAP->prototype_accessors();
}
+ ASSERT(!IsTransition());
return GetValue();
}
@@ -356,14 +375,18 @@ class LookupResult BASE_EMBEDDED {
Object* GetValue() {
if (lookup_type_ == DESCRIPTOR_TYPE) {
- DescriptorArray* descriptors = holder()->map()->instance_descriptors();
- return descriptors->GetValue(number_);
+ return GetValueFromMap(holder()->map());
}
// In the dictionary case, the data is held in the value field.
ASSERT(lookup_type_ == DICTIONARY_TYPE);
return holder()->GetNormalizedProperty(this);
}
+ Object* GetValueFromMap(Map* map) const {
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+ return map->instance_descriptors()->GetValue(number_);
+ }
+
void Iterate(ObjectVisitor* visitor);
private:
@@ -374,6 +397,7 @@ class LookupResult BASE_EMBEDDED {
enum {
NOT_FOUND,
DESCRIPTOR_TYPE,
+ TRANSITION_TYPE,
DICTIONARY_TYPE,
HANDLER_TYPE,
INTERCEPTOR_TYPE,
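
LookupResult is now effectively a tagged result object: lookup_type_ records which outcome the lookup had, the new predicates (IsField, IsTransition, IsInterceptor, ...) replace raw type() comparisons, and each accessor asserts the tag it requires. The shape in miniature (names illustrative, heavily reduced):

    #include <cassert>

    class LookupResult {
     public:
      LookupResult() : type_(NOT_FOUND), number_(0) {}

      // One setter per lookup outcome, as with TransitionResult() above.
      void DescriptorResult(int number) { type_ = DESCRIPTOR; number_ = number; }
      void TransitionResult(int number) { type_ = TRANSITION; number_ = number; }
      void NotFound() { type_ = NOT_FOUND; }

      bool IsFound() const { return type_ != NOT_FOUND; }
      bool IsTransition() const { return type_ == TRANSITION; }

      // Accessors assert their tag, so misuse fails loudly in debug builds.
      int GetDescriptorIndex() const {
        assert(type_ == DESCRIPTOR);
        return number_;
      }
      int GetTransitionIndex() const {
        assert(IsTransition());
        return number_;
      }

     private:
      enum Type { NOT_FOUND, DESCRIPTOR, TRANSITION };
      Type type_;
      int number_;
    };

    int main() {
      LookupResult result;
      result.TransitionResult(3);
      assert(result.IsFound() && result.IsTransition());
      assert(result.GetTransitionIndex() == 3);
      result.NotFound();
      assert(!result.IsFound());
      return 0;
    }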
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index d2cd22e9ad..16766cab09 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -38,8 +38,10 @@ namespace internal {
#ifdef V8_INTERPRETED_REGEXP
-RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
- : buffer_(buffer),
+RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer,
+ Zone* zone)
+ : RegExpMacroAssembler(zone),
+ buffer_(buffer),
pc_(0),
own_buffer_(false),
advance_current_end_(kInvalidPC) {
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index 7232342dc5..4bc29809bd 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -48,7 +48,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- explicit RegExpMacroAssemblerIrregexp(Vector<byte>);
+ RegExpMacroAssemblerIrregexp(Vector<byte>, Zone* zone);
virtual ~RegExpMacroAssemblerIrregexp();
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
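
Passing a Zone* through constructors — here, in RegExpParser and in the AST factories — replaces the old per-isolate zone with an arena that the caller owns, so each compilation can discard its temporary objects wholesale. A toy bump-pointer arena showing the lifetime the pattern buys (assumptions mine; V8's real Zone grows in chained segments rather than one fixed block):

    #include <cstddef>
    #include <cstdio>

    // Toy arena: bump allocation, no per-object frees, everything dies
    // with the arena.
    class Zone {
     public:
      Zone() : used_(0) {}
      void* New(std::size_t bytes) {
        bytes = (bytes + 7) & ~static_cast<std::size_t>(7);  // 8-byte align
        if (used_ + bytes > sizeof(buffer_)) return 0;  // real Zone grows here
        void* result = buffer_ + used_;
        used_ += bytes;
        return result;
      }
     private:
      alignas(8) char buffer_[4096];
      std::size_t used_;
    };

    // Patched classes take the zone as a constructor argument and keep it,
    // instead of reaching for isolate->zone().
    class ParserLike {
     public:
      explicit ParserLike(Zone* zone) : zone_(zone) {}
      int* NewCaptureSlot() {
        return static_cast<int*>(zone_->New(sizeof(int)));
      }
     private:
      Zone* zone_;
    };

    int main() {
      Zone compile_zone;                 // one arena per compilation
      ParserLike parser(&compile_zone);
      int* slot = parser.NewCaptureSlot();
      *slot = 1;
      std::printf("slot = %d\n", *slot);
      return 0;
    }  // compile_zone and everything allocated in it are released together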
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 3fcd603fff..6541546cb6 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -38,12 +38,12 @@ namespace internal {
class Processor: public AstVisitor {
public:
- explicit Processor(Variable* result)
+ Processor(Variable* result, Zone* zone)
: result_(result),
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(isolate()) { }
+ factory_(isolate(), zone) { }
virtual ~Processor() { }
@@ -230,8 +230,8 @@ EXPRESSION_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
-// Assumes code has been parsed and scopes have been analyzed. Mutates the
-// AST, so the AST should not continue to be used in the case of failure.
+// Assumes code has been parsed. Mutates the AST, so the AST should not
+// continue to be used in the case of failure.
bool Rewriter::Rewrite(CompilationInfo* info) {
FunctionLiteral* function = info->function();
ASSERT(function != NULL);
@@ -243,7 +243,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
if (!body->is_empty()) {
Variable* result = scope->NewTemporary(
info->isolate()->factory()->result_symbol());
- Processor processor(result);
+ Processor processor(result, info->zone());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
@@ -257,12 +257,12 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
// coincides with the end of the with scope which is the position of '1'.
int position = function->end_position();
VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
- result->name(), false, position);
+ result->name(), false, Interface::NewValue(), position);
result_proxy->BindTo(result);
Statement* result_statement =
processor.factory()->NewReturnStatement(result_proxy);
result_statement->set_statement_pos(position);
- body->Add(result_statement, info->isolate()->zone());
+ body->Add(result_statement, info->zone());
}
}
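
Rewriter::Rewrite implements completion values for top-level code: statements are processed so that they assign into a temporary variable, and if anything was assigned, an explicit return of that temporary is appended at the function's end position, as the hunk above shows. The appended-return step on a toy statement list (a hypothetical mini-AST, not V8's; '.result' names the temporary here):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Statement {
      std::string text;
    };

    // Mimic the tail of Rewriter::Rewrite: if the body assigned the result
    // temporary, append an explicit 'return .result' statement.
    static void AppendResultReturn(std::vector<Statement>* body,
                                   bool result_assigned) {
      if (!result_assigned || body->empty()) return;
      Statement ret = { "return .result" };
      body->push_back(ret);
    }

    int main() {
      std::vector<Statement> body;
      Statement assignment = { ".result = f()" };
      body.push_back(assignment);
      AppendResultReturn(&body, true);
      for (std::size_t i = 0; i < body.size(); ++i) {
        std::printf("%s\n", body[i].text.c_str());
      }
      return 0;
    }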
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 003b882f36..23f41fa7d2 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -34,6 +34,7 @@
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
+#include "full-codegen.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "mark-compact.h"
@@ -81,7 +82,8 @@ STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
-static const int kMaxSizeEarlyOpt = 500;
+static const int kMaxSizeEarlyOpt =
+ 5 * FullCodeGenerator::kBackEdgeDistanceUnit;
Atomic32 RuntimeProfiler::state_ = 0;
@@ -151,15 +153,20 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
PrintF("]\n");
}
- // The next call to the function will trigger optimization.
- function->MarkForLazyRecompilation();
+ if (FLAG_parallel_recompilation) {
+ function->MarkForParallelRecompilation();
+ } else {
+ // The next call to the function will trigger optimization.
+ function->MarkForLazyRecompilation();
+ }
}
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
- ASSERT(function->IsMarkedForLazyRecompilation());
+ ASSERT(function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForParallelRecompilation());
if (!FLAG_use_osr ||
isolate_->DebuggerHasBreakPoints() ||
function->IsBuiltin()) {
@@ -218,7 +225,10 @@ int RuntimeProfiler::LookupSample(JSFunction* function) {
for (int i = 0; i < kSamplerWindowSize; i++) {
Object* sample = sampler_window_[i];
if (sample != NULL) {
- if (function == sample) {
+ bool fits = FLAG_lookup_sample_by_shared
+ ? (function->shared() == JSFunction::cast(sample)->shared())
+ : (function == JSFunction::cast(sample));
+ if (fits) {
weight += sampler_window_weight_[i];
}
}
@@ -275,7 +285,8 @@ void RuntimeProfiler::OptimizeNow() {
if (shared_code->kind() != Code::FUNCTION) continue;
- if (function->IsMarkedForLazyRecompilation()) {
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForParallelRecompilation()) {
int nesting = shared_code->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
@@ -293,7 +304,7 @@ void RuntimeProfiler::OptimizeNow() {
// Do not record non-optimizable functions.
if (shared->optimization_disabled()) {
- if (shared->deopt_count() >= Compiler::kDefaultMaxOptCount) {
+ if (shared->deopt_count() >= FLAG_max_opt_count) {
// If optimization was disabled due to many deoptimizations,
// then check if the function is hot and try to reenable optimization.
int ticks = shared_code->profiler_ticks();
@@ -308,8 +319,6 @@ void RuntimeProfiler::OptimizeNow() {
}
if (!function->IsOptimizable()) continue;
-
-
if (FLAG_watch_ic_patching) {
int ticks = shared_code->profiler_ticks();
@@ -332,7 +341,7 @@ void RuntimeProfiler::OptimizeNow() {
}
}
} else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index de3a55e5d1..3c9a10dbff 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -54,7 +54,7 @@
#include "runtime-profiler.h"
#include "runtime.h"
#include "scopeinfo.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "string-search.h"
#include "stub-cache.h"
#include "v8threads.h"
@@ -303,7 +303,7 @@ static Handle<Map> ComputeObjectLiteralMap(
}
}
// If we only have symbols and array indices among keys then we can
- // use the map cache in the global context.
+ // use the map cache in the native context.
const int kMaxKeys = 10;
if ((number_of_symbol_keys == number_of_properties) &&
(number_of_symbol_keys < kMaxKeys)) {
@@ -342,14 +342,14 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<FixedArray> constant_properties,
bool should_have_fast_elements,
bool has_function_literal) {
- // Get the global context from the literals array. This is the
+ // Get the native context from the literals array. This is the
// context in which the function was created and we use the object
// function from this context to create the object literal. We do
- // not use the object function from the current global context
+ // not use the object function from the current native context
// because this might be the object function from another context
// which we should not have access to.
Handle<Context> context =
- Handle<Context>(JSFunction::GlobalContextFromLiterals(*literals));
+ Handle<Context>(JSFunction::NativeContextFromLiterals(*literals));
// In case we have function literals, we want the object to be in
// slow properties mode for now. We don't go in the map cache because
@@ -464,7 +464,7 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<FixedArray> elements) {
// Create the JSArray.
Handle<JSFunction> constructor(
- JSFunction::GlobalContextFromLiterals(*literals)->array_function());
+ JSFunction::NativeContextFromLiterals(*literals)->array_function());
Handle<JSArray> object =
Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
@@ -474,8 +474,8 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
FixedArrayBase::cast(elements->get(1)));
ASSERT(IsFastElementsKind(constant_elements_kind));
- Context* global_context = isolate->context()->global_context();
- Object* maybe_maps_array = global_context->js_array_maps();
+ Context* native_context = isolate->context()->native_context();
+ Object* maybe_maps_array = native_context->js_array_maps();
ASSERT(!maybe_maps_array->IsUndefined());
Object* maybe_map = FixedArray::cast(maybe_maps_array)->get(
constant_elements_kind);
@@ -635,6 +635,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index), isolate);
if (*boilerplate == isolate->heap()->undefined_value()) {
+ ASSERT(*elements != isolate->heap()->empty_fixed_array());
boilerplate =
Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
@@ -672,7 +673,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
ASSERT(args.length() == 2);
- Object* handler = args[0];
+ CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* prototype = args[1];
Object* used_prototype =
prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
@@ -682,9 +683,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
ASSERT(args.length() == 4);
- Object* handler = args[0];
+ CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* call_trap = args[1];
- Object* construct_trap = args[2];
+ RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy());
+ CONVERT_ARG_CHECKED(JSFunction, construct_trap, 2);
Object* prototype = args[3];
Object* used_prototype =
prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
@@ -754,7 +756,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
table = ObjectHashSetAdd(table, key);
holder->set_table(*table);
- return isolate->heap()->undefined_symbol();
+ return isolate->heap()->undefined_value();
}
@@ -776,7 +778,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
table = ObjectHashSetRemove(table, key);
holder->set_table(*table);
- return isolate->heap()->undefined_symbol();
+ return isolate->heap()->undefined_value();
}
@@ -794,8 +796,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<Object> key(args[1]);
- return ObjectHashTable::cast(holder->table())->Lookup(*key);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapHas) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ Handle<ObjectHashTable> new_table =
+ PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
+ holder->set_table(*new_table);
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
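
Runtime_MapDelete above never physically removes an entry: it overwrites the value with the hole sentinel and reports whether anything other than the hole was stored before, which is also why the Get and Has paths translate the hole back into undefined and false. The sentinel idiom in miniature (std::map stands in for ObjectHashTable):

    #include <cassert>
    #include <map>
    #include <string>

    // A unique address playing the role of V8's the-hole value.
    static const char kTheHoleTag = 0;
    static const void* const THE_HOLE = &kTheHoleTag;

    typedef std::map<std::string, const void*> Table;

    // Lookup reports the hole for absent keys, like ObjectHashTable::Lookup.
    static const void* Lookup(const Table& table, const std::string& key) {
      Table::const_iterator it = table.find(key);
      return it == table.end() ? THE_HOLE : it->second;
    }

    // Delete by storing the hole; report whether a live value was replaced.
    static bool Delete(Table* table, const std::string& key) {
      const void* old_value = Lookup(*table, key);
      (*table)[key] = THE_HOLE;
      return old_value != THE_HOLE;
    }

    int main() {
      Table table;
      int payload = 42;
      table["x"] = &payload;
      assert(Delete(&table, "x"));             // first delete saw a live value
      assert(!Delete(&table, "x"));            // second one only found the hole
      assert(Lookup(table, "y") == THE_HOLE);  // absent looks like the hole
      return 0;
    }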
@@ -803,12 +832,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<Object> key(args[1]);
- Handle<Object> value(args[2]);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
holder->set_table(*new_table);
- return *value;
+ return isolate->heap()->undefined_value();
}
@@ -825,11 +854,38 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- return ObjectHashTable::cast(weakmap->table())->Lookup(*key);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapHas) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapDelete) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ Handle<ObjectHashTable> new_table =
+ PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
+ weakmap->set_table(*new_table);
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -842,7 +898,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
weakmap->set_table(*new_table);
- return *value;
+ return isolate->heap()->undefined_value();
}
@@ -898,13 +954,13 @@ static void GetOwnPropertyImplementation(JSObject* obj,
LookupResult* result) {
obj->LocalLookupRealNamedProperty(name, result);
- if (!result->IsProperty()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype())
- GetOwnPropertyImplementation(JSObject::cast(proto),
- name, result);
- }
+ if (result->IsFound()) return;
+
+ Object* proto = obj->GetPrototype();
+ if (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype())
+ GetOwnPropertyImplementation(JSObject::cast(proto),
+ name, result);
}
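
The rewritten GetOwnPropertyImplementation returns as soon as the lookup finds anything and otherwise keeps walking, but the walk still only crosses hidden prototypes: objects whose map marks them as an extension of their child. The chain rule in miniature (toy structs; the pointer comparison mirrors how V8 compares symbol identity):

    #include <cassert>

    // A toy object with at most one own property and an optional prototype.
    struct Object {
      const char* own_property;  // interned property name, or 0 for none
      bool is_hidden_prototype;  // does this object extend its child?
      Object* prototype;
    };

    // Consult the receiver and any chain of hidden prototypes, but stop at
    // the first ordinary prototype, like GetOwnPropertyImplementation above.
    static bool GetOwnProperty(Object* obj, const char* name) {
      if (obj->own_property == name) return true;  // identity, not strcmp
      Object* proto = obj->prototype;
      if (proto != 0 && proto->is_hidden_prototype) {
        return GetOwnProperty(proto, name);
      }
      return false;
    }

    int main() {
      static const char* const kName = "x";
      Object ordinary = { kName, false, 0 };
      Object hidden = { kName, true, &ordinary };
      Object receiver = { 0, false, &hidden };
      assert(GetOwnProperty(&receiver, kName));  // found via hidden prototype
      Object bare = { 0, false, &ordinary };
      assert(!GetOwnProperty(&bare, kName));     // ordinary proto not consulted
      return 0;
    }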
@@ -1115,7 +1171,7 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
- bool is_js_accessor = (result.type() == CALLBACKS) &&
+ bool is_js_accessor = result.IsPropertyCallbacks() &&
(result.GetCallbackObject()->IsAccessorPair());
if (is_js_accessor) {
@@ -1190,7 +1246,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
+ Handle<Object> result =
+ RegExpImpl::Compile(re, pattern, flags, isolate->runtime_zone());
if (result.is_null()) return Failure::Exception();
return *result;
}
@@ -1237,14 +1294,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
bool needs_access_checks = old_map->is_access_check_needed();
if (needs_access_checks) {
    // Copy map so it won't interfere with the constructor's initial map.
- Object* new_map;
- { MaybeObject* maybe_new_map =
- old_map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
+ Map* new_map;
+ MaybeObject* maybe_new_map = old_map->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- Map::cast(new_map)->set_is_access_check_needed(false);
- object->set_map(Map::cast(new_map));
+ new_map->set_is_access_check_needed(false);
+ object->set_map(new_map);
}
return isolate->heap()->ToBoolean(needs_access_checks);
}
@@ -1256,14 +1311,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
Map* old_map = object->map();
if (!old_map->is_access_check_needed()) {
    // Copy map so it won't interfere with the constructor's initial map.
- Object* new_map;
- { MaybeObject* maybe_new_map =
- old_map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
+ Map* new_map;
+ MaybeObject* maybe_new_map = old_map->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- Map::cast(new_map)->set_is_access_check_needed(true);
- object->set_map(Map::cast(new_map));
+ new_map->set_is_access_check_needed(true);
+ object->set_map(new_map);
}
return isolate->heap()->undefined_value();
}
@@ -1286,7 +1339,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
ASSERT(args.length() == 3);
HandleScope scope(isolate);
Handle<GlobalObject> global = Handle<GlobalObject>(
- isolate->context()->global());
+ isolate->context()->global_object());
Handle<Context> context = args.at<Context>(0);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
@@ -1311,16 +1364,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
if (is_var || is_const) {
// Lookup the property in the global object, and don't set the
// value of the variable if the property is already there.
- // Do the lookup locally only, see ES5 errata.
+ // Do the lookup locally only, see ES5 erratum.
LookupResult lookup(isolate);
- if (FLAG_es52_globals)
- global->LocalLookup(*name, &lookup);
- else
+ if (FLAG_es52_globals) {
+ Object* obj = *global;
+ do {
+ JSObject::cast(obj)->LocalLookup(*name, &lookup);
+ if (lookup.IsFound()) break;
+ obj = obj->GetPrototype();
+ } while (obj->IsJSObject() &&
+ JSObject::cast(obj)->map()->is_hidden_prototype());
+ } else {
global->Lookup(*name, &lookup);
- if (lookup.IsProperty()) {
+ }
+ if (lookup.IsFound()) {
// We found an existing property. Unless it was an interceptor
// that claims the property is absent, skip this declaration.
- if (lookup.type() != INTERCEPTOR) continue;
+ if (!lookup.IsInterceptor()) continue;
PropertyAttributes attributes = global->GetPropertyAttribute(*name);
if (attributes != ABSENT) continue;
// Fall-through and introduce the absent property by using
@@ -1353,12 +1413,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
- if (!lookup.IsProperty() || is_function || is_module) {
+ if (!lookup.IsFound() || is_function || is_module) {
// If the local property exists, check that we can reconfigure it
// as required for function declarations.
- if (lookup.IsProperty() && lookup.IsDontDelete()) {
+ if (lookup.IsFound() && lookup.IsDontDelete()) {
if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
- lookup.type() == CALLBACKS) {
+ lookup.IsPropertyCallbacks()) {
return ThrowRedeclarationError(
isolate, is_function ? "function" : "module", name);
}
@@ -1387,7 +1447,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
- // Declarations are always made in a function or global context. In the
+ // Declarations are always made in a function or native context. In the
// case of eval code, the context passed is the context of the caller,
// which may be some nested context and not the declaration context.
RUNTIME_ASSERT(args[0]->IsContext());
@@ -1426,7 +1486,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
}
} else {
// Slow case: The property is in the context extension object of a
- // function context or the global object of a global context.
+ // function context or the global object of a native context.
Handle<JSObject> object = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
isolate,
@@ -1467,7 +1527,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
!object->IsJSContextExtensionObject()) {
LookupResult lookup(isolate);
object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.type() == CALLBACKS)) {
+ if (lookup.IsPropertyCallbacks()) {
return ThrowRedeclarationError(isolate, "const", name);
}
}
@@ -1497,7 +1557,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
bool assign = args.length() == 3;
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- GlobalObject* global = isolate->context()->global();
+ GlobalObject* global = isolate->context()->global_object();
RUNTIME_ASSERT(args[1]->IsSmi());
CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
@@ -1520,7 +1580,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
JSObject::cast(object)->map()->is_hidden_prototype()) {
JSObject* raw_holder = JSObject::cast(object);
raw_holder->LocalLookup(*name, &lookup);
- if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsInterceptor()) {
HandleScope handle_scope(isolate);
Handle<JSObject> holder(raw_holder);
PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
@@ -1540,7 +1600,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
}
// Reload global in case the loop above performed a GC.
- global = isolate->context()->global();
+ global = isolate->context()->global_object();
if (assign) {
return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
}
@@ -1557,7 +1617,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
Handle<Object> value = args.at<Object>(1);
// Get the current global object from top.
- GlobalObject* global = isolate->context()->global();
+ GlobalObject* global = isolate->context()->global_object();
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable. Since it's a const, it must be READ_ONLY too.
@@ -1571,7 +1631,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// We use SetLocalPropertyIgnoreAttributes instead
LookupResult lookup(isolate);
global->LocalLookup(*name, &lookup);
- if (!lookup.IsProperty()) {
+ if (!lookup.IsFound()) {
return global->SetLocalPropertyIgnoreAttributes(*name,
*value,
attributes);
@@ -1581,7 +1641,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// Restore global object from context (in case of GC) and continue
// with setting the value.
HandleScope handle_scope(isolate);
- Handle<GlobalObject> global(isolate->context()->global());
+ Handle<GlobalObject> global(isolate->context()->global_object());
// BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
@@ -1598,14 +1658,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// constant. For now, we determine this by checking if the
// current value is the hole.
// Strict mode handling not needed (const is disallowed in strict mode).
- PropertyType type = lookup.type();
- if (type == FIELD) {
+ if (lookup.IsField()) {
FixedArray* properties = global->properties();
int index = lookup.GetFieldIndex();
if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
properties->set(index, *value);
}
- } else if (type == NORMAL) {
+ } else if (lookup.IsNormal()) {
if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
!lookup.IsReadOnly()) {
global->SetNormalizedProperty(&lookup, *value);
@@ -1613,7 +1672,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
} else {
// Ignore re-initialization of constants that have already been
// assigned a function value.
- ASSERT(lookup.IsReadOnly() && type == CONSTANT_FUNCTION);
+ ASSERT(lookup.IsReadOnly() && lookup.IsConstantFunction());
}
// Use the set value as the result of the operation.
@@ -1628,7 +1687,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
Handle<Object> value(args[0], isolate);
ASSERT(!value->IsTheHole());
- // Initializations are always done in a function or global context.
+ // Initializations are always done in a function or native context.
RUNTIME_ASSERT(args[1]->IsContext());
Handle<Context> context(Context::cast(args[1])->declaration_context());
@@ -1656,7 +1715,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
// global object.
if (attributes == ABSENT) {
Handle<JSObject> global = Handle<JSObject>(
- isolate->context()->global());
+ isolate->context()->global_object());
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
@@ -1689,14 +1748,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
ASSERT(lookup.IsFound()); // the property was declared
ASSERT(lookup.IsReadOnly()); // and it was declared as read-only
- PropertyType type = lookup.type();
- if (type == FIELD) {
+ if (lookup.IsField()) {
FixedArray* properties = object->properties();
int index = lookup.GetFieldIndex();
if (properties->get(index)->IsTheHole()) {
properties->set(index, *value);
}
- } else if (type == NORMAL) {
+ } else if (lookup.IsNormal()) {
if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
object->SetNormalizedProperty(&lookup, *value);
}
@@ -1750,8 +1808,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
Handle<Object> result = RegExpImpl::Exec(regexp,
subject,
index,
- last_match_info,
- isolate->zone());
+ last_match_info);
if (result.is_null()) return Failure::Exception();
return *result;
}
@@ -1779,7 +1836,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
AssertNoAllocation no_gc;
HandleScope scope(isolate);
reinterpret_cast<HeapObject*>(new_object)->
- set_map(isolate->global_context()->regexp_result_map());
+ set_map(isolate->native_context()->regexp_result_map());
}
JSArray* array = JSArray::cast(new_object);
array->set_properties(isolate->heap()->empty_fixed_array());
@@ -1931,9 +1988,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
// Returns undefined for strict or native functions, or
// the associated global receiver for "normal" functions.
- Context* global_context =
- function->context()->global()->global_context();
- return global_context->global()->global_receiver();
+ Context* native_context =
+ function->context()->global_object()->native_context();
+ return native_context->global_object()->global_receiver();
}
@@ -1948,11 +2005,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
// Get the RegExp function from the context in the literals array.
// This is the RegExp function from the context in which the
// function was created. We do not use the RegExp function from the
- // current global context because this might be the RegExp function
+ // current native context because this might be the RegExp function
// from another context which we should not have access to.
Handle<JSFunction> constructor =
Handle<JSFunction>(
- JSFunction::GlobalContextFromLiterals(*literals)->regexp_function());
+ JSFunction::NativeContextFromLiterals(*literals)->regexp_function());
// Compute the regular expression literal.
bool has_pending_exception;
Handle<Object> regexp =
@@ -2010,8 +2067,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
- Object* obj = f->RemovePrototype();
- if (obj->IsFailure()) return obj;
+ f->RemovePrototype();
return isolate->heap()->undefined_value();
}
@@ -2104,40 +2160,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
RUNTIME_ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- MaybeObject* maybe_name =
- isolate->heap()->AllocateStringFromAscii(CStrVector("prototype"));
- String* name;
- if (!maybe_name->To(&name)) return maybe_name;
+ String* name = isolate->heap()->prototype_symbol();
if (function->HasFastProperties()) {
// Construct a new field descriptor with updated attributes.
DescriptorArray* instance_desc = function->map()->instance_descriptors();
- int index = instance_desc->Search(name);
+
+ int index = instance_desc->SearchWithCache(name);
ASSERT(index != DescriptorArray::kNotFound);
PropertyDetails details = instance_desc->GetDetails(index);
+
CallbacksDescriptor new_desc(name,
instance_desc->GetValue(index),
static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
- details.index());
- // Construct a new field descriptors array containing the new descriptor.
- Object* descriptors_unchecked;
- { MaybeObject* maybe_descriptors_unchecked =
- instance_desc->CopyInsert(&new_desc, REMOVE_TRANSITIONS);
- if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
- return maybe_descriptors_unchecked;
- }
- }
- DescriptorArray* new_descriptors =
- DescriptorArray::cast(descriptors_unchecked);
+ details.descriptor_index());
+
// Create a new map featuring the new field descriptors array.
- Object* map_unchecked;
- { MaybeObject* maybe_map_unchecked = function->map()->CopyDropDescriptors();
- if (!maybe_map_unchecked->ToObject(&map_unchecked)) {
- return maybe_map_unchecked;
- }
- }
- Map* new_map = Map::cast(map_unchecked);
- new_map->set_instance_descriptors(new_descriptors);
+ Map* new_map;
+ MaybeObject* maybe_map =
+ function->map()->CopyReplaceDescriptor(
+ &new_desc, index, OMIT_TRANSITION);
+ if (!maybe_map->To(&new_map)) return maybe_map;
+
function->set_map(new_map);
} else { // Dictionary properties.
// Directly manipulate the property details.
@@ -2147,7 +2191,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
PropertyDetails new_details(
static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
details.type(),
- details.index());
+ details.dictionary_index());
function->property_dictionary()->DetailsAtPut(entry, new_details);
}
return function;
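
The fast-properties branch above shrinks because the new Map::CopyReplaceDescriptor does in one fallible step what previously took three: copying the descriptor array with the replacement inserted, copying the map without descriptors, and wiring the two together. Condensed from the hunk:

    // Before: two allocations, each with manual MaybeObject plumbing.
    //   descriptors' = instance_desc->CopyInsert(&new_desc, REMOVE_TRANSITIONS)
    //   map'         = map->CopyDropDescriptors()
    //   map'->set_instance_descriptors(descriptors')
    // After: one allocation point, one failure path.
    Map* new_map;
    MaybeObject* maybe_map = function->map()->CopyReplaceDescriptor(
        &new_desc, index, OMIT_TRANSITION);
    if (!maybe_map->To(&new_map)) return maybe_map;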
@@ -2185,8 +2229,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
Handle<SharedFunctionInfo> target_shared(target->shared());
Handle<SharedFunctionInfo> source_shared(source->shared());
- if (!source->is_compiled() &&
- !JSFunction::CompileLazy(source, KEEP_EXCEPTION)) {
+ if (!JSFunction::EnsureCompiled(source, KEEP_EXCEPTION)) {
return Failure::Exception();
}
@@ -2219,14 +2262,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
Handle<FixedArray> literals =
isolate->factory()->NewFixedArray(number_of_literals, TENURED);
if (number_of_literals > 0) {
- literals->set(JSFunction::kLiteralGlobalContextIndex,
- context->global_context());
+ literals->set(JSFunction::kLiteralNativeContextIndex,
+ context->native_context());
}
target->set_context(*context);
target->set_literals(*literals);
target->set_next_function_link(isolate->heap()->undefined_value());
- if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
+ if (isolate->logger()->is_logging_code_events() ||
+ CpuProfiler::is_profiling(isolate)) {
isolate->logger()->LogExistingFunction(
source_shared, Handle<Code>(source_shared->code()));
}
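
EnsureCompiled above subsumes the explicit is_compiled() test plus the CompileLazy call. A plausible shape for the helper, offered as an assumption since its body is not part of this diff:

    // Assumed wrapper (not shown in this patch): compile only if needed,
    // reporting failure the same way CompileLazy does.
    static bool EnsureCompiled(Handle<JSFunction> function,
                               ClearExceptionFlag flag) {
      return function->is_compiled() || JSFunction::CompileLazy(function, flag);
    }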
@@ -2263,19 +2307,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, subject, 0);
- Object* index = args[1];
- RUNTIME_ASSERT(index->IsNumber());
-
- uint32_t i = 0;
- if (index->IsSmi()) {
- int value = Smi::cast(index)->value();
- if (value < 0) return isolate->heap()->nan_value();
- i = value;
- } else {
- ASSERT(index->IsHeapNumber());
- double value = HeapNumber::cast(index)->value();
- i = static_cast<uint32_t>(DoubleToInteger(value));
- }
+ CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
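
CONVERT_NUMBER_CHECKED replaces fourteen lines of hand-written Smi/HeapNumber dispatch. An approximate expansion, assumed from how the macro is used at this call site:

    // CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]) expands to
    // roughly:
    RUNTIME_ASSERT(args[1]->IsNumber());
    uint32_t i = NumberToUint32(args[1]);

One behavioral nuance: the deleted code returned NaN for negative Smi indices directly, while the macro route relies on the uint32 wrap-around plus the bounds check later in this function (outside the hunk) to reject them.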
@@ -2369,18 +2401,13 @@ class FixedArrayBuilder {
return array_->length();
}
- Handle<JSArray> ToJSArray() {
- Handle<JSArray> result_array = FACTORY->NewJSArrayWithElements(array_);
- result_array->set_length(Smi::FromInt(length_));
- return result_array;
- }
-
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
FACTORY->SetContent(target_array, array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
+
private:
Handle<FixedArray> array_;
int length_;
@@ -2499,10 +2526,6 @@ class ReplacementStringBuilder {
character_count_ += by;
}
- Handle<JSArray> GetParts() {
- return array_builder_.ToJSArray();
- }
-
private:
Handle<SeqAsciiString> NewRawAsciiString(int length) {
return heap_->isolate()->factory()->NewRawAsciiString(length);
@@ -2531,28 +2554,24 @@ class ReplacementStringBuilder {
class CompiledReplacement {
public:
explicit CompiledReplacement(Zone* zone)
- : parts_(1, zone), replacement_substrings_(0, zone),
- simple_hint_(false),
- zone_(zone) {}
+ : parts_(1, zone), replacement_substrings_(0, zone), zone_(zone) {}
- void Compile(Handle<String> replacement,
+ // Return whether the replacement is simple.
+ bool Compile(Handle<String> replacement,
int capture_count,
int subject_length);
+ // Use Apply only if Compile returned false.
void Apply(ReplacementStringBuilder* builder,
int match_from,
int match_to,
- Handle<JSArray> last_match_info);
+ int32_t* match);
// Number of distinct parts of the replacement pattern.
int parts() {
return parts_.length();
}
- bool simple_hint() {
- return simple_hint_;
- }
-
Zone* zone() const { return zone_; }
private:
@@ -2613,11 +2632,11 @@ class CompiledReplacement {
};
template<typename Char>
- static bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
- Vector<Char> characters,
- int capture_count,
- int subject_length,
- Zone* zone) {
+ bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
+ Vector<Char> characters,
+ int capture_count,
+ int subject_length,
+ Zone* zone) {
int length = characters.length();
int last = 0;
for (int i = 0; i < length; i++) {
@@ -2711,7 +2730,7 @@ class CompiledReplacement {
}
if (length > last) {
if (last == 0) {
- parts->Add(ReplacementPart::ReplacementString(), zone);
+ // Replacement is simple. Do not use Apply to do the replacement.
return true;
} else {
parts->Add(ReplacementPart::ReplacementSubString(last, length), zone);
@@ -2722,33 +2741,35 @@ class CompiledReplacement {
ZoneList<ReplacementPart> parts_;
ZoneList<Handle<String> > replacement_substrings_;
- bool simple_hint_;
Zone* zone_;
};
-void CompiledReplacement::Compile(Handle<String> replacement,
+bool CompiledReplacement::Compile(Handle<String> replacement,
int capture_count,
int subject_length) {
{
AssertNoAllocation no_alloc;
String::FlatContent content = replacement->GetFlatContent();
ASSERT(content.IsFlat());
+ bool simple = false;
if (content.IsAscii()) {
- simple_hint_ = ParseReplacementPattern(&parts_,
- content.ToAsciiVector(),
- capture_count,
- subject_length,
- zone());
+ simple = ParseReplacementPattern(&parts_,
+ content.ToAsciiVector(),
+ capture_count,
+ subject_length,
+ zone());
} else {
ASSERT(content.IsTwoByte());
- simple_hint_ = ParseReplacementPattern(&parts_,
- content.ToUC16Vector(),
- capture_count,
- subject_length,
- zone());
+ simple = ParseReplacementPattern(&parts_,
+ content.ToUC16Vector(),
+ capture_count,
+ subject_length,
+ zone());
}
+ if (simple) return true;
}
+
Isolate* isolate = replacement->GetIsolate();
// Find substrings of replacement string and create them as String objects.
int substring_index = 0;
@@ -2768,13 +2789,15 @@ void CompiledReplacement::Compile(Handle<String> replacement,
substring_index++;
}
}
+ return false;
}
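
Compile now reports whether the replacement pattern is "simple", i.e. contains no $ directives at all (the early `return true` in ParseReplacementPattern above). In that case parts_ carries no useful program and Apply must not be called; the caller appends the raw replacement string once per match instead. The consumer side, condensed from the StringReplaceRegExpWithString hunk further down:

    bool simple_replace = compiled_replacement.Compile(replacement,
                                                       capture_count,
                                                       subject_length);
    // ... then, for each match ...
    if (simple_replace) {
      builder.AddString(replacement);   // no captures to expand
    } else {
      compiled_replacement.Apply(&builder, start, end, current_match);
    }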
void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
int match_from,
int match_to,
- Handle<JSArray> last_match_info) {
+ int32_t* match) {
+ ASSERT_LT(0, parts_.length());
for (int i = 0, n = parts_.length(); i < n; i++) {
ReplacementPart part = parts_[i];
switch (part.tag) {
@@ -2790,9 +2813,8 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
}
case SUBJECT_CAPTURE: {
int capture = part.data;
- FixedArray* match_info = FixedArray::cast(last_match_info->elements());
- int from = RegExpImpl::GetCapture(match_info, capture * 2);
- int to = RegExpImpl::GetCapture(match_info, capture * 2 + 1);
+ int from = match[capture * 2];
+ int to = match[capture * 2 + 1];
if (from >= 0 && to > from) {
builder->AddSubjectSlice(from, to);
}
@@ -2914,85 +2936,19 @@ void FindStringIndicesDispatch(Isolate* isolate,
}
-// Two smis before and after the match, for very long strings.
-const int kMaxBuilderEntriesPerRegExpMatch = 5;
-
-
-static void SetLastMatchInfoNoCaptures(Handle<String> subject,
- Handle<JSArray> last_match_info,
- int match_start,
- int match_end) {
- // Fill last_match_info with a single capture.
- last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
- AssertNoAllocation no_gc;
- FixedArray* elements = FixedArray::cast(last_match_info->elements());
- RegExpImpl::SetLastCaptureCount(elements, 2);
- RegExpImpl::SetLastInput(elements, *subject);
- RegExpImpl::SetLastSubject(elements, *subject);
- RegExpImpl::SetCapture(elements, 0, match_start);
- RegExpImpl::SetCapture(elements, 1, match_end);
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-static bool SearchStringMultiple(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- String* pattern_string,
- FixedArrayBuilder* builder,
- int* match_pos) {
- int pos = *match_pos;
- int subject_length = subject.length();
- int pattern_length = pattern.length();
- int max_search_start = subject_length - pattern_length;
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- while (pos <= max_search_start) {
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- // Position of end of previous match.
- int match_end = pos + pattern_length;
- int new_pos = search.Search(subject, match_end);
- if (new_pos >= 0) {
- // A match.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- new_pos);
- }
- pos = new_pos;
- builder->Add(pattern_string);
- } else {
- break;
- }
- }
-
- if (pos < max_search_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- pos + pattern_length,
- subject_length);
- }
- *match_pos = pos;
- return true;
-}
-
-
-
-
template<typename ResultSeqString>
MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> pattern_regexp,
Handle<String> replacement,
- Handle<JSArray> last_match_info,
- Zone* zone) {
+ Handle<JSArray> last_match_info) {
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- ZoneScope zone_space(isolate, DELETE_ON_EXIT);
- ZoneList<int> indices(8, isolate->zone());
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zone_space(zone, DELETE_ON_EXIT);
+ ZoneList<int> indices(8, zone);
ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
String* pattern =
String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -3000,8 +2956,8 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
int pattern_len = pattern->length();
int replacement_len = replacement->length();
- FindStringIndicesDispatch(isolate, *subject, pattern, &indices, 0xffffffff,
- zone);
+ FindStringIndicesDispatch(
+ isolate, *subject, pattern, &indices, 0xffffffff, zone);
int matches = indices.length();
if (matches == 0) return *subject;
@@ -3056,10 +3012,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
subject_len);
}
- SetLastMatchInfoNoCaptures(subject,
- last_match_info,
- indices.at(matches - 1),
- indices.at(matches - 1) + pattern_len);
+ int32_t match_indices[] = { indices.at(matches - 1),
+ indices.at(matches - 1) + pattern_len };
+ RegExpImpl::SetLastMatchInfo(last_match_info, subject, 0, match_indices);
return *result;
}
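
SetLastMatchInfoNoCaptures (deleted further down) is gone; its callers now feed the general RegExpImpl::SetLastMatchInfo a two-element int32_t array built on the stack, with capture_count 0 meaning only the overall match bounds get recorded. The shape of the call, as used at the sites in this file:

    // match holds { start, end } pairs: slots 0/1 are the whole match,
    // slots 2i/2i+1 would be capture i (absent here, capture_count == 0).
    int32_t match_indices[] = { match_start, match_end };
    RegExpImpl::SetLastMatchInfo(last_match_info, subject,
                                 0 /* capture_count */, match_indices);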
@@ -3067,138 +3022,101 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
Isolate* isolate,
- String* subject,
- JSRegExp* regexp,
- String* replacement,
- JSArray* last_match_info,
- Zone* zone) {
+ Handle<String> subject,
+ Handle<JSRegExp> regexp,
+ Handle<String> replacement,
+ Handle<JSArray> last_match_info) {
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- HandleScope handles(isolate);
-
- int length = subject->length();
- Handle<String> subject_handle(subject);
- Handle<JSRegExp> regexp_handle(regexp);
- Handle<String> replacement_handle(replacement);
- Handle<JSArray> last_match_info_handle(last_match_info);
- Handle<Object> match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- 0,
- last_match_info_handle,
- isolate->zone());
- if (match.is_null()) {
- return Failure::Exception();
- }
- if (match->IsNull()) {
- return *subject_handle;
- }
-
- int capture_count = regexp_handle->CaptureCount();
+ bool is_global = regexp->GetFlags().is_global();
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
// CompiledReplacement uses zone allocation.
- ZoneScope zonescope(isolate, DELETE_ON_EXIT);
- CompiledReplacement compiled_replacement(isolate->zone());
- compiled_replacement.Compile(replacement_handle,
- capture_count,
- length);
-
- bool is_global = regexp_handle->GetFlags().is_global();
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zonescope(zone, DELETE_ON_EXIT);
+ CompiledReplacement compiled_replacement(zone);
+ bool simple_replace = compiled_replacement.Compile(replacement,
+ capture_count,
+ subject_length);
// Shortcut for simple non-regexp global replacements
if (is_global &&
- regexp_handle->TypeTag() == JSRegExp::ATOM &&
- compiled_replacement.simple_hint()) {
- if (subject_handle->HasOnlyAsciiChars() &&
- replacement_handle->HasOnlyAsciiChars()) {
+ regexp->TypeTag() == JSRegExp::ATOM &&
+ simple_replace) {
+ if (subject->HasOnlyAsciiChars() && replacement->HasOnlyAsciiChars()) {
return StringReplaceAtomRegExpWithString<SeqAsciiString>(
- isolate,
- subject_handle,
- regexp_handle,
- replacement_handle,
- last_match_info_handle,
- zone);
+ isolate, subject, regexp, replacement, last_match_info);
} else {
return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
- isolate,
- subject_handle,
- regexp_handle,
- replacement_handle,
- last_match_info_handle,
- zone);
+ isolate, subject, regexp, replacement, last_match_info);
}
}
+ RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
+ if (global_cache.HasException()) return Failure::Exception();
+
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) {
+ if (global_cache.HasException()) return Failure::Exception();
+ return *subject;
+ }
+
// Guessing the number of parts that the final result string is built
// from. Global regexps can match any number of times, so we guess
// conservatively.
int expected_parts =
(compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1;
ReplacementStringBuilder builder(isolate->heap(),
- subject_handle,
+ subject,
expected_parts);
- // Index of end of last match.
- int prev = 0;
-
  // Number of parts added by compiled replacement plus preceding
  // string and possibly a suffix after the last match. It is possible for
// all components to use two elements when encoded as two smis.
const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
- bool matched = true;
+
+ int prev = 0;
+
do {
- ASSERT(last_match_info_handle->HasFastObjectElements());
- // Increase the capacity of the builder before entering local handle-scope,
- // so its internal buffer can safely allocate a new handle if it grows.
builder.EnsureCapacity(parts_added_per_loop);
- HandleScope loop_scope(isolate);
- int start, end;
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
-
- ASSERT_EQ(capture_count * 2 + 2,
- RegExpImpl::GetLastCaptureCount(match_info_array));
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
- }
+ int start = current_match[0];
+ int end = current_match[1];
if (prev < start) {
builder.AddSubjectSlice(prev, start);
}
- compiled_replacement.Apply(&builder,
- start,
- end,
- last_match_info_handle);
+
+ if (simple_replace) {
+ builder.AddString(replacement);
+ } else {
+ compiled_replacement.Apply(&builder,
+ start,
+ end,
+ current_match);
+ }
prev = end;
// Only continue checking for global regexps.
if (!is_global) break;
- // Continue from where the match ended, unless it was an empty match.
- int next = end;
- if (start == end) {
- next = end + 1;
- if (next > length) break;
- }
+ current_match = global_cache.FetchNext();
+ } while (current_match != NULL);
- match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- next,
- last_match_info_handle,
- isolate->zone());
- if (match.is_null()) {
- return Failure::Exception();
- }
- matched = !match->IsNull();
- } while (matched);
+ if (global_cache.HasException()) return Failure::Exception();
- if (prev < length) {
- builder.AddSubjectSlice(prev, length);
+ if (prev < subject_length) {
+ builder.EnsureCapacity(2);
+ builder.AddSubjectSlice(prev, subject_length);
}
+ RegExpImpl::SetLastMatchInfo(last_match_info,
+ subject,
+ capture_count,
+ global_cache.LastSuccessfulMatch());
+
return *(builder.ToString());
}
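
The structural change in this function is the match loop. Previously each iteration re-entered RegExpImpl::Exec, read the bounds back out of the last-match-info JSArray, and advanced past empty matches by hand. The new RegExpImpl::GlobalCache hides all of that behind an iterator returning raw int32_t bounds. The consumer protocol, as used here and in the functions below:

    RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
    if (global_cache.HasException()) return Failure::Exception();

    int32_t* current_match;
    while ((current_match = global_cache.FetchNext()) != NULL) {
      // current_match[0] / current_match[1] are the match bounds;
      // empty-match advancement happens inside FetchNext().
    }
    // NULL means either exhaustion or an exception; disambiguate afterwards.
    if (global_cache.HasException()) return Failure::Exception();

Note also that last-match-info is now written exactly once, after the loop, from LastSuccessfulMatch(), instead of being clobbered on every iteration.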
@@ -3206,70 +3124,51 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
template <typename ResultSeqString>
MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
Isolate* isolate,
- String* subject,
- JSRegExp* regexp,
- JSArray* last_match_info,
- Zone* zone) {
+ Handle<String> subject,
+ Handle<JSRegExp> regexp,
+ Handle<JSArray> last_match_info) {
ASSERT(subject->IsFlat());
- HandleScope handles(isolate);
-
- Handle<String> subject_handle(subject);
- Handle<JSRegExp> regexp_handle(regexp);
- Handle<JSArray> last_match_info_handle(last_match_info);
+ bool is_global = regexp->GetFlags().is_global();
// Shortcut for simple non-regexp global replacements
- if (regexp_handle->GetFlags().is_global() &&
- regexp_handle->TypeTag() == JSRegExp::ATOM) {
- Handle<String> empty_string_handle(HEAP->empty_string());
- if (subject_handle->HasOnlyAsciiChars()) {
+ if (is_global &&
+ regexp->TypeTag() == JSRegExp::ATOM) {
+ Handle<String> empty_string(HEAP->empty_string());
+ if (subject->HasOnlyAsciiChars()) {
return StringReplaceAtomRegExpWithString<SeqAsciiString>(
isolate,
- subject_handle,
- regexp_handle,
- empty_string_handle,
- last_match_info_handle,
- zone);
+ subject,
+ regexp,
+ empty_string,
+ last_match_info);
} else {
return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
isolate,
- subject_handle,
- regexp_handle,
- empty_string_handle,
- last_match_info_handle,
- zone);
+ subject,
+ regexp,
+ empty_string,
+ last_match_info);
}
}
- Handle<Object> match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- 0,
- last_match_info_handle,
- isolate->zone());
- if (match.is_null()) return Failure::Exception();
- if (match->IsNull()) return *subject_handle;
-
- ASSERT(last_match_info_handle->HasFastObjectElements());
-
- int start, end;
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
+ RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
+ if (global_cache.HasException()) return Failure::Exception();
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) {
+ if (global_cache.HasException()) return Failure::Exception();
+ return *subject;
}
- bool global = regexp_handle->GetFlags().is_global();
+ int start = current_match[0];
+ int end = current_match[1];
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
- if (start == end && !global) return *subject_handle;
+ int new_length = subject_length - (end - start);
+ if (new_length == 0) return isolate->heap()->empty_string();
- int length = subject_handle->length();
- int new_length = length - (end - start);
- if (new_length == 0) {
- return isolate->heap()->empty_string();
- }
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
answer = Handle<ResultSeqString>::cast(
@@ -3279,74 +3178,55 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
isolate->factory()->NewRawTwoByteString(new_length));
}
- // If the regexp isn't global, only match once.
- if (!global) {
- if (start > 0) {
- String::WriteToFlat(*subject_handle,
- answer->GetChars(),
- 0,
- start);
- }
- if (end < length) {
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + start,
- end,
- length);
+ if (!is_global) {
+ RegExpImpl::SetLastMatchInfo(
+ last_match_info, subject, capture_count, current_match);
+ if (start == end) {
+ return *subject;
+ } else {
+ if (start > 0) {
+ String::WriteToFlat(*subject, answer->GetChars(), 0, start);
+ }
+ if (end < subject_length) {
+ String::WriteToFlat(
+ *subject, answer->GetChars() + start, end, subject_length);
+ }
+ return *answer;
}
- return *answer;
}
- int prev = 0; // Index of end of last match.
- int next = 0; // Start of next search (prev unless last match was empty).
+ int prev = 0;
int position = 0;
do {
+ start = current_match[0];
+ end = current_match[1];
if (prev < start) {
// Add substring subject[prev;start] to answer string.
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + position,
- prev,
- start);
+ String::WriteToFlat(
+ *subject, answer->GetChars() + position, prev, start);
position += start - prev;
}
prev = end;
- next = end;
- // Continue from where the match ended, unless it was an empty match.
- if (start == end) {
- next++;
- if (next > length) break;
- }
- match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- next,
- last_match_info_handle,
- isolate->zone());
- if (match.is_null()) return Failure::Exception();
- if (match->IsNull()) break;
-
- ASSERT(last_match_info_handle->HasFastObjectElements());
- HandleScope loop_scope(isolate);
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
- }
- } while (true);
- if (prev < length) {
+ current_match = global_cache.FetchNext();
+ } while (current_match != NULL);
+
+ if (global_cache.HasException()) return Failure::Exception();
+
+ RegExpImpl::SetLastMatchInfo(last_match_info,
+ subject,
+ capture_count,
+ global_cache.LastSuccessfulMatch());
+
+ if (prev < subject_length) {
// Add substring subject[prev;length] to answer string.
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + position,
- prev,
- length);
- position += length - prev;
+ String::WriteToFlat(
+ *subject, answer->GetChars() + position, prev, subject_length);
+ position += subject_length - prev;
}
- if (position == 0) {
- return isolate->heap()->empty_string();
- }
+ if (position == 0) return isolate->heap()->empty_string();
// Shorten string and fill
int string_size = ResultSeqString::SizeFor(position);
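
A sizing subtlety behind the shorten-and-fill step that follows this hunk: new_length is computed from the first match only, so a global replace can delete more characters and the final position ends up at or below new_length. The allocated buffer is therefore an upper bound whose unused tail has to be returned to the heap afterwards (that code sits outside this hunk):

    // Upper-bound allocation, based on the first match only:
    //   new_length = subject_length - (end - start);
    // After the loop, `position` counts the characters actually written,
    // so the live prefix of the buffer is:
    int string_size = ResultSeqString::SizeFor(position);
    // and SizeFor(new_length) - string_size bytes must be handed back.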
@@ -3369,59 +3249,40 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(String, subject, 0);
- if (!subject->IsFlat()) {
- Object* flat_subject;
- { MaybeObject* maybe_flat_subject = subject->TryFlatten();
- if (!maybe_flat_subject->ToObject(&flat_subject)) {
- return maybe_flat_subject;
- }
- }
- subject = String::cast(flat_subject);
- }
+ HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(String, replacement, 2);
- if (!replacement->IsFlat()) {
- Object* flat_replacement;
- { MaybeObject* maybe_flat_replacement = replacement->TryFlatten();
- if (!maybe_flat_replacement->ToObject(&flat_replacement)) {
- return maybe_flat_replacement;
- }
- }
- replacement = String::cast(flat_replacement);
- }
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
+ if (!subject->IsFlat()) subject = FlattenGetString(subject);
+
+ if (!replacement->IsFlat()) replacement = FlattenGetString(replacement);
ASSERT(last_match_info->HasFastObjectElements());
- Zone* zone = isolate->zone();
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
- isolate, subject, regexp, last_match_info, zone);
+ isolate, subject, regexp, last_match_info);
} else {
return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
- isolate, subject, regexp, last_match_info, zone);
+ isolate, subject, regexp, last_match_info);
}
}
- return StringReplaceRegExpWithString(isolate,
- subject,
- regexp,
- replacement,
- last_match_info,
- zone);
+ return StringReplaceRegExpWithString(
+ isolate, subject, regexp, replacement, last_match_info);
}
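
The runtime entry itself now converts its arguments straight to handles and flattens via FlattenGetString, instead of the raw-pointer TryFlatten sequence it used before. This is more than cosmetic: flattening can allocate and thus move raw String* pointers, which is why the old code had to re-extract the result through MaybeObject; handles are updated by the GC automatically, so the helpers above could drop their *_handle re-wrapping and take Handle<> parameters directly.

    // Handle-based flattening (from the hunk above); safe across GC.
    if (!subject->IsFlat()) subject = FlattenGetString(subject);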
-Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
- Handle<String> subject,
- Handle<String> search,
- Handle<String> replace,
- bool* found,
- int recursion_limit) {
+Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
+ Handle<String> subject,
+ Handle<String> search,
+ Handle<String> replace,
+ bool* found,
+ int recursion_limit) {
if (recursion_limit == 0) return Handle<String>::null();
if (subject->IsConsString()) {
ConsString* cons = ConsString::cast(*subject);
@@ -3449,7 +3310,7 @@ Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
return subject;
} else {
- int index = StringMatch(isolate, subject, search, 0);
+ int index = Runtime::StringMatch(isolate, subject, search, 0);
if (index == -1) return subject;
*found = true;
Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
@@ -3472,20 +3333,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
// retry with a flattened subject string.
const int kRecursionLimit = 0x1000;
bool found = false;
- Handle<String> result =
- Runtime::StringReplaceOneCharWithString(isolate,
- subject,
- search,
- replace,
- &found,
- kRecursionLimit);
+ Handle<String> result = StringReplaceOneCharWithString(isolate,
+ subject,
+ search,
+ replace,
+ &found,
+ kRecursionLimit);
if (!result.is_null()) return *result;
- return *Runtime::StringReplaceOneCharWithString(isolate,
- FlattenGetString(subject),
- search,
- replace,
- &found,
- kRecursionLimit);
+ return *StringReplaceOneCharWithString(isolate,
+ FlattenGetString(subject),
+ search,
+ replace,
+ &found,
+ kRecursionLimit);
}
@@ -3716,8 +3576,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
} else {
CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
- start = FastD2I(from_number);
- end = FastD2I(to_number);
+ start = FastD2IChecked(from_number);
+ end = FastD2IChecked(to_number);
}
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
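
FastD2I assumes its argument already fits in an int; the switch to FastD2IChecked here and in the Number* runtime functions below makes out-of-range and NaN inputs saturate instead of invoking undefined behavior, leaving the RUNTIME_ASSERT range checks to reject them cleanly. A sketch of the checked variant, offered as an assumption since its definition lives in conversions.h, not in this diff:

    inline int FastD2IChecked(double x) {
      if (!(x >= INT_MIN)) return INT_MIN;  // negated test also catches NaN
      if (x > INT_MAX) return INT_MAX;
      return static_cast<int>(x);
    }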
@@ -3735,48 +3595,45 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
HandleScope handles;
- Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info,
- isolate->zone());
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ if (global_cache.HasException()) return Failure::Exception();
- if (match.is_null()) {
- return Failure::Exception();
+ int capture_count = regexp->CaptureCount();
+
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zone_space(zone, DELETE_ON_EXIT);
+ ZoneList<int> offsets(8, zone);
+
+ while (true) {
+ int32_t* match = global_cache.FetchNext();
+ if (match == NULL) break;
+ offsets.Add(match[0], zone); // start
+ offsets.Add(match[1], zone); // end
}
- if (match->IsNull()) {
+
+ if (global_cache.HasException()) return Failure::Exception();
+
+ if (offsets.length() == 0) {
+ // Not a single match.
return isolate->heap()->null_value();
}
- int length = subject->length();
- Zone* zone = isolate->zone();
- ZoneScope zone_space(isolate, DELETE_ON_EXIT);
- ZoneList<int> offsets(8, zone);
- int start;
- int end;
- do {
- {
- AssertNoAllocation no_alloc;
- FixedArray* elements = FixedArray::cast(regexp_info->elements());
- start = Smi::cast(elements->get(RegExpImpl::kFirstCapture))->value();
- end = Smi::cast(elements->get(RegExpImpl::kFirstCapture + 1))->value();
- }
- offsets.Add(start, zone);
- offsets.Add(end, zone);
- if (start == end) if (++end > length) break;
- match = RegExpImpl::Exec(regexp, subject, end, regexp_info,
- isolate->zone());
- if (match.is_null()) {
- return Failure::Exception();
- }
- } while (!match->IsNull());
+ RegExpImpl::SetLastMatchInfo(regexp_info,
+ subject,
+ capture_count,
+ global_cache.LastSuccessfulMatch());
+
int matches = offsets.length() / 2;
Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
- Handle<String> substring = isolate->factory()->
- NewSubString(subject, offsets.at(0), offsets.at(1));
+ Handle<String> substring =
+ isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
elements->set(0, *substring);
- for (int i = 1; i < matches ; i++) {
+ for (int i = 1; i < matches; i++) {
+ HandleScope temp_scope(isolate);
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
- Handle<String> substring = isolate->factory()->
- NewProperSubString(subject, from, to);
+ Handle<String> substring =
+ isolate->factory()->NewProperSubString(subject, from, to);
elements->set(i, *substring);
}
Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
@@ -3785,298 +3642,154 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
}
-static bool SearchStringMultiple(Isolate* isolate,
- Handle<String> subject,
- Handle<String> pattern,
- Handle<JSArray> last_match_info,
- FixedArrayBuilder* builder) {
- ASSERT(subject->IsFlat());
- ASSERT(pattern->IsFlat());
-
- // Treating as if a previous match was before first character.
- int match_pos = -pattern->length();
-
- for (;;) { // Break when search complete.
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- AssertNoAllocation no_gc;
- String::FlatContent subject_content = subject->GetFlatContent();
- String::FlatContent pattern_content = pattern->GetFlatContent();
- if (subject_content.IsAscii()) {
- Vector<const char> subject_vector = subject_content.ToAsciiVector();
- if (pattern_content.IsAscii()) {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern_content.ToAsciiVector(),
- *pattern,
- builder,
- &match_pos)) break;
- } else {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- *pattern,
- builder,
- &match_pos)) break;
- }
- } else {
- Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
- if (pattern_content.IsAscii()) {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern_content.ToAsciiVector(),
- *pattern,
- builder,
- &match_pos)) break;
- } else {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- *pattern,
- builder,
- &match_pos)) break;
- }
- }
- }
-
- if (match_pos >= 0) {
- SetLastMatchInfoNoCaptures(subject,
- last_match_info,
- match_pos,
- match_pos + pattern->length());
- return true;
- }
- return false; // No matches at all.
-}
-
-
-static int SearchRegExpNoCaptureMultiple(
+// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
+// separate last match info. See comment on that function.
+template<bool has_capture>
+static MaybeObject* SearchRegExpMultiple(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_array,
- FixedArrayBuilder* builder) {
+ Handle<JSArray> result_array) {
ASSERT(subject->IsFlat());
- ASSERT(regexp->CaptureCount() == 0);
- int match_start = -1;
- int match_end = 0;
- int pos = 0;
- int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject,
- isolate->zone());
- if (registers_per_match < 0) return RegExpImpl::RE_EXCEPTION;
-
- int max_matches;
- int num_registers = RegExpImpl::GlobalOffsetsVectorSize(regexp,
- registers_per_match,
- &max_matches);
- OffsetsVector registers(num_registers, isolate);
- Vector<int32_t> register_vector(registers.vector(), registers.length());
- int subject_length = subject->length();
- bool first = true;
- for (;;) { // Break on failure, return on exception.
- int num_matches = RegExpImpl::IrregexpExecRaw(regexp,
- subject,
- pos,
- register_vector,
- isolate->zone());
- if (num_matches > 0) {
- for (int match_index = 0; match_index < num_matches; match_index++) {
- int32_t* current_match = &register_vector[match_index * 2];
- match_start = current_match[0];
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- match_start);
- }
- match_end = current_match[1];
- HandleScope loop_scope(isolate);
- if (!first) {
- builder->Add(*isolate->factory()->NewProperSubString(subject,
- match_start,
- match_end));
- } else {
- builder->Add(*isolate->factory()->NewSubString(subject,
- match_start,
- match_end));
- first = false;
- }
- }
+ ASSERT_NE(has_capture, regexp->CaptureCount() == 0);
- // If we did not get the maximum number of matches, we can stop here
- // since there are no matches left.
- if (num_matches < max_matches) break;
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
- if (match_start != match_end) {
- pos = match_end;
- } else {
- pos = match_end + 1;
- if (pos > subject_length) break;
- }
- } else if (num_matches == 0) {
- break;
- } else {
- ASSERT_EQ(num_matches, RegExpImpl::RE_EXCEPTION);
- return RegExpImpl::RE_EXCEPTION;
- }
- }
+ static const int kMinLengthToCache = 0x1000;
- if (match_start >= 0) {
- if (match_end < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- subject_length);
+ if (subject_length > kMinLengthToCache) {
+ Handle<Object> cached_answer(RegExpResultsCache::Lookup(
+ isolate->heap(),
+ *subject,
+ regexp->data(),
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES));
+ if (*cached_answer != Smi::FromInt(0)) {
+ Handle<FixedArray> cached_fixed_array =
+ Handle<FixedArray>(FixedArray::cast(*cached_answer));
+ // The cache FixedArray is a COW-array and can therefore be reused.
+ isolate->factory()->SetContent(result_array, cached_fixed_array);
+ // The actual length of the result array is stored in the last element of
+ // the backing store (the backing FixedArray may have a larger capacity).
+ Object* cached_fixed_array_last_element =
+ cached_fixed_array->get(cached_fixed_array->length() - 1);
+ Smi* js_array_length = Smi::cast(cached_fixed_array_last_element);
+ result_array->set_length(js_array_length);
+ RegExpImpl::SetLastMatchInfo(
+ last_match_array, subject, capture_count, NULL);
+ return *result_array;
}
- SetLastMatchInfoNoCaptures(subject,
- last_match_array,
- match_start,
- match_end);
- return RegExpImpl::RE_SUCCESS;
- } else {
- return RegExpImpl::RE_FAILURE; // No matches at all.
}
-}
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ if (global_cache.HasException()) return Failure::Exception();
-// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
-// separate last match info. See comment on that function.
-static int SearchRegExpMultiple(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_array,
- FixedArrayBuilder* builder) {
-
- ASSERT(subject->IsFlat());
- int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject,
- isolate->zone());
- if (registers_per_match < 0) return RegExpImpl::RE_EXCEPTION;
-
- int max_matches;
- int num_registers = RegExpImpl::GlobalOffsetsVectorSize(regexp,
- registers_per_match,
- &max_matches);
- OffsetsVector registers(num_registers, isolate);
- Vector<int32_t> register_vector(registers.vector(), registers.length());
-
- int num_matches = RegExpImpl::IrregexpExecRaw(regexp,
- subject,
- 0,
- register_vector,
- isolate->zone());
+ Handle<FixedArray> result_elements;
+ if (result_array->HasFastObjectElements()) {
+ result_elements =
+ Handle<FixedArray>(FixedArray::cast(result_array->elements()));
+ }
+ if (result_elements.is_null() || result_elements->length() < 16) {
+ result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
+ }
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
+ FixedArrayBuilder builder(result_elements);
-  // Position to search from.
-  int pos = 0;
-  // End of previous match. Differs from pos if match was empty.
+  // Bounds of the last match seen; match_start stays -1 until the first
+  // match, so a negative value after the loop means no match at all.
+  int match_start = -1;
int match_end = 0;
bool first = true;
- if (num_matches > 0) {
- do {
- int match_start = 0;
- for (int match_index = 0; match_index < num_matches; match_index++) {
- int32_t* current_match =
- &register_vector[match_index * registers_per_match];
- match_start = current_match[0];
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- match_start);
- }
- match_end = current_match[1];
-
- {
- // Avoid accumulating new handles inside loop.
- HandleScope temp_scope(isolate);
- // Arguments array to replace function is match, captures, index and
- // subject, i.e., 3 + capture count in total.
- Handle<FixedArray> elements =
- isolate->factory()->NewFixedArray(3 + capture_count);
- Handle<String> match;
- if (!first) {
- match = isolate->factory()->NewProperSubString(subject,
- match_start,
- match_end);
- } else {
- match = isolate->factory()->NewSubString(subject,
- match_start,
- match_end);
- }
- elements->set(0, *match);
- for (int i = 1; i <= capture_count; i++) {
- int start = current_match[i * 2];
- if (start >= 0) {
- int end = current_match[i * 2 + 1];
- ASSERT(start <= end);
- Handle<String> substring;
- if (!first) {
- substring =
- isolate->factory()->NewProperSubString(subject, start, end);
- } else {
- substring =
- isolate->factory()->NewSubString(subject, start, end);
- }
- elements->set(i, *substring);
- } else {
- ASSERT(current_match[i * 2 + 1] < 0);
- elements->set(i, isolate->heap()->undefined_value());
- }
- }
- elements->set(capture_count + 1, Smi::FromInt(match_start));
- elements->set(capture_count + 2, *subject);
- builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
- }
+  // A subject slice encodes as two smis, so a single match can add at
+  // most five builder entries: slice before, the match itself, slice after.
+  static const int kMaxBuilderEntriesPerRegExpMatch = 5;
+
+ while (true) {
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) break;
+ match_start = current_match[0];
+ builder.EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ if (match_end < match_start) {
+ ReplacementStringBuilder::AddSubjectSlice(&builder,
+ match_end,
+ match_start);
+ }
+ match_end = current_match[1];
+ {
+ // Avoid accumulating new handles inside loop.
+ HandleScope temp_scope(isolate);
+ Handle<String> match;
+ if (!first) {
+ match = isolate->factory()->NewProperSubString(subject,
+ match_start,
+ match_end);
+ } else {
+ match = isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end);
first = false;
}
- // If we did not get the maximum number of matches, we can stop here
- // since there are no matches left.
- if (num_matches < max_matches) break;
-
- if (match_end > match_start) {
- pos = match_end;
- } else {
- pos = match_end + 1;
- if (pos > subject_length) {
- break;
+ if (has_capture) {
+ // Arguments array to replace function is match, captures, index and
+ // subject, i.e., 3 + capture count in total.
+ Handle<FixedArray> elements =
+ isolate->factory()->NewFixedArray(3 + capture_count);
+
+ elements->set(0, *match);
+ for (int i = 1; i <= capture_count; i++) {
+ int start = current_match[i * 2];
+ if (start >= 0) {
+ int end = current_match[i * 2 + 1];
+ ASSERT(start <= end);
+ Handle<String> substring =
+ isolate->factory()->NewSubString(subject, start, end);
+ elements->set(i, *substring);
+ } else {
+ ASSERT(current_match[i * 2 + 1] < 0);
+ elements->set(i, isolate->heap()->undefined_value());
+ }
}
+ elements->set(capture_count + 1, Smi::FromInt(match_start));
+ elements->set(capture_count + 2, *subject);
+ builder.Add(*isolate->factory()->NewJSArrayWithElements(elements));
+ } else {
+ builder.Add(*match);
}
+ }
+ }
- num_matches = RegExpImpl::IrregexpExecRaw(regexp,
- subject,
- pos,
- register_vector,
- isolate->zone());
- } while (num_matches > 0);
-
- if (num_matches != RegExpImpl::RE_EXCEPTION) {
- // Finished matching, with at least one match.
- if (match_end < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- subject_length);
- }
+ if (global_cache.HasException()) return Failure::Exception();
- int last_match_capture_count = (capture_count + 1) * 2;
- int last_match_array_size =
- last_match_capture_count + RegExpImpl::kLastMatchOverhead;
- last_match_array->EnsureSize(last_match_array_size);
- AssertNoAllocation no_gc;
- FixedArray* elements = FixedArray::cast(last_match_array->elements());
- // We have to set this even though the rest of the last match array is
- // ignored.
- RegExpImpl::SetLastCaptureCount(elements, last_match_capture_count);
- // These are also read without consulting the override.
- RegExpImpl::SetLastSubject(elements, *subject);
- RegExpImpl::SetLastInput(elements, *subject);
- return RegExpImpl::RE_SUCCESS;
+ if (match_start >= 0) {
+ // Finished matching, with at least one match.
+ if (match_end < subject_length) {
+ ReplacementStringBuilder::AddSubjectSlice(&builder,
+ match_end,
+ subject_length);
}
+
+ RegExpImpl::SetLastMatchInfo(
+ last_match_array, subject, capture_count, NULL);
+
+ if (subject_length > kMinLengthToCache) {
+ // Store the length of the result array into the last element of the
+ // backing FixedArray.
+ builder.EnsureCapacity(1);
+ Handle<FixedArray> fixed_array = builder.array();
+ fixed_array->set(fixed_array->length() - 1,
+ Smi::FromInt(builder.length()));
+ // Cache the result and turn the FixedArray into a COW array.
+ RegExpResultsCache::Enter(isolate->heap(),
+ *subject,
+ regexp->data(),
+ *fixed_array,
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
+ }
+ return *builder.ToJSArray(result_array);
+ } else {
+ return isolate->heap()->null_value(); // No matches at all.
}
- // No matches at all, return failure or exception result directly.
- return num_matches;
}
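
SearchRegExpMultiple now memoizes entire result arrays for long subjects. Two conventions make the cache work: the builder's backing FixedArray may be larger than the logical result, so the true JSArray length is stashed in the FixedArray's final slot; and the array is entered into RegExpResultsCache as copy-on-write, so a later hit can alias it without copying. The hit path, condensed from the hunk above:

    static const int kMinLengthToCache = 0x1000;  // cache only for subjects > 4K chars
    // On a hit: reuse the COW backing store and restore the real length
    // from its last element.
    isolate->factory()->SetContent(result_array, cached_fixed_array);
    Object* last = cached_fixed_array->get(cached_fixed_array->length() - 1);
    result_array->set_length(Smi::cast(last));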
@@ -4095,47 +3808,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
ASSERT(last_match_info->HasFastObjectElements());
ASSERT(regexp->GetFlags().is_global());
- Handle<FixedArray> result_elements;
- if (result_array->HasFastObjectElements()) {
- result_elements =
- Handle<FixedArray>(FixedArray::cast(result_array->elements()));
- }
- if (result_elements.is_null() || result_elements->length() < 16) {
- result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
- }
- FixedArrayBuilder builder(result_elements);
-
- if (regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> pattern(
- String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
- ASSERT(pattern->IsFlat());
- if (SearchStringMultiple(isolate, subject, pattern,
- last_match_info, &builder)) {
- return *builder.ToJSArray(result_array);
- }
- return isolate->heap()->null_value();
- }
-
- ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
- int result;
if (regexp->CaptureCount() == 0) {
- result = SearchRegExpNoCaptureMultiple(isolate,
- subject,
- regexp,
- last_match_info,
- &builder);
+ return SearchRegExpMultiple<false>(
+ isolate, subject, regexp, last_match_info, result_array);
} else {
- result = SearchRegExpMultiple(isolate,
- subject,
- regexp,
- last_match_info,
- &builder);
+ return SearchRegExpMultiple<true>(
+ isolate, subject, regexp, last_match_info, result_array);
}
- if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array);
- if (result == RegExpImpl::RE_FAILURE) return isolate->heap()->null_value();
- ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
- return Failure::Exception();
}
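
Dispatch over the three former helpers (ATOM string search, no-capture regexp, capturing regexp) collapses into one function with a compile-time has_capture flag; the ASSERT_NE in SearchRegExpMultiple pins each instantiation to the matching regexp kind. Inside the loop the flag selects the per-match payload, condensed from the hunk above:

    if (has_capture) {            // compile-time constant: the branch folds away
      // Build [match, captures..., index, subject] for the replace callback.
      builder.Add(*isolate->factory()->NewJSArrayWithElements(elements));
    } else {
      builder.Add(*match);        // plain match string
    }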
@@ -4190,7 +3870,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2I(f_number);
+ int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 0);
char* str = DoubleToFixedCString(value, f);
MaybeObject* res =
@@ -4215,7 +3895,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2I(f_number);
+ int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
char* str = DoubleToExponentialCString(value, f);
MaybeObject* res =
@@ -4240,7 +3920,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2I(f_number);
+ int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
char* str = DoubleToPrecisionCString(value, f);
MaybeObject* res =
@@ -4376,7 +4056,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// appropriate.
LookupResult result(isolate);
receiver->LocalLookup(key, &result);
- if (result.IsFound() && result.type() == FIELD) {
+ if (result.IsField()) {
int offset = result.GetFieldIndex();
keyed_lookup_cache->Update(receiver_map, key, offset);
return receiver->FastPropertyAt(offset);
@@ -4488,7 +4168,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
js_object->LocalLookupRealNamedProperty(*name, &result);
// Special case for callback properties.
- if (result.IsFound() && result.type() == CALLBACKS) {
+ if (result.IsPropertyCallbacks()) {
Object* callback = result.GetCallbackObject();
// To be compatible with Safari we do not change the value on API objects
// in Object.defineProperty(). Firefox disagrees here, and actually changes
@@ -4515,8 +4195,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// map. The current version of SetObjectProperty does not handle attributes
// correctly in the case where a property is a field and is reset with
// new attributes.
- if (result.IsProperty() &&
- (attr != result.GetAttributes() || result.type() == CALLBACKS)) {
+ if (result.IsFound() &&
+ (attr != result.GetAttributes() || result.IsPropertyCallbacks())) {
// New attributes - normalize to avoid writing to instance descriptor
if (js_object->IsJSGlobalProxy()) {
// Since the result is a property, the prototype will exist so
@@ -4864,14 +4544,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
if (!debug->IsStepping()) return isolate->heap()->undefined_value();
- CONVERT_ARG_CHECKED(Object, callback, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
HandleScope scope(isolate);
- Handle<SharedFunctionInfo> shared_info(JSFunction::cast(callback)->shared());
// When leaving the callback, step out has been activated, but not performed
// if we do not leave the builtin. To be able to step into the callback
// again, we need to clear the step out at this point.
debug->ClearStepOut();
- debug->FloodWithOneShot(shared_info);
+ debug->FloodWithOneShot(callback);
#endif // ENABLE_DEBUGGER_SUPPORT
return isolate->heap()->undefined_value();
}
@@ -6453,11 +6132,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
RUNTIME_ASSERT(pattern_length > 0);
if (limit == 0xffffffffu) {
- Handle<Object> cached_answer(StringSplitCache::Lookup(
- isolate->heap()->string_split_cache(),
+ Handle<Object> cached_answer(RegExpResultsCache::Lookup(
+ isolate->heap(),
*subject,
- *pattern));
+ *pattern,
+ RegExpResultsCache::STRING_SPLIT_SUBSTRINGS));
if (*cached_answer != Smi::FromInt(0)) {
+ // The cache FixedArray is a COW-array and can therefore be reused.
Handle<JSArray> result =
isolate->factory()->NewJSArrayWithElements(
Handle<FixedArray>::cast(cached_answer));
@@ -6473,8 +6154,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
static const int kMaxInitialListCapacity = 16;
- Zone* zone = isolate->zone();
- ZoneScope scope(isolate, DELETE_ON_EXIT);
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope scope(zone, DELETE_ON_EXIT);
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
@@ -6517,11 +6198,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
if (limit == 0xffffffffu) {
if (result->HasFastObjectElements()) {
- StringSplitCache::Enter(isolate->heap(),
- isolate->heap()->string_split_cache(),
- *subject,
- *pattern,
- *elements);
+ RegExpResultsCache::Enter(isolate->heap(),
+ *subject,
+ *pattern,
+ *elements,
+ RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
}
}
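
The former StringSplitCache has been generalized into a RegExpResultsCache keyed by an extra results type, letting String.prototype.split and global-regexp exec results share one mechanism. Its interface, inferred from the call sites in these hunks:

    class RegExpResultsCache {
     public:
      enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
      // Returns Smi::FromInt(0) on a miss; on a hit, a COW FixedArray that
      // callers may wrap directly in a fresh JSArray, as above.
      static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
                            ResultsCacheType type);
      static void Enter(Heap* heap, String* key_string, Object* key_pattern,
                        FixedArray* value_array, ResultsCacheType type);
    };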
@@ -7787,8 +7468,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
isolate->heap()->non_strict_arguments_elements_map());
Handle<Map> old_map(result->map());
- Handle<Map> new_map =
- isolate->factory()->CopyMapDropTransitions(old_map);
+ Handle<Map> new_map = isolate->factory()->CopyMap(old_map);
new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_map(*new_map);
@@ -8145,17 +7825,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
// instead of a new JSFunction object. This way, errors are
// reported the same way whether or not 'Function' is called
// using 'new'.
- return isolate->context()->global();
+ return isolate->context()->global_object();
}
}
// The function should be compiled for the optimization hints to be
- // available. We cannot use EnsureCompiled because that forces a
- // compilation through the shared function info which makes it
- // impossible for us to optimize.
- if (!function->is_compiled()) {
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
- }
+ // available.
+ JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (!function->has_initial_map() &&
@@ -8247,7 +7923,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
function->shared()->code()->set_profiler_ticks(0);
if (JSFunction::CompileOptimized(function,
- AstNode::kNoNumber,
+ BailoutId::None(),
CLEAR_EXCEPTION)) {
return function->code();
}
@@ -8261,6 +7937,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
+ HandleScope handle_scope(isolate);
+ ASSERT(FLAG_parallel_recompilation);
+ Compiler::RecompileParallel(args.at<JSFunction>(0));
+ return *isolate->factory()->undefined_value();
+}
+
+
class ActivationsFinder : public ThreadVisitor {
public:
explicit ActivationsFinder(JSFunction* function)
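
Runtime_ParallelRecompile is the runtime entry for the new FLAG_parallel_recompilation mode: rather than optimizing on the main thread, the function is handed to Compiler::RecompileParallel, which queues it for a background optimizing-compiler thread. Under V8's usual flag naming this would be switched on with something like:

    d8 --parallel-recompilation script.js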
@@ -8349,11 +8033,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
return isolate->heap()->undefined_value();
}
- // Find other optimized activations of the function.
+ // Find other optimized activations of the function or functions that
+ // share the same optimized code.
bool has_other_activations = false;
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == *function) {
+ JSFunction* other_function = JSFunction::cast(frame->function());
+ if (frame->is_optimized() && other_function->code() == function->code()) {
has_other_activations = true;
break;
}
@@ -8376,6 +8062,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
} else {
Deoptimizer::DeoptimizeFunction(*function);
}
+ // Flush optimized code cache for this function.
+ function->shared()->ClearOptimizedCodeMap();
+
return isolate->heap()->undefined_value();
}
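
The reworked activation scan reflects the new optimized code map: closures created from the same function literal share a SharedFunctionInfo and may now share one optimized Code object, so the deoptimizer must treat any frame running that code as an activation, and must flush the shared cache afterwards. A JavaScript shape that can exercise this:

    // f and g come from one literal; with the optimized code map they can
    // end up running the very same optimized Code object.
    function make() { return function(a, b) { return a + b; }; }
    var f = make();
    var g = make();
    // If g deoptimizes while f is also on the stack in that shared code,
    // f's activation has to be found and deoptimized too.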
@@ -8452,6 +8141,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
return Smi::FromInt(4); // 4 == "never".
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ if (FLAG_parallel_recompilation) {
+ if (function->IsMarkedForLazyRecompilation()) {
+ return Smi::FromInt(5);
+ }
+ }
if (FLAG_always_opt) {
// We may have always-opt on, but that is best-effort rather than a real
// promise, so we still say "no" if the function is not optimized.
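
Status 5 is new and reports a function parked in the parallel-recompilation queue. With --allow-natives-syntax the codes are directly observable (1 = optimized, 2 = not optimized, 3 = always opt, 4 = never opt, matching the returns in this function):

    function f(x) { return x + 1; }
    f(1); f(2);
    %OptimizeFunctionOnNextCall(f);
    f(3);
    print(%GetOptimizationStatus(f));  // 1 once f is optimized.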
@@ -8498,7 +8192,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
}
}
- int ast_id = AstNode::kNoNumber;
+ BailoutId ast_id = BailoutId::None();
if (succeeded) {
// The top JS function is this one, the PC is somewhere in the
// unoptimized code.
@@ -8519,14 +8213,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
// Table entries are (AST id, pc offset) pairs.
uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
if (pc_offset == target_pc_offset) {
- ast_id = static_cast<int>(Memory::uint32_at(table_cursor));
+ ast_id = BailoutId(static_cast<int>(Memory::uint32_at(table_cursor)));
break;
}
table_cursor += 2 * kIntSize;
}
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(!ast_id.IsNone());
if (FLAG_trace_osr) {
- PrintF("[replacing on-stack at AST id %d in ", ast_id);
+ PrintF("[replacing on-stack at AST id %d in ", ast_id.ToInt());
function->PrintName();
PrintF("]\n");
}
@@ -8543,7 +8237,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
PrintF("[on-stack replacement offset %d in optimized code]\n",
data->OsrPcOffset()->value());
}
- ASSERT(data->OsrAstId()->value() == ast_id);
+ ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
} else {
// We may never generate the desired OSR entry if we emit an
// early deoptimize.
@@ -8582,7 +8276,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
// frame to an optimized one.
if (succeeded) {
ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- return Smi::FromInt(ast_id);
+ return Smi::FromInt(ast_id.ToInt());
} else {
if (function->IsMarkedForLazyRecompilation()) {
function->ReplaceCode(function->shared()->code());
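
The raw int ast_id plumbing in the OSR path is replaced by BailoutId, a typed wrapper that makes the no-id sentinel explicit instead of reusing AstNode::kNoNumber. Judging from the conversions used above, its shape is roughly:

    class BailoutId {
     public:
      explicit BailoutId(int id) : id_(id) {}
      static BailoutId None() { return BailoutId(-1); }
      bool IsNone() const { return id_ == -1; }
      int ToInt() const { return id_; }
      bool operator==(const BailoutId& other) const { return id_ == other.id_; }

     private:
      int id_;
    };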
@@ -8691,19 +8385,38 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 1);
+ Context* result;
+ MaybeObject* maybe_result =
+ isolate->heap()->AllocateGlobalContext(function, scope_info);
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ ASSERT(function->context() == isolate->context());
+ ASSERT(function->context()->global_object() == result->global_object());
+ isolate->set_context(result);
+ result->global_object()->set_global_context(result);
+
+ return result; // non-failure
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
int length = function->shared()->scope_info()->ContextLength();
- Object* result;
- { MaybeObject* maybe_result =
- isolate->heap()->AllocateFunctionContext(length, function);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Context* result;
+ MaybeObject* maybe_result =
+ isolate->heap()->AllocateFunctionContext(length, function);
+ if (!maybe_result->To(&result)) return maybe_result;
- isolate->set_context(Context::cast(result));
+ isolate->set_context(result);
return result; // non-failure
}
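
These two context hunks belong to one renaming that runs through the whole upgrade: what this codebase used to call the global context, the context holding the built-ins, is now the native context (native_context(), IsNativeContext(), and global() becomes global_object()), while the name "global context" is reassigned to the new per-global lexical context that Runtime_NewGlobalContext allocates and installs on the global object.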
@@ -8736,8 +8449,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
if (args[1]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
- // gotten from the global context.
- function = isolate->context()->global_context()->closure();
+ // gotten from the native context.
+ function = isolate->context()->native_context()->closure();
} else {
function = JSFunction::cast(args[1]);
}
@@ -8762,8 +8475,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
if (args[2]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
- // gotten from the global context.
- function = isolate->context()->global_context()->closure();
+ // gotten from the native context.
+ function = isolate->context()->native_context()->closure();
} else {
function = JSFunction::cast(args[2]);
}
@@ -8787,8 +8500,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
if (args[1]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
- // gotten from the global context.
- function = isolate->context()->global_context()->closure();
+ // gotten from the native context.
+ function = isolate->context()->native_context()->closure();
} else {
function = JSFunction::cast(args[1]);
}
@@ -8803,19 +8516,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
+ ASSERT(args.length() == 1);
+ Object* obj = args[0];
+ return isolate->heap()->ToBoolean(obj->IsJSModule());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 1);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 0);
- Context* context;
- MaybeObject* maybe_context =
- isolate->heap()->AllocateModuleContext(isolate->context(),
- scope_info);
- if (!maybe_context->To(&context)) return maybe_context;
- // Also initialize the context slot of the instance object.
- instance->set_context(context);
+ Context* context = Context::cast(instance->context());
+ Context* previous = isolate->context();
+ ASSERT(context->IsModuleContext());
+ // Initialize the context links.
+ context->set_previous(previous);
+ context->set_closure(previous->closure());
+ context->set_global_object(previous->global_object());
isolate->set_context(context);
return context;
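
Module contexts are no longer created when the module body is entered: Scope::AllocateModules, added to scopes.cc later in this diff, builds them ahead of time, so the runtime call drops from two arguments to one and merely links the pre-built context into the current chain. In the era's experimental syntax (behind --harmony-modules), entering M's body takes this path:

    module M {
      export let x = 1;
    }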
@@ -8901,7 +8620,7 @@ static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
Context* top = isolate->context();
// Get the context extension function.
JSFunction* context_extension_function =
- top->global_context()->context_extension_function();
+ top->native_context()->context_extension_function();
// If the holder isn't a context extension object, we just return it
// as the receiver. This allows arguments objects to be used as
// receivers, but only if they are put in the context scope chain
@@ -9080,7 +8799,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
}
// In non-strict mode, the property is added to the global object.
attributes = NONE;
- object = Handle<JSObject>(isolate->context()->global());
+ object = Handle<JSObject>(isolate->context()->global_object());
}
// Set the property if it's not read only or doesn't yet exist.
@@ -9134,6 +8853,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "not_date_object", HandleVector<Object>(NULL, 0)));
+}
+
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
ASSERT(args.length() == 0);
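
Runtime_ThrowNotDateError gives the date builtins a single runtime entry for their receiver brand check; the not_date_object message is defined in messages.js. From JavaScript the failure surfaces as, e.g.:

    Date.prototype.getTime.call({});  // TypeError: this is not a Date object.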
@@ -9323,7 +9051,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
ASSERT_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Zone* zone = isolate->zone();
+ Zone* zone = isolate->runtime_zone();
source = Handle<String>(source->TryFlattenGetString());
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
@@ -9363,10 +9091,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
ASSERT_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- // Extract global context.
- Handle<Context> context(isolate->context()->global_context());
+ // Extract native context.
+ Handle<Context> context(isolate->context()->native_context());
- // Check if global context allows code generation from
+ // Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
if (context->allow_code_gen_from_strings()->IsFalse() &&
!CodeGenerationFromStringsAllowed(isolate, context)) {
@@ -9374,7 +9102,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
"code_gen_from_strings", HandleVector<Object>(NULL, 0)));
}
- // Compile source string in the global context.
+ // Compile source string in the native context.
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition);
if (shared.is_null()) return Failure::Exception();
@@ -9392,12 +9120,12 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
LanguageMode language_mode,
int scope_position) {
Handle<Context> context = Handle<Context>(isolate->context());
- Handle<Context> global_context = Handle<Context>(context->global_context());
+ Handle<Context> native_context = Handle<Context>(context->native_context());
- // Check if global context allows code generation from
+ // Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
- if (global_context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, global_context)) {
+ if (native_context->allow_code_gen_from_strings()->IsFalse() &&
+ !CodeGenerationFromStringsAllowed(isolate, native_context)) {
isolate->Throw(*isolate->factory()->NewError(
"code_gen_from_strings", HandleVector<Object>(NULL, 0)));
return MakePair(Failure::Exception(), NULL);
@@ -9408,7 +9136,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
Handle<Context>(isolate->context()),
- context->IsGlobalContext(),
+ context->IsNativeContext(),
language_mode,
scope_position);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
@@ -9430,7 +9158,7 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
// (And even if it is, but the first argument isn't a string, just let
// execution default to an indirect call to eval, which will also return
// the first argument without doing anything).
- if (*callee != isolate->global_context()->global_eval_fun() ||
+ if (*callee != isolate->native_context()->global_eval_fun() ||
!args[1]->IsString()) {
return MakePair(*callee, isolate->heap()->the_hole_value());
}
@@ -10203,11 +9931,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_CHECKED(String, name, 1);
CONVERT_SMI_ARG_CHECKED(flag, 2);
AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
- return obj->LookupAccessor(name, component);
+ if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
+ return JSObject::cast(receiver)->LookupAccessor(name, component);
}
@@ -10298,11 +10027,10 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
}
}
case INTERCEPTOR:
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
+ case TRANSITION:
return heap->undefined_value();
case HANDLER:
+ case NONEXISTENT:
UNREACHABLE();
return heap->undefined_value();
}
@@ -10335,7 +10063,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
// entered (if the debugger is entered). The reason for switching context here
// is that for some property lookups (accessors and interceptors) callbacks
// into the embedding application can occur, and the embedding application
- // could have the assumption that its own global context is the current
+ // could have the assumption that its own native context is the current
// context and not some internal debugger context.
SaveContext save(isolate);
if (isolate->debug()->InDebugger()) {
@@ -10374,13 +10102,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
for (int i = 0; i < length; i++) {
LookupResult result(isolate);
jsproto->LocalLookup(*name, &result);
- if (result.IsProperty()) {
+ if (result.IsFound()) {
// LookupResult is not GC safe as it holds raw object pointers.
// GC can happen later in this code so put the required fields into
// local variables using handles when required for later use.
- PropertyType result_type = result.type();
Handle<Object> result_callback_obj;
- if (result_type == CALLBACKS) {
+ if (result.IsPropertyCallbacks()) {
result_callback_obj = Handle<Object>(result.GetCallbackObject(),
isolate);
}
@@ -10398,7 +10125,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
// If the callback object is a fixed array then it contains JavaScript
// getter and/or setter.
- bool hasJavaScriptAccessors = result_type == CALLBACKS &&
+ bool hasJavaScriptAccessors = result.IsPropertyCallbacks() &&
result_callback_obj->IsAccessorPair();
Handle<FixedArray> details =
isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
@@ -10432,7 +10159,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
LookupResult result(isolate);
obj->Lookup(*name, &result);
- if (result.IsProperty()) {
+ if (result.IsFound()) {
return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
}
return isolate->heap()->undefined_value();
@@ -10462,7 +10189,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- return Smi::FromInt(details.index());
+ // TODO(verwaest): Depends on the type of details.
+ return Smi::FromInt(details.dictionary_index());
}
@@ -10881,12 +10609,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// value object is not converted into a wrapped JS objects. To
// hide this optimization from the debugger, we wrap the receiver
// by creating correct wrapper object based on the calling frame's
- // global context.
+ // native context.
it.Advance();
- Handle<Context> calling_frames_global_context(
- Context::cast(Context::cast(it.frame()->context())->global_context()));
+ Handle<Context> calling_frames_native_context(
+ Context::cast(Context::cast(it.frame()->context())->native_context()));
receiver =
- isolate->factory()->ToObject(receiver, calling_frames_global_context);
+ isolate->factory()->ToObject(receiver, calling_frames_native_context);
}
details->set(kFrameDetailsReceiverIndex, *receiver);
@@ -10978,7 +10706,7 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
// These will be variables introduced by eval.
if (function_context->closure() == *function) {
if (function_context->has_extension() &&
- !function_context->IsGlobalContext()) {
+ !function_context->IsNativeContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
bool threw = false;
Handle<FixedArray> keys =
@@ -11165,7 +10893,7 @@ class ScopeIterator {
}
// Get the debug info (create it if it does not exist).
- if (!isolate->debug()->EnsureDebugInfo(shared_info)) {
+ if (!isolate->debug()->EnsureDebugInfo(shared_info, function_)) {
// Return if ensuring debug info failed.
return;
}
@@ -11190,7 +10918,6 @@ class ScopeIterator {
if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
} else {
// Reparse the code and analyze the scopes.
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
Handle<Script> script(Script::cast(shared_info->script()));
Scope* scope = NULL;
@@ -11198,36 +10925,25 @@ class ScopeIterator {
Handle<ScopeInfo> scope_info(shared_info->scope_info());
if (scope_info->Type() != FUNCTION_SCOPE) {
// Global or eval code.
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
if (scope_info->Type() == GLOBAL_SCOPE) {
info.MarkAsGlobal();
} else {
ASSERT(scope_info->Type() == EVAL_SCOPE);
info.MarkAsEval();
- info.SetCallingContext(Handle<Context>(function_->context()));
+ info.SetContext(Handle<Context>(function_->context()));
}
if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
+ RetrieveScopeChain(scope, shared_info);
} else {
// Function code
- CompilationInfo info(shared_info);
+ CompilationInfoWithZone info(shared_info);
if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
- }
-
- // Retrieve the scope chain for the current position.
- if (scope != NULL) {
- int source_position = shared_info->code()->SourcePosition(frame_->pc());
- scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
- } else {
- // A failed reparse indicates that the preparser has diverged from the
- // parser or that the preparse data given to the initial parse has been
- // faulty. We fail in debug mode but in release mode we only provide the
- // information we get from the context chain but nothing about
- // completely stack allocated scopes or stack allocated locals.
- UNREACHABLE();
+ RetrieveScopeChain(scope, shared_info);
}
}
}
@@ -11252,7 +10968,7 @@ class ScopeIterator {
ScopeType scope_type = Type();
if (scope_type == ScopeTypeGlobal) {
// The global scope is always the last in the chain.
- ASSERT(context_->IsGlobalContext());
+ ASSERT(context_->IsNativeContext());
context_ = Handle<Context>();
return;
}
@@ -11280,7 +10996,7 @@ class ScopeIterator {
ASSERT(context_->IsModuleContext());
return ScopeTypeModule;
case GLOBAL_SCOPE:
- ASSERT(context_->IsGlobalContext());
+ ASSERT(context_->IsNativeContext());
return ScopeTypeGlobal;
case WITH_SCOPE:
ASSERT(context_->IsWithContext());
@@ -11296,8 +11012,8 @@ class ScopeIterator {
UNREACHABLE();
}
}
- if (context_->IsGlobalContext()) {
- ASSERT(context_->global()->IsGlobalObject());
+ if (context_->IsNativeContext()) {
+ ASSERT(context_->global_object()->IsGlobalObject());
return ScopeTypeGlobal;
}
if (context_->IsFunctionContext()) {
@@ -11320,7 +11036,7 @@ class ScopeIterator {
Handle<JSObject> ScopeObject() {
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
- return Handle<JSObject>(CurrentContext()->global());
+ return Handle<JSObject>(CurrentContext()->global_object());
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
ASSERT(nested_scope_chain_.length() == 1);
@@ -11427,6 +11143,21 @@ class ScopeIterator {
Handle<Context> context_;
List<Handle<ScopeInfo> > nested_scope_chain_;
+ void RetrieveScopeChain(Scope* scope,
+ Handle<SharedFunctionInfo> shared_info) {
+ if (scope != NULL) {
+ int source_position = shared_info->code()->SourcePosition(frame_->pc());
+ scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
+ } else {
+ // A failed reparse indicates that the preparser has diverged from the
+ // parser or that the preparse data given to the initial parse has been
+ // faulty. We fail in debug mode; in release mode we only provide the
+ // information we get from the context chain, and nothing about
+ // completely stack-allocated scopes or stack-allocated locals.
+ UNREACHABLE();
+ }
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
@@ -11688,110 +11419,26 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
}
-// Set a break point in a function
+// Set a break point in a function.
// args[0]: function
// args[1]: number: break source position (within the function source)
// args[2]: number: break point object
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- Handle<SharedFunctionInfo> shared(fun->shared());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0);
Handle<Object> break_point_object_arg = args.at<Object>(2);
// Set break point.
- isolate->debug()->SetBreakPoint(shared, break_point_object_arg,
+ isolate->debug()->SetBreakPoint(function, break_point_object_arg,
&source_position);
return Smi::FromInt(source_position);
}
-Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
- Handle<Script> script,
- int position) {
- // Iterate the heap looking for SharedFunctionInfo generated from the
- // script. The inner most SharedFunctionInfo containing the source position
- // for the requested break point is found.
- // NOTE: This might require several heap iterations. If the SharedFunctionInfo
- // which is found is not compiled it is compiled and the heap is iterated
- // again as the compilation might create inner functions from the newly
- // compiled function and the actual requested break point might be in one of
- // these functions.
- bool done = false;
- // The current candidate for the source position:
- int target_start_position = RelocInfo::kNoPosition;
- Handle<SharedFunctionInfo> target;
- while (!done) {
- { // Extra scope for iterator and no-allocation.
- isolate->heap()->EnsureHeapIsIterable();
- AssertNoAllocation no_alloc_during_heap_iteration;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL; obj = iterator.next()) {
- if (obj->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
- if (shared->script() == *script) {
- // If the SharedFunctionInfo found has the requested script data and
- // contains the source position it is a candidate.
- int start_position = shared->function_token_position();
- if (start_position == RelocInfo::kNoPosition) {
- start_position = shared->start_position();
- }
- if (start_position <= position &&
- position <= shared->end_position()) {
- // If there is no candidate or this function is within the current
- // candidate this is the new candidate.
- if (target.is_null()) {
- target_start_position = start_position;
- target = shared;
- } else {
- if (target_start_position == start_position &&
- shared->end_position() == target->end_position()) {
- // If a top-level function contains only one function
- // declaration, the source for the top-level and the
- // function is the same. In that case prefer the non
- // top-level function.
- if (!shared->is_toplevel()) {
- target_start_position = start_position;
- target = shared;
- }
- } else if (target_start_position <= start_position &&
- shared->end_position() <= target->end_position()) {
- // This containment check includes equality as a function
- // inside a top-level function can share either start or end
- // position with the top-level function.
- target_start_position = start_position;
- target = shared;
- }
- }
- }
- }
- }
- } // End for loop.
- } // End No allocation scope.
-
- if (target.is_null()) {
- return isolate->heap()->undefined_value();
- }
-
- // If the candidate found is compiled we are done. NOTE: when lazy
- // compilation of inner functions is introduced some additional checking
- // needs to be done here to compile inner functions.
- done = target->is_compiled();
- if (!done) {
- // If the candidate is not compiled compile it to reveal any inner
- // functions which might contain the requested source position.
- SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
- }
- } // End while loop.
-
- return *target;
-}
-
-
// Changes the state of a break point in a script and returns the source
// position where the break point was set. NOTE: Regarding performance see
// the NOTE for GetScriptFromScriptData.
@@ -11810,23 +11457,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
RUNTIME_ASSERT(wrapper->value()->IsScript());
Handle<Script> script(Script::cast(wrapper->value()));
- Object* result = Runtime::FindSharedFunctionInfoInScript(
- isolate, script, source_position);
- if (!result->IsUndefined()) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
- // Find position within function. The script position might be before the
- // source position of the first function.
- int position;
- if (shared->start_position() > source_position) {
- position = 0;
- } else {
- position = source_position - shared->start_position();
- }
- isolate->debug()->SetBreakPoint(shared, break_point_object_arg, &position);
- position += shared->start_position();
- return Smi::FromInt(position);
+ // Set break point.
+ if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
+ &source_position)) {
+ return isolate->heap()->undefined_value();
}
- return isolate->heap()->undefined_value();
+
+ return Smi::FromInt(source_position);
}
@@ -12116,7 +11753,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Context> context =
isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
go_between);
- context->set_extension(*local_scope);
+
+ // Use the materialized local scope in a with context.
+ context =
+ isolate->factory()->NewWithContext(go_between, context, local_scope);
+
// Copy any with contexts present and chain them in front of this context.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context;
@@ -12151,7 +11792,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<SharedFunctionInfo> shared =
Compiler::CompileEval(function_source,
context,
- context->IsGlobalContext(),
+ context->IsNativeContext(),
CLASSIC_MODE,
RelocInfo::kNoPosition);
if (shared.is_null()) return Failure::Exception();
@@ -12222,9 +11863,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
isolate->set_context(*top->context());
}
- // Get the global context now set to the top context from before the
+ // Get the native context now set to the top context from before the
// debugger was invoked.
- Handle<Context> context = isolate->global_context();
+ Handle<Context> context = isolate->native_context();
bool is_global = true;
@@ -12255,7 +11896,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
// Invoke the result of the compilation to get the evaluation function.
bool has_pending_exception;
- Handle<Object> receiver = isolate->global();
+ Handle<Object> receiver = isolate->global_object();
Handle<Object> result =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception);
@@ -12389,7 +12030,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the constructor function for context extension and arguments array.
JSObject* arguments_boilerplate =
- isolate->context()->global_context()->arguments_boilerplate();
+ isolate->context()->native_context()->arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
@@ -12418,7 +12059,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Return result as JS array.
Object* result;
MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->global_context()->array_function());
+ isolate->context()->native_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
return JSArray::cast(result)->SetContent(instances);
}
@@ -12499,7 +12140,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Return result as JS array.
Object* result;
{ MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->global_context()->array_function());
+ isolate->context()->native_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return JSArray::cast(result)->SetContent(instances);
@@ -12524,7 +12165,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
- Handle<String> source(String::cast(args[1]));
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
RUNTIME_ASSERT(script_wrapper->value()->IsScript());
Handle<Script> script(Script::cast(script_wrapper->value()));
@@ -12550,7 +12191,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!JSFunction::CompileLazy(func, KEEP_EXCEPTION)) {
+ if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
return Failure::Exception();
}
func->code()->PrintLn();
@@ -12565,7 +12206,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!JSFunction::CompileLazy(func, KEEP_EXCEPTION)) {
+ if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
return Failure::Exception();
}
func->shared()->construct_stub()->PrintLn();
@@ -12613,11 +12254,12 @@ static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
// in OpaqueReferences.
RUNTIME_FUNCTION(MaybeObject*,
Runtime_LiveEditFindSharedFunctionInfosForScript) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 1);
HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, script_value, 0);
-
+ RUNTIME_ASSERT(script_value->value()->IsScript());
Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
const int kBufferSize = 32;
@@ -12659,10 +12301,13 @@ RUNTIME_FUNCTION(MaybeObject*,
// each function with all its descendants is always stored in a contiguous range
// with the function itself going first. The root function is a script function.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+
+ RUNTIME_ASSERT(script->value()->IsScript());
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
@@ -12678,6 +12323,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
// If old_script_name is provided (i.e. is a String), also creates a copy of
// the script with its original source and sends notification to debugger.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 3);
HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
@@ -12701,6 +12347,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 1);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
@@ -12710,6 +12357,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
// Replaces code of SharedFunctionInfo with a new one.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
@@ -12720,6 +12368,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
// Connects SharedFunctionInfo to another script.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
Handle<Object> function_object(args[0], isolate);
@@ -12746,6 +12395,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
// In a code of a parent function replaces original function as embedded object
// with a substitution one.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 3);
HandleScope scope(isolate);
@@ -12766,6 +12416,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
// (change_begin, change_end, change_end_new_position).
// Each group describes a change in text; groups are sorted by change_begin.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
@@ -12780,19 +12431,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
// Returns array of the same length with corresponding results of
// LiveEdit::FunctionPatchabilityStatus type.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
return *LiveEdit::CheckAndDropActivations(shared_array, do_drop,
- isolate->zone());
+ isolate->runtime_zone());
}
// Compares 2 strings line-by-line, then token-wise and returns diff in form
// of JSArray of triplets (pos1, pos1_end, pos2_end) describing list
// of diff chunks.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
@@ -12802,9 +12455,50 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
}
+// Restarts a call frame and completely drops all frames above.
+// Returns true if successful. Otherwise returns undefined or an error message.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
+ CHECK(isolate->debugger()->live_edit_enabled());
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ // Check arguments.
+ Object* check;
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
+ if (!maybe_check->ToObject(&check)) return maybe_check;
+ }
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+ Heap* heap = isolate->heap();
+
+ // Find the relevant frame with the requested index.
+ StackFrame::Id id = isolate->debug()->break_frame_id();
+ if (id == StackFrame::NO_ID) {
+ // If there are no JavaScript stack frames return undefined.
+ return heap->undefined_value();
+ }
+
+ int count = 0;
+ JavaScriptFrameIterator it(isolate, id);
+ for (; !it.done(); it.Advance()) {
+ if (index < count + it.frame()->GetInlineCount()) break;
+ count += it.frame()->GetInlineCount();
+ }
+ if (it.done()) return heap->undefined_value();
+
+ const char* error_message =
+ LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
+ if (error_message) {
+ return *(isolate->factory()->LookupAsciiSymbol(error_message));
+ }
+ return heap->true_value();
+}
+
+
// A testing entry. Returns the statement position closest to
// source_position.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -12851,11 +12545,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
bool pending_exception;
{
if (without_debugger) {
- result = Execution::Call(function, isolate->global(), 0, NULL,
+ result = Execution::Call(function, isolate->global_object(), 0, NULL,
&pending_exception);
} else {
EnterDebugger enter_debugger;
- result = Execution::Call(function, isolate->global(), 0, NULL,
+ result = Execution::Call(function, isolate->global_object(), 0, NULL,
&pending_exception);
}
}
@@ -13334,7 +13028,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
Handle<JSFunction> factory(JSFunction::cast(
cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(isolate->global_context()->global());
+ Handle<Object> receiver(isolate->native_context()->global_object());
// This handle is neither shared nor used later, so it's safe.
Handle<Object> argv[] = { key_handle };
bool pending_exception;
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index f5a4f50206..da60ee10b7 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -86,6 +86,7 @@ namespace internal {
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
+ F(ParallelRecompile, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
@@ -283,6 +284,9 @@ namespace internal {
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
+ /* Harmony modules */ \
+ F(IsJSModule, 1, 1) \
+ \
/* Harmony proxies */ \
F(CreateJSProxy, 2, 1) \
F(CreateJSFunctionProxy, 4, 1) \
@@ -302,11 +306,15 @@ namespace internal {
/* Harmony maps */ \
F(MapInitialize, 1, 1) \
F(MapGet, 2, 1) \
+ F(MapHas, 2, 1) \
+ F(MapDelete, 2, 1) \
F(MapSet, 3, 1) \
\
/* Harmony weakmaps */ \
F(WeakMapInitialize, 1, 1) \
F(WeakMapGet, 2, 1) \
+ F(WeakMapHas, 2, 1) \
+ F(WeakMapDelete, 2, 1) \
F(WeakMapSet, 3, 1) \
\
/* Statements */ \
@@ -317,16 +325,18 @@ namespace internal {
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
+ F(ThrowNotDateError, 0, 1) \
F(StackGuard, 0, 1) \
F(Interrupt, 0, 1) \
F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
+ F(NewGlobalContext, 2, 1) \
F(NewFunctionContext, 1, 1) \
F(PushWithContext, 2, 1) \
F(PushCatchContext, 3, 1) \
F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
+ F(PushModuleContext, 1, 1) \
F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
@@ -442,6 +452,7 @@ namespace internal {
F(LiveEditPatchFunctionPositions, 2, 1) \
F(LiveEditCheckAndDropActivations, 2, 1) \
F(LiveEditCompareStrings, 2, 1) \
+ F(LiveEditRestartFrame, 2, 1) \
F(GetFunctionCodePositionFromSource, 2, 1) \
F(ExecuteInDebugContext, 2, 1) \
\
@@ -637,13 +648,6 @@ class Runtime : public AllStatic {
// Get the intrinsic function with the given FunctionId.
static const Function* FunctionForId(FunctionId id);
- static Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
- Handle<String> subject,
- Handle<String> search,
- Handle<String> replace,
- bool* found,
- int recursion_limit);
-
// General-purpose helper functions for runtime system.
static int StringMatch(Isolate* isolate,
Handle<String> sub,
@@ -686,11 +690,6 @@ class Runtime : public AllStatic {
Handle<Object> object,
Handle<Object> key);
- // This function is used in FunctionNameUsing* tests.
- static Object* FindSharedFunctionInfoInScript(Isolate* isolate,
- Handle<Script> script,
- int position);
-
// Helper functions used by stubs.
static void PerformGC(Object* result);
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index f24af2ed26..bd2db58182 100755
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -1077,6 +1077,7 @@ bool Scanner::ScanRegExpFlags() {
if (!ScanLiteralUnicodeEscape()) {
break;
}
+ Advance();
}
}
literal.Complete();
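
The one-line scanner fix is an escape-consumption bug: evidently ScanLiteralUnicodeEscape leaves the cursor on the escape's final character, so without the extra Advance() the flags loop could rescan it. Regexp flags are lexed as IdentifierPart*, which admits unicode escapes, so a literal like the following exercises the path (whether the escaped flag is then accepted is a parser question):

    /x/\u0067   // flags position: the scanner must consume all of "\u0067".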
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 25f02f6320..02b4323980 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -193,7 +193,8 @@ int ScopeInfo::ContextLength() {
bool has_context = context_locals > 0 ||
function_name_context_slot ||
Type() == WITH_SCOPE ||
- (Type() == FUNCTION_SCOPE && CallsEval());
+ (Type() == FUNCTION_SCOPE && CallsEval()) ||
+ Type() == MODULE_SCOPE;
if (has_context) {
return Context::MIN_CONTEXT_SLOTS + context_locals +
(function_name_context_slot ? 1 : 0);
@@ -222,11 +223,7 @@ bool ScopeInfo::HasHeapAllocatedLocals() {
bool ScopeInfo::HasContext() {
- if (length() > 0) {
- return ContextLength() > 0;
- } else {
- return false;
- }
+ return ContextLength() > 0;
}
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index ad6692e57f..c9612577af 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -29,6 +29,7 @@
#include "scopes.h"
+#include "accessors.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "messages.h"
@@ -117,10 +118,8 @@ Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
already_resolved_(false),
zone_(zone) {
SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
- // At some point we might want to provide outer scopes to
- // eval scopes (by walking the stack and reading the scope info).
- // In that case, the ASSERT below needs to be adjusted.
- ASSERT_EQ(type == GLOBAL_SCOPE, outer_scope == NULL);
+ // The outermost scope must be a global scope.
+ ASSERT(type == GLOBAL_SCOPE || outer_scope != NULL);
ASSERT(!HasIllegalRedeclaration());
}
@@ -214,7 +213,7 @@ Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
bool contains_with = false;
- while (!context->IsGlobalContext()) {
+ while (!context->IsNativeContext()) {
if (context->IsWithContext()) {
Scope* with_scope = new(zone) Scope(current_scope,
WITH_SCOPE,
@@ -226,6 +225,18 @@ Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
s->scope_inside_with_ = true;
}
+ } else if (context->IsGlobalContext()) {
+ ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+ current_scope = new(zone) Scope(current_scope,
+ GLOBAL_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ zone);
+ } else if (context->IsModuleContext()) {
+ ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info());
+ current_scope = new(zone) Scope(current_scope,
+ MODULE_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ zone);
} else if (context->IsFunctionContext()) {
ScopeInfo* scope_info = context->closure()->shared()->scope_info();
current_scope = new(zone) Scope(current_scope,
@@ -274,7 +285,8 @@ bool Scope::Analyze(CompilationInfo* info) {
// Allocate the variables.
{
- AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate());
+ AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate(),
+ info->zone());
if (!top->AllocateVariables(info, &ast_node_factory)) return false;
}
@@ -473,17 +485,14 @@ Variable* Scope::DeclareLocal(Handle<String> name,
// This function handles VAR and CONST modes. DYNAMIC variables are
// introduced during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
- ASSERT(mode == VAR ||
- mode == CONST ||
- mode == CONST_HARMONY ||
- mode == LET);
+ ASSERT(IsDeclaredVariableMode(mode));
++num_var_or_const_;
return variables_.Declare(
this, name, mode, true, Variable::NORMAL, init_flag, interface);
}
-Variable* Scope::DeclareGlobal(Handle<String> name) {
+Variable* Scope::DeclareDynamicGlobal(Handle<String> name) {
ASSERT(is_global_scope());
return variables_.Declare(this,
name,
@@ -586,6 +595,21 @@ VariableProxy* Scope::CheckAssignmentToConst() {
}
+class VarAndOrder {
+ public:
+ VarAndOrder(Variable* var, int order) : var_(var), order_(order) { }
+ Variable* var() const { return var_; }
+ int order() const { return order_; }
+ static int Compare(const VarAndOrder* a, const VarAndOrder* b) {
+ return a->order_ - b->order_;
+ }
+
+ private:
+ Variable* var_;
+ int order_;
+};
+
+
void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ZoneList<Variable*>* context_locals) {
ASSERT(stack_locals != NULL);
@@ -600,17 +624,25 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
}
}
+ ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
+
// Collect declared local variables.
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var->is_used()) {
- if (var->IsStackLocal()) {
- stack_locals->Add(var, zone());
- } else if (var->IsContextSlot()) {
- context_locals->Add(var, zone());
- }
+ vars.Add(VarAndOrder(var, p->order), zone());
+ }
+ }
+ vars.Sort(VarAndOrder::Compare);
+ int var_count = vars.length();
+ for (int i = 0; i < var_count; i++) {
+ Variable* var = vars[i].var();
+ if (var->IsStackLocal()) {
+ stack_locals->Add(var, zone());
+ } else if (var->IsContextSlot()) {
+ context_locals->Add(var, zone());
}
}
}
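
The detour through VarAndOrder exists because variables_ is a hash map whose iteration order is not stable; sorting by the recorded declaration order before handing variables to the slot allocators makes stack- and context-slot layout deterministic across runs, instead of depending on hash-table internals.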
@@ -633,12 +665,13 @@ bool Scope::AllocateVariables(CompilationInfo* info,
// 3) Allocate variables.
AllocateVariablesRecursively();
- return true;
-}
-
+ // 4) Allocate and link module instance objects.
+ if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
+ AllocateModules(info);
+ LinkModules(info);
+ }
-bool Scope::AllowsLazyCompilation() const {
- return !force_eager_compilation_ && HasTrivialOuterContext();
+ return true;
}
@@ -666,23 +699,36 @@ bool Scope::HasTrivialOuterContext() const {
}
-bool Scope::AllowsLazyRecompilation() const {
- return !force_eager_compilation_ &&
- !TrivialDeclarationScopesBeforeWithScope();
-}
-
-
-bool Scope::TrivialDeclarationScopesBeforeWithScope() const {
+bool Scope::HasLazyCompilableOuterContext() const {
Scope* outer = outer_scope_;
- if (outer == NULL) return false;
+ if (outer == NULL) return true;
+ // There are several reasons that prevent lazy compilation:
+ // - This scope is inside a with scope and all declaration scopes between
+ // them have empty contexts. Such declaration scopes become invisible
+ // during scope info deserialization.
+ // - This scope is inside a strict eval scope with variables that are
+ // potentially context allocated in an artificial function scope that
+ // is not deserialized correctly.
outer = outer->DeclarationScope();
- while (outer != NULL) {
- if (outer->is_with_scope()) return true;
- if (outer->is_declaration_scope() && outer->num_heap_slots() > 0)
- return false;
- outer = outer->outer_scope_;
+ bool found_non_trivial_declarations = false;
+ for (const Scope* scope = outer; scope != NULL; scope = scope->outer_scope_) {
+ if (scope->is_eval_scope()) return false;
+ if (scope->is_with_scope() && !found_non_trivial_declarations) return false;
+ if (scope->is_declaration_scope() && scope->num_heap_slots() > 0) {
+ found_non_trivial_declarations = true;
+ }
}
- return false;
+ return true;
+}
+
+
+bool Scope::AllowsLazyCompilation() const {
+ return !force_eager_compilation_ && HasLazyCompilableOuterContext();
+}
+
+
+bool Scope::AllowsLazyCompilationWithoutContext() const {
+ return !force_eager_compilation_ && HasTrivialOuterContext();
}
@@ -990,7 +1036,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
// gave up on it (e.g. by encountering a local with the same in the outer
// scope which was not promoted to a context, this can happen if we use
// debugger to evaluate arbitrary expressions at a break point).
- if (var->is_global()) {
+ if (var->IsGlobalObjectProperty()) {
var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
} else if (var->is_dynamic()) {
var = NonLocal(proxy->name(), DYNAMIC);
@@ -1002,8 +1048,8 @@ bool Scope::ResolveVariable(CompilationInfo* info,
break;
case UNBOUND:
- // No binding has been found. Declare a variable in global scope.
- var = info->global_scope()->DeclareGlobal(proxy->name());
+ // No binding has been found. Declare a variable on the global object.
+ var = info->global_scope()->DeclareDynamicGlobal(proxy->name());
break;
case UNBOUND_EVAL_SHADOWED:
@@ -1110,11 +1156,13 @@ bool Scope::MustAllocate(Variable* var) {
inner_scope_calls_eval_ ||
scope_contains_with_ ||
is_catch_scope() ||
- is_block_scope())) {
+ is_block_scope() ||
+ is_module_scope() ||
+ is_global_scope())) {
var->set_is_used(true);
}
// Global variables do not need to be allocated.
- return !var->is_global() && var->is_used();
+ return !var->IsGlobalObjectProperty() && var->is_used();
}
@@ -1128,11 +1176,11 @@ bool Scope::MustAllocateInContext(Variable* var) {
// catch-bound variables are always allocated in a context.
if (var->mode() == TEMPORARY) return false;
if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
+ if (is_global_scope() && IsLexicalVariableMode(var->mode())) return true;
return var->has_forced_context_allocation() ||
scope_calls_eval_ ||
inner_scope_calls_eval_ ||
- scope_contains_with_ ||
- var->is_global();
+ scope_contains_with_;
}
@@ -1233,11 +1281,19 @@ void Scope::AllocateNonParameterLocals() {
AllocateNonParameterLocal(temps_[i]);
}
+ ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
+
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
- AllocateNonParameterLocal(var);
+ vars.Add(VarAndOrder(var, p->order), zone());
+ }
+
+ vars.Sort(VarAndOrder::Compare);
+ int var_count = vars.length();
+ for (int i = 0; i < var_count; i++) {
+ AllocateNonParameterLocal(vars[i].var());
}
// For now, function_ must be allocated at the very end. If it gets
@@ -1298,4 +1354,77 @@ int Scope::ContextLocalCount() const {
(function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
}
+
+void Scope::AllocateModules(CompilationInfo* info) {
+ ASSERT(is_global_scope() || is_module_scope());
+
+ if (is_module_scope()) {
+ ASSERT(interface_->IsFrozen());
+ ASSERT(scope_info_.is_null());
+
+ // TODO(rossberg): This has to be the initial compilation of this code.
+ // We currently do not allow recompiling any module definitions.
+ Handle<ScopeInfo> scope_info = GetScopeInfo();
+ Factory* factory = info->isolate()->factory();
+ Handle<Context> context = factory->NewModuleContext(scope_info);
+ Handle<JSModule> instance = factory->NewJSModule(context, scope_info);
+ context->set_module(*instance);
+
+ bool ok;
+ interface_->MakeSingleton(instance, &ok);
+ ASSERT(ok);
+ }
+
+ // Allocate nested modules.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* inner_scope = inner_scopes_.at(i);
+ if (inner_scope->is_module_scope()) {
+ inner_scope->AllocateModules(info);
+ }
+ }
+}
+
+
+void Scope::LinkModules(CompilationInfo* info) {
+ ASSERT(is_global_scope() || is_module_scope());
+
+ if (is_module_scope()) {
+ Handle<JSModule> instance = interface_->Instance();
+
+ // Populate the module instance object.
+ const PropertyAttributes ro_attr =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
+ const PropertyAttributes rw_attr =
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM);
+ for (Interface::Iterator it = interface_->iterator();
+ !it.done(); it.Advance()) {
+ if (it.interface()->IsModule()) {
+ Handle<Object> value = it.interface()->Instance();
+ ASSERT(!value.is_null());
+ JSReceiver::SetProperty(
+ instance, it.name(), value, ro_attr, kStrictMode);
+ } else {
+ Variable* var = LocalLookup(it.name());
+ ASSERT(var != NULL && var->IsContextSlot());
+ PropertyAttributes attr = var->is_const_mode() ? ro_attr : rw_attr;
+ Handle<AccessorInfo> info =
+ Accessors::MakeModuleExport(it.name(), var->index(), attr);
+ Handle<Object> result = SetAccessor(instance, info);
+ ASSERT(!(result.is_null() || result->IsUndefined()));
+ USE(result);
+ }
+ }
+ USE(JSObject::PreventExtensions(instance));
+ }
+
+ // Link nested modules.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* inner_scope = inner_scopes_.at(i);
+ if (inner_scope->is_module_scope()) {
+ inner_scope->LinkModules(info);
+ }
+ }
+}
+
+
} } // namespace v8::internal
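
AllocateModules and LinkModules implement the two-phase scheme spelled out in the scopes.h comment later in this diff: every module instance object is created first, and only then populated, because an export may refer to a module that has not been populated yet. A cyclic pair that needs this, sketched in the era's experimental Harmony modules syntax:

    module A { import b from B; export let a = 1; }
    module B { import a from A; export let b = 2; }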
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index decd74d232..6e35d05a7f 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -160,20 +160,20 @@ class Scope: public ZoneObject {
// global scope. The variable was introduced (possibly from an inner
// scope) by a reference to an unresolved variable with no intervening
// with statements or eval calls.
- Variable* DeclareGlobal(Handle<String> name);
+ Variable* DeclareDynamicGlobal(Handle<String> name);
// Create a new unresolved variable.
template<class Visitor>
VariableProxy* NewUnresolved(AstNodeFactory<Visitor>* factory,
Handle<String> name,
- int position = RelocInfo::kNoPosition,
- Interface* interface = Interface::NewValue()) {
+ Interface* interface = Interface::NewValue(),
+ int position = RelocInfo::kNoPosition) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
ASSERT(!already_resolved());
VariableProxy* proxy =
- factory->NewVariableProxy(name, false, position, interface);
+ factory->NewVariableProxy(name, false, interface, position);
unresolved_.Add(proxy, zone_);
return proxy;
}
@@ -280,7 +280,8 @@ class Scope: public ZoneObject {
bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
bool is_with_scope() const { return type_ == WITH_SCOPE; }
bool is_declaration_scope() const {
- return is_eval_scope() || is_function_scope() || is_global_scope();
+ return is_eval_scope() || is_function_scope() ||
+ is_module_scope() || is_global_scope();
}
bool is_classic_mode() const {
return language_mode() == CLASSIC_MODE;
@@ -374,16 +375,14 @@ class Scope: public ZoneObject {
// Determine if we can use lazy compilation for this scope.
bool AllowsLazyCompilation() const;
- // True if we can lazily recompile functions with this scope.
- bool AllowsLazyRecompilation() const;
+ // Determine if we can use lazy compilation for this scope without a context.
+ bool AllowsLazyCompilationWithoutContext() const;
- // True if the outer context of this scope is always the global context.
+ // True if the outer context of this scope is always the native context.
bool HasTrivialOuterContext() const;
- // True if this scope is inside a with scope and all declaration scopes
- // between them have empty contexts. Such declaration scopes become
- // invisible during scope info deserialization.
- bool TrivialDeclarationScopesBeforeWithScope() const;
+ // True if the outer context allows lazy compilation of this scope.
+ bool HasLazyCompilableOuterContext() const;
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
@@ -592,6 +591,13 @@ class Scope: public ZoneObject {
bool AllocateVariables(CompilationInfo* info,
AstNodeFactory<AstNullVisitor>* factory);
+ // Instance objects have to be created ahead of time (before code generation)
+ // because of potentially cyclic references between them.
+ // Linking also has to be a separate stage, since populating one object may
+ // potentially require (forward) references to others.
+ void AllocateModules(CompilationInfo* info);
+ void LinkModules(CompilationInfo* info);
+
private:
// Construct a scope based on the scope info.
Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index cf8e5e18e6..792f25c51d 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -37,6 +37,7 @@
#include "platform.h"
#include "runtime.h"
#include "serialize.h"
+#include "snapshot.h"
#include "stub-cache.h"
#include "v8threads.h"
@@ -510,6 +511,18 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
47,
"date_cache_stamp");
+ Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
+ UNCLASSIFIED,
+ 48,
+ "address_of_pending_message_obj");
+ Add(ExternalReference::address_of_has_pending_message(isolate).address(),
+ UNCLASSIFIED,
+ 49,
+ "address_of_has_pending_message");
+ Add(ExternalReference::address_of_pending_message_script(isolate).address(),
+ UNCLASSIFIED,
+ 50,
+ "pending_message_script");
}
@@ -666,33 +679,35 @@ HeapObject* Deserializer::GetAddressFromStart(int space) {
void Deserializer::Deserialize() {
isolate_ = Isolate::Current();
ASSERT(isolate_ != NULL);
- // Don't GC while deserializing - just expand the heap.
- AlwaysAllocateScope always_allocate;
- // Don't use the free lists while deserializing.
- LinearAllocationScope allocate_linearly;
- // No active threads.
- ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
- // No active handles.
- ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
- // Make sure the entire partial snapshot cache is traversed, filling it with
- // valid object pointers.
- isolate_->set_serialize_partial_snapshot_cache_length(
- Isolate::kPartialSnapshotCacheCapacity);
- ASSERT_EQ(NULL, external_reference_decoder_);
- external_reference_decoder_ = new ExternalReferenceDecoder();
- isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
- isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
-
- isolate_->heap()->set_global_contexts_list(
- isolate_->heap()->undefined_value());
-
- // Update data pointers to the external strings containing natives sources.
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = isolate_->heap()->natives_source_cache()->get(i);
- if (!source->IsUndefined()) {
- ExternalAsciiString::cast(source)->update_data_cache();
+ {
+ // Don't GC while deserializing - just expand the heap.
+ AlwaysAllocateScope always_allocate;
+ // Don't use the free lists while deserializing.
+ LinearAllocationScope allocate_linearly;
+ // No active threads.
+ ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
+ // No active handles.
+ ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
+ ASSERT_EQ(NULL, external_reference_decoder_);
+ external_reference_decoder_ = new ExternalReferenceDecoder();
+ isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
+
+ isolate_->heap()->set_native_contexts_list(
+ isolate_->heap()->undefined_value());
+
+ // Update data pointers to the external strings containing natives sources.
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Object* source = isolate_->heap()->natives_source_cache()->get(i);
+ if (!source->IsUndefined()) {
+ ExternalAsciiString::cast(source)->update_data_cache();
+ }
}
}
+
+ // Issue code events for newly deserialized code objects.
+ LOG_CODE_EVENT(isolate_, LogCodeObjects());
+ LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}
@@ -705,7 +720,17 @@ void Deserializer::DeserializePartial(Object** root) {
if (external_reference_decoder_ == NULL) {
external_reference_decoder_ = new ExternalReferenceDecoder();
}
+
+  // Record the code space top pointer so we can detect whether any new
+  // code objects were deserialized.
+ OldSpace* code_space = isolate_->heap()->code_space();
+ Address start_address = code_space->top();
VisitPointer(root);
+
+  // No code objects should be deserialized here. If this check fires,
+  // that has changed and logging should be added to notify the profiler
+  // et al. of the new code.
+ CHECK_EQ(start_address, code_space->top());
}
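
The CHECK above is a watermark test: record the code space's top pointer
before visiting the root, then verify afterwards that the operation
allocated nothing in that space. The idiom in isolation, with a hypothetical
bump allocator standing in for V8's code space:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Hypothetical bump allocator; top() plays the role of code_space->top().
    class BumpSpace {
     public:
      explicit BumpSpace(std::size_t size) : buffer_(size), top_(0) {}
      void* Allocate(std::size_t n) {
        void* p = &buffer_[top_];
        top_ += n;
        return p;
      }
      std::size_t top() const { return top_; }
     private:
      std::vector<char> buffer_;
      std::size_t top_;
    };

    void DoWorkThatMustNotAllocate(BumpSpace* space) {
      std::size_t watermark = space->top();  // record the high-water mark
      // ... operation expected to leave this space untouched ...
      assert(watermark == space->top());     // fires if that changes
    }
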
@@ -841,10 +866,18 @@ void Deserializer::ReadChunk(Object** current,
new_object = HeapObject::FromAddress(object_address); \
} \
} \
- if (within == kFirstInstruction) { \
- Code* new_code_object = reinterpret_cast<Code*>(new_object); \
- new_object = reinterpret_cast<Object*>( \
- new_code_object->instruction_start()); \
+ if (within == kInnerPointer) { \
+ if (space_number != CODE_SPACE || new_object->IsCode()) { \
+ Code* new_code_object = reinterpret_cast<Code*>(new_object); \
+ new_object = reinterpret_cast<Object*>( \
+ new_code_object->instruction_start()); \
+ } else { \
+ ASSERT(space_number == CODE_SPACE || space_number == kLargeCode);\
+ JSGlobalPropertyCell* cell = \
+ JSGlobalPropertyCell::cast(new_object); \
+ new_object = reinterpret_cast<Object*>( \
+ cell->ValueAddress()); \
+ } \
} \
if (how == kFromCode) { \
Address location_of_branch_data = \
@@ -982,11 +1015,13 @@ void Deserializer::ReadChunk(Object** current,
// Deserialize a new object and write a pointer to it to the current
// object.
ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
- // Support for direct instruction pointers in functions
- ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
+ // Support for direct instruction pointers in functions. It's an inner
+ // pointer because it points at the entry point, not at the start of the
+ // code object.
+ ONE_PER_CODE_SPACE(kNewObject, kPlain, kInnerPointer)
// Deserialize a new code object and write a pointer to its first
// instruction to the current code object.
- ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
+ ONE_PER_SPACE(kNewObject, kFromCode, kInnerPointer)
// Find a recently deserialized object using its offset from the current
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
@@ -1009,16 +1044,16 @@ void Deserializer::ReadChunk(Object** current,
// current allocation point and write a pointer to its first instruction
// to the current code object or the instruction pointer in a function
// object.
- ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
- ALL_SPACES(kBackref, kPlain, kFirstInstruction)
+ ALL_SPACES(kBackref, kFromCode, kInnerPointer)
+ ALL_SPACES(kBackref, kPlain, kInnerPointer)
// Find an already deserialized object using its offset from the start
// and write a pointer to it to the current object.
ALL_SPACES(kFromStart, kPlain, kStartOfObject)
- ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
+ ALL_SPACES(kFromStart, kPlain, kInnerPointer)
// Find an already deserialized code object using its offset from the
// start and write a pointer to its first instruction to the current code
// object.
- ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
+ ALL_SPACES(kFromStart, kFromCode, kInnerPointer)
// Find an object in the roots array and write a pointer to it to the
// current object.
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
@@ -1033,10 +1068,10 @@ void Deserializer::ReadChunk(Object** current,
kUnknownOffsetFromStart)
// Find an code entry in the partial snapshots cache and
// write a pointer to it to the current object.
- CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
+ CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
CASE_BODY(kPartialSnapshotCache,
kPlain,
- kFirstInstruction,
+ kInnerPointer,
0,
kUnknownOffsetFromStart)
// Find an external reference and write a pointer to it to the current
@@ -1149,22 +1184,6 @@ void StartupSerializer::SerializeStrongReferences() {
void PartialSerializer::Serialize(Object** object) {
this->VisitPointer(object);
- Isolate* isolate = Isolate::Current();
-
- // After we have done the partial serialization the partial snapshot cache
- // will contain some references needed to decode the partial snapshot. We
- // fill it up with undefineds so it has a predictable length so the
- // deserialization code doesn't need to know the length.
- for (int index = isolate->serialize_partial_snapshot_cache_length();
- index < Isolate::kPartialSnapshotCacheCapacity;
- index++) {
- isolate->serialize_partial_snapshot_cache()[index] =
- isolate->heap()->undefined_value();
- startup_serializer_->VisitPointer(
- &isolate->serialize_partial_snapshot_cache()[index]);
- }
- isolate->set_serialize_partial_snapshot_cache_length(
- Isolate::kPartialSnapshotCacheCapacity);
}
@@ -1194,26 +1213,29 @@ void Serializer::VisitPointers(Object** start, Object** end) {
// This ensures that the partial snapshot cache keeps things alive during GC and
// tracks their movement. When it is called during serialization of the startup
-// snapshot the partial snapshot is empty, so nothing happens. When the partial
-// (context) snapshot is created, this array is populated with the pointers that
-// the partial snapshot will need. As that happens we emit serialized objects to
-// the startup snapshot that correspond to the elements of this cache array. On
-// deserialization we therefore need to visit the cache array. This fills it up
-// with pointers to deserialized objects.
+// snapshot nothing happens. When the partial (context) snapshot is created,
+// this array is populated with the pointers that the partial snapshot will
+// need. As that happens we emit serialized objects to the startup snapshot
+// that correspond to the elements of this cache array. On deserialization we
+// therefore need to visit the cache array. This fills it up with pointers to
+// deserialized objects.
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+ if (Serializer::enabled()) return;
Isolate* isolate = Isolate::Current();
- visitor->VisitPointers(
- isolate->serialize_partial_snapshot_cache(),
- &isolate->serialize_partial_snapshot_cache()[
- isolate->serialize_partial_snapshot_cache_length()]);
-}
-
-
-// When deserializing we need to set the size of the snapshot cache. This means
-// the root iteration code (above) will iterate over array elements, writing the
-// references to deserialized objects in them.
-void SerializerDeserializer::SetSnapshotCacheSize(int size) {
- Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
+ for (int i = 0; ; i++) {
+ if (isolate->serialize_partial_snapshot_cache_length() <= i) {
+      // Extend the array so it is ready to receive a value from the
+      // visitor when deserializing.
+ isolate->PushToPartialSnapshotCache(Smi::FromInt(0));
+ }
+ Object** cache = isolate->serialize_partial_snapshot_cache();
+ visitor->VisitPointers(&cache[i], &cache[i + 1]);
+    // The sentinel is the undefined object, which is a root and so will
+    // not normally be found in the cache.
+ if (cache[i] == isolate->heap()->undefined_value()) {
+ break;
+ }
+ }
}
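
The rewritten Iterate drops the explicit cache length: the array is extended
on demand and the walk stops at the undefined-value sentinel that
SerializeWeakReferences appends. A minimal model of that sentinel protocol,
with an int sentinel standing in for the undefined value and a callback
standing in for the ObjectVisitor:

    #include <cstddef>
    #include <functional>
    #include <vector>

    const int kSentinel = -1;  // plays the role of the undefined value

    // Visit slots one at a time, extending the array as needed, until the
    // visitor has written the sentinel into a slot.
    void IterateCache(std::vector<int>& cache,
                      const std::function<void(int*)>& visit) {
      for (std::size_t i = 0; ; i++) {
        if (cache.size() <= i) cache.push_back(0);  // extend, ready for a value
        visit(&cache[i]);
        if (cache[i] == kSentinel) break;  // sentinel terminates the walk
      }
    }

The deserializer's visitor overwrites each slot with a real value, so the
loop needs no stored length at all.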
@@ -1231,14 +1253,11 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
// then visit the pointer so that it becomes part of the startup snapshot
// and we can refer to it from the partial snapshot.
int length = isolate->serialize_partial_snapshot_cache_length();
- CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
- isolate->serialize_partial_snapshot_cache()[length] = heap_object;
- startup_serializer_->VisitPointer(
- &isolate->serialize_partial_snapshot_cache()[length]);
+ isolate->PushToPartialSnapshotCache(heap_object);
+ startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
// We don't recurse from the startup snapshot generator into the partial
// snapshot generator.
- ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
- isolate->set_serialize_partial_snapshot_cache_length(length + 1);
+ ASSERT(length == isolate->serialize_partial_snapshot_cache_length() - 1);
return length;
}
@@ -1337,12 +1356,14 @@ void StartupSerializer::SerializeObject(
void StartupSerializer::SerializeWeakReferences() {
- for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
- i < Isolate::kPartialSnapshotCacheCapacity;
- i++) {
- sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
- sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
- }
+ // This phase comes right after the partial serialization (of the snapshot).
+ // After we have done the partial serialization the partial snapshot cache
+ // will contain some references needed to decode the partial snapshot. We
+ // add one entry with 'undefined' which is the sentinel that the deserializer
+ // uses to know it is done deserializing the array.
+ Isolate* isolate = Isolate::Current();
+ Object* undefined = isolate->heap()->undefined_value();
+ VisitPointer(&undefined);
HEAP->IterateWeakRoots(this, VISIT_ALL);
}
@@ -1557,7 +1578,7 @@ void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
Address target_start = rinfo->target_address_address();
OutputRawData(target_start);
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
+ serializer_->SerializeObject(target, kFromCode, kInnerPointer);
bytes_processed_so_far_ += rinfo->target_address_size();
}
@@ -1565,15 +1586,17 @@ void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
OutputRawData(entry_address);
- serializer_->SerializeObject(target, kPlain, kFirstInstruction);
+ serializer_->SerializeObject(target, kPlain, kInnerPointer);
bytes_processed_so_far_ += kPointerSize;
}
void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- // We shouldn't have any global property cell references in code
- // objects in the snapshot.
- UNREACHABLE();
+ ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(rinfo->target_cell());
+ OutputRawData(rinfo->pc());
+ serializer_->SerializeObject(cell, kPlain, kInnerPointer);
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index f50e23eac8..8beb88e8b2 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -210,7 +210,6 @@ class SnapshotByteSource {
class SerializerDeserializer: public ObjectVisitor {
public:
static void Iterate(ObjectVisitor* visitor);
- static void SetSnapshotCacheSize(int size);
protected:
// Where the pointed-to object can be found:
@@ -243,7 +242,7 @@ class SerializerDeserializer: public ObjectVisitor {
// Where to point within the object.
enum WhereToPoint {
kStartOfObject = 0,
- kFirstInstruction = 0x80,
+ kInnerPointer = 0x80, // First insn in code object or payload of cell.
kWhereToPointMask = 0x80
};
diff --git a/deps/v8/src/smart-array-pointer.h b/deps/v8/src/smart-pointers.h
index 00721c1a02..345c4d47fb 100644
--- a/deps/v8/src/smart-array-pointer.h
+++ b/deps/v8/src/smart-pointers.h
@@ -25,34 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_SMART_ARRAY_POINTER_H_
-#define V8_SMART_ARRAY_POINTER_H_
+#ifndef V8_SMART_POINTERS_H_
+#define V8_SMART_POINTERS_H_
namespace v8 {
namespace internal {
-// A 'scoped array pointer' that calls DeleteArray on its pointer when the
-// destructor is called.
-template<typename T>
-class SmartArrayPointer {
+template<typename Deallocator, typename T>
+class SmartPointerBase {
public:
// Default constructor. Constructs an empty scoped pointer.
- inline SmartArrayPointer() : p_(NULL) {}
+ inline SmartPointerBase() : p_(NULL) {}
// Constructs a scoped pointer from a plain one.
- explicit inline SmartArrayPointer(T* ptr) : p_(ptr) {}
+ explicit inline SmartPointerBase(T* ptr) : p_(ptr) {}
// Copy constructor removes the pointer from the original to avoid double
// freeing.
- inline SmartArrayPointer(const SmartArrayPointer<T>& rhs) : p_(rhs.p_) {
- const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
+ inline SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
+ : p_(rhs.p_) {
+ const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
}
   // When the destructor of the scoped pointer is executed the plain pointer
   // is released via the Deallocator policy. This implies that you must
   // allocate with the allocator matching the policy (e.g. NewArray for
   // SmartArrayPointer).
- inline ~SmartArrayPointer() { if (p_) DeleteArray(p_); }
+ inline ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
inline T* operator->() const { return p_; }
@@ -81,10 +80,11 @@ class SmartArrayPointer {
   // Assignment requires an empty (NULL) smart pointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
- inline SmartArrayPointer& operator=(const SmartArrayPointer<T>& rhs) {
+ inline SmartPointerBase<Deallocator, T>& operator=(
+ const SmartPointerBase<Deallocator, T>& rhs) {
ASSERT(is_empty());
T* tmp = rhs.p_; // swap to handle self-assignment
- const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
+ const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
p_ = tmp;
return *this;
}
@@ -95,6 +95,45 @@ class SmartArrayPointer {
T* p_;
};
+// A 'scoped array pointer' that calls DeleteArray on its pointer when the
+// destructor is called.
+
+template<typename T>
+struct ArrayDeallocator {
+ static void Delete(T* array) {
+ DeleteArray(array);
+ }
+};
+
+
+template<typename T>
+class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
+ public:
+ inline SmartArrayPointer() { }
+ explicit inline SmartArrayPointer(T* ptr)
+ : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
+ inline SmartArrayPointer(const SmartArrayPointer<T>& rhs)
+ : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
+};
+
+
+template<typename T>
+struct ObjectDeallocator {
+  static void Delete(T* object) {
+    Malloced::Delete(object);
+  }
+};
+
+template<typename T>
+class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
+ public:
+ inline SmartPointer() { }
+ explicit inline SmartPointer(T* ptr)
+ : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
+ inline SmartPointer(const SmartPointer<T>& rhs)
+ : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
+};
+
} } // namespace v8::internal
-#endif // V8_SMART_ARRAY_POINTER_H_
+#endif // V8_SMART_POINTERS_H_
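
The rename reflects policy-based design: SmartPointerBase owns the pointer's
lifetime while the Deallocator template parameter supplies the release
strategy (DeleteArray for SmartArrayPointer, Malloced::Delete for
SmartPointer). A compilable miniature of the same shape, with a hypothetical
free()-based policy:

    #include <cstdio>
    #include <cstdlib>

    template <typename Deallocator, typename T>
    class ScopedBase {
     public:
      explicit ScopedBase(T* p) : p_(p) {}
      ~ScopedBase() { if (p_) Deallocator::Delete(p_); }  // policy decides how
      T* get() const { return p_; }
     private:
      T* p_;
      ScopedBase(const ScopedBase&);             // non-copyable in this sketch
      ScopedBase& operator=(const ScopedBase&);
    };

    template <typename T>
    struct FreeDeallocator {  // policy: release with free()
      static void Delete(T* p) { std::free(p); }
    };

    int main() {
      ScopedBase<FreeDeallocator<char>, char> buf(
          static_cast<char*>(std::malloc(16)));
      std::snprintf(buf.get(), 16, "policy");
      std::puts(buf.get());
      return 0;  // buf's destructor frees the allocation here
    }
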
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index ef89a5ef70..3a4ac70edf 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -60,6 +60,11 @@ bool Snapshot::Initialize(const char* snapshot_file) {
}
+bool Snapshot::HaveASnapshotToStartFrom() {
+ return size_ != 0;
+}
+
+
Handle<Context> Snapshot::NewContextFromSnapshot() {
if (context_size_ == 0) {
return Handle<Context>();
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h
index 4f01a2d629..ab4529e517 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot.h
@@ -40,6 +40,8 @@ class Snapshot {
// could be found.
static bool Initialize(const char* snapshot_file = NULL);
+ static bool HaveASnapshotToStartFrom();
+
// Create a new context using the internal partial snapshot.
static Handle<Context> NewContextFromSnapshot();
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index a0c8f2cba1..62873fa1cf 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -2027,15 +2027,16 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
+ owner_->heap()->incremental_marking()->OldSpaceStep(
+ size_in_bytes - old_linear_size);
+
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
+ reinterpret_cast<Object**>(new_node->address())[i] =
+ Smi::FromInt(kCodeZapValue);
}
#endif
- owner_->heap()->incremental_marking()->OldSpaceStep(
- size_in_bytes - old_linear_size);
-
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
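
Zapping the freshly carved-out region with kCodeZapValue makes debug builds
fail loudly: any read of that memory before proper initialization yields a
recognizable pattern instead of plausible garbage. The idiom in isolation,
with an illustrative zap constant:

    #include <cstddef>

    const unsigned kZapValue = 0xdeadbeef;  // illustrative marker value

    // Fill a region with a recognizable pattern so reads of uninitialized
    // memory stand out in a debugger or crash dump. V8 does this only in
    // debug builds, guarded by #ifdef DEBUG.
    void ZapRegion(void* start, std::size_t size_in_bytes) {
      unsigned* p = static_cast<unsigned*>(start);
      for (std::size_t i = 0; i < size_in_bytes / sizeof(unsigned); i++) {
        p[i] = kZapValue;
      }
    }
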
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index b0ecc5d004..6602c899df 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -2496,6 +2496,10 @@ class LargeObjectSpace : public Space {
return objects_size_;
}
+ intptr_t CommittedMemory() {
+ return Size();
+ }
+
int PageCount() {
return page_count_;
}
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 270fe5a40d..51aa2bb329 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -469,7 +469,7 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
Add("(Function context is outside heap)\n");
return;
}
- Object* token = context->global_context()->security_token();
+ Object* token = context->native_context()->security_token();
if (token != isolate->string_stream_current_security_token()) {
Add("Security context: %o\n", token);
isolate->set_string_stream_current_security_token(token);
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 27948919db..411914719c 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -44,7 +44,7 @@ namespace internal {
StubCache::StubCache(Isolate* isolate, Zone* zone)
- : isolate_(isolate), zone_(zone) {
+ : isolate_(isolate) {
ASSERT(isolate == Isolate::Current());
}
@@ -119,7 +119,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NONEXISTENT);
Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -138,7 +138,7 @@ Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
Handle<JSObject> holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -158,7 +158,8 @@ Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
Handle<AccessorInfo> callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -177,7 +178,8 @@ Handle<Code> StubCache::ComputeLoadViaGetter(Handle<String> name,
Handle<JSObject> holder,
Handle<JSFunction> getter) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -197,7 +199,7 @@ Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CONSTANT_FUNCTION);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -215,7 +217,8 @@ Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::INTERCEPTOR);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -240,7 +243,8 @@ Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
Handle<JSGlobalPropertyCell> cell,
bool is_dont_delete) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -259,7 +263,8 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
Handle<JSObject> holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::FIELD);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -278,8 +283,8 @@ Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> holder,
Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
+ Code::CONSTANT_FUNCTION);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -298,7 +303,7 @@ Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
Handle<JSObject> holder) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -318,7 +323,7 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
Handle<AccessorInfo> callback) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -335,7 +340,7 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
Handle<JSArray> receiver) {
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -351,7 +356,7 @@ Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
Handle<String> receiver) {
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Map> map(receiver->map());
Handle<Object> probe(map->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -369,7 +374,7 @@ Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
Handle<String> name,
Handle<JSFunction> receiver) {
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -387,7 +392,8 @@ Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
int field_index,
Handle<Map> transition,
StrictModeFlag strict_mode) {
- PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
+ Code::StubType type =
+ (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, type, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
@@ -415,7 +421,7 @@ Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
Code::ComputeMonomorphicFlags(
stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
: Code::KEYED_STORE_IC,
- NORMAL,
+ Code::NORMAL,
extra_state);
Handle<String> name;
switch (stub_kind) {
@@ -483,7 +489,7 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
Handle<JSGlobalPropertyCell> cell,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, NORMAL, strict_mode);
+ Code::STORE_IC, Code::NORMAL, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -498,16 +504,18 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
Handle<JSObject> receiver,
+ Handle<JSObject> holder,
Handle<AccessorInfo> callback,
StrictModeFlag strict_mode) {
ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, CALLBACKS, strict_mode);
+ Code::STORE_IC, Code::CALLBACKS, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreCallback(receiver, callback, name);
+ Handle<Code> code =
+ compiler.CompileStoreCallback(name, receiver, holder, callback);
PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
@@ -517,15 +525,17 @@ Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
Handle<Code> StubCache::ComputeStoreViaSetter(Handle<String> name,
Handle<JSObject> receiver,
+ Handle<JSObject> holder,
Handle<JSFunction> setter,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, CALLBACKS, strict_mode);
+ Code::STORE_IC, Code::CALLBACKS, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreViaSetter(receiver, setter, name);
+ Handle<Code> code =
+ compiler.CompileStoreViaSetter(name, receiver, holder, setter);
PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
@@ -537,7 +547,7 @@ Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
Handle<JSObject> receiver,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, INTERCEPTOR, strict_mode);
+ Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -554,7 +564,8 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
int field_index,
Handle<Map> transition,
StrictModeFlag strict_mode) {
- PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
+ Code::StubType type =
+ (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::KEYED_STORE_IC, type, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
@@ -597,7 +608,7 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, CONSTANT_FUNCTION, extra_state,
+ Code::ComputeMonomorphicFlags(kind, Code::CONSTANT_FUNCTION, extra_state,
cache_holder, argc);
Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -635,7 +646,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, FIELD, extra_state,
+ Code::ComputeMonomorphicFlags(kind, Code::FIELD, extra_state,
cache_holder, argc);
Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -672,7 +683,7 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, INTERCEPTOR, extra_state,
+ Code::ComputeMonomorphicFlags(kind, Code::INTERCEPTOR, extra_state,
cache_holder, argc);
Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -702,7 +713,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
IC::GetCodeCacheForObject(*receiver, *holder);
Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, NORMAL, extra_state,
+ Code::ComputeMonomorphicFlags(kind, Code::NORMAL, extra_state,
cache_holder, argc);
Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -735,7 +746,7 @@ Code* StubCache::FindCallInitialize(int argc,
CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
+ Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
// Use raw_unchecked... so we don't get assert failures during GC.
UnseededNumberDictionary* dictionary =
@@ -756,7 +767,7 @@ Handle<Code> StubCache::ComputeCallInitialize(int argc,
CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
+ Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -785,7 +796,7 @@ Handle<Code> StubCache::ComputeCallPreMonomorphic(
Code::Kind kind,
Code::ExtraICState extra_state) {
Code::Flags flags =
- Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, NORMAL, argc);
+ Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -802,7 +813,7 @@ Handle<Code> StubCache::ComputeCallNormal(int argc,
Code::Kind kind,
Code::ExtraICState extra_state) {
Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC, extra_state, NORMAL, argc);
+ Code::ComputeFlags(kind, MONOMORPHIC, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -819,7 +830,7 @@ Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
ASSERT(kind == Code::KEYED_CALL_IC);
Code::Flags flags =
Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
- NORMAL, argc);
+ Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -838,7 +849,7 @@ Handle<Code> StubCache::ComputeCallMegamorphic(
Code::ExtraICState extra_state) {
Code::Flags flags =
Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
- NORMAL, argc);
+ Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -858,7 +869,7 @@ Handle<Code> StubCache::ComputeCallMiss(int argc,
// and monomorphic stubs are not mixed up together in the stub cache.
Code::Flags flags =
Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
- NORMAL, argc, OWN_MAP);
+ Code::NORMAL, argc, OWN_MAP);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -878,7 +889,7 @@ Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
// the actual call ic to carry out the work.
Code::Flags flags =
Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
- NORMAL, argc);
+ Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -897,7 +908,7 @@ Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
// the actual call ic to carry out the work.
Code::Flags flags =
Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
- NORMAL, argc);
+ Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -927,7 +938,8 @@ void StubCache::Clear() {
void StubCache::CollectMatchingMaps(SmallMapList* types,
String* name,
Code::Flags flags,
- Handle<Context> global_context) {
+ Handle<Context> native_context,
+ Zone* zone) {
for (int i = 0; i < kPrimaryTableSize; i++) {
if (primary_[i].key == name) {
Map* map = primary_[i].value->FindFirstMap();
@@ -937,8 +949,8 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
int offset = PrimaryOffset(name, flags, map);
if (entry(primary_, offset) == &primary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
- types->Add(Handle<Map>(map), zone());
+ !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ types->Add(Handle<Map>(map), zone);
}
}
}
@@ -961,8 +973,8 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
// Lookup in secondary table and add matches.
int offset = SecondaryOffset(name, flags, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
- types->Add(Handle<Map>(map), zone());
+ !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ types->Add(Handle<Map>(map), zone);
}
}
}
@@ -993,7 +1005,9 @@ RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) return HEAP->undefined_value();
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
@@ -1058,6 +1072,8 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
+ Handle<Object> result = v8::Utils::OpenHandle(*r);
+ result->VerifyApiCallResultType();
return *v8::Utils::OpenHandle(*r);
}
}
@@ -1114,7 +1130,9 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
*attrs = NONE;
- return *v8::Utils::OpenHandle(*r);
+ Handle<Object> result = v8::Utils::OpenHandle(*r);
+ result->VerifyApiCallResultType();
+ return *result;
}
}
@@ -1351,16 +1369,14 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
Handle<String> name,
LookupResult* lookup) {
holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsProperty()) return;
-
- lookup->NotFound();
+ if (lookup->IsFound()) return;
if (holder->GetPrototype()->IsNull()) return;
-
holder->GetPrototype()->Lookup(*name, lookup);
}
-Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
+Handle<Code> LoadStubCompiler::GetCode(Code::StubType type,
+ Handle<String> name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
@@ -1369,7 +1385,7 @@ Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
}
-Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
+Handle<Code> KeyedLoadStubCompiler::GetCode(Code::StubType type,
Handle<String> name,
InlineCacheState state) {
Code::Flags flags = Code::ComputeFlags(
@@ -1381,7 +1397,7 @@ Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
}
-Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
+Handle<Code> StoreStubCompiler::GetCode(Code::StubType type,
Handle<String> name) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
@@ -1392,7 +1408,7 @@ Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
}
-Handle<Code> KeyedStoreStubCompiler::GetCode(PropertyType type,
+Handle<Code> KeyedStoreStubCompiler::GetCode(Code::StubType type,
Handle<String> name,
InlineCacheState state) {
Code::ExtraICState extra_state =
@@ -1470,7 +1486,8 @@ Handle<Code> CallStubCompiler::CompileCustomCall(
}
-Handle<Code> CallStubCompiler::GetCode(PropertyType type, Handle<String> name) {
+Handle<Code> CallStubCompiler::GetCode(Code::StubType type,
+ Handle<String> name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
@@ -1486,7 +1503,7 @@ Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
if (function->shared()->name()->IsString()) {
function_name = Handle<String>(String::cast(function->shared()->name()));
}
- return GetCode(CONSTANT_FUNCTION, function_name);
+ return GetCode(Code::CONSTANT_FUNCTION, function_name);
}
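
Nearly every Compute* method in this file follows the same memoization
shape: build a Code::Flags key, probe the receiver map's code cache, and
only compile (then record) a stub on a miss. A stripped-down sketch of that
probe-then-compile pattern over a plain std::map, with hypothetical key and
stub types:

    #include <map>
    #include <string>
    #include <utility>

    struct Stub { std::string code; };  // hypothetical compiled stub

    // The key combines the property name and the stub kind, mirroring the
    // name/Code::Flags pair used by FindInCodeCache.
    Stub* ComputeStub(std::map<std::pair<std::string, int>, Stub>& cache,
                      const std::string& name, int kind) {
      std::pair<std::string, int> key(name, kind);
      auto it = cache.find(key);
      if (it != cache.end()) return &it->second;  // hit: reuse the cached stub
      Stub compiled;                              // miss: "compile"...
      compiled.code = "stub for " + name;
      return &(cache[key] = compiled);            // ...and record for next time
    }
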
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index cd0414319e..005c537ab1 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -159,11 +159,13 @@ class StubCache {
Handle<Code> ComputeStoreCallback(Handle<String> name,
Handle<JSObject> receiver,
+ Handle<JSObject> holder,
Handle<AccessorInfo> callback,
StrictModeFlag strict_mode);
Handle<Code> ComputeStoreViaSetter(Handle<String> name,
Handle<JSObject> receiver,
+ Handle<JSObject> holder,
Handle<JSFunction> setter,
StrictModeFlag strict_mode);
@@ -260,7 +262,8 @@ class StubCache {
void CollectMatchingMaps(SmallMapList* types,
String* name,
Code::Flags flags,
- Handle<Context> global_context);
+ Handle<Context> native_context,
+ Zone* zone);
// Generate code for probing the stub cache table.
// Arguments extra, extra2 and extra3 may be used to pass additional scratch
@@ -310,7 +313,6 @@ class StubCache {
Isolate* isolate() { return isolate_; }
Heap* heap() { return isolate()->heap(); }
Factory* factory() { return isolate()->factory(); }
- Zone* zone() const { return zone_; }
private:
StubCache(Isolate* isolate, Zone* zone);
@@ -386,7 +388,6 @@ class StubCache {
Entry primary_[kPrimaryTableSize];
Entry secondary_[kSecondaryTableSize];
Isolate* isolate_;
- Zone* zone_;
friend class Isolate;
friend class SCTableReference;
@@ -550,10 +551,20 @@ class StubCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss);
+ void GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss);
+
void GenerateLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -608,6 +619,9 @@ class LoadStubCompiler: public StubCompiler {
Handle<JSObject> holder,
Handle<AccessorInfo> callback);
+ static void GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter);
+
Handle<Code> CompileLoadViaGetter(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
@@ -629,7 +643,7 @@ class LoadStubCompiler: public StubCompiler {
bool is_dont_delete);
private:
- Handle<Code> GetCode(PropertyType type, Handle<String> name);
+ Handle<Code> GetCode(Code::StubType type, Handle<String> name);
};
@@ -677,7 +691,7 @@ class KeyedLoadStubCompiler: public StubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
private:
- Handle<Code> GetCode(PropertyType type,
+ Handle<Code> GetCode(Code::StubType type,
Handle<String> name,
InlineCacheState state = MONOMORPHIC);
};
@@ -694,13 +708,18 @@ class StoreStubCompiler: public StubCompiler {
Handle<Map> transition,
Handle<String> name);
- Handle<Code> CompileStoreCallback(Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name);
+ Handle<Code> CompileStoreCallback(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback);
- Handle<Code> CompileStoreViaSetter(Handle<JSObject> receiver,
- Handle<JSFunction> setter,
- Handle<String> name);
+ static void GenerateStoreViaSetter(MacroAssembler* masm,
+ Handle<JSFunction> setter);
+
+ Handle<Code> CompileStoreViaSetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter);
Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
Handle<String> name);
@@ -710,7 +729,7 @@ class StoreStubCompiler: public StubCompiler {
Handle<String> name);
private:
- Handle<Code> GetCode(PropertyType type, Handle<String> name);
+ Handle<Code> GetCode(Code::StubType type, Handle<String> name);
StrictModeFlag strict_mode_;
};
@@ -751,7 +770,7 @@ class KeyedStoreStubCompiler: public StubCompiler {
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
private:
- Handle<Code> GetCode(PropertyType type,
+ Handle<Code> GetCode(Code::StubType type,
Handle<String> name,
InlineCacheState state = MONOMORPHIC);
@@ -831,7 +850,7 @@ class CallStubCompiler: public StubCompiler {
Handle<JSFunction> function,
Handle<String> name);
- Handle<Code> GetCode(PropertyType type, Handle<String> name);
+ Handle<Code> GetCode(Code::StubType type, Handle<String> name);
Handle<Code> GetCode(Handle<JSFunction> function);
const ParameterCount& arguments() { return arguments_; }
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
new file mode 100644
index 0000000000..385bdd1199
--- /dev/null
+++ b/deps/v8/src/transitions-inl.h
@@ -0,0 +1,219 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TRANSITIONS_INL_H_
+#define V8_TRANSITIONS_INL_H_
+
+#include "objects-inl.h"
+#include "transitions.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define FIELD_ADDR(p, offset) \
+ (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+
+#define WRITE_FIELD(p, offset, value) \
+ (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ if (heap->InNewSpace(value)) { \
+ heap->RecordWrite(object->address(), offset); \
+ } \
+ }
+
+
+TransitionArray* TransitionArray::cast(Object* object) {
+ ASSERT(object->IsTransitionArray());
+ return reinterpret_cast<TransitionArray*>(object);
+}
+
+
+Map* TransitionArray::elements_transition() {
+ Object* transition_map = get(kElementsTransitionIndex);
+ return Map::cast(transition_map);
+}
+
+
+void TransitionArray::ClearElementsTransition() {
+ WRITE_FIELD(this, kElementsTransitionOffset, Smi::FromInt(0));
+}
+
+
+bool TransitionArray::HasElementsTransition() {
+ return get(kElementsTransitionIndex) != Smi::FromInt(0);
+}
+
+
+void TransitionArray::set_elements_transition(Map* transition_map,
+ WriteBarrierMode mode) {
+ Heap* heap = GetHeap();
+ WRITE_FIELD(this, kElementsTransitionOffset, transition_map);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kElementsTransitionOffset, transition_map, mode);
+}
+
+
+DescriptorArray* TransitionArray::descriptors() {
+ return DescriptorArray::cast(get(kDescriptorsIndex));
+}
+
+
+void TransitionArray::set_descriptors(DescriptorArray* descriptors,
+ WriteBarrierMode mode) {
+ Heap* heap = GetHeap();
+ WRITE_FIELD(this, kDescriptorsOffset, descriptors);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kDescriptorsOffset, descriptors, mode);
+}
+
+
+Object** TransitionArray::GetDescriptorsSlot() {
+ return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
+ kDescriptorsOffset);
+}
+
+
+Object* TransitionArray::back_pointer_storage() {
+ return get(kBackPointerStorageIndex);
+}
+
+
+void TransitionArray::set_back_pointer_storage(Object* back_pointer,
+ WriteBarrierMode mode) {
+ Heap* heap = GetHeap();
+ WRITE_FIELD(this, kBackPointerStorageOffset, back_pointer);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kBackPointerStorageOffset, back_pointer, mode);
+}
+
+
+bool TransitionArray::HasPrototypeTransitions() {
+ Object* prototype_transitions = get(kPrototypeTransitionsIndex);
+ return prototype_transitions != Smi::FromInt(0);
+}
+
+
+FixedArray* TransitionArray::GetPrototypeTransitions() {
+ Object* prototype_transitions = get(kPrototypeTransitionsIndex);
+ return FixedArray::cast(prototype_transitions);
+}
+
+
+HeapObject* TransitionArray::UncheckedPrototypeTransitions() {
+ ASSERT(HasPrototypeTransitions());
+ return reinterpret_cast<HeapObject*>(get(kPrototypeTransitionsIndex));
+}
+
+
+void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
+ WriteBarrierMode mode) {
+ ASSERT(this != NULL);
+ ASSERT(transitions->IsFixedArray());
+ Heap* heap = GetHeap();
+ WRITE_FIELD(this, kPrototypeTransitionsOffset, transitions);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kPrototypeTransitionsOffset, transitions, mode);
+}
+
+
+Object** TransitionArray::GetPrototypeTransitionsSlot() {
+ return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
+ kPrototypeTransitionsOffset);
+}
+
+
+Object** TransitionArray::GetKeySlot(int transition_number) {
+ ASSERT(transition_number < number_of_transitions());
+ return HeapObject::RawField(
+ reinterpret_cast<HeapObject*>(this),
+ OffsetOfElementAt(ToKeyIndex(transition_number)));
+}
+
+
+String* TransitionArray::GetKey(int transition_number) {
+ ASSERT(transition_number < number_of_transitions());
+ return String::cast(get(ToKeyIndex(transition_number)));
+}
+
+
+void TransitionArray::SetKey(int transition_number, String* key) {
+ ASSERT(transition_number < number_of_transitions());
+ set(ToKeyIndex(transition_number), key);
+}
+
+
+Map* TransitionArray::GetTarget(int transition_number) {
+ ASSERT(transition_number < number_of_transitions());
+ return Map::cast(get(ToTargetIndex(transition_number)));
+}
+
+
+void TransitionArray::SetTarget(int transition_number, Map* value) {
+ ASSERT(transition_number < number_of_transitions());
+ set(ToTargetIndex(transition_number), value);
+}
+
+
+PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
+ Map* map = GetTarget(transition_number);
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int descriptor = map->LastAdded();
+ return descriptors->GetDetails(descriptor);
+}
+
+
+int TransitionArray::Search(String* name) {
+ return internal::Search(this, name);
+}
+
+
+void TransitionArray::Set(int transition_number,
+ String* key,
+ Map* target,
+ const WhitenessWitness&) {
+ NoIncrementalWriteBarrierSet(this,
+ ToKeyIndex(transition_number),
+ key);
+ NoIncrementalWriteBarrierSet(this,
+ ToTargetIndex(transition_number),
+ target);
+}
+
+
+#undef FIELD_ADDR
+#undef WRITE_FIELD
+#undef CONDITIONAL_WRITE_BARRIER
+
+
+} } // namespace v8::internal
+
+#endif // V8_TRANSITIONS_INL_H_
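
The accessors above treat the transition array as a small fixed header
(elements transition, prototype transitions, descriptors, back pointer)
followed by interleaved (key, target) pairs, with ToKeyIndex and
ToTargetIndex as simple affine index computations. A sketch of that layout
arithmetic; the slot counts are illustrative assumptions, not V8's actual
offsets:

    // Illustrative layout: a fixed header followed by (key, target) pairs.
    const int kHeaderSlots = 4;  // header fields come first
    const int kEntrySize = 2;    // one key slot plus one target slot

    inline int ToKeyIndex(int transition_number) {
      return kHeaderSlots + transition_number * kEntrySize;
    }

    inline int ToTargetIndex(int transition_number) {
      return ToKeyIndex(transition_number) + 1;  // target follows its key
    }
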
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
new file mode 100644
index 0000000000..6f8b2fec5a
--- /dev/null
+++ b/deps/v8/src/transitions.cc
@@ -0,0 +1,128 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "objects.h"
+#include "transitions-inl.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+
+MaybeObject* TransitionArray::Allocate(int number_of_transitions) {
+ Heap* heap = Isolate::Current()->heap();
+ // Use FixedArray to not use DescriptorArray::cast on incomplete object.
+ FixedArray* array;
+ MaybeObject* maybe_array =
+ heap->AllocateFixedArray(ToKeyIndex(number_of_transitions));
+ if (!maybe_array->To(&array)) return maybe_array;
+
+ array->set(kElementsTransitionIndex, Smi::FromInt(0));
+ array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
+ return array;
+}
+
+
+void TransitionArray::CopyFrom(TransitionArray* origin,
+ int origin_transition,
+ int target_transition,
+ const WhitenessWitness& witness) {
+ Set(target_transition,
+ origin->GetKey(origin_transition),
+ origin->GetTarget(origin_transition),
+ witness);
+}
+
+
+static bool InsertionPointFound(String* key1, String* key2) {
+ return key1->Hash() > key2->Hash();
+}
+
+
+MaybeObject* TransitionArray::NewWith(String* name, Map* target) {
+ TransitionArray* result;
+
+ MaybeObject* maybe_array = TransitionArray::Allocate(1);
+ if (!maybe_array->To(&result)) return maybe_array;
+
+ FixedArray::WhitenessWitness witness(result);
+
+ result->Set(0, name, target, witness);
+ return result;
+}
+
+
+MaybeObject* TransitionArray::CopyInsert(String* name, Map* target) {
+ TransitionArray* result;
+
+ int number_of_transitions = this->number_of_transitions();
+ int new_size = number_of_transitions;
+
+ int insertion_index = this->Search(name);
+ if (insertion_index == kNotFound) ++new_size;
+
+ MaybeObject* maybe_array;
+ maybe_array = TransitionArray::Allocate(new_size);
+ if (!maybe_array->To(&result)) return maybe_array;
+
+ if (HasElementsTransition()) {
+ result->set_elements_transition(elements_transition());
+ }
+
+ if (HasPrototypeTransitions()) {
+ result->SetPrototypeTransitions(GetPrototypeTransitions());
+ }
+
+ FixedArray::WhitenessWitness witness(result);
+
+ if (insertion_index != kNotFound) {
+ for (int i = 0; i < number_of_transitions; ++i) {
+ if (i != insertion_index) result->CopyFrom(this, i, i, witness);
+ }
+ result->Set(insertion_index, name, target, witness);
+ return result;
+ }
+
+ insertion_index = 0;
+ for (; insertion_index < number_of_transitions; ++insertion_index) {
+ if (InsertionPointFound(GetKey(insertion_index), name)) break;
+ result->CopyFrom(this, insertion_index, insertion_index, witness);
+ }
+
+ result->Set(insertion_index, name, target, witness);
+
+ for (; insertion_index < number_of_transitions; ++insertion_index) {
+ result->CopyFrom(this, insertion_index, insertion_index + 1, witness);
+ }
+
+ return result;
+}
+
+
+} } // namespace v8::internal
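
CopyInsert above allocates a fresh array and splices the new (key, target) pair in at the position chosen by InsertionPointFound, so entries stay ordered by string hash. A minimal standalone sketch of the same splice, using a plain std::vector and stand-in Entry/hash types instead of V8's heap-allocated arrays (the overwrite-existing-key branch is omitted):

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-ins for V8's String* keys and Map* targets.
struct Entry {
  uint32_t hash;      // plays the role of String::Hash()
  std::string key;
  int target;         // plays the role of Map*
};

// Returns a copy of |old| with |entry| spliced in so that entries stay
// sorted by hash -- the same shape as TransitionArray::CopyInsert.
std::vector<Entry> CopyInsert(const std::vector<Entry>& old, Entry entry) {
  std::vector<Entry> result;
  result.reserve(old.size() + 1);
  size_t i = 0;
  // Copy everything that hashes at or below the new key.
  while (i < old.size() && old[i].hash <= entry.hash) result.push_back(old[i++]);
  result.push_back(entry);  // insertion point found
  // Copy the rest, shifted one slot to the right.
  while (i < old.size()) result.push_back(old[i++]);
  return result;
}

int main() {
  std::vector<Entry> v = { {10, "a", 1}, {30, "c", 3} };
  v = CopyInsert(v, Entry{20, "b", 2});
  return v[1].key == "b" ? 0 : 1;  // lands between the two existing hashes
}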
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
new file mode 100644
index 0000000000..63e52badcd
--- /dev/null
+++ b/deps/v8/src/transitions.h
@@ -0,0 +1,190 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TRANSITIONS_H_
+#define V8_TRANSITIONS_H_
+
+#include "elements-kind.h"
+#include "heap.h"
+#include "isolate.h"
+#include "objects.h"
+#include "v8checks.h"
+
+namespace v8 {
+namespace internal {
+
+
+// TransitionArrays are fixed arrays used to hold map transitions for property,
+// constant, and element changes.
+// The format of these objects is:
+// [0] Descriptor array
+// [1] Undefined or back pointer map
+// [2] Smi(0) or elements transition map
+// [3] Smi(0) or fixed array of prototype transitions
+// [4] First transition
+// [length() - kTransitionSize] Last transition
+class TransitionArray: public FixedArray {
+ public:
+ // Accessors for fetching instance transition at transition number.
+ inline String* GetKey(int transition_number);
+ inline void SetKey(int transition_number, String* value);
+ inline Object** GetKeySlot(int transition_number);
+ int GetSortedKeyIndex(int transition_number) { return transition_number; }
+
+ String* GetSortedKey(int transition_number) {
+ return GetKey(transition_number);
+ }
+
+ inline Map* GetTarget(int transition_number);
+ inline void SetTarget(int transition_number, Map* target);
+
+ inline PropertyDetails GetTargetDetails(int transition_number);
+
+ inline Map* elements_transition();
+ inline void set_elements_transition(
+ Map* target,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline bool HasElementsTransition();
+ inline void ClearElementsTransition();
+
+ inline DescriptorArray* descriptors();
+ inline void set_descriptors(DescriptorArray* descriptors,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline Object** GetDescriptorsSlot();
+
+ inline Object* back_pointer_storage();
+ inline void set_back_pointer_storage(
+ Object* back_pointer,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ inline FixedArray* GetPrototypeTransitions();
+ inline void SetPrototypeTransitions(
+ FixedArray* prototype_transitions,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline Object** GetPrototypeTransitionsSlot();
+ inline bool HasPrototypeTransitions();
+ inline HeapObject* UncheckedPrototypeTransitions();
+
+ // Returns the number of transitions in the array.
+ int number_of_transitions() {
+ ASSERT(length() >= kFirstIndex);
+ int len = length();
+ return len <= kFirstIndex ? 0 : (len - kFirstIndex) / kTransitionSize;
+ }
+
+ inline int number_of_entries() { return number_of_transitions(); }
+
+ // Allocate a new transition array with a single entry.
+ static MUST_USE_RESULT MaybeObject* NewWith(String* name, Map* target);
+
+ // Copy the transition array, inserting a new transition.
+ // TODO(verwaest): This should not cause an existing transition to be
+ // overwritten.
+ MUST_USE_RESULT MaybeObject* CopyInsert(String* name, Map* target);
+
+ // Copy a single transition from the origin array.
+ inline void CopyFrom(TransitionArray* origin,
+ int origin_transition,
+ int target_transition,
+ const WhitenessWitness& witness);
+
+ // Search for a transition with the given property name.
+ inline int Search(String* name);
+
+ // Allocates a TransitionArray.
+ MUST_USE_RESULT static MaybeObject* Allocate(int number_of_transitions);
+
+ // Casting.
+ static inline TransitionArray* cast(Object* obj);
+
+ // Constant for denoting key was not found.
+ static const int kNotFound = -1;
+
+ static const int kDescriptorsIndex = 0;
+ static const int kBackPointerStorageIndex = 1;
+ static const int kElementsTransitionIndex = 2;
+ static const int kPrototypeTransitionsIndex = 3;
+ static const int kFirstIndex = 4;
+
+ // Layout of the transition array header.
+ static const int kDescriptorsOffset = FixedArray::kHeaderSize;
+ static const int kBackPointerStorageOffset = kDescriptorsOffset +
+ kPointerSize;
+ static const int kElementsTransitionOffset = kBackPointerStorageOffset +
+ kPointerSize;
+ static const int kPrototypeTransitionsOffset = kElementsTransitionOffset +
+ kPointerSize;
+
+ // Layout of map transition.
+ static const int kTransitionKey = 0;
+ static const int kTransitionTarget = 1;
+ static const int kTransitionSize = 2;
+
+#ifdef OBJECT_PRINT
+ // Print all the transitions.
+ inline void PrintTransitions() {
+ PrintTransitions(stdout);
+ }
+ void PrintTransitions(FILE* out);
+#endif
+
+#ifdef DEBUG
+ bool IsSortedNoDuplicates();
+ bool IsConsistentWithBackPointers(Map* current_map);
+ bool IsEqualTo(TransitionArray* other);
+#endif
+
+ // The maximum number of transitions we want in a transition array (should
+ // fit in a page).
+ static const int kMaxNumberOfTransitions = 1024 + 512;
+
+ private:
+ // Conversion from transition number to array indices.
+ static int ToKeyIndex(int transition_number) {
+ return kFirstIndex +
+ (transition_number * kTransitionSize) +
+ kTransitionKey;
+ }
+
+ static int ToTargetIndex(int transition_number) {
+ return kFirstIndex +
+ (transition_number * kTransitionSize) +
+ kTransitionTarget;
+ }
+
+ inline void Set(int transition_number,
+ String* key,
+ Map* target,
+ const WhitenessWitness&);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_TRANSITIONS_H_
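
The fixed header (descriptors, back pointer, elements transition, prototype transitions) occupies the first kFirstIndex slots, and each transition then takes kTransitionSize slots. The index arithmetic can be checked in isolation; a small sketch assuming only the constants declared in the class above:

#include <cassert>

// Mirrors the slot constants declared in TransitionArray.
const int kFirstIndex = 4;       // slots 0..3 hold the header
const int kTransitionKey = 0;
const int kTransitionTarget = 1;
const int kTransitionSize = 2;   // one key slot + one target slot

int ToKeyIndex(int n)    { return kFirstIndex + n * kTransitionSize + kTransitionKey; }
int ToTargetIndex(int n) { return kFirstIndex + n * kTransitionSize + kTransitionTarget; }

int main() {
  // Transition 0 lives at slots 4 (key) and 5 (target), transition 1 at 6 and 7.
  assert(ToKeyIndex(0) == 4 && ToTargetIndex(0) == 5);
  assert(ToKeyIndex(1) == 6 && ToTargetIndex(1) == 7);
  // number_of_transitions() inverts this: (length - kFirstIndex) / kTransitionSize.
  int length = ToTargetIndex(1) + 1;  // 8 slots total
  assert((length - kFirstIndex) / kTransitionSize == 2);
  return 0;
}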
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index f5e9106742..bc6a46b4b6 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -60,10 +60,10 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> global_context,
+ Handle<Context> native_context,
Isolate* isolate,
Zone* zone) {
- global_context_ = global_context;
+ native_context_ = native_context;
isolate_ = isolate;
zone_ = zone;
BuildDictionary(code);
@@ -71,8 +71,13 @@ TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
}
-Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
- int entry = dictionary_->FindEntry(ast_id);
+static uint32_t IdToKey(TypeFeedbackId ast_id) {
+ return static_cast<uint32_t>(ast_id.ToInt());
+}
+
+
+Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
+ int entry = dictionary_->FindEntry(IdToKey(ast_id));
return entry != UnseededNumberDictionary::kNotFound
? Handle<Object>(dictionary_->ValueAt(entry))
: Handle<Object>::cast(isolate_->factory()->undefined_value());
@@ -80,7 +85,7 @@ Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsMap()) return false;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -91,22 +96,23 @@ bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- return code->is_keyed_load_stub() &&
+ bool preliminary_checks = code->is_keyed_load_stub() &&
code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
- code->FindFirstMap() != NULL &&
- !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
+ Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
+ if (!preliminary_checks) return false;
+ Map* map = code->FindFirstMap();
+ return map != NULL && !CanRetainOtherContext(map, *native_context_);
}
return false;
}
bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Builtins* builtins = isolate_->builtins();
@@ -118,27 +124,29 @@ bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
}
-bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
bool allow_growth =
Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
ALLOW_JSARRAY_GROWTH;
- return code->is_keyed_store_stub() &&
+ bool preliminary_checks =
+ code->is_keyed_store_stub() &&
!allow_growth &&
code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
- code->FindFirstMap() != NULL &&
- !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
+ Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
+ if (!preliminary_checks) return false;
+ Map* map = code->FindFirstMap();
+ return map != NULL && !CanRetainOtherContext(map, *native_context_);
}
return false;
}
-bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Builtins* builtins = isolate_->builtins();
@@ -156,26 +164,26 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
- Handle<Object> value = GetInfo(expr->id());
+ Handle<Object> value = GetInfo(expr->CallFeedbackId());
return value->IsMap() || value->IsSmi() || value->IsJSFunction();
}
bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
- Handle<Object> value = GetInfo(expr->id());
+ Handle<Object> value = GetInfo(expr->CallNewFeedbackId());
return value->IsJSFunction();
}
bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
ObjectLiteral::Property* prop) {
- Handle<Object> map_or_code = GetInfo(prop->key()->id());
+ Handle<Object> map_or_code = GetInfo(prop->key()->LiteralFeedbackId());
return map_or_code->IsMap();
}
bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
- Handle<Object> value = GetInfo(stmt->PrepareId());
+ Handle<Object> value = GetInfo(stmt->ForInFeedbackId());
return value->IsSmi() &&
Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker;
}
@@ -183,12 +191,12 @@ bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
ASSERT(LoadIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Map* first_map = code->FindFirstMap();
ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *global_context_)
+ return CanRetainOtherContext(first_map, *native_context_)
? Handle<Map>::null()
: Handle<Map>(first_map);
}
@@ -196,14 +204,15 @@ Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
}
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
- ASSERT(StoreIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->id());
+Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
+ TypeFeedbackId ast_id) {
+ ASSERT(StoreIsMonomorphicNormal(ast_id));
+ Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Map* first_map = code->FindFirstMap();
ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *global_context_)
+ return CanRetainOtherContext(first_map, *native_context_)
? Handle<Map>::null()
: Handle<Map>(first_map);
}
@@ -214,16 +223,18 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- CollectReceiverTypes(expr->id(), name, flags, types);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
+ CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
}
void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
- CollectReceiverTypes(expr->id(), name, flags, types);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::STORE_IC, Code::NORMAL);
+ CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
}
@@ -239,16 +250,16 @@ void TypeFeedbackOracle::CallReceiverTypes(Call* expr,
CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
- NORMAL,
+ Code::NORMAL,
extra_ic_state,
OWN_MAP,
arity);
- CollectReceiverTypes(expr->id(), name, flags, types);
+ CollectReceiverTypes(expr->CallFeedbackId(), name, flags, types);
}
CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
- Handle<Object> value = GetInfo(expr->id());
+ Handle<Object> value = GetInfo(expr->CallFeedbackId());
if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
ASSERT(check != RECEIVER_MAP_CHECK);
@@ -264,13 +275,13 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
UNREACHABLE();
break;
case STRING_CHECK:
- function = global_context_->string_function();
+ function = native_context_->string_function();
break;
case NUMBER_CHECK:
- function = global_context_->number_function();
+ function = native_context_->number_function();
break;
case BOOLEAN_CHECK:
- function = global_context_->boolean_function();
+ function = native_context_->boolean_function();
break;
}
ASSERT(function != NULL);
@@ -279,30 +290,30 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->id()));
+ return Handle<JSFunction>::cast(GetInfo(expr->CallFeedbackId()));
}
Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->id()));
+ return Handle<JSFunction>::cast(GetInfo(expr->CallNewFeedbackId()));
}
Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
ObjectLiteral::Property* prop) {
ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
- return Handle<Map>::cast(GetInfo(prop->key()->id()));
+ return Handle<Map>::cast(GetInfo(prop->key()->LiteralFeedbackId()));
}
bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
- return *GetInfo(expr->id()) ==
+ return *GetInfo(expr->PropertyFeedbackId()) ==
isolate_->builtins()->builtin(id);
}
TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -332,7 +343,7 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
if (!object->IsCode()) return false;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return false;
@@ -342,7 +353,7 @@ bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
if (!object->IsCode()) return Handle<Map>::null();
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return Handle<Map>::null();
@@ -352,14 +363,14 @@ Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
}
Map* first_map = code->FindFirstMap();
ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *global_context_)
+ return CanRetainOtherContext(first_map, *native_context_)
? Handle<Map>::null()
: Handle<Map>(first_map);
}
TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->UnaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -378,7 +389,7 @@ TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -462,7 +473,7 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
- Handle<Object> object = GetInfo(expr->CountId());
+ Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -490,7 +501,7 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
}
-void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
+void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<String> name,
Code::Flags flags,
SmallMapList* types) {
@@ -511,26 +522,27 @@ void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
isolate_->stub_cache()->CollectMatchingMaps(types,
*name,
flags,
- global_context_);
+ native_context_,
+ zone());
}
}
-// Check if a map originates from a given global context. We use this
+// Check if a map originates from a given native context. We use this
 // information to filter out maps from a different context to avoid
// retaining objects from different tabs in Chrome via optimized code.
bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
- Context* global_context) {
+ Context* native_context) {
Object* constructor = NULL;
while (!map->prototype()->IsNull()) {
constructor = map->constructor();
if (!constructor->IsNull()) {
// If the constructor is not null or a JSFunction, we have to
- // conservatively assume that it may retain a global context.
+ // conservatively assume that it may retain a native context.
if (!constructor->IsJSFunction()) return true;
// Check if the constructor directly references a foreign context.
if (CanRetainOtherContext(JSFunction::cast(constructor),
- global_context)) {
+ native_context)) {
return true;
}
}
@@ -539,14 +551,14 @@ bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
constructor = map->constructor();
if (constructor->IsNull()) return false;
JSFunction* function = JSFunction::cast(constructor);
- return CanRetainOtherContext(function, global_context);
+ return CanRetainOtherContext(function, native_context);
}
bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
- Context* global_context) {
- return function->context()->global() != global_context->global()
- && function->context()->global() != global_context->builtins();
+ Context* native_context) {
+ return function->context()->global_object() != native_context->global_object()
+ && function->context()->global_object() != native_context->builtins();
}
@@ -559,7 +571,7 @@ static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
}
-void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
+void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
if (!object->IsCode()) return;
@@ -573,7 +585,7 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
Object* object = info->target_object();
if (object->IsMap()) {
Map* map = Map::cast(object);
- if (!CanRetainOtherContext(map, *global_context_)) {
+ if (!CanRetainOtherContext(map, *native_context_)) {
AddMapIfMissing(Handle<Map>(map), types, zone());
}
}
@@ -582,7 +594,7 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
}
-byte TypeFeedbackOracle::ToBooleanTypes(unsigned ast_id) {
+byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId ast_id) {
Handle<Object> object = GetInfo(ast_id);
return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
}
@@ -643,7 +655,8 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
for (int i = 0; i < infos->length(); i++) {
RelocInfo reloc_entry = (*infos)[i];
Address target_address = reloc_entry.target_address();
- unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
+ TypeFeedbackId ast_id =
+ TypeFeedbackId(static_cast<unsigned>((*infos)[i].data()));
Code* target = Code::GetCodeFromTargetAddress(target_address);
switch (target->kind()) {
case Code::LOAD_IC:
@@ -659,7 +672,7 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
if (map == NULL) {
SetInfo(ast_id, static_cast<Object*>(target));
} else if (!CanRetainOtherContext(Map::cast(map),
- *global_context_)) {
+ *native_context_)) {
SetInfo(ast_id, map);
}
}
@@ -696,21 +709,22 @@ void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
Handle<TypeFeedbackCells> cache(
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
for (int i = 0; i < cache->CellCount(); i++) {
- unsigned ast_id = cache->AstId(i)->value();
+ TypeFeedbackId ast_id = cache->AstId(i);
Object* value = cache->Cell(i)->value();
if (value->IsSmi() ||
(value->IsJSFunction() &&
!CanRetainOtherContext(JSFunction::cast(value),
- *global_context_))) {
+ *native_context_))) {
SetInfo(ast_id, value);
}
}
}
-void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
- ASSERT(dictionary_->FindEntry(ast_id) == UnseededNumberDictionary::kNotFound);
- MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
+void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
+ ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
+ UnseededNumberDictionary::kNotFound);
+ MaybeObject* maybe_result = dictionary_->AtNumberPut(IdToKey(ast_id), target);
USE(maybe_result);
#ifdef DEBUG
Object* result = NULL;
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 74910cd1a7..00d88c2afc 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -232,18 +232,18 @@ class UnaryOperation;
class ForInStatement;
-class TypeFeedbackOracle BASE_EMBEDDED {
+class TypeFeedbackOracle: public ZoneObject {
public:
TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> global_context,
+ Handle<Context> native_context,
Isolate* isolate,
Zone* zone);
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsUninitialized(Property* expr);
bool LoadIsMegamorphicWithTypeInfo(Property* expr);
- bool StoreIsMonomorphicNormal(Expression* expr);
- bool StoreIsMegamorphicWithTypeInfo(Expression* expr);
+ bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
+ bool StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id);
bool CallIsMonomorphic(Call* expr);
bool CallNewIsMonomorphic(CallNew* expr);
bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
@@ -251,7 +251,7 @@ class TypeFeedbackOracle BASE_EMBEDDED {
bool IsForInFastCase(ForInStatement* expr);
Handle<Map> LoadMonomorphicReceiverType(Property* expr);
- Handle<Map> StoreMonomorphicReceiverType(Expression* expr);
+ Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId ast_id);
void LoadReceiverTypes(Property* expr,
Handle<String> name,
@@ -263,12 +263,12 @@ class TypeFeedbackOracle BASE_EMBEDDED {
Handle<String> name,
CallKind call_kind,
SmallMapList* types);
- void CollectKeyedReceiverTypes(unsigned ast_id,
+ void CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types);
- static bool CanRetainOtherContext(Map* map, Context* global_context);
+ static bool CanRetainOtherContext(Map* map, Context* native_context);
static bool CanRetainOtherContext(JSFunction* function,
- Context* global_context);
+ Context* native_context);
CheckType GetCallCheckType(Call* expr);
Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
@@ -283,7 +283,7 @@ class TypeFeedbackOracle BASE_EMBEDDED {
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
 // of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
- byte ToBooleanTypes(unsigned ast_id);
+ byte ToBooleanTypes(TypeFeedbackId ast_id);
// Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr);
@@ -297,12 +297,12 @@ class TypeFeedbackOracle BASE_EMBEDDED {
Zone* zone() const { return zone_; }
private:
- void CollectReceiverTypes(unsigned ast_id,
+ void CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<String> name,
Code::Flags flags,
SmallMapList* types);
- void SetInfo(unsigned ast_id, Object* target);
+ void SetInfo(TypeFeedbackId ast_id, Object* target);
void BuildDictionary(Handle<Code> code);
void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
@@ -315,9 +315,9 @@ class TypeFeedbackOracle BASE_EMBEDDED {
// Returns an element from the backing store. Returns undefined if
// there is no information.
- Handle<Object> GetInfo(unsigned ast_id);
+ Handle<Object> GetInfo(TypeFeedbackId ast_id);
- Handle<Context> global_context_;
+ Handle<Context> native_context_;
Isolate* isolate_;
Handle<UnseededNumberDictionary> dictionary_;
Zone* zone_;
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index 9c0ebf9e1b..ec9c69f8da 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -29,6 +29,7 @@
#define V8_UNICODE_INL_H_
#include "unicode.h"
+#include "checks.h"
namespace unibrow {
@@ -144,6 +145,7 @@ uchar CharacterStream::GetNext() {
} else {
remaining_--;
}
+ ASSERT(BoundsCheck(cursor_));
return result;
}
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 94ab1b4c1e..91b16c9f35 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -201,6 +201,7 @@ class CharacterStream {
protected:
virtual void FillBuffer() = 0;
+ virtual bool BoundsCheck(unsigned offset) = 0;
// The number of characters left in the current buffer
unsigned remaining_;
// The current offset within the buffer
@@ -228,6 +229,9 @@ class InputBuffer : public CharacterStream {
InputBuffer() { }
explicit InputBuffer(Input input) { Reset(input); }
virtual void FillBuffer();
+ virtual bool BoundsCheck(unsigned offset) {
+ return (buffer_ != util_buffer_) || (offset < kSize);
+ }
// A custom offset that can be used by the string implementation to
// mark progress within the encoded string.
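
The new BoundsCheck hook lets the base class assert, after every GetNext(), that the advanced cursor still points into whatever buffer the subclass manages; InputBuffer's override only constrains the fixed-size utility buffer. A reduced sketch of the pattern, with simplified names rather than the real class hierarchy:

#include <cassert>

// Base class performs the read; subclasses define what "in bounds" means.
class Stream {
 public:
  virtual ~Stream() {}
  int GetNext() {
    int result = buffer_[cursor_++];
    assert(BoundsCheck(cursor_));  // debug-only sanity check, as in GetNext()
    return result;
  }
 protected:
  virtual bool BoundsCheck(unsigned offset) = 0;
  const int* buffer_ = nullptr;
  unsigned cursor_ = 0;
};

class FixedStream : public Stream {
 public:
  explicit FixedStream(const int* data) { buffer_ = data; }
 protected:
  // Allow the one-past-the-end cursor; anything beyond that is a bug.
  virtual bool BoundsCheck(unsigned offset) { return offset <= kSize; }
 private:
  static const unsigned kSize = 64;
};

int main() {
  static const int data[] = {7, 8, 9};
  FixedStream s(data);
  return s.GetNext() == 7 ? 0 : 1;
}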
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index f116c14db3..dc3a171c8d 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -248,6 +248,7 @@ class BitField {
// bitfield without compiler warnings we have to compute 2^32 without
// using a shift count of 32.
static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
+ static const uint32_t kShift = shift;
// Value for the field with all bits set.
static const T kMax = static_cast<T>((1U << size) - 1);
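
Exposing kShift alongside kMask lets callers that already hold a raw shifted value compose fields without re-encoding. A self-contained sketch of the encode/decode arithmetic under the same definitions, with hypothetical field positions:

#include <cassert>
#include <cstdint>

// Same scheme as the BitField template above: a field of |size| bits
// starting at bit |shift| within a 32-bit word.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
  static const uint32_t kShift = shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t word) { return static_cast<T>((word & kMask) >> shift); }
};

// Two hypothetical adjacent fields packed into one word.
typedef BitField<int, 0, 3> LowField;    // bits 0..2
typedef BitField<int, 3, 5> HighField;   // bits 3..7

int main() {
  uint32_t word = LowField::encode(5) | HighField::encode(17);
  assert(LowField::decode(word) == 5);
  assert(HighField::decode(word) == 17);
  assert(HighField::kShift == 3);  // usable directly by callers
  return 0;
}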
@@ -980,6 +981,52 @@ class EnumSet {
T bits_;
};
+
+class TypeFeedbackId {
+ public:
+ explicit TypeFeedbackId(int id) : id_(id) { }
+ int ToInt() const { return id_; }
+
+ static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
+ bool IsNone() const { return id_ == kNoneId; }
+
+ private:
+ static const int kNoneId = -1;
+
+ int id_;
+};
+
+
+class BailoutId {
+ public:
+ explicit BailoutId(int id) : id_(id) { }
+ int ToInt() const { return id_; }
+
+ static BailoutId None() { return BailoutId(kNoneId); }
+ static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
+ static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
+ static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
+
+ bool IsNone() const { return id_ == kNoneId; }
+ bool operator==(const BailoutId& other) const { return id_ == other.id_; }
+
+ private:
+ static const int kNoneId = -1;
+
+ // Using 0 as an id could disguise errors.
+ static const int kFunctionEntryId = 2;
+
+ // This AST id identifies the point after the declarations have been visited.
+ // We need it to capture the environment effects of declarations that emit
+ // code (function declarations).
+ static const int kDeclarationsId = 3;
+
+ // Every FunctionState starts with this id.
+ static const int kFirstUsableId = 4;
+
+ int id_;
+};
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
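
The point of wrapping raw unsigned ids in TypeFeedbackId and BailoutId is that the compiler now rejects call sites that confuse the two id spaces, which a bare `unsigned ast_id` parameter could not. A sketch of what the wrappers buy, with hypothetical consumer functions standing in for the oracle and deoptimizer entry points:

#include <cassert>

class TypeFeedbackId {
 public:
  explicit TypeFeedbackId(int id) : id_(id) {}
  int ToInt() const { return id_; }
 private:
  int id_;
};

class BailoutId {
 public:
  explicit BailoutId(int id) : id_(id) {}
  int ToInt() const { return id_; }
 private:
  int id_;
};

// Hypothetical consumers: each accepts only its own id space.
int LookupFeedback(TypeFeedbackId id) { return id.ToInt(); }
int LookupBailout(BailoutId id) { return id.ToInt(); }

int main() {
  TypeFeedbackId feedback(7);
  BailoutId bailout(7);
  assert(LookupFeedback(feedback) == 7);
  assert(LookupBailout(bailout) == 7);
  // LookupFeedback(bailout);  // would not compile -- the point of the change
  // LookupFeedback(7);        // also rejected: the constructor is explicit
  return 0;
}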
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index c6aa9cb7f8..3f83dffcae 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,11 +34,23 @@ namespace internal {
Counters::Counters() {
#define HT(name, caption) \
- HistogramTimer name = { #caption, NULL, false, 0, 0 }; \
+ HistogramTimer name = { {#caption, 0, 10000, 50, NULL, false}, 0, 0 }; \
name##_ = name;
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HP(name, caption) \
+ Histogram name = { #caption, 0, 101, 100, NULL, false }; \
+ name##_ = name;
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ Histogram name = { #caption, 1000, 500000, 50, NULL, false }; \
+ name##_ = name;
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
#define SC(name, caption) \
StatsCounter name = { "c:" #caption, NULL, false };\
name##_ = name;
@@ -47,6 +59,34 @@ Counters::Counters() {
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name) \
+ StatsCounter count_of_##name = { "c:" "V8.CountOf_" #name, NULL, false };\
+ count_of_##name##_ = count_of_##name; \
+ StatsCounter size_of_##name = { "c:" "V8.SizeOf_" #name, NULL, false };\
+ size_of_##name##_ = size_of_##name;
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter count_of_CODE_TYPE_##name = { \
+ "c:" "V8.CountOf_CODE_TYPE-" #name, NULL, false }; \
+ count_of_CODE_TYPE_##name##_ = count_of_CODE_TYPE_##name; \
+ StatsCounter size_of_CODE_TYPE_##name = { \
+ "c:" "V8.SizeOf_CODE_TYPE-" #name, NULL, false }; \
+ size_of_CODE_TYPE_##name##_ = size_of_CODE_TYPE_##name;
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter count_of_FIXED_ARRAY_##name = { \
+ "c:" "V8.CountOf_FIXED_ARRAY-" #name, NULL, false }; \
+ count_of_FIXED_ARRAY_##name##_ = count_of_FIXED_ARRAY_##name; \
+ StatsCounter size_of_FIXED_ARRAY_##name = { \
+ "c:" "V8.SizeOf_FIXED_ARRAY-" #name, NULL, false }; \
+ size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name;
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
StatsCounter state_counters[] = {
#define COUNTER_NAME(name) \
{ "c:V8.State" #name, NULL, false },
@@ -59,4 +99,18 @@ Counters::Counters() {
}
}
+void Counters::ResetHistograms() {
+#define HT(name, caption) name##_.Reset();
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define HP(name, caption) name##_.Reset();
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) name##_.Reset();
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+}
+
} } // namespace v8::internal
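
Each HISTOGRAM_*/STATS_COUNTER list above is an X-macro: the list takes a macro as its argument and expands it once per entry, so a single list of names can stamp out initializers, accessors, enum ids, and member fields, as the constructor and header below both do. A tiny illustration of the technique with a toy two-entry list (not V8's):

#include <cstdio>

// Toy X-macro list: one line per counter, reused for every expansion.
#define MY_COUNTER_LIST(V) \
  V(parse_time, "V8.ParseTime") \
  V(compile_time, "V8.CompileTime")

struct Counter { const char* caption; int value; };

struct Counters {
  // Expansion 1: one accessor per entry.
#define ACCESSOR(name, caption) Counter* name() { return &name##_; }
  MY_COUNTER_LIST(ACCESSOR)
#undef ACCESSOR

  // Expansion 2: one member per entry, initialized from the same list.
#define MEMBER(name, caption) Counter name##_ = { caption, 0 };
  MY_COUNTER_LIST(MEMBER)
#undef MEMBER
};

int main() {
  Counters counters;
  counters.parse_time()->value++;
  std::printf("%s = %d\n", counters.parse_time()->caption,
              counters.parse_time()->value);
  return 0;
}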
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 6db9c77edc..fad3454812 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "counters.h"
+#include "objects.h"
#include "v8globals.h"
namespace v8 {
@@ -50,6 +51,36 @@ namespace internal {
HT(compile_lazy, V8.CompileLazy)
+#define HISTOGRAM_PERCENTAGE_LIST(HP) \
+ HP(external_fragmentation_total, \
+ V8.MemoryExternalFragmentationTotal) \
+ HP(external_fragmentation_old_pointer_space, \
+ V8.MemoryExternalFragmentationOldPointerSpace) \
+ HP(external_fragmentation_old_data_space, \
+ V8.MemoryExternalFragmentationOldDataSpace) \
+ HP(external_fragmentation_code_space, \
+ V8.MemoryExternalFragmentationCodeSpace) \
+ HP(external_fragmentation_map_space, \
+ V8.MemoryExternalFragmentationMapSpace) \
+ HP(external_fragmentation_cell_space, \
+ V8.MemoryExternalFragmentationCellSpace) \
+ HP(external_fragmentation_lo_space, \
+ V8.MemoryExternalFragmentationLoSpace) \
+ HP(heap_fraction_map_space, \
+ V8.MemoryHeapFractionMapSpace) \
+ HP(heap_fraction_cell_space, \
+ V8.MemoryHeapFractionCellSpace) \
+
+
+#define HISTOGRAM_MEMORY_LIST(HM) \
+ HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \
+ HM(heap_sample_total_used, V8.MemoryHeapSampleTotalUsed) \
+ HM(heap_sample_map_space_committed, \
+ V8.MemoryHeapSampleMapSpaceCommitted) \
+ HM(heap_sample_cell_space_committed, \
+ V8.MemoryHeapSampleCellSpaceCommitted)
+
+
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
// Intellisense to crash. It was broken into two macros (each of length 40
// lines) rather than one macro (of length about 80 lines) to work around
@@ -210,6 +241,9 @@ namespace internal {
SC(compute_entry_frame, V8.ComputeEntryFrame) \
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
+ SC(fast_new_closure_total, V8.FastNewClosureTotal) \
+ SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
+ SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
SC(string_add_runtime, V8.StringAddRuntime) \
SC(string_add_native, V8.StringAddNative) \
SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \
@@ -240,14 +274,33 @@ namespace internal {
SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(other_ticks, V8.OtherTicks) \
- SC(js_opt_ticks, V8.JsOptTicks) \
- SC(js_non_opt_ticks, V8.JsNonoptTicks) \
- SC(js_other_ticks, V8.JsOtherTicks) \
SC(smi_checks_removed, V8.SmiChecksRemoved) \
SC(map_checks_removed, V8.MapChecksRemoved) \
SC(quote_json_char_count, V8.QuoteJsonCharacterCount) \
- SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount)
+ SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount) \
+ SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
+ SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
+ SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
+ SC(old_pointer_space_bytes_available, \
+ V8.MemoryOldPointerSpaceBytesAvailable) \
+ SC(old_pointer_space_bytes_committed, \
+ V8.MemoryOldPointerSpaceBytesCommitted) \
+ SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \
+ SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \
+ SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \
+ SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \
+ SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \
+ SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \
+ SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \
+ SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \
+ SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \
+ SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
+ SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
+ SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
+ SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
+ SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
+ SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
+ SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
// This file contains all the v8 counters that are in use.
@@ -258,20 +311,69 @@ class Counters {
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HP(name, caption) \
+ Histogram* name() { return &name##_; }
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ Histogram* name() { return &name##_; }
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
#define SC(name, caption) \
StatsCounter* name() { return &name##_; }
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name) \
+ StatsCounter* count_of_##name() { return &count_of_##name##_; } \
+ StatsCounter* size_of_##name() { return &size_of_##name##_; }
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter* count_of_CODE_TYPE_##name() \
+ { return &count_of_CODE_TYPE_##name##_; } \
+ StatsCounter* size_of_CODE_TYPE_##name() \
+ { return &size_of_CODE_TYPE_##name##_; }
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter* count_of_FIXED_ARRAY_##name() \
+ { return &count_of_FIXED_ARRAY_##name##_; } \
+ StatsCounter* size_of_FIXED_ARRAY_##name() \
+ { return &size_of_FIXED_ARRAY_##name##_; }
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
enum Id {
#define RATE_ID(name, caption) k_##name,
HISTOGRAM_TIMER_LIST(RATE_ID)
#undef RATE_ID
+#define PERCENTAGE_ID(name, caption) k_##name,
+ HISTOGRAM_PERCENTAGE_LIST(PERCENTAGE_ID)
+#undef PERCENTAGE_ID
+#define MEMORY_ID(name, caption) k_##name,
+ HISTOGRAM_MEMORY_LIST(MEMORY_ID)
+#undef MEMORY_ID
#define COUNTER_ID(name, caption) k_##name,
STATS_COUNTER_LIST_1(COUNTER_ID)
STATS_COUNTER_LIST_2(COUNTER_ID)
#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOf##name, kSizeOf##name,
+ INSTANCE_TYPE_LIST(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfCODE_TYPE_##name, \
+ kSizeOfCODE_TYPE_##name,
+ CODE_KIND_LIST(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \
+ kSizeOfFIXED_ARRAY__##name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
+#undef COUNTER_ID
#define COUNTER_ID(name) k_##name,
STATE_TAG_LIST(COUNTER_ID)
#undef COUNTER_ID
@@ -282,18 +384,48 @@ class Counters {
return &state_counters_[state];
}
+ void ResetHistograms();
+
private:
#define HT(name, caption) \
HistogramTimer name##_;
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HP(name, caption) \
+ Histogram name##_;
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ Histogram name##_;
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
#define SC(name, caption) \
StatsCounter name##_;
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name) \
+ StatsCounter size_of_##name##_; \
+ StatsCounter count_of_##name##_;
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter size_of_CODE_TYPE_##name##_; \
+ StatsCounter count_of_CODE_TYPE_##name##_;
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter size_of_FIXED_ARRAY_##name##_; \
+ StatsCounter count_of_FIXED_ARRAY_##name##_;
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
enum {
#define COUNTER_ID(name) __##name,
STATE_TAG_LIST(COUNTER_ID)
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 2910a0700d..2407037b32 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -106,13 +106,16 @@ void V8::TearDown() {
if (!has_been_set_up_ || has_been_disposed_) return;
+ // The isolate has to be torn down before clearing the LOperand
+ // caches so that the optimizing compiler thread (if running)
+ // doesn't see an inconsistent view of the lithium instructions.
+ isolate->TearDown();
+ delete isolate;
+
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
RegisteredExtension::UnregisterAll();
- isolate->TearDown();
- delete isolate;
-
is_running_ = false;
has_been_disposed_ = true;
@@ -166,7 +169,7 @@ void V8::SetReturnAddressLocationResolver(
// Used by JavaScript APIs
uint32_t V8::Random(Context* context) {
- ASSERT(context->IsGlobalContext());
+ ASSERT(context->IsNativeContext());
ByteArray* seed = context->random_seed();
return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
}
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 6a1766a1a5..3d214f8dd3 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -94,6 +94,7 @@ const uint32_t kDebugZapValue = 0xbadbaddb;
const uint32_t kFreeListZapValue = 0xfeed1eaf;
#endif
+const int kCodeZapValue = 0xbadc0de;
// Number of bits to represent the page size for paged spaces. The value of 20
 // gives 1MB per page.
@@ -126,6 +127,7 @@ class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
+class TransitionArray;
class ExternalReference;
class FixedArray;
class FunctionTemplateInfo;
@@ -311,14 +313,6 @@ typedef void (*StoreBufferCallback)(Heap* heap,
StoreBufferEvent event);
-// Whether to remove map transitions and constant transitions from a
-// DescriptorArray.
-enum TransitionFlag {
- REMOVE_TRANSITIONS,
- KEEP_TRANSITIONS
-};
-
-
// Union used for fast testing of specific double values.
union DoubleRepresentation {
double value;
@@ -366,11 +360,12 @@ struct AccessorDescriptor {
// VMState object leaves a state by popping the current state from the
// stack.
-#define STATE_TAG_LIST(V) \
- V(JS) \
- V(GC) \
- V(COMPILER) \
- V(OTHER) \
+#define STATE_TAG_LIST(V) \
+ V(JS) \
+ V(GC) \
+ V(COMPILER) \
+ V(PARALLEL_COMPILER_PROLOGUE) \
+ V(OTHER) \
V(EXTERNAL)
enum StateTag {
@@ -442,6 +437,7 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
+ VFP2 = 3, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
@@ -483,16 +479,17 @@ const uint64_t kLastNonNaNInt64 =
(static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
+// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode {
// User declared variables:
VAR, // declared via 'var', and 'function' declarations
CONST, // declared via 'const' declarations
- CONST_HARMONY, // declared via 'const' declarations in harmony mode
-
LET, // declared via 'let' declarations
+ CONST_HARMONY, // declared via 'const' declarations in harmony mode
+
// Variables introduced by the compiler:
DYNAMIC, // always require dynamic lookup (we don't know
// the declaration)
@@ -514,6 +511,26 @@ enum VariableMode {
};
+inline bool IsDynamicVariableMode(VariableMode mode) {
+ return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
+}
+
+
+inline bool IsDeclaredVariableMode(VariableMode mode) {
+ return mode >= VAR && mode <= CONST_HARMONY;
+}
+
+
+inline bool IsLexicalVariableMode(VariableMode mode) {
+ return mode >= LET && mode <= CONST_HARMONY;
+}
+
+
+inline bool IsImmutableVariableMode(VariableMode mode) {
+ return mode == CONST || mode == CONST_HARMONY;
+}
+
+
// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
// and immutable bindings that can be in two states: initialized and
// uninitialized. In ES5 only immutable bindings have these two states. When
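
Reordering the enum so LET and CONST_HARMONY are adjacent is what makes the range predicates above valid: each predicate becomes one pair of comparisons instead of a chain of equality tests. A sketch showing why the ordering matters, using the same predicate shape over a trimmed copy of the enum:

#include <cassert>

// Order matters: every predicate below assumes these are contiguous.
enum VariableMode {
  VAR, CONST, LET, CONST_HARMONY,          // user declared
  DYNAMIC, DYNAMIC_GLOBAL, DYNAMIC_LOCAL   // introduced by the compiler
};

inline bool IsDeclaredVariableMode(VariableMode m) {
  return m >= VAR && m <= CONST_HARMONY;
}
inline bool IsLexicalVariableMode(VariableMode m) {
  return m >= LET && m <= CONST_HARMONY;
}
inline bool IsDynamicVariableMode(VariableMode m) {
  return m >= DYNAMIC && m <= DYNAMIC_LOCAL;
}

int main() {
  assert(IsDeclaredVariableMode(LET) && !IsDynamicVariableMode(LET));
  assert(IsLexicalVariableMode(CONST_HARMONY) && !IsLexicalVariableMode(CONST));
  // Before the reorder, CONST_HARMONY sat between CONST and LET, so no
  // single range could cover exactly the lexical modes (LET, CONST_HARMONY).
  return 0;
}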
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index fd8d536401..32ea5e197c 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -238,12 +238,18 @@ static int ArchiveSpacePerThread() {
ThreadState::ThreadState(ThreadManager* thread_manager)
: id_(ThreadId::Invalid()),
terminate_on_restore_(false),
+ data_(NULL),
next_(this),
previous_(this),
thread_manager_(thread_manager) {
}
+ThreadState::~ThreadState() {
+ DeleteArray<char>(data_);
+}
+
+
void ThreadState::AllocateSpace() {
data_ = NewArray<char>(ArchiveSpacePerThread());
}
@@ -306,8 +312,19 @@ ThreadManager::ThreadManager()
ThreadManager::~ThreadManager() {
delete mutex_;
- delete free_anchor_;
- delete in_use_anchor_;
+ DeleteThreadStateList(free_anchor_);
+ DeleteThreadStateList(in_use_anchor_);
+}
+
+
+void ThreadManager::DeleteThreadStateList(ThreadState* anchor) {
+ // The list starts and ends with the anchor.
+ for (ThreadState* current = anchor->next_; current != anchor;) {
+ ThreadState* next = current->next_;
+ delete current;
+ current = next;
+ }
+ delete anchor;
}
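
The thread-state lists are circular and anchored: an empty list is an anchor whose next_ points back at itself, so deletion walks forward until it returns to the anchor, saving next_ before each delete. A standalone sketch of that traversal with a simplified node type:

// Minimal circular anchored list, mirroring DeleteThreadStateList.
struct Node {
  Node* next;
  Node() : next(this) {}  // a fresh anchor points at itself
};

void DeleteList(Node* anchor) {
  // The list starts and ends with the anchor; stop when we come back to it.
  for (Node* current = anchor->next; current != anchor;) {
    Node* next = current->next;  // save before deleting the node
    delete current;
    current = next;
  }
  delete anchor;
}

int main() {
  Node* anchor = new Node();
  // Link two nodes after the anchor (singly linked is enough for the demo).
  Node* a = new Node();
  Node* b = new Node();
  a->next = b;
  b->next = anchor;
  anchor->next = a;
  DeleteList(anchor);  // frees a, b, then the anchor itself
  return 0;
}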
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index a2aee4e338..8dce8602f6 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -57,6 +57,7 @@ class ThreadState {
private:
explicit ThreadState(ThreadManager* thread_manager);
+ ~ThreadState();
void AllocateSpace();
@@ -114,6 +115,8 @@ class ThreadManager {
ThreadManager();
~ThreadManager();
+ void DeleteThreadStateList(ThreadState* anchor);
+
void EagerlyArchiveThread();
Mutex* mutex_;
diff --git a/deps/v8/src/v8utils.cc b/deps/v8/src/v8utils.cc
index 042a60f0b4..627169e709 100644
--- a/deps/v8/src/v8utils.cc
+++ b/deps/v8/src/v8utils.cc
@@ -53,6 +53,15 @@ void PrintF(FILE* out, const char* format, ...) {
}
+void PrintPID(const char* format, ...) {
+ OS::Print("[%d] ", OS::GetCurrentProcessId());
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+}
+
+
void Flush(FILE* out) {
fflush(out);
}
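
PrintPID prints the pid prefix with a plain call and then forwards its own variadic arguments through a va_list to the vprintf-style OS::VPrint. The va_start/va_end bracketing is the standard way to forward varargs; a minimal sketch against plain stdio, with a fixed "[worker]" prefix standing in for the V8-internal process-id call:

#include <cstdarg>
#include <cstdio>

// Same forwarding shape as PrintPID.
void PrintWithPrefix(const char* format, ...) {
  std::fprintf(stdout, "[worker] ");  // prefix emitted before the payload
  va_list arguments;
  va_start(arguments, format);        // must name the last fixed parameter
  std::vfprintf(stdout, format, arguments);
  va_end(arguments);
}

int main() {
  PrintWithPrefix("compiled %d functions in %s\n", 42, "lazy mode");
  return 0;
}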
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index bb587e1733..9072b4e285 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -57,6 +57,9 @@ namespace internal {
void PRINTF_CHECKING PrintF(const char* format, ...);
void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
+// Prepends the current process ID to the output.
+void PRINTF_CHECKING PrintPID(const char* format, ...);
+
// Our version of fflush.
void Flush(FILE* out);
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 32ad5bc5dd..0416f3a390 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -41,7 +41,7 @@ const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
case CONST: return "CONST";
- case CONST_HARMONY: return "CONST";
+ case CONST_HARMONY: return "CONST_HARMONY";
case LET: return "LET";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
@@ -81,10 +81,11 @@ Variable::Variable(Scope* scope,
}
-bool Variable::is_global() const {
+bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
+ return mode_ != TEMPORARY && !IsLexicalVariableMode(mode_)
+ && scope_ != NULL && scope_->is_global_scope();
}
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index f49b6e1276..ba26b80472 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -55,7 +55,7 @@ class Variable: public ZoneObject {
UNALLOCATED,
// A slot in the parameter section on the stack. index() is the
- // parameter index, counting left-to-right. The reciever is index -1;
+ // parameter index, counting left-to-right. The receiver is index -1;
// the first parameter is index 0.
PARAMETER,
@@ -118,21 +118,14 @@ class Variable: public ZoneObject {
bool IsStackAllocated() const { return IsParameter() || IsStackLocal(); }
bool IsContextSlot() const { return location_ == CONTEXT; }
bool IsLookupSlot() const { return location_ == LOOKUP; }
+ bool IsGlobalObjectProperty() const;
- bool is_dynamic() const {
- return (mode_ == DYNAMIC ||
- mode_ == DYNAMIC_GLOBAL ||
- mode_ == DYNAMIC_LOCAL);
- }
- bool is_const_mode() const {
- return (mode_ == CONST ||
- mode_ == CONST_HARMONY);
- }
+ bool is_dynamic() const { return IsDynamicVariableMode(mode_); }
+ bool is_const_mode() const { return IsImmutableVariableMode(mode_); }
bool binding_needs_init() const {
return initialization_flag_ == kNeedsInitialization;
}
- bool is_global() const;
bool is_this() const { return kind_ == THIS; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 102b645525..628665caf1 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 11
-#define BUILD_NUMBER 10
-#define PATCH_LEVEL 22
+#define MINOR_VERSION 13
+#define BUILD_NUMBER 7
+#define PATCH_LEVEL 1
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index c647e56c97..384940dfa5 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -47,6 +47,8 @@ inline const char* StateToString(StateTag state) {
return "GC";
case COMPILER:
return "COMPILER";
+ case PARALLEL_COMPILER_PROLOGUE:
+ return "PARALLEL_COMPILER_PROLOGUE";
case OTHER:
return "OTHER";
case EXTERNAL:
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index a9cc2ef287..f3940e8255 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -65,10 +65,10 @@ void Assembler::emitw(uint16_t x) {
void Assembler::emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
} else {
RecordRelocInfo(rmode);
}
@@ -309,10 +309,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 2f0c542bc2..862a735579 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -75,6 +75,7 @@ void CpuFeatures::Probe() {
// Save old rsp, since we are going to modify the stack.
__ push(rbp);
__ pushfq();
+ __ push(rdi);
__ push(rcx);
__ push(rbx);
__ movq(rbp, rsp);
@@ -128,6 +129,7 @@ void CpuFeatures::Probe() {
__ movq(rsp, rbp);
__ pop(rbx);
__ pop(rcx);
+ __ pop(rdi);
__ popfq();
__ pop(rbp);
__ ret(0);
@@ -348,7 +350,8 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
code_targets_(100),
positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -467,7 +470,7 @@ void Assembler::bind_to(Label* L, int pos) {
static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
ASSERT(offset_to_next <= 0);
int disp = pos - (fixup_pos + sizeof(int8_t));
- ASSERT(is_int8(disp));
+ CHECK(is_int8(disp));
set_byte_at(fixup_pos, disp);
if (offset_to_next < 0) {
L->link_to(fixup_pos + offset_to_next, Label::kNear);
@@ -875,7 +878,7 @@ void Assembler::call(Label* L) {
void Assembler::call(Handle<Code> target,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
@@ -1232,7 +1235,16 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
const int long_size = 6;
int offs = L->pos() - pc_offset();
ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
+ // Determine whether we can use 1-byte offsets for backwards branches,
+ // which have a max range of 128 bytes.
+
+ // We also need to check the predictable_code_size_ flag here, because
+ // on x64, when the full code generator recompiles code for debugging, some
+ // places need to be padded out to a certain size. The debugger is keeping
+ // track of how often it did this so that it can adjust return addresses on
+ // the stack, but if the size of jump instructions can also change, that's
+ // not enough and the calculated offsets would be incorrect.
+ if (is_int8(offs - short_size) && !predictable_code_size_) {
// 0111 tttn #8-bit disp.
emit(0x70 | cc);
emit((offs - short_size) & 0xFF);
@@ -1289,7 +1301,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
+ if (is_int8(offs - short_size) && !predictable_code_size_) {
// 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - short_size) & 0xFF);
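
A bound backward label can be reached with a 2-byte short jump when the displacement fits in a signed byte; otherwise the 6-byte long form is needed. With predictable_code_size_ set, the assembler always emits the long form so that recompiling with debug support cannot change instruction sizes. A sketch of just the displacement decision, with the actual encoding elided:

#include <cassert>

inline bool is_int8(int x) { return x >= -128 && x <= 127; }

// Mirrors the choice in Assembler::j / Assembler::jmp: short form only if
// the backward displacement fits in one byte AND sizes may vary freely.
int JumpSizeBytes(int label_pos, int pc_offset, bool predictable_code_size) {
  const int short_size = 2;  // opcode + 8-bit displacement
  const int long_size = 6;   // opcode bytes + 32-bit displacement
  int offs = label_pos - pc_offset;
  assert(offs <= 0);  // backward branch to an already-bound label
  if (is_int8(offs - short_size) && !predictable_code_size) return short_size;
  return long_size;
}

int main() {
  assert(JumpSizeBytes(100, 110, false) == 2);  // -10 fits in a byte
  assert(JumpSizeBytes(0, 1000, false) == 6);   // too far back for a byte
  assert(JumpSizeBytes(100, 110, true) == 6);   // forced long for debugging
  return 0;
}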
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 9f5f850294..e00b403199 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -455,6 +455,7 @@ class CpuFeatures : public AllStatic {
ASSERT(initialized_);
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
+ if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
@@ -560,6 +561,11 @@ class Assembler : public AssemblerBase {
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ // Avoids using instructions that vary in size in unpredictable ways between
+ // the snapshot and the running VM. This is needed by the full compiler so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -1201,7 +1207,7 @@ class Assembler : public AssemblerBase {
void call(Label* L);
void call(Handle<Code> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
// Calls directly to the given address using a relative offset.
// Should only ever be used in Code objects for calls within the
@@ -1432,6 +1438,7 @@ class Assembler : public AssemblerBase {
protected:
bool emit_debug_code() const { return emit_debug_code_; }
+ bool predictable_code_size() const { return predictable_code_size_; }
private:
byte* addr_at(int pos) { return buffer_ + pos; }
@@ -1451,7 +1458,7 @@ class Assembler : public AssemblerBase {
inline void emitw(uint16_t x);
inline void emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
void emit(Immediate x) { emitl(x.value_); }
// Emits a REX prefix that encodes a 64-bit operand size and
@@ -1636,6 +1643,7 @@ class Assembler : public AssemblerBase {
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
+ bool predictable_code_size_;
friend class PositionsRecorder;
};
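
[annotation] The header hunks above replace the raw `unsigned ast_id = kNoASTId` parameters with a dedicated TypeFeedbackId type. A minimal standalone sketch of the interface the call sites in this diff rely on — None(), IsNone() and ToInt() — with an illustrative sentinel value:

    class TypeFeedbackId {
     public:
      explicit TypeFeedbackId(int id) : id_(id) {}
      static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
      bool IsNone() const { return id_ == kNoneId; }
      int ToInt() const { return id_; }
     private:
      static const int kNoneId = -1;  // illustrative sentinel
      int id_;
    };
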
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 0af0a43477..9e4153a868 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -73,6 +73,45 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ movq(kScratchRegister,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(kScratchRegister,
+ FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
+ __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ jmp(kScratchRegister);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
+
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -711,9 +750,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
__ movq(rbx, FieldOperand(rbx, kGlobalIndex));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
@@ -896,9 +935,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
__ movq(rbx, FieldOperand(rbx, kGlobalOffset));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
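
[annotation] GenerateTailCallToSharedCode above dereferences two tagged fields and then jumps past the Code header. A standalone sketch of the same address arithmetic (offsets are illustrative stand-ins, not the real V8 layout):

    #include <cstdint>

    static const intptr_t kHeapObjectTag = 1;
    static const int kSharedFunctionInfoOffset = 24;  // illustrative offsets
    static const int kCodeOffset = 16;
    static const int kCodeHeaderSize = 96;

    typedef uint8_t* Address;

    static Address LoadField(Address tagged_object, int offset) {
      // FieldOperand(obj, off) reads memory at obj - tag + off.
      return *reinterpret_cast<Address*>(tagged_object - kHeapObjectTag + offset);
    }

    Address SharedCodeEntry(Address tagged_function) {
      Address shared = LoadField(tagged_function, kSharedFunctionInfoOffset);
      Address code = LoadField(shared, kCodeOffset);
      // lea(scratch, FieldOperand(code, Code::kHeaderSize)) computes:
      return code - kHeapObjectTag + kCodeHeaderSize;
    }
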
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 17b5ce93b4..3fa93b2983 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -62,9 +62,13 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in rsi.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
__ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
// Get the function info from the stack.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
@@ -72,36 +76,113 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
+ __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
+ __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
__ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
__ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+ __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
__ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
__ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
__ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ movq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ testq(rbx, rbx);
+ __ j(not_zero, &check_optimized, Label::kNear);
+ }
+ __ bind(&install_unoptimized);
+ __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
+ rdi); // Initialize with undefined.
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+  // rcx holds the native context, rbx points to the fixed array of
+  // 3-element entries (native context, optimized code, literals).

+ // The optimized code map must never be empty, so check the first elements.
+ Label install_optimized;
+  // Speculatively move the code object into rdx.
+ __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
+ __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
+ __ j(equal, &install_optimized);
+
+  // Iterate through the rest of the map backwards. rdx holds an index.
+ Label loop;
+ Label restore;
+ __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ bind(&loop);
+ // Do not double check first entry.
+ __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
+ __ j(equal, &restore);
+ __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry.
+ __ cmpq(rcx, FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &loop, Label::kNear);
+ // Hit: fetch the optimized code.
+ __ movq(rdx, FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+ // TODO(fschneider): Idea: store proper code pointers in the map and either
+ // unmangle them on marking or do nothing as the whole map is discarded on
+ // major GC anyway.
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+
+ // Now link a function into a list of optimized functions.
+ __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
+ // No need for write barrier as JSFunction (rax) is in the new space.
+
+ __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
+ // Store JSFunction (rax) into rdx before issuing write barrier as
+ // it clobbers all the registers passed.
+ __ movq(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ rdx,
+ rbx,
+ kDontSaveFPRegs);
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
+ __ bind(&restore);
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&install_unoptimized);
+
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ pop(rcx); // Temporarily remove return address.
@@ -136,8 +217,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
// Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -178,9 +259,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
@@ -190,7 +271,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(equal, message);
}
__ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
__ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
@@ -200,8 +281,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
// Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
+ __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
// Initialize the rest of the slots to the hole value.
__ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
@@ -2375,10 +2456,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rax = address of new object(s) (tagged)
// rcx = argument count (untagged)
- // Get the arguments boilerplate from the current (global) context into rdi.
+ // Get the arguments boilerplate from the current native context into rdi.
Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
__ testq(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -2591,9 +2672,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
- // Get the arguments boilerplate from the current (global) context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
__ movq(rdi, Operand(rdi, offset));
@@ -2710,7 +2791,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Calculate number of capture registers (number_of_captures + 1) * 2.
__ leal(rdx, Operand(rdx, rdx, times_1, 2));
// Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize));
__ j(above, &runtime);
// rax: RegExp data (FixedArray)
@@ -3122,8 +3203,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// r8: Number of array elements as smi.
// Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
__ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
@@ -6010,6 +6091,8 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
+ // FastNewClosureStub::Generate
+ { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -6333,6 +6416,74 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ masm->CallStub(&stub);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // Save volatile registers.
+ // Live registers at this point are the same as at the start of any
+ // JS function:
+ // o rdi: the JS function object being called (i.e. ourselves)
+ // o rsi: our context
+ // o rbp: our caller's frame pointer
+ // o rsp: stack pointer (pointing to return address)
+ // o rcx: rcx is zero for method calls and non-zero for function calls.
+#ifdef _WIN64
+ const int kNumSavedRegisters = 1;
+
+ __ push(rcx);
+#else
+ const int kNumSavedRegisters = 3;
+
+ __ push(rcx);
+ __ push(rdi);
+ __ push(rsi);
+#endif
+
+ // Calculate the original stack pointer and store it in the second arg.
+#ifdef _WIN64
+ __ lea(rdx, Operand(rsp, kNumSavedRegisters * kPointerSize));
+#else
+ __ lea(rsi, Operand(rsp, kNumSavedRegisters * kPointerSize));
+#endif
+
+ // Calculate the function address to the first arg.
+#ifdef _WIN64
+ __ movq(rcx, Operand(rdx, 0));
+ __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
+#else
+ __ movq(rdi, Operand(rsi, 0));
+ __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
+#endif
+
+ // Call the entry hook function.
+ __ movq(rax, &entry_hook_, RelocInfo::NONE);
+ __ movq(rax, Operand(rax, 0));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+
+ const int kArgumentCount = 2;
+ __ PrepareCallCFunction(kArgumentCount);
+ __ CallCFunction(rax, kArgumentCount);
+
+ // Restore volatile regs.
+#ifdef _WIN64
+ __ pop(rcx);
+#else
+ __ pop(rsi);
+ __ pop(rdi);
+ __ pop(rcx);
+#endif
+
+ __ Ret();
+}
+
#undef __
} } // namespace v8::internal
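
[annotation] The new fast path in FastNewClosureStub above walks SharedFunctionInfo's optimized code map in assembly. A standalone C++ sketch of the same probe (field names are stand-ins; the real map is a flat FixedArray of 3-word entries, scanned newest-first as the stub does):

    #include <cstddef>

    struct Entry { void* native_context; void* code; void* literals; };

    // Returns the cached optimized code for `context`, or NULL on a miss,
    // in which case the stub installs the unoptimized shared code instead.
    void* LookupOptimizedCode(Entry* map, int entries, void* context) {
      for (int i = entries - 1; i >= 0; i--) {
        if (map[i].native_context == context) return map[i].code;
      }
      return NULL;
    }
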
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index f3046b9ce3..0502502ab0 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -52,6 +52,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
+ // The optimized code is going to be patched, so we cannot use it
+ // any more. Play safe and reset the whole cache.
+ function->shared()->ClearOptimizedCodeMap();
+
// Get the optimized code.
Code* code = function->code();
@@ -100,8 +104,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
+ // Iterate over all the functions which share the same code object
+ // and make them use unoptimized version.
+ Context* context = function->context()->native_context();
+ Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+ SharedFunctionInfo* shared = function->shared();
+ while (!element->IsUndefined()) {
+ JSFunction* func = JSFunction::cast(element);
+ // Grab element before code replacement as ReplaceCode alters the list.
+ element = func->next_function_link();
+ if (func->code() == code) {
+ func->ReplaceCode(shared->code());
+ }
+ }
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -188,11 +203,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@@ -214,7 +229,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// the ast id. Confusing.
ASSERT(bailout_id_ == ast_id);
- int bailout_id = LookupBailoutId(data, ast_id);
+ int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@@ -234,9 +249,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
+ int closure_id = iterator.Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@@ -341,15 +356,15 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(pc);
}
Code* continuation =
- function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
+ function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
}
}
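
[annotation] The OSR hunks above stop reading the closure from the translation stream and instead expect the Translation::kSelfLiteralId sentinel, letting the deoptimizer fall back to function_. A standalone sketch of the scheme (the sentinel value here is illustrative, not the real constant):

    static const int kSelfLiteralId = -1;  // illustrative sentinel

    // Code generator side: the function being compiled is not added to the
    // deoptimization literal array; the sentinel is recorded instead.
    int ClosureIdFor(bool closure_is_self, int literal_index) {
      return closure_is_self ? kSelfLiteralId : literal_index;
    }

    // Deoptimizer side: the sentinel means "use function_ directly".
    bool RefersToCompiledFunction(int closure_id) {
      return closure_id == kSelfLiteralId;
    }
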
@@ -576,16 +591,143 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (FLAG_trace_deopt) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 1 stack entry for the return address + 4 stack entries from
+ // StackFrame::INTERNAL (FP, context, frame type, code object, see
+ // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
+ // entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub can not be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+  // The context can be read from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
+
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
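
[annotation] DoComputeAccessorStubFrame above lays out an INTERNAL frame by hand. A standalone sketch of the fixed slots it writes, top of frame first, matching the 1 + 4 (+ 1 for setters) accounting in its comment:

    // Frame slots, highest address first:
    //
    //   caller's pc            <- 1 return-address entry
    //   caller's fp            \
    //   context                 | 4 entries from StackFrame::INTERNAL
    //   frame-type marker       |
    //   accessor stub code     /
    //   implicit return value  <- setter frames only

    unsigned AccessorFrameSize(bool is_setter_stub_frame, unsigned pointer_size) {
      unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
      return fixed_frame_entries * pointer_size;  // height_in_bytes is always 0
    }
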
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 0738153588..c8606c40b2 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -703,6 +703,9 @@ int DisassemblerX64::F6F7Instruction(byte* data) {
case 4:
mnem = "mul";
break;
+ case 5:
+ mnem = "imul";
+ break;
case 7:
mnem = "idiv";
break;
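
[annotation] The disassembler hunk above adds the /5 case of the x86 F6/F7 opcode group, where bits 3..5 of the ModRM byte select the operation. A standalone sketch of that decoding:

    #include <cstdint>

    const char* F7Mnemonic(uint8_t modrm) {
      switch ((modrm >> 3) & 7) {  // the /digit opcode extension
        case 0: return "test";
        case 2: return "not";
        case 3: return "neg";
        case 4: return "mul";
        case 5: return "imul";     // the newly handled case
        case 6: return "div";
        case 7: return "idiv";
        default: return "(unknown)";
      }
    }
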
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index a3e42eb505..78e1dec513 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -123,6 +123,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -172,10 +174,13 @@ void FullCodeGenerator::Generate() {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
+ Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in rdi.
__ push(rdi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
@@ -252,7 +257,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@@ -267,7 +272,7 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
@@ -310,10 +315,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
// Self-optimization is a one-off thing; if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
- if (isolate()->IsDebuggerActive()) {
- // Detect debug break requests as soon as possible.
- reset_value = 10;
- }
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
@@ -323,10 +324,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 162;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -338,7 +335,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -394,7 +391,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance = kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -508,12 +505,20 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ Move(result_register(), lit);
+ if (lit->IsSmi()) {
+ __ SafeMove(result_register(), Smi::cast(*lit));
+ } else {
+ __ Move(result_register(), lit);
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- __ Push(lit);
+ if (lit->IsSmi()) {
+ __ SafePush(Smi::cast(*lit));
+ } else {
+ __ Push(lit);
+ }
}
@@ -757,7 +762,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
@@ -811,10 +816,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
+ ASSERT(IsDeclaredVariableMode(mode));
PropertyAttributes attr =
- (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -1100,22 +1104,32 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label fixed_array;
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- __ j(not_equal, &fixed_array, Label::kNear);
+ __ j(not_equal, &fixed_array);
// We got a map in register rax. Get the enumeration cache from it.
__ bind(&use_cache);
+
+ Label no_descriptors;
+
+ __ EnumLength(rdx, rax);
+ __ Cmp(rdx, Smi::FromInt(0));
+ __ j(equal, &no_descriptors);
+
__ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
- __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(rax); // Map.
- __ push(rdx); // Enumeration cache.
- __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ push(rax); // Enumeration cache length (as smi).
+ __ push(rcx); // Enumeration cache.
+ __ push(rdx); // Number of valid entries for the map in the enum cache.
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&exit);
+
// We got a fixed array in register rax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
@@ -1124,7 +1138,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(rbx, cell);
__ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
@@ -1283,9 +1297,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ movq(temp, context);
}
// Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
- // Terminate at global context.
+ // Terminate at native context.
__ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
@@ -1570,7 +1584,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1596,7 +1610,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1807,11 +1821,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1866,14 +1880,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1895,7 +1909,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movq(rax, rcx);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1944,7 +1959,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(rdx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@@ -2070,7 +2086,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, rcx);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ movq(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
@@ -2123,7 +2139,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2163,7 +2179,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2186,6 +2202,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(rax);
} else {
VisitForStackValue(expr->obj());
@@ -2199,7 +2216,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ic_total_count_++;
__ call(code, rmode, ast_id);
}
@@ -2222,7 +2239,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2255,7 +2272,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2275,20 +2292,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code, but not in the snapshot.
- if (!Serializer::enabled()) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ Move(rbx, cell);
- }
+ // Record call targets in unoptimized code.
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ Move(rbx, cell);
CallFunctionStub stub(arg_count, flags);
__ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->id());
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2463,20 +2478,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ movq(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ Move(rbx, cell);
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
- }
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ Move(rbx, cell);
- CallConstructStub stub(flags);
+ CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(rax);
@@ -2617,7 +2626,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(rax);
+ if (generate_debug_code_) __ AbortIfSmi(rax);
// Check whether this map has already been checked to be safe for default
// valueOf.
@@ -2645,9 +2654,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Operand(
rbx, index.reg, index.scale, FixedArray::kHeaderSize));
// Calculate location of the first key name.
- __ addq(rbx,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
+ __ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
Label entry, loop;
@@ -2656,7 +2663,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rdx, FieldOperand(rbx, 0));
__ Cmp(rdx, FACTORY->value_of_symbol());
__ j(equal, if_false);
- __ addq(rbx, Immediate(kPointerSize));
+ __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmpq(rbx, rcx);
__ j(not_equal, &loop);
@@ -2670,8 +2677,8 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ testq(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ cmpq(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ j(not_equal, if_false);
@@ -2841,7 +2848,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (FLAG_debug_code) __ AbortIfNotSmi(rax);
+ if (generate_debug_code_) __ AbortIfNotSmi(rax);
context()->Plug(rax);
}
@@ -2948,12 +2955,14 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
__ PrepareCallCFunction(1);
#ifdef _WIN64
- __ movq(rcx, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
#else
- __ movq(rdi, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
#endif
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@@ -3027,19 +3036,18 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = rax;
Register result = rax;
Register scratch = rcx;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ JumpIfSmi(object, &not_date_object);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ Assert(equal, "Trying to get date field from non-date.");
-#endif
+ __ j(not_equal, &not_date_object);
if (index->value() == 0) {
__ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3061,8 +3069,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
#endif
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&done);
+ __ jmp(&done);
}
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(rax);
}
@@ -3327,10 +3339,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
- __ CmpObjectType(rax, JS_FUNCTION_PROXY_TYPE, rbx);
- __ j(equal, &proxy);
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &runtime);
// InvokeFunction requires the function in rdi. Move it in there.
__ movq(rdi, result_register());
@@ -3340,7 +3353,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(rax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3369,7 +3382,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3382,9 +3395,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = rax;
Register cache = rbx;
Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
__ movq(cache,
- FieldOperand(cache, GlobalObject::kGlobalContextOffset));
+ FieldOperand(cache, GlobalObject::kNativeContextOffset));
__ movq(cache,
ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ movq(cache,
@@ -3485,9 +3498,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(rax);
- }
+ __ AbortIfNotString(rax);
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
@@ -3565,7 +3576,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Loop condition: while (index < array_length).
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmpq(index, array_length);
__ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
}
@@ -3811,7 +3822,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@@ -3969,7 +3980,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register rax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
context()->Plug(rax);
}
@@ -4025,7 +4037,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@@ -4090,7 +4102,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4124,7 +4136,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4141,7 +4153,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4348,7 +4360,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4430,7 +4442,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4473,6 +4485,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Load(rdx, has_pending_message);
+ __ Integer32ToSmi(rdx, rdx);
__ push(rdx);
ExternalReference pending_message_script =
@@ -4492,6 +4505,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
__ Store(pending_message_script, rdx);
__ pop(rdx);
+ __ SmiToInteger32(rdx, rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Store(has_pending_message, rdx);
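
[annotation] The EnterFinallyBlock/ExitFinallyBlock hunks above tag the pending-message flag as a smi before pushing it, so the GC never sees a raw integer where it expects a tagged value on the stack. A standalone sketch of the x64 smi representation those two macro instructions implement (payload in the upper 32 bits, so the low heap-object-tag bit is always clear):

    #include <cstdint>

    inline intptr_t Integer32ToSmi(int32_t value) {
      // kSmiShift == 32 on x64: the payload occupies the upper word.
      return static_cast<intptr_t>(static_cast<uint64_t>(value) << 32);
    }

    inline int32_t SmiToInteger32(intptr_t smi) {
      return static_cast<int32_t>(static_cast<uint64_t>(smi) >> 32);
    }
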
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 82fdb3cece..0fd8a40036 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -135,7 +135,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
r0,
r1);
- // If probing finds an entry in the dictionary, r0 contains the
+ // If probing finds an entry in the dictionary, r1 contains the
// index into the dictionary. Check that the value is a normal
// property.
__ bind(&done);
@@ -178,10 +178,9 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
//
// value - holds the value to store and is unchanged.
//
- // scratch0 - used for index into the property dictionary and is clobbered.
+ // scratch0 - used during the positive dictionary lookup and is clobbered.
//
- // scratch1 - used to hold the capacity of the property dictionary and is
- // clobbered.
+ // scratch1 - used for index into the property dictionary and is clobbered.
Label done;
// Probe the dictionary.
@@ -823,7 +822,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
- NORMAL,
+ Code::NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
rax);
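
[annotation] The comment fixes in GenerateDictionaryLoad/Store above pin down which scratch register ends up holding the probe result. For orientation, a standalone sketch of the dictionary layout those helpers index into once probing succeeds (constants are illustrative):

    static const int kEntrySize = 3;           // key, value, details
    static const int kElementsStartIndex = 3;  // header words before the entries

    inline int KeyIndex(int entry)     { return kElementsStartIndex + entry * kEntrySize; }
    inline int ValueIndex(int entry)   { return KeyIndex(entry) + 1; }
    inline int DetailsIndex(int entry) { return KeyIndex(entry) + 2; }
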
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 7dc38a1429..89e311e461 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -92,17 +92,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -128,6 +119,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -320,24 +313,24 @@ bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+ ASSERT(constant->HasInteger32Value());
+ return constant->Integer32Value();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ return constant->handle();
}
@@ -367,7 +360,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ int closure_id = *info()->closure() != *environment->closure()
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -375,11 +371,19 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@@ -391,7 +395,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i));
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -399,18 +404,23 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
+ false,
false);
}
}
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i));
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged) {
+ bool is_tagged,
+ bool is_uint32) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
@@ -419,6 +429,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
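
For reference, the new StoreUint32StackSlot/StoreUint32Register cases exist because the deoptimizer has to know how to read a spilled 32-bit slot back. A minimal C++ sketch of the distinction the is_uint32 bit records (illustrative only, not V8 API):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t raw = 0xFFFFFFFFu;  // bit pattern sitting in a 32-bit slot
  // Without the uint32 marker, the slot would be materialized as int32.
  printf("int32 reading : %d\n", static_cast<int32_t>(raw));   // -1
  printf("uint32 reading: %u\n", raw);                         // 4294967295
  // Values above INT32_MAX can only surface as heap numbers / doubles.
  printf("as double     : %.1f\n", static_cast<double>(raw));  // 4294967295.0
  return 0;
}
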
@@ -432,6 +444,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@@ -439,8 +453,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -577,13 +591,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -883,6 +897,89 @@ void LCodeGen::DoModI(LModI* instr) {
}
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ ASSERT(instr->InputAt(1)->IsConstantOperand());
+
+ const Register dividend = ToRegister(instr->InputAt(0));
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+ const Register result = ToRegister(instr->result());
+
+ switch (divisor) {
+ case 0:
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+
+ case 1:
+ if (!result.is(dividend)) {
+ __ movl(result, dividend);
+ }
+ return;
+
+ case -1:
+ if (!result.is(dividend)) {
+ __ movl(result, dividend);
+ }
+ __ negl(result);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ return;
+ }
+
+ uint32_t divisor_abs = abs(divisor);
+ if (IsPowerOf2(divisor_abs)) {
+ int32_t power = WhichPowerOf2(divisor_abs);
+ if (divisor < 0) {
+ __ movsxlq(result, dividend);
+ __ neg(result);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ sar(result, Immediate(power));
+ } else {
+ if (!result.is(dividend)) {
+ __ movl(result, dividend);
+ }
+ __ sarl(result, Immediate(power));
+ }
+ } else {
+ Register reg1 = ToRegister(instr->TempAt(0));
+ Register reg2 = ToRegister(instr->result());
+
+    // Find b such that 2^b < divisor_abs < 2^(b+1).
+ unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
+    unsigned shift = 32 + b;  // One extra bit of precision, effectively.
+ double multiplier_f =
+ static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
+ int64_t multiplier;
+ if (multiplier_f - floor(multiplier_f) < 0.5) {
+ multiplier = static_cast<int64_t>(floor(multiplier_f));
+ } else {
+ multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
+ }
+ // The multiplier is a uint32.
+ ASSERT(multiplier > 0 &&
+ multiplier < (static_cast<int64_t>(1) << 32));
+ // The multiply is int64, so sign-extend to r64.
+ __ movsxlq(reg1, dividend);
+ if (divisor < 0 &&
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ neg(reg1);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ movq(reg2, multiplier, RelocInfo::NONE);
+    // The result just fits in r64, because it's int32 * uint32.
+ __ imul(reg2, reg1);
+
+ __ addq(reg2, Immediate(1 << 30));
+ __ sar(reg2, Immediate(shift));
+ }
+}
+
+
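
The non-power-of-two branch above replaces the division with a multiply by a fixed-point reciprocal plus a shift. A standalone C++ sketch of that arithmetic, assuming a positive 32-bit divisor that is not a power of two (the helper name and the GCC/Clang __builtin_clz are illustrative, not part of the patch):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>

int32_t FloorDivByConstant(int32_t dividend, uint32_t divisor_abs) {
  // Find b such that 2^b < divisor_abs < 2^(b+1) (strict, as divisor_abs
  // is not a power of two).
  unsigned b = 31 - __builtin_clz(divisor_abs);
  unsigned shift = 32 + b;  // One extra bit of precision, effectively.
  double multiplier_f =
      static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
  // Round to nearest, matching the floor / floor+1 selection above.
  int64_t multiplier = static_cast<int64_t>(std::floor(multiplier_f + 0.5));
  assert(multiplier > 0 && multiplier < (static_cast<int64_t>(1) << 32));
  // int32 * uint32 fits in a 64-bit product; 1 << 30 is the same rounding
  // bias the generated code adds before the arithmetic right shift.
  int64_t product = static_cast<int64_t>(dividend) * multiplier;
  return static_cast<int32_t>((product + (1 << 30)) >> shift);
}

int main() {
  const int32_t samples[] = {-8, -7, -1, 0, 6, 7, 1000000};
  for (int32_t n : samples) {
    printf("floor(%d / 7) = %d\n", n, FloorDivByConstant(n, 7));
  }
  return 0;
}
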
void LCodeGen::DoDivI(LDivI* instr) {
LOperand* right = instr->InputAt(1);
ASSERT(ToRegister(instr->result()).is(rax));
@@ -1193,6 +1290,13 @@ void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
}
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->InputAt(0));
+ __ EnumLength(result, map);
+}
+
+
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
@@ -1228,15 +1332,14 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Smi* index = instr->index();
- Label runtime, done;
+ Label runtime, done, not_date_object;
ASSERT(object.is(result));
ASSERT(object.is(rax));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ Condition cc = masm()->CheckSmi(object);
+ DeoptimizeIf(cc, instr->environment());
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- __ Assert(equal, "Trying to get date field from non-date.");
-#endif
+ DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
__ movq(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1305,6 +1408,72 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Label return_left;
+ Condition condition = (operation == HMathMinMax::kMathMin)
+ ? less_equal
+ : greater_equal;
+ Register left_reg = ToRegister(left);
+ if (right->IsConstantOperand()) {
+ Immediate right_imm =
+ Immediate(ToInteger32(LConstantOperand::cast(right)));
+ __ cmpq(left_reg, right_imm);
+ __ j(condition, &return_left, Label::kNear);
+ __ movq(left_reg, right_imm);
+ } else if (right->IsRegister()) {
+ Register right_reg = ToRegister(right);
+ __ cmpq(left_reg, right_reg);
+ __ j(condition, &return_left, Label::kNear);
+ __ movq(left_reg, right_reg);
+ } else {
+ Operand right_op = ToOperand(right);
+ __ cmpq(left_reg, right_op);
+ __ j(condition, &return_left, Label::kNear);
+ __ movq(left_reg, right_op);
+ }
+ __ bind(&return_left);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+ XMMRegister left_reg = ToDoubleRegister(left);
+ XMMRegister right_reg = ToDoubleRegister(right);
+ __ ucomisd(left_reg, right_reg);
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(left_reg, xmm_scratch);
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ __ orpd(left_reg, right_reg);
+ } else {
+ // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
+ __ addsd(left_reg, right_reg);
+ }
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ ucomisd(left_reg, left_reg); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear);
+ __ bind(&return_right);
+ __ movsd(left_reg, right_reg);
+
+ __ bind(&return_left);
+ }
+}
+
+
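
The equal-operands path in DoMathMinMax is needed because ucomisd reports +0 and -0 as equal, so the sign has to be resolved with bit arithmetic: orpd keeps -0 for min, and addsd yields +0 for max. A C++ sketch of the corner case (OrBits is an illustrative stand-in for orpd):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

double OrBits(double a, double b) {
  uint64_t x, y;
  std::memcpy(&x, &a, sizeof(x));
  std::memcpy(&y, &b, sizeof(y));
  // OR-ing the bit patterns of +0 and -0 sets the sign bit: result is -0.
  uint64_t r = x | y;
  double out;
  std::memcpy(&out, &r, sizeof(out));
  return out;
}

int main() {
  double pz = 0.0, nz = -0.0;
  printf("+0 == -0        : %d\n", pz == nz);                      // 1
  printf("min sign bit set: %d\n", std::signbit(OrBits(pz, nz)));  // 1, i.e. -0
  printf("max sign bit set: %d\n", std::signbit(pz + nz));         // 0, i.e. +0
  return 0;
}
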
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister left = ToDoubleRegister(instr->InputAt(0));
XMMRegister right = ToDoubleRegister(instr->InputAt(1));
@@ -1789,9 +1958,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AbortIfNotString(input);
__ movl(result, FieldOperand(input, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
@@ -2198,9 +2365,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
+ type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsFound() && lookup.type() == FIELD) {
+ if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2212,7 +2379,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
+ } else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
} else {
@@ -2242,11 +2409,10 @@ static bool CompactEmit(SmallMapList* list,
Handle<Map> map = list->at(i);
// If the map has ElementsKind transitions, we will generate map checks
// for each kind in __ CompareMap(..., ALLOW_ELEMENTS_TRANSITION_MAPS).
- if (map->elements_transition_map() != NULL) return false;
+ if (map->HasElementsTransition()) return false;
LookupResult lookup(isolate);
- map->LookupInDescriptors(NULL, *name, &lookup);
- return lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION);
+ map->LookupDescriptor(NULL, *name, &lookup);
+ return lookup.IsField() || lookup.IsConstantFunction();
}
@@ -2416,18 +2582,26 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits.
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
}
// Load the result.
__ movq(result,
BuildFastArrayOperand(instr->elements(),
- instr->key(),
+ key,
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()));
@@ -2448,12 +2622,20 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
}
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2461,7 +2643,7 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(),
- instr->key(),
+ key,
FAST_DOUBLE_ELEMENTS,
offset,
instr->additional_index());
@@ -2471,7 +2653,7 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
- instr->key(),
+ key,
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -2508,17 +2690,27 @@ Operand LCodeGen::BuildFastArrayOperand(
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
}
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
@@ -2547,11 +2739,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(result, operand);
- __ testl(result, result);
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(negative, instr->environment());
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ testl(result, result);
+ DeoptimizeIf(negative, instr->environment());
+ }
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
@@ -2673,7 +2864,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// TODO(kmillikin): We have a hydrogen value for the global object. See
// if it's better to use it than to explicitly fetch it from the context
// here.
- __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
__ movq(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
@@ -2736,7 +2927,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -2791,14 +2982,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(rdi, function);
}
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- }
+ // Change context.
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Set rax to arguments count if adaption is not needed. Assumes that rax
// is available to write to at this point.
@@ -2946,7 +3131,6 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- Label done;
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
@@ -2961,10 +3145,13 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ cmpl(output_reg, Immediate(0x80000000));
DeoptimizeIf(equal, instr->environment());
} else {
+ Label negative_sign, done;
// Deoptimize on negative inputs.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment());
+ __ j(below, &negative_sign, Label::kNear);
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Check for negative zero.
Label positive_sign;
@@ -2979,12 +3166,23 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, input_reg);
-
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x80000000));
DeoptimizeIf(equal, instr->environment());
+ __ jmp(&done, Label::kNear);
+
+  // Non-zero negative values reach here.
+ __ bind(&negative_sign);
+ // Truncate, then compare and compensate.
+ __ cvttsd2si(output_reg, input_reg);
+ __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ ucomisd(input_reg, xmm_scratch);
+ __ j(equal, &done, Label::kNear);
+ __ subl(output_reg, Immediate(1));
+ DeoptimizeIf(overflow, instr->environment());
+
+ __ bind(&done);
}
- __ bind(&done);
}
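
The new negative-sign path computes floor by truncating toward zero and compensating when the truncation was inexact. A scalar model of just that path (a sketch; assumes a negative input within int32 range):

#include <cstdio>

int FloorOfNegative(double input) {
  int truncated = static_cast<int>(input);        // cvttsd2si: toward zero
  if (static_cast<double>(truncated) == input) {  // the ucomisd round trip
    return truncated;                             // input was integral
  }
  return truncated - 1;                           // compensate the truncation
}

int main() {
  printf("floor(-2.5) = %d\n", FloorOfNegative(-2.5));  // -3
  printf("floor(-2.0) = %d\n", FloorOfNegative(-2.0));  // -2
  return 0;
}
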
@@ -3142,11 +3340,11 @@ void LCodeGen::DoRandom(LRandom* instr) {
STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
__ movq(global_object,
- FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
+ FieldOperand(global_object, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
- // rbx: FixedArray of the global context's random seeds
+ // rbx: FixedArray of the native context's random seeds
// Load state[0].
__ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
@@ -3438,18 +3636,27 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
}
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
@@ -3490,18 +3697,46 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
+void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand) {
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
+ Condition cc;
+ if (operand->IsRegister()) {
+ cc = masm()->CheckSmi(ToRegister(operand));
+ } else {
+ cc = masm()->CheckSmi(ToOperand(operand));
+ }
+ DeoptimizeIf(NegateCondition(cc), environment);
+ }
+}
+
+
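
DeoptIfTaggedButNotSmi leans on the x64 Smi scheme of this V8 generation: a Smi keeps its 32-bit payload in the upper half of the word with tag bit 0 clear, so CheckSmi is a single bit test and SmiToInteger64 an arithmetic shift. A C++ sketch under that assumption (names are illustrative):

#include <cstdint>
#include <cstdio>

bool IsSmi(int64_t word) { return (word & 1) == 0; }         // CheckSmi
int64_t SmiFromInt(int32_t v) {                              // Integer32ToSmi
  return static_cast<int64_t>(static_cast<uint64_t>(v) << 32);
}
int64_t SmiToInteger64(int64_t word) { return word >> 32; }  // arithmetic sar

int main() {
  int64_t tagged = SmiFromInt(-5);
  printf("word=%016llx is_smi=%d value=%lld\n",
         static_cast<unsigned long long>(tagged), IsSmi(tagged),
         static_cast<long long>(SmiToInteger64(tagged)));
  return 0;
}
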
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->length(),
+ instr->length());
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->index(),
+ instr->index());
if (instr->length()->IsRegister()) {
Register reg = ToRegister(instr->length());
- if (FLAG_debug_code) {
+ if (FLAG_debug_code &&
+ !instr->hydrogen()->length()->representation().IsTagged()) {
__ AbortIfNotZeroExtended(reg);
}
if (instr->index()->IsConstantOperand()) {
- __ cmpq(reg,
- Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ Cmp(reg, Smi::FromInt(constant_index));
+ } else {
+ __ cmpq(reg, Immediate(constant_index));
+ }
} else {
Register reg2 = ToRegister(instr->index());
- if (FLAG_debug_code) {
+ if (FLAG_debug_code &&
+ !instr->hydrogen()->index()->representation().IsTagged()) {
__ AbortIfNotZeroExtended(reg2);
}
__ cmpq(reg, reg2);
@@ -3521,37 +3756,46 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register value = ToRegister(instr->value());
Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
Operand operand =
BuildFastArrayOperand(instr->object(),
- instr->key(),
+ key,
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
- __ movq(operand, value);
-
if (instr->hydrogen()->NeedsWriteBarrier()) {
ASSERT(!instr->key()->IsConstantOperand());
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ lea(key, operand);
+ Register key_reg(ToRegister(key));
+ __ lea(key_reg, operand);
+ __ movq(Operand(key_reg, 0), value);
__ RecordWrite(elements,
- key,
+ key_reg,
value,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
+ } else {
+ __ movq(operand, value);
}
}
@@ -3559,6 +3803,21 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
void LCodeGen::DoStoreKeyedFastDoubleElement(
LStoreKeyedFastDoubleElement* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
if (instr->NeedsCanonicalization()) {
Label have_value;
@@ -3575,18 +3834,11 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
- instr->key(),
+ key,
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
__ movsd(double_store_operand, value);
}
@@ -3774,6 +4026,17 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->InputAt(0);
+ LOperand* output = instr->result();
+ LOperand* temp = instr->TempAt(0);
+
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+}
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
@@ -3783,6 +4046,69 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
}
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagU(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ cmpl(reg, Immediate(Smi::kMaxValue));
+ __ j(above, deferred->entry());
+ __ Integer32ToSmi(reg, reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
+ Label slow;
+ Register reg = ToRegister(instr->InputAt(0));
+ Register tmp = reg.is(rax) ? rcx : rax;
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ Label done;
+ // Load value into xmm1 which will be preserved across potential call to
+ // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
+ // XMM registers on x64).
+ __ LoadUint32(xmm1, reg, xmm0);
+
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(reg, tmp, &slow);
+ __ jmp(&done, Label::kNear);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ // Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains an
+ // integer value.
+ __ StoreToSafepointRegisterSlot(reg, Immediate(0));
+
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ if (!reg.is(rax)) __ movq(reg, rax);
+
+  // Done. Store the value in xmm1 into the allocated heap number.
+ __ bind(&done);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
+ __ StoreToSafepointRegisterSlot(reg, reg);
+}
+
+
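
DoNumberTagU and its deferred half split uint32 tagging into a fast Smi path and a HeapNumber allocation. A scalar model of that decision, assuming Smi::kMaxValue is 0x7FFFFFFF as on x64:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kSmiMaxValue = 0x7FFFFFFF;  // assumed x64 Smi::kMaxValue
  const uint32_t samples[] = {0u, 42u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
  for (uint32_t v : samples) {
    if (v <= kSmiMaxValue) {
      printf("%10u -> Smi (fast path)\n", v);
    } else {
      // Deferred path: the value only fits as a heap-allocated double.
      printf("%10u -> HeapNumber(%.1f)\n", v, static_cast<double>(v));
    }
  }
  return 0;
}
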
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
class DeferredNumberTagD: public LDeferredCode {
public:
@@ -4303,7 +4629,7 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Heap* heap = isolate()->heap();
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
@@ -4324,12 +4650,11 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
// Set up the parameters to the stub/runtime call.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ PushHeapObject(literals);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
- __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
+ __ Push(isolate()->factory()->empty_fixed_array());
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -4532,14 +4857,12 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
- // rdi = JS function.
// rcx = literals array.
// rbx = regexp literal.
// rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(rcx, instr->hydrogen()->literals());
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
@@ -4908,11 +5231,19 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ Cmp(result, Smi::FromInt(0));
+ __ j(not_equal, &load_cache);
+ __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
+ __ jmp(&done);
+ __ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ movq(result,
- FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ movq(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ bind(&done);
Condition cc = masm()->CheckSmi(result);
DeoptimizeIf(cc, instr->environment());
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 99e7ec8249..c12f4e8b24 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -45,26 +45,25 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
- Zone* zone)
- : chunk_(chunk),
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : zone_(info->zone()),
+ chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4, zone),
- jump_table_(4, zone),
- deoptimization_literals_(8, zone),
+ deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- translations_(zone),
- deferred_(8, zone),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
- safepoints_(zone),
- zone_(zone),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -99,6 +98,7 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredNumberTagU(LNumberTagU* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -141,7 +141,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -157,7 +157,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
void Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -224,7 +224,8 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -272,6 +273,11 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
+
+ void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -325,7 +331,8 @@ class LCodeGen BASE_EMBEDDED {
void EnsureSpaceForLazyDeopt(int space_needed);
- LChunk* const chunk_;
+ Zone* zone_;
+ LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -347,8 +354,6 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
- Zone* zone_;
-
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index d06a6a4063..bee1854448 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -358,12 +358,12 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(bool is_double) {
return spill_slot_count_++;
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
// All stack slots are Double stack slots on x64.
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
@@ -376,42 +376,6 @@ LOperand* LChunk::GetNextSpillSlot(bool is_double) {
}
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
@@ -463,84 +427,9 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap, zone());
- index = instructions_.length();
- instructions_.Add(instr, zone());
- } else {
- index = instructions_.length();
- instructions_.Add(instr, zone());
- instructions_.Add(gap, zone());
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map(), zone());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id(), zone());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(from, to, zone());
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
+LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -555,17 +444,8 @@ LChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LChunkBuilder::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -736,7 +616,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@@ -831,13 +711,16 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift by 0 and
// the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
}
}
}
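
The reworked shift lowering reflects that x >>> 0 is the one shift whose result is a true uint32: unless every use truncates back to int32 (or the new safe-uint32 mode marks the instruction), the value may not fit an int32 and the instruction must be able to deoptimize. A C++ sketch of the escaping case:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -1;
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;  // JS: x >>> 0
  printf("as uint32: %u\n", shifted);                        // 4294967295
  printf("as int32 : %d\n", static_cast<int32_t>(shifted));  // -1 again
  return 0;
}
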
@@ -970,8 +853,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result = new(zone()) LEnvironment(
@@ -996,7 +879,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
- result->AddValue(op, value->representation());
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@@ -1287,12 +1172,55 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- UNIMPLEMENTED();
+HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
+ // A value with an integer representation does not need to be transformed.
+ if (dividend->representation().IsInteger32()) {
+ return dividend;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (dividend->IsChange() &&
+ HChange::cast(dividend)->from().IsInteger32()) {
+ return HChange::cast(dividend)->value();
+ }
+ return NULL;
+}
+
+
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+ if (divisor->IsConstant() &&
+ HConstant::cast(divisor)->HasInteger32Value()) {
+ HConstant* constant_val = HConstant::cast(divisor);
+ return constant_val->CopyToRepresentation(Representation::Integer32(),
+ divisor->block()->zone());
+ }
return NULL;
}
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ HValue* right = instr->right();
+ ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
+ LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
+ int32_t divisor_si = HConstant::cast(right)->Integer32Value();
+ if (divisor_si == 0) {
+ LOperand* dividend = UseRegister(instr->left());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
+ } else if (IsPowerOf2(abs(divisor_si))) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
+ return divisor_si < 0 ? AssignEnvironment(result) : result;
+ } else {
+    // Uses two 64-bit registers.
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ LOperand* temp = TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
+ return divisor_si < 0 ? AssignEnvironment(result) : result;
+ }
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@@ -1398,6 +1326,26 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ right = UseOrConstantAtStart(instr->MostConstantOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+ return DefineSameAsFirst(minmax);
+}
+
+
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@@ -1580,6 +1528,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@@ -1596,7 +1550,7 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), rax);
LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, rax), instr);
+ return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1671,16 +1625,26 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ } else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp = FixedTemp(xmm1);
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
+ } else {
+ ASSERT(to.IsDouble());
+ LOperand* value = Use(instr->value());
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ }
}
}
UNREACHABLE();
@@ -1883,10 +1847,15 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastElement* result =
+ new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}
@@ -1895,9 +1864,13 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
@@ -1914,9 +1887,13 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedSpecializedArrayElement* result =
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
@@ -1941,13 +1918,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
+ bool clobbers_key = needs_write_barrier ||
+ instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFastElement(obj, key, val);
@@ -1958,12 +1938,15 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
}
@@ -1979,7 +1962,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
@@ -1988,11 +1972,12 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
LOperand* val = val_is_temp_register
? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
-
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ key, val);
}
@@ -2239,7 +2224,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
@@ -2265,7 +2250,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
- instr->is_construct());
+ instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index d038dda060..84d05c051a 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -96,6 +96,7 @@ class LCodeGen;
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
+ V(MapEnumLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -108,6 +109,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -115,7 +117,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -132,10 +133,13 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathFloorOfDiv) \
+ V(MathMinMax) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
+ V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@@ -162,6 +166,7 @@ class LCodeGen;
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -257,8 +262,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
@@ -270,6 +273,11 @@ class LInstruction: public ZoneObject {
#endif
private:
+ // Iterator support.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -289,7 +297,6 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- int InputCount() { return I; }
LOperand* InputAt(int i) { return inputs_[i]; }
int TempCount() { return T; }
@@ -299,6 +306,9 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ virtual int InputCount() { return I; }
};
@@ -554,6 +564,21 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
+class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathFloorOfDiv(LOperand* left,
+ LOperand* right,
+ LOperand* temp = NULL) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
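
For reference, the semantics behind the new LMathFloorOfDiv instruction: JavaScript's Math.floor(a / b) on integers rounds toward negative infinity, while C++ integer division truncates toward zero. A minimal standalone sketch of the difference:

    #include <cassert>

    // Flooring division, the behavior the instruction has to produce.
    int FloorDiv(int a, int b) {
      int q = a / b;
      if ((a % b != 0) && ((a < 0) != (b < 0))) --q;  // adjust toward -infinity
      return q;
    }

    int main() {
      assert(-7 / 2 == -3);           // truncating division
      assert(FloorDiv(-7, 2) == -4);  // flooring division
    }
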
class LMulI: public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
@@ -841,6 +866,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -977,6 +1003,16 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
};
+class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
@@ -1047,6 +1083,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
+class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@@ -1551,6 +1599,17 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
+class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LUint32ToDouble(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -1561,6 +1620,17 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
+class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp) {
@@ -2162,71 +2232,13 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LChunk: public ZoneObject {
+class LPlatformChunk: public LChunk {
public:
- LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32, graph->zone()),
- pointer_maps_(8, graph->zone()),
- inlined_closures_(1, graph->zone()) { }
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure, zone());
- }
-
- Zone* zone() const { return graph_->zone(); }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2245,16 +2257,19 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
+ static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
private:
enum Status {
UNUSED,
@@ -2263,7 +2278,7 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@@ -2273,7 +2288,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2367,7 +2382,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@@ -2379,7 +2394,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
+ BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 7d5d6d3d0b..1b0f2fa2d4 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -53,9 +53,17 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
+static const int kInvalidRootRegisterDelta = -1;
+
+
+intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+ if (predictable_code_size() &&
+ (other.address() < reinterpret_cast<Address>(isolate()) ||
+ other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
+ return kInvalidRootRegisterDelta;
+ }
Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate->heap()->roots_array_start());
+ reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
intptr_t delta = other.address() - roots_register_value;
return delta;
}
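
The delta computed here is later used as Operand(kRootRegister, delta), plain base-plus-displacement addressing relative to the roots array. The new guard rejects addresses outside the isolate when predictable code size is requested, presumably because such deltas vary between runs and would change which instruction encoding is picked. A toy sketch of the arithmetic (addresses and bias values are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t roots_array_start = 0x10000000;  // hypothetical
      uintptr_t kRootRegisterBias = 128;         // hypothetical
      uintptr_t root_register = kRootRegisterBias + roots_array_start;
      uintptr_t target = 0x10000400;             // some external reference
      intptr_t delta = target - root_register;
      assert(root_register + delta == target);   // Operand(kRootRegister, delta)
    }
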
@@ -64,8 +72,8 @@ static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(target);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
@@ -77,8 +85,8 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,
void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
@@ -96,8 +104,8 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
void MacroAssembler::Store(ExternalReference destination, Register source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(destination);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
@@ -116,8 +124,8 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
@@ -133,8 +141,8 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
// This calculation depends on the internals of LoadAddress.
  // Its correctness is ensured by the asserts in the Call
// instruction below.
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
// Operand is lea(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
@@ -216,7 +224,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
int3();
@@ -397,7 +405,7 @@ void MacroAssembler::RecordWrite(Register object,
return;
}
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
cmpq(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
@@ -538,7 +546,7 @@ void MacroAssembler::Abort(const char* msg) {
}
-void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -743,17 +751,52 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
+#if ENABLE_EXTRA_CHECKS
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = rax;
+ Register map = rcx;
+
+ JumpIfSmi(return_value, &ok, Label::kNear);
+ movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ j(below, &ok, Label::kNear);
+
+ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ j(above_equal, &ok, Label::kNear);
+
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kTrueValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kFalseValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kNullValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ Abort("API call returned invalid object");
+
+ bind(&ok);
+#endif
+
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
- bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-
bind(&empty_result);
// It was zero; the result is undefined.
- Move(rax, factory->undefined_value());
+ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
jmp(&prologue);
+ bind(&promote_scheduled_exception);
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
@@ -798,7 +841,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
movq(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
@@ -892,6 +935,38 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
}
+
+bool MacroAssembler::IsUnsafeInt(const int x) {
+ static const int kMaxBits = 17;
+ return !is_intn(x, kMaxBits);
+}
+
+
+void MacroAssembler::SafeMove(Register dst, Smi* src) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+ Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xor_(dst, kScratchRegister);
+ } else {
+ Move(dst, src);
+ }
+}
+
+
+void MacroAssembler::SafePush(Smi* src) {
+ ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+ Push(Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xor_(Operand(rsp, 0), kScratchRegister);
+ } else {
+ Push(src);
+ }
+}
+
+
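
SafeMove and SafePush split any immediate that does not fit in 17 signed bits (IsUnsafeInt above) so attacker-chosen constants never appear verbatim in generated code, a JIT-spraying mitigation. A standalone sketch of the XOR split (the cookie value here is a stand-in for the per-VM random jit_cookie):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t jit_cookie = 0xBAADF00Du;    // assumed random and nonzero
      uint32_t unsafe_value = 0x41414141u;  // attacker-controlled constant
      uint32_t embedded = unsafe_value ^ jit_cookie;  // what the code stream holds
      uint32_t at_runtime = embedded ^ jit_cookie;    // the xor_() un-split
      assert(at_runtime == unsafe_value);
    }
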
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
@@ -2377,7 +2452,7 @@ void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
void MacroAssembler::Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
@@ -2460,6 +2535,12 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
};
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
+ const Immediate& imm) {
+ movq(SafepointRegisterSlot(dst), imm);
+}
+
+
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
movq(SafepointRegisterSlot(dst), src);
}
@@ -2806,11 +2887,7 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
xorps(temp_xmm_reg, temp_xmm_reg);
ucomisd(input_reg, temp_xmm_reg);
j(below, &done, Label::kNear);
- uint64_t one_half = BitCast<uint64_t, double>(0.5);
- Set(temp_reg, one_half);
- movq(temp_xmm_reg, temp_reg);
- addsd(temp_xmm_reg, input_reg);
- cvttsd2si(result_reg, temp_xmm_reg);
+ cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
Set(result_reg, 255);
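
The rewritten clamp replaces the add-0.5-then-truncate idiom with a single cvtsd2si, which in the default MXCSR mode rounds to nearest with ties to even. A sketch of the behavioral difference at a halfway value:

    #include <cassert>
    #include <cmath>

    int main() {
      assert(static_cast<int>(2.5 + 0.5) == 3);  // old trick: halves always round up
      assert(std::nearbyint(2.5) == 2.0);        // nearest-even, as cvtsd2si does
      assert(std::nearbyint(3.5) == 4.0);
    }
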
@@ -2818,14 +2895,49 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
}
+static double kUint32Bias =
+ static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
+
+
+void MacroAssembler::LoadUint32(XMMRegister dst,
+ Register src,
+ XMMRegister scratch) {
+ Label done;
+ cmpl(src, Immediate(0));
+ movq(kScratchRegister,
+ reinterpret_cast<int64_t>(&kUint32Bias),
+ RelocInfo::NONE);
+ movsd(scratch, Operand(kScratchRegister, 0));
+ cvtlsi2sd(dst, src);
+ j(not_sign, &done, Label::kNear);
+ addsd(dst, scratch);
+ bind(&done);
+}
+
+
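
cvtlsi2sd interprets its 32-bit source as signed, so inputs with the top bit set convert to a negative double; adding 2^32 (kUint32Bias) restores the unsigned value, which is exactly what the not_sign branch skips. A standalone sketch:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t value = 0xFFFFFFF0u;                    // 4294967280
      double converted = static_cast<int32_t>(value);  // signed view: -16.0
      const double kUint32Bias = 4294967296.0;         // 2^32
      if (converted < 0) converted += kUint32Bias;     // the addsd path
      assert(converted == 4294967280.0);
    }
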
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- movq(descriptors, FieldOperand(map,
- Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
+ Register temp = descriptors;
+ movq(temp, FieldOperand(map, Map::kTransitionsOrBackPointerOffset));
+
+ Label ok, fail;
+ CheckMap(temp,
+ isolate()->factory()->fixed_array_map(),
+ &fail,
+ DONT_DO_SMI_CHECK);
+ movq(descriptors, FieldOperand(temp, TransitionArray::kDescriptorsOffset));
+ jmp(&ok);
+ bind(&fail);
Move(descriptors, isolate()->factory()->empty_descriptor_array());
- bind(&not_smi);
+ bind(&ok);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
+ and_(dst, kScratchRegister);
}
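
EnumLength masks the smi-tagged bit_field3 with the smi form of EnumLengthBits::kMask; since the field sits at shift 0 (the STATIC_ASSERT), no shift is needed and the result remains a valid smi. A sketch on untagged bits, with an assumed field width:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kEnumLengthMask = (1u << 11) - 1;  // width is an assumption
      uint32_t bit_field3 = (0x5u << 11) | 42;          // other flags | length 42
      assert((bit_field3 & kEnumLengthMask) == 42);     // the and_() above
    }
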
@@ -3395,20 +3507,21 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
cmpq(scratch, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ isolate()->factory()->native_context_map());
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
@@ -3416,23 +3529,24 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// compatible with the security token in the receiving global
// object.
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
push(holder_reg);
- movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ movq(holder_reg,
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
Check(not_equal, "JSGlobalProxy::context() should not be null.");
- // Read the first word and compare to global_context_map(),
+    // Read the first word and compare to native_context_map().
movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
pop(holder_reg);
}
movq(kScratchRegister,
- FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movq(scratch, FieldOperand(scratch, token_offset));
@@ -3954,7 +4068,7 @@ void MacroAssembler::CopyBytes(Register destination,
int min_length,
Register scratch) {
ASSERT(min_length >= 0);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
Assert(greater_equal, "Invalid min_length");
}
@@ -4052,8 +4166,9 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ movq(scratch,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
movq(scratch, Operand(scratch,
@@ -4103,10 +4218,11 @@ static const int kRegisterPassedArguments = 6;
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ movq(function,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
movq(function, Operand(function, Context::SlotOffset(index)));
}
@@ -4331,7 +4447,7 @@ void MacroAssembler::EnsureNotWhite(
testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
push(mask_scratch);
@@ -4406,44 +4522,38 @@ void MacroAssembler::EnsureNotWhite(
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
+ Label next, start;
Register empty_fixed_array_value = r8;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r9;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
movq(rcx, rax);
- bind(&next);
-
- // Check that there are no elements. Register rcx contains the
- // current JS object we've reached through the prototype chain.
- cmpq(empty_fixed_array_value,
- FieldOperand(rcx, JSObject::kElementsOffset));
- j(not_equal, call_runtime);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in rbx for the subsequent
- // prototype load.
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(rdx, call_runtime);
- // Check that there is an enum cache in the non-empty instance
- // descriptors (rdx). This is the case if the next enumeration
- // index field does not contain a smi.
- movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(rdx, call_runtime);
+ EnumLength(rdx, rbx);
+ Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
+ j(equal, call_runtime);
+
+ jmp(&start);
+
+ bind(&next);
+
+ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- cmpq(rcx, rax);
- j(equal, &check_prototype, Label::kNear);
- movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- cmpq(rdx, empty_fixed_array_value);
+ EnumLength(rdx, rbx);
+ Cmp(rdx, Smi::FromInt(0));
+ j(not_equal, call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register rcx contains the current JS
+ // object we've reached through the prototype chain.
+ cmpq(empty_fixed_array_value,
+ FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
cmpq(rcx, null_value);
j(not_equal, &next);
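
The rewritten loop requires a valid enum cache on the receiver's map, a zero enum length on every prototype, and empty elements on every object along the chain; any violation falls back to the runtime. A structural sketch with hypothetical types:

    #include <cassert>

    struct Obj {
      int enum_length;       // -1 stands in for Map::kInvalidEnumCache
      bool has_elements;
      const Obj* prototype;  // nullptr terminates the chain
    };

    // Returns true when the fast for-in path may use the enum cache.
    bool CheckEnumCache(const Obj* receiver) {
      if (receiver->enum_length == -1) return false;  // j(equal, call_runtime)
      for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
        if (o != receiver && o->enum_length != 0) return false;
        if (o->has_elements) return false;            // elements must be empty
      }
      return true;
    }

    int main() {
      Obj proto{0, false, nullptr};
      Obj recv{3, false, &proto};
      assert(CheckEnumCache(&recv));
      proto.enum_length = 2;  // a dirty prototype cache forces the runtime path
      assert(!CheckEnumCache(&recv));
    }
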
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 1c1cd95e94..5268fe2a2e 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -317,6 +317,7 @@ class MacroAssembler: public Assembler {
void PopSafepointRegisters() { Popad(); }
// Store the value in register src in the safepoint register stack
// slot for register dst.
+ void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
void StoreToSafepointRegisterSlot(Register dst, Register src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
@@ -774,6 +775,11 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
+ // Support for constant splitting.
+ bool IsUnsafeInt(const int x);
+ void SafeMove(Register dst, Smi* src);
+ void SafePush(Smi* src);
+
// Bit-field support.
void TestBit(const Operand& dst, int bit_index);
@@ -817,7 +823,7 @@ class MacroAssembler: public Assembler {
void Call(ExternalReference ext);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
// The size of the code generated for different call instructions.
int CallSize(Address destination, RelocInfo::Mode rmode) {
@@ -939,7 +945,18 @@ class MacroAssembler: public Assembler {
Register result_reg,
Register temp_reg);
+ void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+
void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const int full_shift = Field::kShift + kSmiShift;
+ static const int low_mask = Field::kMask >> Field::kShift;
+ shr(reg, Immediate(full_shift));
+ and_(reg, Immediate(low_mask));
+ }
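
DecodeField folds the field's shift together with the smi shift (32 for 32-bit smis on x64) so one shr plus one and_ decodes a bitfield straight out of a smi-tagged word. A standalone equivalent with a hypothetical field layout:

    #include <cassert>
    #include <cstdint>

    template <int kShift, int kMask>
    uint64_t DecodeField(uint64_t smi_tagged_word) {
      const int kSmiShift = 32;
      return (smi_tagged_word >> (kShift + kSmiShift)) & (kMask >> kShift);
    }

    int main() {
      // A 3-bit field at bit 4 holding the value 5, inside a smi-tagged word.
      uint64_t raw = static_cast<uint64_t>(5u << 4) << 32;
      assert((DecodeField<4, (7 << 4)>(raw) == 5));
    }
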
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
@@ -1128,8 +1145,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -1155,7 +1172,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
@@ -1323,6 +1340,8 @@ class MacroAssembler: public Assembler {
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);
+ intptr_t RootRegisterDelta(ExternalReference other);
+
// Moves the smi value to the destination register.
void LoadSmiConstant(Register dst, Smi* value);
@@ -1442,7 +1461,7 @@ inline Operand ContextOperand(Register context, int index) {
inline Operand GlobalObjectOperand() {
- return ContextOperand(rsi, Context::GLOBAL_INDEX);
+ return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
}
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index a72a0a0d1d..86f7bfe6ca 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -353,6 +353,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// In either case succeed immediately.
__ j(equal, &fallthrough);
+ // -----------------------
+ // rdx - Start of capture
+ // rbx - length of capture
+ // Check that there are sufficient characters left in the input.
+ __ movl(rax, rdi);
+ __ addl(rax, rbx);
+ BranchOrBacktrack(greater, on_no_match);
+
if (mode_ == ASCII) {
Label loop_increment;
if (on_no_match == NULL) {
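
Reading the register comments above: rdi is the negative offset of the current position from the end of the input and rbx the capture length, so a positive sum means the back reference would run past the end of the subject string. A standalone sketch of the check:

    #include <cassert>

    int main() {
      int current_from_end = -3;  // 3 characters left in the input
      int capture_length = 5;
      bool would_overrun = current_from_end + capture_length > 0;
      assert(would_overrun);      // BranchOrBacktrack(greater, on_no_match)
    }
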
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index a6acd9791e..cd71086eec 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -228,15 +228,15 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
Register prototype) {
// Load the global or builtins object from the current context.
__ movq(prototype,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
__ movq(prototype,
- FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ FieldOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
__ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ movq(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
__ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -249,13 +249,13 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ Move(prototype, isolate->global());
- __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+ __ Move(prototype, isolate->global_object());
+ __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
prototype);
__ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(isolate->global_context()->get(index)));
+ JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -1029,6 +1029,49 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch1));
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch1;
+ __ movq(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch3;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ movq(scratch2,
+ Operand(dictionary, index, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+ __ movq(scratch3, callback, RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(scratch2, scratch3);
+ __ j(not_equal, miss);
+}
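
The helper probes the string dictionary, then verifies that the entry's value slot really holds the expected AccessorInfo and misses otherwise. A structural sketch of that final comparison (the key/value/details entry layout is assumed):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    bool LoadCallbackMatches(const std::vector<const void*>& entries,
                             std::size_t entry_index,
                             const void* expected_callback) {
      const std::size_t kEntrySize = 3;   // key, value, details
      const std::size_t kValueIndex = 1;  // value sits one slot past the key
      const void* value = entries[entry_index * kEntrySize + kValueIndex];
      return value == expected_callback;  // cmpq + j(not_equal, miss)
    }

    int main() {
      int callback = 0;
      std::vector<const void*> dict = {nullptr, &callback, nullptr};
      assert(LoadCallbackMatches(dict, 0, &callback));
    }
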
+
+
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1036,6 +1079,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -1046,6 +1090,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ }
+
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch2.is(reg));
__ pop(scratch2); // Get return address to place it below.
@@ -1143,7 +1192,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
@@ -1221,7 +1270,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), rax, holder_reg,
@@ -1391,7 +1440,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -1915,7 +1964,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2030,7 +2079,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2279,7 +2328,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2342,7 +2391,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2373,14 +2422,17 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2388,19 +2440,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- rsp[0] : return address
// -----------------------------------
Label miss;
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(rdx, &miss);
+ CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
- // Check that the map of the object hasn't changed.
- __ CheckMap(rdx, Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rdx, rbx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ pop(rbx); // remove the return address
__ push(rdx); // receiver
@@ -2420,38 +2465,41 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<JSObject> receiver,
- Handle<JSFunction> setter,
- Handle<String> name) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
-
{
- FrameScope scope(masm(), StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(rax);
- // Call the JavaScript getter with the receiver and the value on the stack.
- __ push(rdx);
- __ push(rax);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ push(rdx);
+ __ push(rax);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
// We have to return the passed value, not the return value of the setter.
__ pop(rax);
@@ -2460,13 +2508,38 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(rdx, &miss);
+ CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
+
+ GenerateStoreViaSetter(masm(), setter);
__ bind(&miss);
Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2512,7 +2585,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2560,7 +2633,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2599,7 +2672,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -2623,7 +2698,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -2661,7 +2736,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -2699,7 +2774,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2719,7 +2794,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2734,16 +2809,53 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -- rsp[0] : return address
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
+ GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, r8, callback,
name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(rax);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
Handle<String> name,
Handle<JSObject> receiver,
@@ -2760,25 +2872,13 @@ Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
__ JumpIfSmi(rax, &miss);
CheckPrototypes(receiver, rax, holder, rbx, rdx, rdi, name, &miss);
- {
- FrameScope scope(masm(), StackFrame::INTERNAL);
-
- // Call the JavaScript getter with the receiver on the stack.
- __ push(rax);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
-
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
+  GenerateLoadViaGetter(masm(), getter);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2798,7 +2898,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2822,7 +2922,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2866,7 +2966,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2895,7 +2995,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2917,14 +3017,14 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ Cmp(rax, name);
__ j(not_equal, &miss);
- GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
+ GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, r8, callback,
name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2954,7 +3054,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2984,7 +3084,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -3010,7 +3110,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3036,7 +3136,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3062,7 +3162,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3082,7 +3182,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3110,7 +3210,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index d75e297af1..e312b20899 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -40,7 +40,7 @@ namespace internal {
inline void* Zone::New(int size) {
- ASSERT(ZoneScope::nesting() > 0);
+ ASSERT(scope_nesting_ > 0);
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -100,7 +100,7 @@ void* ZoneObject::operator new(size_t size, Zone* zone) {
inline void* ZoneAllocationPolicy::New(size_t size) {
ASSERT(zone_);
- return zone_->New(size);
+ return zone_->New(static_cast<int>(size));
}
@@ -110,19 +110,14 @@ void* ZoneList<T>::operator new(size_t size, Zone* zone) {
}
-ZoneScope::ZoneScope(Isolate* isolate, ZoneScopeMode mode)
- : isolate_(isolate), mode_(mode) {
- isolate_->zone()->scope_nesting_++;
+ZoneScope::ZoneScope(Zone* zone, ZoneScopeMode mode)
+ : zone_(zone), mode_(mode) {
+ zone_->scope_nesting_++;
}
bool ZoneScope::ShouldDeleteOnExit() {
- return isolate_->zone()->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
-}
-
-
-int ZoneScope::nesting() {
- return Isolate::Current()->zone()->scope_nesting_;
+ return zone_->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
}
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index d5d05ab95f..51b8113a0d 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -67,20 +67,20 @@ class Segment {
};
-Zone::Zone()
+Zone::Zone(Isolate* isolate)
: zone_excess_limit_(256 * MB),
segment_bytes_allocated_(0),
position_(0),
limit_(0),
scope_nesting_(0),
- segment_head_(NULL) {
+ segment_head_(NULL),
+ isolate_(isolate) {
}
unsigned Zone::allocation_size_ = 0;
ZoneScope::~ZoneScope() {
- ASSERT_EQ(Isolate::Current(), isolate_);
- if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
- isolate_->zone()->scope_nesting_--;
+ if (ShouldDeleteOnExit()) zone_->DeleteAll();
+ zone_->scope_nesting_--;
}
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index 1bc4984aa2..01e887e779 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -64,6 +64,8 @@ class Isolate;
class Zone {
public:
+ explicit Zone(Isolate* isolate);
+ ~Zone() { DeleteKeptSegment(); }
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
// allocating new segments of memory on demand using malloc().
inline void* New(int size);
@@ -114,9 +116,6 @@ class Zone {
// the zone.
int segment_bytes_allocated_;
- // Each isolate gets its own zone.
- Zone();
-
// Expand the Zone to hold at least 'size' more bytes and allocate
// the bytes. Returns the address of the newly allocated chunk of
// memory in the Zone. Should only be called if there isn't enough
@@ -235,7 +234,7 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
// outer-most scope.
class ZoneScope BASE_EMBEDDED {
public:
- INLINE(ZoneScope(Isolate* isolate, ZoneScopeMode mode));
+ INLINE(ZoneScope(Zone* zone, ZoneScopeMode mode));
virtual ~ZoneScope();
@@ -250,7 +249,7 @@ class ZoneScope BASE_EMBEDDED {
inline static int nesting();
private:
- Isolate* isolate_;
+ Zone* zone_;
ZoneScopeMode mode_;
};
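
Usage after this change, sketched (hypothetical zone object type; not compilable outside the V8 tree): a Zone is constructed against an Isolate explicitly, and ZoneScope takes the zone itself rather than reaching through the isolate.

    Zone zone(isolate);
    {
      ZoneScope scope(&zone, DELETE_ON_EXIT);
      MyZoneObject* obj = new(&zone) MyZoneObject();  // MyZoneObject is made up
      // ... zone memory is reclaimed when the outermost scope exits.
    }
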
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index a242fe3c85..66d848c5c1 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -118,7 +118,7 @@
'test-disasm-arm.cc'
],
}],
- ['v8_target_arch=="mips"', {
+ ['v8_target_arch=="mipsel"', {
'sources': [
'test-assembler-mips.cc',
'test-disasm-mips.cc',
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index fc111ab94b..df2c520567 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -44,6 +44,9 @@ test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
test-serialize/TestThatAlwaysFails: FAIL
test-serialize/DependentTestThatAlwaysFails: FAIL
+# This test always fails. It tests that LiveEdit causes abort when turned off.
+test-debug/LiveEditDisabled: FAIL
+
# TODO(gc): Temporarily disabled in the GC branch.
test-log/EquivalenceOfLoggingAndTraversal: PASS || FAIL
@@ -65,7 +68,7 @@ test-api/OutOfMemoryNested: SKIP
# BUG(355): Test crashes on ARM.
test-log/ProfLazyMode: SKIP
-# BUG(945): Socket connect fails on ARM
+# BUG(945): Tests using Socket cannot be run in parallel.
test-debug/DebuggerAgent: SKIP
test-debug/DebuggerAgentProtocolOverflowHeader: SKIP
test-sockets/Socket: SKIP
@@ -76,3 +79,18 @@ test-serialize/DeserializeFromSecondSerializationAndRunScript2: SKIP
test-serialize/DeserializeAndRunScript2: SKIP
test-serialize/DeserializeFromSecondSerialization: SKIP
+##############################################################################
+[ $arch == android_arm || $arch == android_ia32 ]
+
+# Tests crash as there is no /tmp directory in Android.
+test-log/LogAccessorCallbacks: SKIP
+test-log/LogCallbacks: SKIP
+test-log/ProfLazyMode: SKIP
+
+# platform-tls.h does not contain an ANDROID-related header.
+test-platform-tls/FastTLS: SKIP
+
+# BUG(945): Tests using Socket cannot be run in parallel.
+test-debug/DebuggerAgent: SKIP
+test-debug/DebuggerAgentProtocolOverflowHeader: SKIP
+test-sockets/Socket: SKIP
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index a8e504fd44..50e60da271 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -151,12 +151,21 @@ TEST(StressJS) {
Handle<Map> map(function->initial_map());
Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
Handle<Foreign> foreign = FACTORY->NewForeign(&kDescriptor);
- instance_descriptors = FACTORY->CopyAppendForeignDescriptor(
- instance_descriptors,
- FACTORY->NewStringFromAscii(Vector<const char>("get", 3)),
- foreign,
- static_cast<PropertyAttributes>(0));
- map->set_instance_descriptors(*instance_descriptors);
+ Handle<String> name =
+ FACTORY->NewStringFromAscii(Vector<const char>("get", 3));
+ ASSERT(instance_descriptors->IsEmpty());
+
+ Handle<DescriptorArray> new_descriptors = FACTORY->NewDescriptorArray(1);
+
+ v8::internal::DescriptorArray::WhitenessWitness witness(*new_descriptors);
+ v8::internal::Map::SetDescriptors(map, new_descriptors);
+
+ CallbacksDescriptor d(*name,
+ *foreign,
+ static_cast<PropertyAttributes>(0),
+ v8::internal::PropertyDetails::kInitialIndex);
+ map->AppendDescriptor(&d, witness);
+
// Add the Foo constructor the global object.
env->Global()->Set(v8::String::New("Foo"), v8::Utils::ToLocal(function));
// Call the accessor through JavaScript.
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index e24782085f..7fac9fd618 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -38,6 +38,7 @@
#include "isolate.h"
#include "compilation-cache.h"
#include "execution.h"
+#include "objects.h"
#include "snapshot.h"
#include "platform.h"
#include "utils.h"
@@ -2085,6 +2086,10 @@ THREADED_TEST(HiddenProperties) {
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK(obj->SetHiddenValue(key, Handle<Value>()));
+ CHECK(obj->GetHiddenValue(key).IsEmpty());
+
+ CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
CHECK(obj->DeleteHiddenValue(key));
CHECK(obj->GetHiddenValue(key).IsEmpty());
}
@@ -2702,7 +2707,7 @@ TEST(HugeConsStringOutOfMemory) {
static const int K = 1024;
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(2 * K * K);
+ constraints.set_max_old_space_size(3 * K * K);
v8::SetResourceConstraints(&constraints);
// Execute a script that causes out of memory.
@@ -3061,6 +3066,32 @@ TEST(APIThrowMessageOverwrittenToString) {
}
+static void check_custom_error_message(
+ v8::Handle<v8::Message> message,
+ v8::Handle<v8::Value> data) {
+ const char* uncaught_error = "Uncaught MyError toString";
+ CHECK(message->Get()->Equals(v8_str(uncaught_error)));
+}
+
+
+TEST(CustomErrorToString) {
+ v8::HandleScope scope;
+ v8::V8::AddMessageListener(check_custom_error_message);
+ LocalContext context;
+ CompileRun(
+ "function MyError(name, message) { "
+ " this.name = name; "
+ " this.message = message; "
+ "} "
+ "MyError.prototype = Object.create(Error.prototype); "
+ "MyError.prototype.toString = function() { "
+ " return 'MyError toString'; "
+ "}; "
+ "throw new MyError('my name', 'my message'); ");
+ v8::V8::RemoveMessageListeners(check_custom_error_message);
+}
+
+
static void receive_message(v8::Handle<v8::Message> message,
v8::Handle<v8::Value> data) {
message->Get();
@@ -3715,6 +3746,36 @@ THREADED_TEST(SimplePropertyWrite) {
}
+THREADED_TEST(SetterOnly) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetAccessor(v8_str("x"), NULL, SetXValue, v8_str("donut"));
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ Local<Script> script = Script::Compile(v8_str("obj.x = 4; obj.x"));
+ for (int i = 0; i < 10; i++) {
+ CHECK(xValue.IsEmpty());
+ script->Run();
+ CHECK_EQ(v8_num(4), xValue);
+ xValue.Dispose();
+ xValue = v8::Persistent<Value>();
+ }
+}
+
+
+THREADED_TEST(NoAccessors) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetAccessor(v8_str("x"), NULL, NULL, v8_str("donut"));
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ Local<Script> script = Script::Compile(v8_str("obj.x = 4; obj.x"));
+ for (int i = 0; i < 10; i++) {
+ script->Run();
+ }
+}
+
+
static v8::Handle<Value> XPropertyGetter(Local<String> property,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
@@ -4610,6 +4671,18 @@ THREADED_TEST(SimpleExtensions) {
}
+THREADED_TEST(NullExtensions) {
+ v8::HandleScope handle_scope;
+ v8::RegisterExtension(new Extension("nulltest", NULL));
+ const char* extension_names[] = { "nulltest" };
+ v8::ExtensionConfiguration extensions(1, extension_names);
+ v8::Handle<Context> context = Context::New(&extensions);
+ Context::Scope lock(context);
+ v8::Handle<Value> result = Script::Compile(v8_str("1+3"))->Run();
+ CHECK_EQ(result, v8::Integer::New(4));
+}
+
+
static const char* kEmbeddedExtensionSource =
"function Ret54321(){return 54321;}~~@@$"
"$%% THIS IS A SERIES OF NON-NULL-TERMINATED STRINGS.";
@@ -5223,7 +5296,7 @@ THREADED_TEST(IndependentHandleRevival) {
object.MarkIndependent();
HEAP->PerformScavenge();
CHECK(revived);
- HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
{
v8::HandleScope handle_scope;
v8::Local<String> y_str = v8_str("y");
@@ -5548,6 +5621,7 @@ THREADED_TEST(StringWrite) {
v8::Handle<String> str = v8_str("abcde");
// abc<Icelandic eth><Unicode snowman>.
v8::Handle<String> str2 = v8_str("abc\303\260\342\230\203");
+ v8::Handle<String> str3 = v8::String::New("abc\0def", 7);
const int kStride = 4; // Must match stride in for loops in JS below.
CompileRun(
"var left = '';"
@@ -5758,6 +5832,28 @@ THREADED_TEST(StringWrite) {
CHECK_NE(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
utf8buf[8] = '\0';
CHECK_EQ(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
+
+ memset(utf8buf, 0x1, sizeof(utf8buf));
+ utf8buf[5] = 'X';
+ len = str->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen,
+ String::NO_NULL_TERMINATION);
+ CHECK_EQ(5, len);
+ CHECK_EQ('X', utf8buf[5]); // Test that the sixth character is untouched.
+ CHECK_EQ(5, charlen);
+ utf8buf[5] = '\0';
+ CHECK_EQ(0, strcmp(utf8buf, "abcde"));
+
+ memset(buf, 0x1, sizeof(buf));
+ len = str3->WriteAscii(buf);
+ CHECK_EQ(7, len);
+ CHECK_EQ(0, strcmp("abc def", buf));
+
+ memset(buf, 0x1, sizeof(buf));
+ len = str3->WriteAscii(buf, 0, -1, String::PRESERVE_ASCII_NULL);
+ CHECK_EQ(7, len);
+ CHECK_EQ(0, strcmp("abc", buf));
+ CHECK_EQ(0, buf[3]);
+ CHECK_EQ(0, strcmp("def", buf + 4));
}
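// For clarity on the WriteAscii checks above: with the default hints the
// embedded NUL in "abc\0def" is written out as a space ("abc def"), while
// the PRESERVE_ASCII_NULL hint keeps the raw '\0' byte, so the buffer then
// holds "abc", a NUL, and "def" back to back.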
@@ -9375,7 +9471,8 @@ static void GenerateSomeGarbage() {
v8::Handle<v8::Value> DirectApiCallback(const v8::Arguments& args) {
static int count = 0;
if (count++ % 3 == 0) {
- HEAP-> CollectAllGarbage(true); // This should move the stub
+ HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ // This should move the stub
GenerateSomeGarbage(); // This should ensure the old stub memory is flushed
}
return v8::Handle<v8::Value>();
@@ -9430,7 +9527,7 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) {
v8::Handle<v8::Value> DirectGetterCallback(Local<String> name,
const v8::AccessorInfo& info) {
if (++p_getter_count % 3 == 0) {
- HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
GenerateSomeGarbage();
}
return v8::Handle<v8::Value>();
@@ -10877,6 +10974,307 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
}
+static i::Handle<i::JSFunction>* foo_ptr = NULL;
+static int foo_count = 0;
+static i::Handle<i::JSFunction>* bar_ptr = NULL;
+static int bar_count = 0;
+
+
+static void entry_hook(uintptr_t function,
+ uintptr_t return_addr_location) {
+ i::Code* code = i::Code::GetCodeFromTargetAddress(
+ reinterpret_cast<i::Address>(function));
+ CHECK(code != NULL);
+
+ if (bar_ptr != NULL && code == (*bar_ptr)->code())
+ ++bar_count;
+
+ if (foo_ptr != NULL && code == (*foo_ptr)->code())
+ ++foo_count;
+
+ // TODO(siggi): Verify return_addr_location.
+ // This can be done by capturing JitCodeEvents, but requires an ordered
+ // collection.
+}
+
+
+static void RunLoopInNewEnv() {
+ bar_ptr = NULL;
+ foo_ptr = NULL;
+
+ v8::HandleScope outer;
+ v8::Persistent<Context> env = Context::New();
+ env->Enter();
+
+ const char* script =
+ "function bar() {"
+ " var sum = 0;"
+ " for (i = 0; i < 100; ++i)"
+ " sum = foo(i);"
+ " return sum;"
+ "}"
+ "function foo(i) { return i * i; }";
+ CompileRun(script);
+ i::Handle<i::JSFunction> bar =
+ i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*env->Global()->Get(v8_str("bar"))));
+ ASSERT(*bar);
+
+ i::Handle<i::JSFunction> foo =
+ i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*env->Global()->Get(v8_str("foo"))));
+ ASSERT(*foo);
+
+ bar_ptr = &bar;
+ foo_ptr = &foo;
+
+ v8::Handle<v8::Value> value = CompileRun("bar();");
+ CHECK(value->IsNumber());
+ CHECK_EQ(9801.0, v8::Number::Cast(*value)->Value());
+
+ // Test the optimized codegen path.
+ value = CompileRun("%OptimizeFunctionOnNextCall(foo);"
+ "bar();");
+ CHECK(value->IsNumber());
+ CHECK_EQ(9801.0, v8::Number::Cast(*value)->Value());
+
+ env->Exit();
+}
+
+
+TEST(SetFunctionEntryHook) {
+ i::FLAG_allow_natives_syntax = true;
+
+ // Test setting and resetting the entry hook.
+ // Nulling it should always succeed.
+ CHECK(v8::V8::SetFunctionEntryHook(NULL));
+
+ CHECK(v8::V8::SetFunctionEntryHook(entry_hook));
+ // Setting a hook while one's active should fail.
+ CHECK_EQ(false, v8::V8::SetFunctionEntryHook(entry_hook));
+
+ CHECK(v8::V8::SetFunctionEntryHook(NULL));
+
+ // Reset the entry count to zero and set the entry hook.
+ bar_count = 0;
+ foo_count = 0;
+ CHECK(v8::V8::SetFunctionEntryHook(entry_hook));
+ RunLoopInNewEnv();
+
+ CHECK_EQ(2, bar_count);
+ CHECK_EQ(200, foo_count);
+
+ // Clear the entry hook and count.
+ bar_count = 0;
+ foo_count = 0;
+ v8::V8::SetFunctionEntryHook(NULL);
+
+ // Clear the compilation cache to make sure we don't reuse the
+ // functions from the previous invocation.
+ v8::internal::Isolate::Current()->compilation_cache()->Clear();
+
+ // Verify that entry hooking is now disabled.
+ RunLoopInNewEnv();
+ CHECK_EQ(0u, bar_count);
+ CHECK_EQ(0u, foo_count);
+}
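+
+// A minimal embedder-side sketch of the entry-hook API exercised above;
+// MyEntryHook is an illustrative name, not part of the test:
+//
+//   static void MyEntryHook(uintptr_t function,
+//                           uintptr_t return_addr_location) {
+//     // Called on entry to every generated function; both arguments are
+//     // raw addresses.
+//   }
+//
+//   if (v8::V8::SetFunctionEntryHook(MyEntryHook)) {  // false if one is set
+//     // ... compile and run scripts; the hook fires on each entry ...
+//     v8::V8::SetFunctionEntryHook(NULL);  // Passing NULL uninstalls it.
+//   }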
+
+
+static i::HashMap* code_map = NULL;
+static int saw_bar = 0;
+static int move_events = 0;
+
+
+static bool FunctionNameIs(const char* expected,
+ const v8::JitCodeEvent* event) {
+ // Log lines for functions are of the general form:
+ // "LazyCompile:<type><function_name>", where the type is one of
+ // "*", "~" or "".
+ static const char kPreamble[] = "LazyCompile:";
+ static size_t kPreambleLen = sizeof(kPreamble) - 1;
+
+ if (event->name.len < sizeof(kPreamble) - 1 ||
+ strncmp(kPreamble, event->name.str, kPreambleLen) != 0) {
+ return false;
+ }
+
+ const char* tail = event->name.str + kPreambleLen;
+ size_t tail_len = event->name.len - kPreambleLen;
+ size_t expected_len = strlen(expected);
+ if (tail_len == expected_len + 1) {
+ if (*tail == '*' || *tail == '~') {
+ --tail_len;
+ ++tail;
+ } else {
+ return false;
+ }
+ }
+
+ if (tail_len != expected_len)
+ return false;
+
+ return strncmp(tail, expected, expected_len) == 0;
+}
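+
+// With the format above, each of these event names matches the expected
+// name "bar":
+//   "LazyCompile:bar"   (no marker)
+//   "LazyCompile:*bar"  (optimized code)
+//   "LazyCompile:~bar"  (unoptimized code)
+// whereas "LazyCompile:barf" or a name without the preamble does not.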
+
+
+static void event_handler(const v8::JitCodeEvent* event) {
+ CHECK(event != NULL);
+ CHECK(code_map != NULL);
+
+ switch (event->type) {
+ case v8::JitCodeEvent::CODE_ADDED: {
+ CHECK(event->code_start != NULL);
+ CHECK_NE(0, static_cast<int>(event->code_len));
+ CHECK(event->name.str != NULL);
+ i::HashMap::Entry* entry =
+ code_map->Lookup(event->code_start,
+ i::ComputePointerHash(event->code_start),
+ true);
+ entry->value = reinterpret_cast<void*>(event->code_len);
+
+ if (FunctionNameIs("bar", event)) {
+ ++saw_bar;
+ }
+ }
+ break;
+
+ case v8::JitCodeEvent::CODE_MOVED: {
+ uint32_t hash = i::ComputePointerHash(event->code_start);
+      // Ideally we would never see a code-move event for code whose
+      // creation we have not seen first. However, the code-creation event
+      // is not emitted until the script's line endings have been computed
+      // (so that we can report the line in the script at which the
+      // function source is found, see
+      // Compiler::RecordFunctionCompilation), and that computation can
+      // trigger a GC, which can move the newly created code before its
+      // existence has been logged.
+ i::HashMap::Entry* entry =
+ code_map->Lookup(event->code_start, hash, false);
+ if (entry != NULL) {
+ ++move_events;
+
+ CHECK_EQ(reinterpret_cast<void*>(event->code_len), entry->value);
+ code_map->Remove(event->code_start, hash);
+
+ entry = code_map->Lookup(event->new_code_start,
+ i::ComputePointerHash(event->new_code_start),
+ true);
+ CHECK(entry != NULL);
+ entry->value = reinterpret_cast<void*>(event->code_len);
+ }
+ }
+ break;
+
+ case v8::JitCodeEvent::CODE_REMOVED:
+ // Object/code removal events are currently not dispatched from the GC.
+ CHECK(false);
+ break;
+ default:
+ // Impossible event.
+ CHECK(false);
+ break;
+ }
+}
+
+
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(i::PagedSpace* space);
+
+
+static bool MatchPointers(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
+TEST(SetJitCodeEventHandler) {
+ const char* script =
+ "function bar() {"
+ " var sum = 0;"
+ " for (i = 0; i < 100; ++i)"
+ " sum = foo(i);"
+ " return sum;"
+ "}"
+ "function foo(i) { return i * i; };"
+ "bar();";
+
+ // Run this test in a new isolate to make sure we don't
+ // have remnants of state from other code.
+ v8::Isolate* isolate = v8::Isolate::New();
+ isolate->Enter();
+
+ {
+ i::HashMap code(MatchPointers);
+ code_map = &code;
+
+ saw_bar = 0;
+ move_events = 0;
+
+ i::FLAG_stress_compaction = true;
+ V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, event_handler);
+
+ v8::HandleScope scope;
+ // Generate new code objects sparsely distributed across several
+ // different fragmented code-space pages.
+ const int kIterations = 10;
+ for (int i = 0; i < kIterations; ++i) {
+ LocalContext env;
+
+ v8::Handle<v8::Script> compiled_script;
+ {
+ i::AlwaysAllocateScope always_allocate;
+ SimulateFullSpace(HEAP->code_space());
+ compiled_script = v8_compile(script);
+ }
+ compiled_script->Run();
+
+ // Clear the compilation cache to get more wastage.
+ ISOLATE->compilation_cache()->Clear();
+ }
+
+ // Force code movement.
+ HEAP->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");
+
+ CHECK_LE(kIterations, saw_bar);
+ CHECK_NE(0, move_events);
+
+ code_map = NULL;
+ V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+ }
+
+ isolate->Exit();
+ isolate->Dispose();
+
+ // Do this in a new isolate.
+ isolate = v8::Isolate::New();
+ isolate->Enter();
+
+ // Verify that we get callbacks for existing code objects when we
+ // request enumeration of existing code.
+ {
+ v8::HandleScope scope;
+ LocalContext env;
+ CompileRun(script);
+
+ // Now get code through initial iteration.
+ i::HashMap code(MatchPointers);
+ code_map = &code;
+
+ V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting, event_handler);
+ V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+
+ code_map = NULL;
+
+    // We expect to have received some events. Note that if we could get
+    // code-removal notifications, we could compare two collections: one
+    // built by listening from the time the isolate is created, and the
+    // other by subscribing with EnumExisting.
+ CHECK_NE(0, code.occupancy());
+ }
+
+ isolate->Exit();
+ isolate->Dispose();
+}
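+
+// A minimal listener sketch for the JitCodeEvent API exercised above;
+// LogCodeEvent is an illustrative name:
+//
+//   static void LogCodeEvent(const v8::JitCodeEvent* event) {
+//     if (event->type == v8::JitCodeEvent::CODE_ADDED) {
+//       // event->name.str is not NUL-terminated; copy event->name.len
+//       // bytes before treating it as a C string.
+//     }
+//   }
+//
+//   // Request callbacks for code that already exists, then detach:
+//   v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
+//                                  LogCodeEvent);
+//   v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);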
+
+
static int64_t cast(intptr_t x) { return static_cast<int64_t>(x); }
@@ -12046,7 +12444,7 @@ class RegExpStringModificationTest {
// Inject the input as a global variable.
i::Handle<i::String> input_name =
FACTORY->NewStringFromAscii(i::Vector<const char>("input", 5));
- i::Isolate::Current()->global_context()->global()->SetProperty(
+ i::Isolate::Current()->native_context()->global_object()->SetProperty(
*input_name,
*input_,
NONE,
@@ -13625,6 +14023,41 @@ THREADED_TEST(ExternalArrayInfo) {
}
+void ExternalArrayLimitTestHelper(v8::ExternalArrayType array_type, int size) {
+ v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ last_location = last_message = NULL;
+ obj->SetIndexedPropertiesToExternalArrayData(NULL, array_type, size);
+ CHECK(!obj->HasIndexedPropertiesInExternalArrayData());
+ CHECK_NE(NULL, last_location);
+ CHECK_NE(NULL, last_message);
+}
+
+
+TEST(ExternalArrayLimits) {
+ v8::HandleScope scope;
+ LocalContext context;
+ ExternalArrayLimitTestHelper(v8::kExternalByteArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalByteArray, 0xffffffff);
+ ExternalArrayLimitTestHelper(v8::kExternalUnsignedByteArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalUnsignedByteArray, 0xffffffff);
+ ExternalArrayLimitTestHelper(v8::kExternalShortArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalShortArray, 0xffffffff);
+ ExternalArrayLimitTestHelper(v8::kExternalUnsignedShortArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalUnsignedShortArray, 0xffffffff);
+ ExternalArrayLimitTestHelper(v8::kExternalIntArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalIntArray, 0xffffffff);
+ ExternalArrayLimitTestHelper(v8::kExternalUnsignedIntArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalUnsignedIntArray, 0xffffffff);
+ ExternalArrayLimitTestHelper(v8::kExternalFloatArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalFloatArray, 0xffffffff);
+ ExternalArrayLimitTestHelper(v8::kExternalDoubleArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalDoubleArray, 0xffffffff);
+ ExternalArrayLimitTestHelper(v8::kExternalPixelArray, 0x40000000);
+ ExternalArrayLimitTestHelper(v8::kExternalPixelArray, 0xffffffff);
+}
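+
+// Both sizes used above (0x40000000, i.e. 2^30 elements, and 0xffffffff)
+// are beyond the supported external array length, so each helper call is
+// expected to trip the fatal error handler instead of attaching the data.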
+
+
THREADED_TEST(ScriptContextDependence) {
v8::HandleScope scope;
LocalContext c1;
@@ -14423,6 +14856,7 @@ TEST(Regress528) {
context->Exit();
}
context.Dispose();
+ v8::V8::ContextDisposedNotification();
for (gc_count = 1; gc_count < 10; gc_count++) {
other_context->Enter();
CompileRun(source_simple);
@@ -14445,6 +14879,7 @@ TEST(Regress528) {
context->Exit();
}
context.Dispose();
+ v8::V8::ContextDisposedNotification();
for (gc_count = 1; gc_count < 10; gc_count++) {
other_context->Enter();
CompileRun(source_eval);
@@ -14483,6 +14918,7 @@ TEST(Regress528) {
CHECK_EQ(1, GetGlobalObjectsCount());
other_context.Dispose();
+ v8::V8::ContextDisposedNotification();
}
@@ -16125,7 +16561,8 @@ THREADED_TEST(Regress1516) {
CHECK_LE(1, elements);
}
- i::Isolate::Current()->heap()->CollectAllGarbage(true);
+ i::Isolate::Current()->heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask);
{ i::Object* raw_map_cache = i::Isolate::Current()->context()->map_cache();
if (raw_map_cache != i::Isolate::Current()->heap()->undefined_value()) {
i::MapCache* map_cache = i::MapCache::cast(raw_map_cache);
@@ -16977,50 +17414,24 @@ THREADED_TEST(Regress142088) {
SetterWhichSetsYOnThisTo23);
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- // Turn monomorphic on slow object with native accessor, then just
- // delete the property and fail.
CompileRun("function load(x) { return x.foo; }"
- "function store(x) { x.foo = void 0; }"
- "function keyed_load(x, key) { return x[key]; }"
- // Second version of function has a different source (add void 0)
- // so that it does not share code with the first version. This
- // ensures that the ICs are monomorphic.
- "function load2(x) { void 0; return x.foo; }"
- "function store2(x) { void 0; x.foo = void 0; }"
- "function keyed_load2(x, key) { void 0; return x[key]; }"
-
- "obj.__proto__ = null;"
- "var subobj = {};"
- "subobj.__proto__ = obj;"
+ "var o = Object.create(obj);"
"%OptimizeObjectForAddingMultipleProperties(obj, 1);"
+ "load(o); load(o); load(o); load(o);");
+}
- // Make the ICs monomorphic.
- "load(obj); load(obj);"
- "load2(subobj); load2(subobj);"
- "store(obj);"
- "store2(subobj);"
- "keyed_load(obj, 'foo'); keyed_load(obj, 'foo');"
- "keyed_load2(subobj, 'foo'); keyed_load2(subobj, 'foo');"
- // Delete the accessor. It better not be called any more now.
- "delete obj.foo;"
- "obj.y = void 0;"
- "subobj.y = void 0;"
+THREADED_TEST(Regress137496) {
+ i::FLAG_expose_gc = true;
+ v8::HandleScope scope;
+ LocalContext context;
- "var load_result = load(obj);"
- "var load_result2 = load2(subobj);"
- "var keyed_load_result = keyed_load(obj, 'foo');"
- "var keyed_load_result2 = keyed_load2(subobj, 'foo');"
- "store(obj);"
- "store2(subobj);"
- "var y_from_obj = obj.y;"
- "var y_from_subobj = subobj.y;");
- CHECK(context->Global()->Get(v8_str("load_result"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("load_result2"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("keyed_load_result"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("keyed_load_result2"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("y_from_obj"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("y_from_subobj"))->IsUndefined());
+ // Compile a try-finally clause where the finally block causes a GC
+ // while there still is a message pending for external reporting.
+ TryCatch try_catch;
+ try_catch.SetVerbose(true);
+ CompileRun("try { throw new Error(); } finally { gc(); }");
+ CHECK(try_catch.HasCaught());
}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index ecbf956916..cdab1b95ce 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -642,8 +642,8 @@ TEST(8) {
// single precision values around in memory.
Assembler assm(Isolate::Current(), NULL, 0);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -753,8 +753,8 @@ TEST(9) {
// single precision values around in memory.
Assembler assm(Isolate::Current(), NULL, 0);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -868,8 +868,8 @@ TEST(10) {
// single precision values around in memory.
Assembler assm(Isolate::Current(), NULL, 0);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 80c7fdff78..c72f87ec3d 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -39,8 +39,10 @@ TEST(List) {
List<AstNode*>* list = new List<AstNode*>(0);
CHECK_EQ(0, list->length());
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- AstNodeFactory<AstNullVisitor> factory(Isolate::Current());
+ Isolate* isolate = Isolate::Current();
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+ AstNodeFactory<AstNullVisitor> factory(isolate, zone);
AstNode* node = factory.NewEmptyStatement();
list->Add(node);
CHECK_EQ(1, list->length());
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 9ca0b0a170..961c94bff0 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -101,14 +101,14 @@ static void InitializeVM() {
static MaybeObject* GetGlobalProperty(const char* name) {
Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
- return Isolate::Current()->context()->global()->GetProperty(*symbol);
+ return Isolate::Current()->context()->global_object()->GetProperty(*symbol);
}
static void SetGlobalProperty(const char* name, Object* value) {
Handle<Object> object(value);
Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
SetProperty(global, symbol, object, NONE, kNonStrictMode);
}
@@ -120,12 +120,13 @@ static Handle<JSFunction> Compile(const char* source) {
Handle<String>(),
0,
0,
+ Handle<Context>(Isolate::Current()->native_context()),
NULL,
NULL,
Handle<String>::null(),
NOT_NATIVES_CODE);
return FACTORY->NewFunctionFromSharedFunctionInfo(shared_function,
- Isolate::Current()->global_context());
+ Isolate::Current()->native_context());
}
@@ -138,7 +139,7 @@ static double Inc(int x) {
if (fun.is_null()) return -1;
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
return GetGlobalProperty("result")->ToObjectChecked()->Number();
@@ -159,7 +160,7 @@ static double Add(int x, int y) {
SetGlobalProperty("x", Smi::FromInt(x));
SetGlobalProperty("y", Smi::FromInt(y));
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
return GetGlobalProperty("result")->ToObjectChecked()->Number();
@@ -179,7 +180,7 @@ static double Abs(int x) {
SetGlobalProperty("x", Smi::FromInt(x));
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
return GetGlobalProperty("result")->ToObjectChecked()->Number();
@@ -200,7 +201,7 @@ static double Sum(int n) {
SetGlobalProperty("n", Smi::FromInt(n));
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
return GetGlobalProperty("result")->ToObjectChecked()->Number();
@@ -221,7 +222,7 @@ TEST(Print) {
Handle<JSFunction> fun = Compile(source);
if (fun.is_null()) return;
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
}
@@ -254,7 +255,7 @@ TEST(Stuff) {
Handle<JSFunction> fun = Compile(source);
CHECK(!fun.is_null());
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
CHECK_EQ(511.0, GetGlobalProperty("r")->ToObjectChecked()->Number());
@@ -269,7 +270,7 @@ TEST(UncaughtThrow) {
Handle<JSFunction> fun = Compile(source);
CHECK(!fun.is_null());
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(has_pending_exception);
CHECK_EQ(42.0, Isolate::Current()->pending_exception()->
@@ -294,12 +295,12 @@ TEST(C2JSFrames) {
// Run the generated code to populate the global object with 'foo'.
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global());
+ Handle<JSObject> global(Isolate::Current()->context()->global_object());
Execution::Call(fun0, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
Object* foo_symbol = FACTORY->LookupAsciiSymbol("foo")->ToObjectChecked();
- MaybeObject* fun1_object = Isolate::Current()->context()->global()->
+ MaybeObject* fun1_object = Isolate::Current()->context()->global_object()->
GetProperty(String::cast(foo_symbol));
Handle<Object> fun1(fun1_object->ToObjectChecked());
CHECK(fun1->IsJSFunction());
@@ -352,6 +353,38 @@ TEST(GetScriptLineNumber) {
}
+// Test that optimized code for different closures is actually shared
+// immediately by the FastNewClosureStub when run in the same context.
+TEST(OptimizedCodeSharing) {
+ // Skip test if --cache-optimized-code is not activated by default because
+  // Skip this test if --cache-optimized-code is not activated by default,
+  // because then the FastNewClosureStub baked into the snapshot is incorrect.
+ FLAG_allow_natives_syntax = true;
+ InitializeVM();
+ v8::HandleScope scope;
+ for (int i = 0; i < 10; i++) {
+ LocalContext env;
+ env->Global()->Set(v8::String::New("x"), v8::Integer::New(i));
+ CompileRun("function MakeClosure() {"
+ " return function() { return x; };"
+ "}"
+ "var closure0 = MakeClosure();"
+ "%DebugPrint(closure0());"
+ "%OptimizeFunctionOnNextCall(closure0);"
+ "%DebugPrint(closure0());"
+ "var closure1 = MakeClosure();"
+ "var closure2 = MakeClosure();");
+ Handle<JSFunction> fun1 = v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure1"))));
+ Handle<JSFunction> fun2 = v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure2"))));
+ CHECK(fun1->IsOptimized() || !fun1->IsOptimizable());
+ CHECK(fun2->IsOptimized() || !fun2->IsOptimizable());
+ CHECK_EQ(fun1->code(), fun2->code());
+ }
+}
+
+
#ifdef ENABLE_DISASSEMBLER
static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
const char* property_name) {
@@ -374,15 +407,16 @@ static void CheckCodeForUnsafeLiteral(Handle<JSFunction> f) {
Address end = pc + decode_size;
v8::internal::EmbeddedVector<char, 128> decode_buffer;
+ v8::internal::EmbeddedVector<char, 128> smi_hex_buffer;
+ Smi* smi = Smi::FromInt(12345678);
+ OS::SNPrintF(smi_hex_buffer, "0x%lx", reinterpret_cast<intptr_t>(smi));
while (pc < end) {
int num_const = d.ConstantPoolSizeAt(pc);
if (num_const >= 0) {
pc += (num_const + 1) * kPointerSize;
} else {
pc += d.InstructionDecode(decode_buffer, pc);
- CHECK(strstr(decode_buffer.start(), "mov eax,0x178c29c") == NULL);
- CHECK(strstr(decode_buffer.start(), "push 0x178c29c") == NULL);
- CHECK(strstr(decode_buffer.start(), "0x178c29c") == NULL);
+ CHECK(strstr(decode_buffer.start(), smi_hex_buffer.start()) == NULL);
}
}
}
diff --git a/deps/v8/test/cctest/test-dataflow.cc b/deps/v8/test/cctest/test-dataflow.cc
index 005d440d13..ae3327965f 100644
--- a/deps/v8/test/cctest/test-dataflow.cc
+++ b/deps/v8/test/cctest/test-dataflow.cc
@@ -36,8 +36,8 @@ using namespace v8::internal;
TEST(BitVector) {
v8::internal::V8::Initialize(NULL);
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- Zone* zone = Isolate::Current()->zone();
+ Zone* zone = Isolate::Current()->runtime_zone();
+ ZoneScope zone_scope(zone, DELETE_ON_EXIT);
{
BitVector v(15, zone);
v.Add(1);
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 166b762913..234b6df722 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -197,10 +197,9 @@ static bool HasDebugInfo(v8::Handle<v8::Function> fun) {
// number.
static int SetBreakPoint(Handle<v8::internal::JSFunction> fun, int position) {
static int break_point = 0;
- Handle<v8::internal::SharedFunctionInfo> shared(fun->shared());
v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
debug->SetBreakPoint(
- shared,
+ fun,
Handle<Object>(v8::internal::Smi::FromInt(++break_point)),
&position);
return break_point;
@@ -515,7 +514,7 @@ void CheckDebugBreakFunction(DebugLocalContext* env,
// there
ClearBreakPoint(bp);
CHECK(!debug->HasDebugInfo(shared));
- CHECK(debug->EnsureDebugInfo(shared));
+ CHECK(debug->EnsureDebugInfo(shared, fun));
TestBreakLocationIterator it2(Debug::GetDebugInfo(shared));
it2.FindBreakLocationFromPosition(position);
actual_mode = it2.it()->rinfo()->rmode();
@@ -4274,9 +4273,9 @@ TEST(InterceptorPropertyMirror) {
"named_values[%d] instanceof debug.PropertyMirror", i);
CHECK(CompileRun(buffer.start())->BooleanValue());
- // 5 is PropertyType.Interceptor
OS::SNPrintF(buffer, "named_values[%d].propertyType()", i);
- CHECK_EQ(5, CompileRun(buffer.start())->Int32Value());
+ CHECK_EQ(v8::internal::INTERCEPTOR,
+ CompileRun(buffer.start())->Int32Value());
OS::SNPrintF(buffer, "named_values[%d].isNative()", i);
CHECK(CompileRun(buffer.start())->BooleanValue());
@@ -7393,4 +7392,51 @@ TEST(Regress131642) {
v8::Debug::SetDebugEventListener(NULL);
}
+
+// Import from test-heap.cc
+int CountNativeContexts();
+
+
+static void NopListener(v8::DebugEvent event,
+ v8::Handle<v8::Object> exec_state,
+ v8::Handle<v8::Object> event_data,
+ v8::Handle<v8::Value> data) {
+}
+
+
+TEST(DebuggerCreatesContextIffActive) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ CHECK_EQ(1, CountNativeContexts());
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CompileRun("debugger;");
+ CHECK_EQ(1, CountNativeContexts());
+
+ v8::Debug::SetDebugEventListener(NopListener);
+ CompileRun("debugger;");
+ CHECK_EQ(2, CountNativeContexts());
+
+ v8::Debug::SetDebugEventListener(NULL);
+}
+
+
+TEST(LiveEditEnabled) {
+ v8::internal::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Debug::SetLiveEditEnabled(true);
+ CompileRun("%LiveEditCompareStrings('', '')");
+}
+
+
+TEST(LiveEditDisabled) {
+ v8::internal::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Debug::SetLiveEditEnabled(false);
+ CompileRun("%LiveEditCompareStrings('', '')");
+}
+
+
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index e6bdc9f505..6fc601213c 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -37,7 +37,8 @@ using namespace v8;
enum Expectations {
EXPECT_RESULT,
- EXPECT_EXCEPTION
+ EXPECT_EXCEPTION,
+ EXPECT_ERROR
};
@@ -72,6 +73,10 @@ class DeclarationContext {
void InitializeIfNeeded();
+ // Perform optional initialization steps on the context after it has
+  // been created. Defaults to none but may be overridden.
+ virtual void PostInitializeContext(Handle<Context> context) {}
+
// Get the holder for the interceptor. Default to the instance template
// but may be overwritten.
virtual Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) {
@@ -91,7 +96,6 @@ class DeclarationContext {
private:
bool is_initialized_;
Persistent<Context> context_;
- Local<String> property_;
int get_count_;
int set_count_;
@@ -120,6 +124,7 @@ void DeclarationContext::InitializeIfNeeded() {
context_ = Context::New(0, function->InstanceTemplate(), Local<Value>());
context_->Enter();
is_initialized_ = true;
+ PostInitializeContext(context_);
}
@@ -134,7 +139,13 @@ void DeclarationContext::Check(const char* source,
HandleScope scope;
TryCatch catcher;
catcher.SetVerbose(true);
- Local<Value> result = Script::Compile(String::New(source))->Run();
+ Local<Script> script = Script::Compile(String::New(source));
+ if (expectations == EXPECT_ERROR) {
+ CHECK(script.IsEmpty());
+ return;
+ }
+ CHECK(!script.IsEmpty());
+ Local<Value> result = script->Run();
CHECK_EQ(get, get_count());
CHECK_EQ(set, set_count());
CHECK_EQ(query, query_count());
@@ -536,9 +547,9 @@ TEST(ExistsInPrototype) {
{ ExistsInPrototypeContext context;
context.Check("var x; x",
- 0, // get
0,
- 0, // declaration
+ 0,
+ 0,
EXPECT_RESULT, Undefined());
}
@@ -546,7 +557,7 @@ TEST(ExistsInPrototype) {
context.Check("var x = 0; x",
0,
0,
- 0, // declaration
+ 0,
EXPECT_RESULT, Number::New(0));
}
@@ -554,7 +565,7 @@ TEST(ExistsInPrototype) {
context.Check("const x; x",
0,
0,
- 0, // declaration
+ 0,
EXPECT_RESULT, Undefined());
}
@@ -562,7 +573,7 @@ TEST(ExistsInPrototype) {
context.Check("const x = 0; x",
0,
0,
- 0, // declaration
+ 0,
EXPECT_RESULT, Number::New(0));
}
}
@@ -591,7 +602,305 @@ TEST(AbsentInPrototype) {
context.Check("if (false) { var x = 0; }; x",
0,
0,
- 0, // declaration
+ 0,
EXPECT_RESULT, Undefined());
}
}
+
+
+
+class ExistsInHiddenPrototypeContext: public DeclarationContext {
+ public:
+ ExistsInHiddenPrototypeContext() {
+ hidden_proto_ = FunctionTemplate::New();
+ hidden_proto_->SetHiddenPrototype(true);
+ }
+
+ protected:
+ virtual v8::Handle<Integer> Query(Local<String> key) {
+ // Let it seem that the property exists in the hidden prototype object.
+ return Integer::New(v8::None);
+ }
+
+ // Install the hidden prototype after the global object has been created.
+ virtual void PostInitializeContext(Handle<Context> context) {
+ Local<Object> global_object = context->Global();
+ Local<Object> hidden_proto = hidden_proto_->GetFunction()->NewInstance();
+ context->DetachGlobal();
+ context->Global()->SetPrototype(hidden_proto);
+ context->ReattachGlobal(global_object);
+ }
+
+ // Use the hidden prototype as the holder for the interceptors.
+ virtual Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) {
+ return hidden_proto_->InstanceTemplate();
+ }
+
+ private:
+ Local<FunctionTemplate> hidden_proto_;
+};
+
+
+TEST(ExistsInHiddenPrototype) {
+ i::FLAG_es52_globals = true;
+ HandleScope scope;
+
+ { ExistsInHiddenPrototypeContext context;
+ context.Check("var x; x",
+ 1, // access
+ 0,
+ 2, // declaration + initialization
+ EXPECT_EXCEPTION); // x is not defined!
+ }
+
+ { ExistsInHiddenPrototypeContext context;
+ context.Check("var x = 0; x",
+ 1, // access
+ 1, // initialization
+ 2, // declaration + initialization
+ EXPECT_RESULT, Number::New(0));
+ }
+
+ { ExistsInHiddenPrototypeContext context;
+ context.Check("function x() { }; x",
+ 0,
+ 0,
+ 0,
+ EXPECT_RESULT);
+ }
+
+ // TODO(mstarzinger): The semantics of global const is vague.
+ { ExistsInHiddenPrototypeContext context;
+ context.Check("const x; x",
+ 0,
+ 0,
+ 1, // (re-)declaration
+ EXPECT_RESULT, Undefined());
+ }
+
+ // TODO(mstarzinger): The semantics of global const is vague.
+ { ExistsInHiddenPrototypeContext context;
+ context.Check("const x = 0; x",
+ 0,
+ 0,
+ 1, // (re-)declaration
+ EXPECT_RESULT, Number::New(0));
+ }
+}
+
+
+
+class SimpleContext {
+ public:
+ SimpleContext() {
+ context_ = Context::New(0);
+ context_->Enter();
+ }
+
+ virtual ~SimpleContext() {
+ context_->Exit();
+ context_.Dispose();
+ }
+
+ void Check(const char* source,
+ Expectations expectations,
+ v8::Handle<Value> value = Local<Value>()) {
+ HandleScope scope;
+ TryCatch catcher;
+ catcher.SetVerbose(true);
+ Local<Script> script = Script::Compile(String::New(source));
+ if (expectations == EXPECT_ERROR) {
+ CHECK(script.IsEmpty());
+ return;
+ }
+ CHECK(!script.IsEmpty());
+ Local<Value> result = script->Run();
+ if (expectations == EXPECT_RESULT) {
+ CHECK(!catcher.HasCaught());
+ if (!value.IsEmpty()) {
+ CHECK_EQ(value, result);
+ }
+ } else {
+ CHECK(expectations == EXPECT_EXCEPTION);
+ CHECK(catcher.HasCaught());
+ if (!value.IsEmpty()) {
+ CHECK_EQ(value, catcher.Exception());
+ }
+ }
+ }
+
+ private:
+ Persistent<Context> context_;
+};
+
+
+TEST(MultiScriptConflicts) {
+ HandleScope scope;
+
+ { SimpleContext context;
+ context.Check("var x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("var x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ context.Check("const x = 3; x",
+ EXPECT_RESULT, Number::New(3));
+ context.Check("const x = 4; x",
+ EXPECT_RESULT, Number::New(4));
+ context.Check("x = 5; x",
+ EXPECT_RESULT, Number::New(5));
+ context.Check("var x = 6; x",
+ EXPECT_RESULT, Number::New(6));
+ context.Check("this.x",
+ EXPECT_RESULT, Number::New(6));
+ context.Check("function x() { return 7 }; x()",
+ EXPECT_RESULT, Number::New(7));
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("var x = 2; x", // assignment ignored
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 3; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("x = 4; x", // assignment ignored
+ EXPECT_RESULT, Number::New(1));
+ context.Check("var x = 5; x", // assignment ignored
+ EXPECT_RESULT, Number::New(1));
+ context.Check("this.x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("function x() { return 7 }; x",
+ EXPECT_EXCEPTION);
+ }
+
+ i::FLAG_use_strict = true;
+ i::FLAG_harmony_scoping = true;
+
+ { SimpleContext context;
+ context.Check("var x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("this.x",
+ EXPECT_RESULT, Number::New(1));
+ }
+
+ { SimpleContext context;
+ context.Check("function x() { return 4 }; x()",
+ EXPECT_RESULT, Number::New(4));
+ context.Check("x()",
+ EXPECT_RESULT, Number::New(4));
+ context.Check("this.x()",
+ EXPECT_RESULT, Number::New(4));
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ context.Check("x",
+ EXPECT_RESULT, Number::New(2));
+ // TODO(rossberg): The current ES6 draft spec does not reflect lexical
+ // bindings on the global object. However, this will probably change, in
+ // which case we reactivate the following test.
+ // context.Check("this.x",
+ // EXPECT_RESULT, Number::New(2));
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 3; x",
+ EXPECT_RESULT, Number::New(3));
+ context.Check("x",
+ EXPECT_RESULT, Number::New(3));
+ // TODO(rossberg): The current ES6 draft spec does not reflect lexical
+ // bindings on the global object. However, this will probably change, in
+ // which case we reactivate the following test.
+ // context.Check("this.x",
+ // EXPECT_RESULT, Number::New(3));
+ }
+
+ // TODO(rossberg): All of the below should actually be errors in Harmony.
+
+ { SimpleContext context;
+ context.Check("var x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("let x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ }
+
+ { SimpleContext context;
+ context.Check("var x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ }
+
+ { SimpleContext context;
+ context.Check("function x() { return 1 }; x()",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("let x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ }
+
+ { SimpleContext context;
+ context.Check("function x() { return 1 }; x()",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("var x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("let x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("function x() { return 2 }; x()",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("var x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("let x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("function x() { return 2 }; x()",
+ EXPECT_ERROR);
+ }
+}
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 793e228a97..00e38333fc 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -48,24 +48,24 @@ TEST(ObjectHashTable) {
table = PutIntoObjectHashTable(table, a, b);
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_EQ(table->Lookup(*a), *b);
- CHECK_EQ(table->Lookup(*b), HEAP->undefined_value());
+ CHECK_EQ(table->Lookup(*b), HEAP->the_hole_value());
// Keys still have to be valid after objects were moved.
HEAP->CollectGarbage(NEW_SPACE);
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_EQ(table->Lookup(*a), *b);
- CHECK_EQ(table->Lookup(*b), HEAP->undefined_value());
+ CHECK_EQ(table->Lookup(*b), HEAP->the_hole_value());
// Keys that are overwritten should not change number of elements.
table = PutIntoObjectHashTable(table, a, FACTORY->NewJSArray(13));
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_NE(table->Lookup(*a), *b);
- // Keys mapped to undefined should be removed permanently.
- table = PutIntoObjectHashTable(table, a, FACTORY->undefined_value());
+ // Keys mapped to the hole should be removed permanently.
+ table = PutIntoObjectHashTable(table, a, FACTORY->the_hole_value());
CHECK_EQ(table->NumberOfElements(), 0);
CHECK_EQ(table->NumberOfDeletedElements(), 1);
- CHECK_EQ(table->Lookup(*a), HEAP->undefined_value());
+ CHECK_EQ(table->Lookup(*a), HEAP->the_hole_value());
// Keys should map back to their respective values and also should get
// an identity hash code generated.
@@ -85,7 +85,7 @@ TEST(ObjectHashTable) {
Handle<JSObject> key = FACTORY->NewJSArray(7);
CHECK(key->GetIdentityHash(ALLOW_CREATION)->ToObjectChecked()->IsSmi());
CHECK_EQ(table->FindEntry(*key), ObjectHashTable::kNotFound);
- CHECK_EQ(table->Lookup(*key), HEAP->undefined_value());
+ CHECK_EQ(table->Lookup(*key), HEAP->the_hole_value());
CHECK(key->GetIdentityHash(OMIT_CREATION)->ToObjectChecked()->IsSmi());
}
@@ -93,7 +93,7 @@ TEST(ObjectHashTable) {
// should not get an identity hash code generated.
for (int i = 0; i < 100; i++) {
Handle<JSObject> key = FACTORY->NewJSArray(7);
- CHECK_EQ(table->Lookup(*key), HEAP->undefined_value());
+ CHECK_EQ(table->Lookup(*key), HEAP->the_hole_value());
CHECK_EQ(key->GetIdentityHash(OMIT_CREATION), HEAP->undefined_value());
}
}
@@ -105,6 +105,12 @@ TEST(ObjectHashSetCausesGC) {
LocalContext context;
Handle<ObjectHashSet> table = FACTORY->NewObjectHashSet(1);
Handle<JSObject> key = FACTORY->NewJSArray(0);
+ v8::Handle<v8::Object> key_obj = v8::Utils::ToLocal(key);
+
+ // Force allocation of hash table backing store for hidden properties.
+ key_obj->SetHiddenValue(v8_str("key 1"), v8_str("val 1"));
+ key_obj->SetHiddenValue(v8_str("key 2"), v8_str("val 2"));
+ key_obj->SetHiddenValue(v8_str("key 3"), v8_str("val 3"));
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
@@ -128,13 +134,19 @@ TEST(ObjectHashTableCausesGC) {
LocalContext context;
Handle<ObjectHashTable> table = FACTORY->NewObjectHashTable(1);
Handle<JSObject> key = FACTORY->NewJSArray(0);
+ v8::Handle<v8::Object> key_obj = v8::Utils::ToLocal(key);
+
+ // Force allocation of hash table backing store for hidden properties.
+ key_obj->SetHiddenValue(v8_str("key 1"), v8_str("val 1"));
+ key_obj->SetHiddenValue(v8_str("key 2"), v8_str("val 2"));
+ key_obj->SetHiddenValue(v8_str("key 3"), v8_str("val 3"));
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
FLAG_gc_interval = 0;
// Calling Lookup() should not cause GC ever.
- CHECK(table->Lookup(*key)->IsUndefined());
+ CHECK(table->Lookup(*key)->IsTheHole());
// Calling Put() should request GC by returning a failure.
CHECK(table->Put(*key, *key)->IsRetryAfterGC());
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 32f1264f7f..9cb12c4787 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -159,7 +159,7 @@ TEST(Flags6) {
CHECK_EQ(3, FlagList::SetFlagsFromCommandLine(&argc,
const_cast<char **>(argv),
true));
- CHECK_EQ(4, argc);
+ CHECK_EQ(2, argc);
}
@@ -232,3 +232,16 @@ TEST(FlagsJSArguments4) {
CHECK_EQ(0, FLAG_js_arguments.argc());
}
+
+TEST(FlagsRemoveIncomplete) {
+ // Test that processed command line arguments are removed, even
+ // if the list of arguments ends unexpectedly.
+ SetFlagsToDefault();
+ int argc = 3;
+ const char* argv[] = { "", "--crankshaft", "--expose-debug-as" };
+ CHECK_EQ(2, FlagList::SetFlagsFromCommandLine(&argc,
+ const_cast<char **>(argv),
+ true));
+ CHECK_NE(NULL, argv[1]);
+ CHECK_EQ(argc, 2);
+}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 762cc9f0fa..cda6aa005f 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "api.h"
+#include "debug.h"
#include "runtime.h"
#include "cctest.h"
@@ -87,10 +88,10 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
#ifdef ENABLE_DEBUGGER_SUPPORT
// Obtain SharedFunctionInfo for the function.
+ Isolate::Current()->debug()->PrepareForBreakPoints();
Object* shared_func_info_ptr =
- Runtime::FindSharedFunctionInfoInScript(Isolate::Current(),
- i_script,
- func_pos);
+ Isolate::Current()->debug()->FindSharedFunctionInfoInScript(i_script,
+ func_pos);
CHECK(shared_func_info_ptr != HEAP->undefined_value());
Handle<SharedFunctionInfo> shared_func_info(
SharedFunctionInfo::cast(shared_func_info_ptr));
@@ -398,7 +399,9 @@ TEST(AssignmentAndCall) {
// The inferred name is empty, because this is an assignment of a result.
CheckFunctionName(script, "return 1", "");
// See MultipleAssignments test.
- CheckFunctionName(script, "return 2", "Enclosing.Bar");
+  // TODO(2276): Lazy compiling the enclosing outer closure would result
+  // in "Enclosing.Bar" being the inferred name here.
+ CheckFunctionName(script, "return 2", "Bar");
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 9d2755ddc8..1004104dd9 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -714,9 +714,9 @@ TEST(HeapSnapshotObjectsStats) {
LocalContext env;
v8::HeapProfiler::StartHeapObjectsTracking();
-  // We have to call GC 5 times. In other case the garbage will be
-  // the reason of flakiness.
+  // We have to call GC 6 times; otherwise leftover garbage makes the
+  // test flaky.
- for (int i = 0; i < 5; ++i) {
+ for (int i = 0; i < 6; ++i) {
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -1449,6 +1449,36 @@ TEST(FastCaseGetter) {
CHECK_NE(NULL, setterFunction);
}
+TEST(HiddenPropertiesFastCase) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileRun(
+ "function C(x) { this.a = this; this.b = x; }\n"
+ "c = new C(2012);\n");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8_str("HiddenPropertiesFastCase1"));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* c =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "c");
+ CHECK_NE(NULL, c);
+ const v8::HeapGraphNode* hidden_props =
+ GetProperty(c, v8::HeapGraphEdge::kInternal, "hidden_properties");
+ CHECK_EQ(NULL, hidden_props);
+
+ v8::Handle<v8::Value> cHandle = env->Global()->Get(v8::String::New("c"));
+ CHECK(!cHandle.IsEmpty() && cHandle->IsObject());
+ cHandle->ToObject()->SetHiddenValue(v8_str("key"), v8_str("val"));
+
+ snapshot = v8::HeapProfiler::TakeSnapshot(
+ v8_str("HiddenPropertiesFastCase2"));
+ global = GetGlobalObject(snapshot);
+ c = GetProperty(global, v8::HeapGraphEdge::kProperty, "c");
+ CHECK_NE(NULL, c);
+ hidden_props = GetProperty(c, v8::HeapGraphEdge::kInternal,
+ "hidden_properties");
+ CHECK_NE(NULL, hidden_props);
+}
bool HasWeakEdge(const v8::HeapGraphNode* node) {
for (int i = 0; i < node->GetChildrenCount(); ++i) {
@@ -1491,7 +1521,7 @@ TEST(WeakGlobalHandle) {
}
-TEST(WeakGlobalContextRefs) {
+TEST(WeakNativeContextRefs) {
v8::HandleScope scope;
LocalContext env;
@@ -1503,10 +1533,10 @@ TEST(WeakGlobalContextRefs) {
const v8::HeapGraphNode* global_handles = GetNode(
gc_roots, v8::HeapGraphNode::kObject, "(Global handles)");
CHECK_NE(NULL, global_handles);
- const v8::HeapGraphNode* global_context = GetNode(
- global_handles, v8::HeapGraphNode::kHidden, "system / GlobalContext");
- CHECK_NE(NULL, global_context);
- CHECK(HasWeakEdge(global_context));
+ const v8::HeapGraphNode* native_context = GetNode(
+ global_handles, v8::HeapGraphNode::kHidden, "system / NativeContext");
+ CHECK_NE(NULL, native_context);
+ CHECK(HasWeakEdge(native_context));
}
@@ -1529,6 +1559,7 @@ TEST(SfiAndJsFunctionWeakRefs) {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
TEST(NoDebugObjectInSnapshot) {
v8::HandleScope scope;
LocalContext env;
@@ -1551,6 +1582,7 @@ TEST(NoDebugObjectInSnapshot) {
}
CHECK_EQ(1, globals_count);
}
+#endif // ENABLE_DEBUGGER_SUPPORT
TEST(PersistentHandleCount) {
@@ -1626,3 +1658,25 @@ TEST(NoRefsToNonEssentialEntries) {
GetProperty(global_object, v8::HeapGraphEdge::kInternal, "elements");
CHECK_EQ(NULL, elements);
}
+
+
+TEST(MapHasDescriptorsAndTransitions) {
+ v8::HandleScope scope;
+ LocalContext env;
+ CompileRun("obj = { a: 10 };\n");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8_str("snapshot"));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* global_object =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "obj");
+ CHECK_NE(NULL, global_object);
+ const v8::HeapGraphNode* map =
+ GetProperty(global_object, v8::HeapGraphEdge::kInternal, "map");
+ CHECK_NE(NULL, map);
+ const v8::HeapGraphNode* descriptors =
+ GetProperty(map, v8::HeapGraphEdge::kInternal, "descriptors");
+ CHECK_NE(NULL, descriptors);
+ const v8::HeapGraphNode* transitions =
+ GetProperty(map, v8::HeapGraphEdge::kInternal, "transitions");
+ CHECK_NE(NULL, transitions);
+}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index f8f20ab3cc..4b765637f5 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -157,7 +157,8 @@ TEST(HeapObjects) {
String* object_symbol = String::cast(HEAP->Object_symbol());
CHECK(
- Isolate::Current()->context()->global()->HasLocalProperty(object_symbol));
+ Isolate::Current()->context()->global_object()->HasLocalProperty(
+ object_symbol));
// Check ToString for oddballs
CheckOddball(HEAP->true_value(), "true");
@@ -213,7 +214,7 @@ TEST(GarbageCollection) {
Handle<Map> initial_map =
FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Isolate::Current()->context()->global()->SetProperty(
+ Isolate::Current()->context()->global_object()->SetProperty(
*name, *function, NONE, kNonStrictMode)->ToObjectChecked();
// Allocate an object. Unrooted after leaving the scope.
Handle<JSObject> obj = FACTORY->NewJSObject(function);
@@ -229,9 +230,10 @@ TEST(GarbageCollection) {
HEAP->CollectGarbage(NEW_SPACE);
// Function should be alive.
- CHECK(Isolate::Current()->context()->global()->HasLocalProperty(*name));
+ CHECK(Isolate::Current()->context()->global_object()->
+ HasLocalProperty(*name));
// Check function is retained.
- Object* func_value = Isolate::Current()->context()->global()->
+ Object* func_value = Isolate::Current()->context()->global_object()->
GetProperty(*name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
@@ -240,7 +242,7 @@ TEST(GarbageCollection) {
HandleScope inner_scope;
// Allocate another object, make it reachable from global.
Handle<JSObject> obj = FACTORY->NewJSObject(function);
- Isolate::Current()->context()->global()->SetProperty(
+ Isolate::Current()->context()->global_object()->SetProperty(
*obj_name, *obj, NONE, kNonStrictMode)->ToObjectChecked();
obj->SetProperty(
*prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
@@ -249,10 +251,11 @@ TEST(GarbageCollection) {
// After gc, it should survive.
HEAP->CollectGarbage(NEW_SPACE);
- CHECK(Isolate::Current()->context()->global()->HasLocalProperty(*obj_name));
- CHECK(Isolate::Current()->context()->global()->
+ CHECK(Isolate::Current()->context()->global_object()->
+ HasLocalProperty(*obj_name));
+ CHECK(Isolate::Current()->context()->global_object()->
GetProperty(*obj_name)->ToObjectChecked()->IsJSObject());
- Object* obj = Isolate::Current()->context()->global()->
+ Object* obj = Isolate::Current()->context()->global_object()->
GetProperty(*obj_name)->ToObjectChecked();
JSObject* js_obj = JSObject::cast(obj);
CHECK_EQ(Smi::FromInt(23), js_obj->GetProperty(*prop_name));
@@ -415,6 +418,7 @@ TEST(WeakGlobalHandlesMark) {
global_handles->Destroy(h1.location());
}
+
TEST(DeleteWeakGlobalHandle) {
InitializeVM();
GlobalHandles* global_handles = Isolate::Current()->global_handles();
@@ -445,6 +449,7 @@ TEST(DeleteWeakGlobalHandle) {
CHECK(WeakPointerCleared);
}
+
static const char* not_so_random_string_table[] = {
"abstract",
"boolean",
@@ -561,7 +566,7 @@ TEST(ObjectProperties) {
v8::HandleScope sc;
String* object_symbol = String::cast(HEAP->Object_symbol());
- Object* raw_object = Isolate::Current()->context()->global()->
+ Object* raw_object = Isolate::Current()->context()->global_object()->
GetProperty(object_symbol)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
@@ -658,7 +663,7 @@ TEST(JSArray) {
v8::HandleScope sc;
Handle<String> name = FACTORY->LookupAsciiSymbol("Array");
- Object* raw_object = Isolate::Current()->context()->global()->
+ Object* raw_object = Isolate::Current()->context()->global_object()->
GetProperty(*name)->ToObjectChecked();
Handle<JSFunction> function = Handle<JSFunction>(
JSFunction::cast(raw_object));
@@ -705,7 +710,7 @@ TEST(JSObjectCopy) {
v8::HandleScope sc;
String* object_symbol = String::cast(HEAP->Object_symbol());
- Object* raw_object = Isolate::Current()->context()->global()->
+ Object* raw_object = Isolate::Current()->context()->global_object()->
GetProperty(object_symbol)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
@@ -874,7 +879,7 @@ TEST(Regression39128) {
// Step 1: prepare a map for the object. We add 1 inobject property to it.
Handle<JSFunction> object_ctor(
- Isolate::Current()->global_context()->object_function());
+ Isolate::Current()->native_context()->object_function());
CHECK(object_ctor->has_initial_map());
Handle<Map> object_map(object_ctor->initial_map());
// Create a map with single inobject property.
@@ -954,7 +959,7 @@ TEST(TestCodeFlushing) {
}
// Check function is compiled.
- Object* func_value = Isolate::Current()->context()->global()->
+ Object* func_value = Isolate::Current()->context()->global_object()->
GetProperty(*foo_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
@@ -983,10 +988,10 @@ TEST(TestCodeFlushing) {
}
-// Count the number of global contexts in the weak list of global contexts.
-static int CountGlobalContexts() {
+// Count the number of native contexts in the weak list of native contexts.
+int CountNativeContexts() {
int count = 0;
- Object* object = HEAP->global_contexts_list();
+ Object* object = HEAP->native_contexts_list();
while (!object->IsUndefined()) {
count++;
object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
@@ -996,7 +1001,7 @@ static int CountGlobalContexts() {
// Count the number of user functions in the weak list of optimized
-// functions attached to a global context.
+// functions attached to a native context.
static int CountOptimizedUserFunctions(v8::Handle<v8::Context> context) {
int count = 0;
Handle<Context> icontext = v8::Utils::OpenHandle(*context);
@@ -1017,7 +1022,7 @@ TEST(TestInternalWeakLists) {
v8::HandleScope scope;
v8::Persistent<v8::Context> ctx[kNumTestContexts];
- CHECK_EQ(0, CountGlobalContexts());
+ CHECK_EQ(0, CountNativeContexts());
  // Create a number of global contexts which get linked together.
for (int i = 0; i < kNumTestContexts; i++) {
@@ -1025,7 +1030,7 @@ TEST(TestInternalWeakLists) {
bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
- CHECK_EQ(i + 1, CountGlobalContexts());
+ CHECK_EQ(i + 1, CountNativeContexts());
ctx[i]->Enter();
@@ -1085,7 +1090,7 @@ TEST(TestInternalWeakLists) {
// Force compilation cache cleanup.
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- // Dispose the global contexts one by one.
+ // Dispose the native contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
ctx[i].Dispose();
ctx[i].Clear();
@@ -1093,23 +1098,23 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
HEAP->PerformScavenge();
- CHECK_EQ(kNumTestContexts - i, CountGlobalContexts());
+ CHECK_EQ(kNumTestContexts - i, CountNativeContexts());
}
// Mark compact handles the weak references.
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK_EQ(kNumTestContexts - i - 1, CountGlobalContexts());
+ CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts());
}
- CHECK_EQ(0, CountGlobalContexts());
+ CHECK_EQ(0, CountNativeContexts());
}
-// Count the number of global contexts in the weak list of global contexts
+// Count the number of native contexts in the weak list of native contexts
// causing a GC after the specified number of elements.
-static int CountGlobalContextsWithGC(int n) {
+static int CountNativeContextsWithGC(int n) {
int count = 0;
- Handle<Object> object(HEAP->global_contexts_list());
+ Handle<Object> object(HEAP->native_contexts_list());
while (!object->IsUndefined()) {
count++;
if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
@@ -1121,7 +1126,7 @@ static int CountGlobalContextsWithGC(int n) {
// Count the number of user functions in the weak list of optimized
-// functions attached to a global context causing a GC after the
+// functions attached to a native context causing a GC after the
// specified number of elements.
static int CountOptimizedUserFunctionsWithGC(v8::Handle<v8::Context> context,
int n) {
@@ -1147,14 +1152,14 @@ TEST(TestInternalWeakListsTraverseWithGC) {
v8::HandleScope scope;
v8::Persistent<v8::Context> ctx[kNumTestContexts];
- CHECK_EQ(0, CountGlobalContexts());
+ CHECK_EQ(0, CountNativeContexts());
  // Create a number of contexts and check the length of the weak list both
// with and without GCs while iterating the list.
for (int i = 0; i < kNumTestContexts; i++) {
ctx[i] = v8::Context::New();
- CHECK_EQ(i + 1, CountGlobalContexts());
- CHECK_EQ(i + 1, CountGlobalContextsWithGC(i / 2 + 1));
+ CHECK_EQ(i + 1, CountNativeContexts());
+ CHECK_EQ(i + 1, CountNativeContextsWithGC(i / 2 + 1));
}
bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
@@ -1198,6 +1203,7 @@ TEST(TestSizeOfObjects) {
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(HEAP->old_pointer_space()->IsSweepingComplete());
int initial_size = static_cast<int>(HEAP->SizeOfObjects());
@@ -1267,6 +1273,7 @@ static void FillUpNewSpace(NewSpace* new_space) {
// that the scavenger does not undo the filling.
v8::HandleScope scope;
AlwaysAllocateScope always_allocate;
+ LinearAllocationScope allocate_linearly;
intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
for (intptr_t i = 0; i < number_of_fillers; i++) {
@@ -1279,7 +1286,8 @@ TEST(GrowAndShrinkNewSpace) {
InitializeVM();
NewSpace* new_space = HEAP->new_space();
- if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
+ if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize() ||
+ HEAP->MaxSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
    // The max size cannot exceed the reserved size, since semispaces must
    // always be within the reserved space. We can't test new space growing and
// shrinking if the reserved size is the same as the minimum (initial) size.
@@ -1327,7 +1335,8 @@ TEST(GrowAndShrinkNewSpace) {
TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
InitializeVM();
- if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
+ if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize() ||
+ HEAP->MaxSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
    // The max size cannot exceed the reserved size, since semispaces must
    // always be within the reserved space. We can't test new space growing and
// shrinking if the reserved size is the same as the minimum (initial) size.
@@ -1360,7 +1369,7 @@ static int NumberOfGlobalObjects() {
// Test that we don't embed maps from foreign contexts into
// optimized code.
-TEST(LeakGlobalContextViaMap) {
+TEST(LeakNativeContextViaMap) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope outer_scope;
v8::Persistent<v8::Context> ctx1 = v8::Context::New();
@@ -1397,7 +1406,7 @@ TEST(LeakGlobalContextViaMap) {
// Test that we don't embed functions from foreign contexts into
// optimized code.
-TEST(LeakGlobalContextViaFunction) {
+TEST(LeakNativeContextViaFunction) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope outer_scope;
v8::Persistent<v8::Context> ctx1 = v8::Context::New();
@@ -1432,7 +1441,7 @@ TEST(LeakGlobalContextViaFunction) {
}
-TEST(LeakGlobalContextViaMapKeyed) {
+TEST(LeakNativeContextViaMapKeyed) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope outer_scope;
v8::Persistent<v8::Context> ctx1 = v8::Context::New();
@@ -1467,7 +1476,7 @@ TEST(LeakGlobalContextViaMapKeyed) {
}
-TEST(LeakGlobalContextViaMapProto) {
+TEST(LeakNativeContextViaMapProto) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope outer_scope;
v8::Persistent<v8::Context> ctx1 = v8::Context::New();
@@ -1586,7 +1595,7 @@ TEST(PrototypeTransitionClearing) {
CHECK_EQ(transitions, baseObject->map()->NumberOfProtoTransitions());
  // Verify that the prototype transitions array was compacted.
- FixedArray* trans = baseObject->map()->prototype_transitions();
+ FixedArray* trans = baseObject->map()->GetPrototypeTransitions();
for (int i = 0; i < transitions; i++) {
int j = Map::kProtoTransitionHeaderSize +
i * Map::kProtoTransitionElementsPerEntry;
@@ -1607,7 +1616,8 @@ TEST(PrototypeTransitionClearing) {
// clearing correctly records slots in prototype transition array.
i::FLAG_always_compact = true;
Handle<Map> map(baseObject->map());
- CHECK(!space->LastPage()->Contains(map->prototype_transitions()->address()));
+ CHECK(!space->LastPage()->Contains(
+ map->GetPrototypeTransitions()->address()));
CHECK(space->LastPage()->Contains(prototype->address()));
baseObject->SetPrototype(*prototype, false)->ToObjectChecked();
CHECK(map->GetPrototypeTransition(*prototype)->IsMap());
@@ -1742,14 +1752,20 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
static int CountMapTransitions(Map* map) {
- int result = 0;
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsTransitionOnly(i)) {
- result++;
- }
+ return map->transitions()->number_of_transitions();
+}
+
+
+// Go through all incremental marking steps in one swoop.
+static void SimulateIncrementalMarking() {
+ IncrementalMarking* marking = HEAP->incremental_marking();
+ CHECK(marking->IsStopped());
+ marking->Start();
+ CHECK(marking->IsMarking());
+ while (!marking->IsComplete()) {
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
}
- return result;
+ CHECK(marking->IsComplete());
}
@@ -1760,14 +1776,18 @@ TEST(Regress1465) {
i::FLAG_trace_incremental_marking = true;
InitializeVM();
v8::HandleScope scope;
+ static const int transitions_count = 256;
- #define TRANSITION_COUNT 256
- for (int i = 0; i < TRANSITION_COUNT; i++) {
- EmbeddedVector<char, 64> buffer;
- OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i);
- CompileRun(buffer.start());
+ {
+ AlwaysAllocateScope always_allocate;
+ for (int i = 0; i < transitions_count; i++) {
+ EmbeddedVector<char, 64> buffer;
+ OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i);
+ CompileRun(buffer.start());
+ }
+ CompileRun("var root = new Object;");
}
- CompileRun("var root = new Object;");
+
Handle<JSObject> root =
v8::Utils::OpenHandle(
*v8::Handle<v8::Object>::Cast(
@@ -1776,19 +1796,10 @@ TEST(Regress1465) {
// Count number of live transitions before marking.
int transitions_before = CountMapTransitions(root->map());
CompileRun("%DebugPrint(root);");
- CHECK_EQ(TRANSITION_COUNT, transitions_before);
+ CHECK_EQ(transitions_count, transitions_before);
- // Go through all incremental marking steps in one swoop.
- IncrementalMarking* marking = HEAP->incremental_marking();
- CHECK(marking->IsStopped());
- marking->Start();
- CHECK(marking->IsMarking());
- while (!marking->IsComplete()) {
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- }
- CHECK(marking->IsComplete());
+ SimulateIncrementalMarking();
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(marking->IsStopped());
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -1810,15 +1821,7 @@ TEST(Regress2143a) {
"root.foo = 0;"
"root = new Object;");
- // Go through all incremental marking steps in one swoop.
- IncrementalMarking* marking = HEAP->incremental_marking();
- CHECK(marking->IsStopped());
- marking->Start();
- CHECK(marking->IsMarking());
- while (!marking->IsComplete()) {
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- }
- CHECK(marking->IsComplete());
+ SimulateIncrementalMarking();
// Compile a StoreIC that performs the prepared map transition. This
// will restart incremental marking and should make sure the root is
@@ -1834,7 +1837,6 @@ TEST(Regress2143a) {
// Explicitly request GC to perform final marking step and sweeping.
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(marking->IsStopped());
Handle<JSObject> root =
v8::Utils::OpenHandle(
@@ -1860,15 +1862,7 @@ TEST(Regress2143b) {
"root.foo = 0;"
"root = new Object;");
- // Go through all incremental marking steps in one swoop.
- IncrementalMarking* marking = HEAP->incremental_marking();
- CHECK(marking->IsStopped());
- marking->Start();
- CHECK(marking->IsMarking());
- while (!marking->IsComplete()) {
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- }
- CHECK(marking->IsComplete());
+ SimulateIncrementalMarking();
// Compile an optimized LStoreNamedField that performs the prepared
// map transition. This will restart incremental marking and should
@@ -1887,7 +1881,6 @@ TEST(Regress2143b) {
// Explicitly request GC to perform final marking step and sweeping.
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(marking->IsStopped());
Handle<JSObject> root =
v8::Utils::OpenHandle(
@@ -1906,6 +1899,9 @@ void SimulateFullSpace(PagedSpace* space);
TEST(ReleaseOverReservedPages) {
i::FLAG_trace_gc = true;
+ // The optimizer can allocate stuff, messing up the test.
+ i::FLAG_crankshaft = false;
+ i::FLAG_always_opt = false;
InitializeVM();
v8::HandleScope scope;
static const int number_of_test_pages = 20;
@@ -1937,3 +1933,308 @@ TEST(ReleaseOverReservedPages) {
HEAP->CollectAllAvailableGarbage("triggered really hard");
CHECK_EQ(1, old_pointer_space->CountTotalPages());
}
+
+
+TEST(Regress2237) {
+ InitializeVM();
+ v8::HandleScope scope;
+ Handle<String> slice(HEAP->empty_string());
+
+ {
+ // Generate a parent that lives in new-space.
+ v8::HandleScope inner_scope;
+ const char* c = "This text is long enough to trigger sliced strings.";
+ Handle<String> s = FACTORY->NewStringFromAscii(CStrVector(c));
+ CHECK(s->IsSeqAsciiString());
+ CHECK(HEAP->InNewSpace(*s));
+
+ // Generate a sliced string that is based on the above parent and
+ // lives in old-space.
+ FillUpNewSpace(HEAP->new_space());
+ AlwaysAllocateScope always_allocate;
+ Handle<String> t;
+ // TODO(mstarzinger): Unfortunately FillUpNewSpace() still leaves
+ // some slack, so we need to allocate a few sliced strings.
+ for (int i = 0; i < 16; i++) {
+ t = FACTORY->NewProperSubString(s, 5, 35);
+ }
+ CHECK(t->IsSlicedString());
+ CHECK(!HEAP->InNewSpace(*t));
+ *slice.location() = *t.location();
+ }
+
+ CHECK(SlicedString::cast(*slice)->parent()->IsSeqAsciiString());
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CHECK(SlicedString::cast(*slice)->parent()->IsSeqAsciiString());
+}
+
+
+#ifdef OBJECT_PRINT
+TEST(PrintSharedFunctionInfo) {
+ InitializeVM();
+ v8::HandleScope scope;
+ const char* source = "f = function() { return 987654321; }\n"
+ "g = function() { return 123456789; }\n";
+ CompileRun(source);
+ Handle<JSFunction> g =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("g"))));
+
+ AssertNoAllocation no_alloc;
+ g->shared()->PrintLn();
+}
+#endif // OBJECT_PRINT
+
+
+TEST(Regress2211) {
+ InitializeVM();
+ v8::HandleScope scope;
+
+ v8::Handle<v8::String> value = v8_str("val string");
+ Smi* hash = Smi::FromInt(321);
+ Heap* heap = Isolate::Current()->heap();
+
+ for (int i = 0; i < 2; i++) {
+ // Store identity hash first and common hidden property second.
+ v8::Handle<v8::Object> obj = v8::Object::New();
+ Handle<JSObject> internal_obj = v8::Utils::OpenHandle(*obj);
+ CHECK(internal_obj->HasFastProperties());
+
+ // In the first iteration, set hidden value first and identity hash second.
+ // In the second iteration, reverse the order.
+ if (i == 0) obj->SetHiddenValue(v8_str("key string"), value);
+ MaybeObject* maybe_obj = internal_obj->SetIdentityHash(hash,
+ ALLOW_CREATION);
+ CHECK(!maybe_obj->IsFailure());
+ if (i == 1) obj->SetHiddenValue(v8_str("key string"), value);
+
+ // Check values.
+ CHECK_EQ(hash,
+ internal_obj->GetHiddenProperty(heap->identity_hash_symbol()));
+ CHECK(value->Equals(obj->GetHiddenValue(v8_str("key string"))));
+
+ // Check size.
+ DescriptorArray* descriptors = internal_obj->map()->instance_descriptors();
+ ObjectHashTable* hashtable = ObjectHashTable::cast(
+ internal_obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
+ // HashTable header (5) and 4 initial entries (8).
+ CHECK_LE(hashtable->SizeFor(hashtable->length()), 13 * kPointerSize);
+ }
+}
+
+
+TEST(IncrementalMarkingClearsTypeFeedbackCells) {
+ if (i::FLAG_always_opt) return;
+ InitializeVM();
+ v8::HandleScope scope;
+ v8::Local<v8::Value> fun1, fun2;
+
+ {
+ LocalContext env;
+ CompileRun("function fun() {};");
+ fun1 = env->Global()->Get(v8_str("fun"));
+ }
+
+ {
+ LocalContext env;
+ CompileRun("function fun() {};");
+ fun2 = env->Global()->Get(v8_str("fun"));
+ }
+
+ // Prepare function f that contains type feedback for closures
+ // originating from two different native contexts.
+ v8::Context::GetCurrent()->Global()->Set(v8_str("fun1"), fun1);
+ v8::Context::GetCurrent()->Global()->Set(v8_str("fun2"), fun2);
+ CompileRun("function f(a, b) { a(); b(); } f(fun1, fun2);");
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ Handle<TypeFeedbackCells> cells(TypeFeedbackInfo::cast(
+ f->shared()->code()->type_feedback_info())->type_feedback_cells());
+
+ CHECK_EQ(2, cells->CellCount());
+ CHECK(cells->Cell(0)->value()->IsJSFunction());
+ CHECK(cells->Cell(1)->value()->IsJSFunction());
+
+ SimulateIncrementalMarking();
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+
+ CHECK_EQ(2, cells->CellCount());
+ CHECK(cells->Cell(0)->value()->IsTheHole());
+ CHECK(cells->Cell(1)->value()->IsTheHole());
+}
+
+
+static Code* FindFirstIC(Code* code, Code::Kind kind) {
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_CONTEXT);
+ for (RelocIterator it(code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Code* target = Code::GetCodeFromTargetAddress(info->target_address());
+ if (target->is_inline_cache_stub() && target->kind() == kind) {
+ return target;
+ }
+ }
+ return NULL;
+}
+
+
+TEST(IncrementalMarkingPreservesMonomorhpicIC) {
+ if (i::FLAG_always_opt) return;
+ InitializeVM();
+ v8::HandleScope scope;
+
+  // Prepare function f that contains a monomorphic IC for an object
+  // originating from the same native context.
+ CompileRun("function fun() { this.x = 1; }; var obj = new fun();"
+ "function f(o) { return o.x; } f(obj); f(obj);");
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+
+ Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
+ CHECK(ic_before->ic_state() == MONOMORPHIC);
+
+ // Fire context dispose notification.
+ v8::V8::ContextDisposedNotification();
+ SimulateIncrementalMarking();
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+
+ Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
+ CHECK(ic_after->ic_state() == MONOMORPHIC);
+}
+
+
+TEST(IncrementalMarkingClearsMonomorhpicIC) {
+ if (i::FLAG_always_opt) return;
+ InitializeVM();
+ v8::HandleScope scope;
+ v8::Local<v8::Value> obj1;
+
+ {
+ LocalContext env;
+ CompileRun("function fun() { this.x = 1; }; var obj = new fun();");
+ obj1 = env->Global()->Get(v8_str("obj"));
+ }
+
+  // Prepare function f that contains a monomorphic IC for an object
+  // originating from a different native context.
+ v8::Context::GetCurrent()->Global()->Set(v8_str("obj1"), obj1);
+ CompileRun("function f(o) { return o.x; } f(obj1); f(obj1);");
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+
+ Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
+ CHECK(ic_before->ic_state() == MONOMORPHIC);
+
+ // Fire context dispose notification.
+ v8::V8::ContextDisposedNotification();
+ SimulateIncrementalMarking();
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+
+ Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
+ CHECK(ic_after->ic_state() == UNINITIALIZED);
+}
+
+
+TEST(IncrementalMarkingClearsPolymorhpicIC) {
+ if (i::FLAG_always_opt) return;
+ InitializeVM();
+ v8::HandleScope scope;
+ v8::Local<v8::Value> obj1, obj2;
+
+ {
+ LocalContext env;
+ CompileRun("function fun() { this.x = 1; }; var obj = new fun();");
+ obj1 = env->Global()->Get(v8_str("obj"));
+ }
+
+ {
+ LocalContext env;
+ CompileRun("function fun() { this.x = 2; }; var obj = new fun();");
+ obj2 = env->Global()->Get(v8_str("obj"));
+ }
+
+ // Prepare function f that contains a polymorphic IC for objects
+ // originating from two different native contexts.
+ v8::Context::GetCurrent()->Global()->Set(v8_str("obj1"), obj1);
+ v8::Context::GetCurrent()->Global()->Set(v8_str("obj2"), obj2);
+ CompileRun("function f(o) { return o.x; } f(obj1); f(obj1); f(obj2);");
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+
+ Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
+ CHECK(ic_before->ic_state() == MEGAMORPHIC);
+
+ // Fire context dispose notification.
+ v8::V8::ContextDisposedNotification();
+ SimulateIncrementalMarking();
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+
+ Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
+ CHECK(ic_after->ic_state() == UNINITIALIZED);
+}
+
+
+class SourceResource: public v8::String::ExternalAsciiStringResource {
+ public:
+ explicit SourceResource(const char* data)
+ : data_(data), length_(strlen(data)) { }
+
+ virtual void Dispose() {
+ i::DeleteArray(data_);
+ data_ = NULL;
+ }
+
+ const char* data() const { return data_; }
+
+ size_t length() const { return length_; }
+
+ bool IsDisposed() { return data_ == NULL; }
+
+ private:
+ const char* data_;
+ size_t length_;
+};
+
+
+TEST(ReleaseStackTraceData) {
+ // Test that the data retained by the Error.stack accessor is released
+  // after the first time the accessor is fired. We use an external string
+  // to check whether the data is being released, since the external string
+ // resource's callback is fired when the external string is GC'ed.
+ InitializeVM();
+ v8::HandleScope scope;
+ static const char* source = "var error = 1; "
+ "try { "
+ " throw new Error(); "
+ "} catch (e) { "
+ " error = e; "
+ "} ";
+ SourceResource* resource = new SourceResource(i::StrDup(source));
+ {
+ v8::HandleScope scope;
+ v8::Handle<v8::String> source_string = v8::String::NewExternal(resource);
+ v8::Script::Compile(source_string)->Run();
+ CHECK(!resource->IsDisposed());
+ }
+ HEAP->CollectAllAvailableGarbage();
+ // External source is being retained by the stack trace.
+ CHECK(!resource->IsDisposed());
+
+ CompileRun("error.stack; error.stack;");
+ HEAP->CollectAllAvailableGarbage();
+ // External source has been released.
+ CHECK(resource->IsDisposed());
+
+ delete resource;
+}
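The new heap tests above share one shape: drive incremental marking to completion via the SimulateIncrementalMarking() helper introduced earlier in this file, then request a full GC so that weak references (type feedback cells, cross-context IC targets) actually get cleared. A minimal sketch of that pattern, assuming the cctest harness from this file (InitializeVM, CompileRun, HEAP) and the V8 3.13 internal headers; the test name and body are illustrative only, not part of the commit:

    // Sketch: marking-then-sweeping skeleton used by the IC tests above.
    TEST(SketchMarkingThenSweep) {
      InitializeVM();
      v8::HandleScope scope;
      CompileRun("var o = {x: 1};");      // allocate something to mark
      SimulateIncrementalMarking();       // all marking steps in one swoop
      HEAP->CollectAllGarbage(Heap::kNoGCFlags);  // final marking + sweeping
    }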
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index 013de026f2..2c89a387dc 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -81,8 +81,8 @@ class ListDiffOutputWriter : public Comparator::Output {
(*next_chunk_pointer_) = NULL;
}
void AddChunk(int pos1, int pos2, int len1, int len2) {
- current_chunk_ =
- new(Isolate::Current()->zone()) DiffChunkStruct(pos1, pos2, len1, len2);
+ current_chunk_ = new(Isolate::Current()->runtime_zone()) DiffChunkStruct(
+ pos1, pos2, len1, len2);
(*next_chunk_pointer_) = current_chunk_;
next_chunk_pointer_ = &current_chunk_->next;
}
@@ -96,7 +96,7 @@ void CompareStringsOneWay(const char* s1, const char* s2,
int expected_diff_parameter = -1) {
StringCompareInput input(s1, s2);
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
DiffChunkStruct* first_chunk;
ListDiffOutputWriter writer(&first_chunk);
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 27123704b1..18c63b2fd0 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -194,7 +194,7 @@ TEST(MarkCompactCollector) {
Map::cast(HEAP->AllocateMap(JS_OBJECT_TYPE,
JSObject::kHeaderSize)->ToObjectChecked());
function->set_initial_map(initial_map);
- Isolate::Current()->context()->global()->SetProperty(
+ Isolate::Current()->context()->global_object()->SetProperty(
func_name, function, NONE, kNonStrictMode)->ToObjectChecked();
JSObject* obj = JSObject::cast(
@@ -203,8 +203,9 @@ TEST(MarkCompactCollector) {
func_name =
String::cast(HEAP->LookupAsciiSymbol("theFunction")->ToObjectChecked());
- CHECK(Isolate::Current()->context()->global()->HasLocalProperty(func_name));
- Object* func_value = Isolate::Current()->context()->global()->
+ CHECK(Isolate::Current()->context()->global_object()->
+ HasLocalProperty(func_name));
+ Object* func_value = Isolate::Current()->context()->global_object()->
GetProperty(func_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
function = JSFunction::cast(func_value);
@@ -212,7 +213,7 @@ TEST(MarkCompactCollector) {
obj = JSObject::cast(HEAP->AllocateJSObject(function)->ToObjectChecked());
String* obj_name =
String::cast(HEAP->LookupAsciiSymbol("theObject")->ToObjectChecked());
- Isolate::Current()->context()->global()->SetProperty(
+ Isolate::Current()->context()->global_object()->SetProperty(
obj_name, obj, NONE, kNonStrictMode)->ToObjectChecked();
String* prop_name =
String::cast(HEAP->LookupAsciiSymbol("theSlot")->ToObjectChecked());
@@ -225,10 +226,11 @@ TEST(MarkCompactCollector) {
obj_name =
String::cast(HEAP->LookupAsciiSymbol("theObject")->ToObjectChecked());
- CHECK(Isolate::Current()->context()->global()->HasLocalProperty(obj_name));
- CHECK(Isolate::Current()->context()->global()->
+ CHECK(Isolate::Current()->context()->global_object()->
+ HasLocalProperty(obj_name));
+ CHECK(Isolate::Current()->context()->global_object()->
GetProperty(obj_name)->ToObjectChecked()->IsJSObject());
- obj = JSObject::cast(Isolate::Current()->context()->global()->
+ obj = JSObject::cast(Isolate::Current()->context()->global_object()->
GetProperty(obj_name)->ToObjectChecked());
prop_name =
String::cast(HEAP->LookupAsciiSymbol("theSlot")->ToObjectChecked());
@@ -526,7 +528,10 @@ static intptr_t MemoryInUse() {
TEST(BootUpMemoryUse) {
intptr_t initial_memory = MemoryInUse();
- FLAG_crankshaft = false; // Avoid flakiness.
+ // Avoid flakiness.
+ FLAG_crankshaft = false;
+ FLAG_parallel_recompilation = false;
+
// Only Linux has the proc filesystem and only if it is mapped. If it's not
// there we just skip the test.
if (initial_memory >= 0) {
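The mark-compact hunks are dominated by a single rename: Context::global() becomes Context::global_object(), in line with the global-context/native-context split applied throughout this upgrade. A two-line sketch of the updated access pattern, assuming the internal 3.13 headers; func_name stands in for any symbol looked up earlier:

    // Sketch: property lookup on the global object after the rename.
    Object* func_value = Isolate::Current()->context()->global_object()->
        GetProperty(func_name)->ToObjectChecked();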
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index b9123f01f0..717c66519f 100755
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -354,7 +354,8 @@ TEST(Regress928) {
v8::HandleScope handles;
i::Handle<i::String> source(
FACTORY->NewStringFromAscii(i::CStrVector(program)));
- i::ScriptDataImpl* data = i::ParserApi::PartialPreParse(source, NULL, false);
+ i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
+ i::ScriptDataImpl* data = i::ParserApi::PreParse(&stream, NULL, false);
CHECK(!data->HasError());
data->Initialize();
@@ -1016,12 +1017,11 @@ TEST(ScopePositions) {
FACTORY->NewStringFromUtf8(i::CStrVector(program.start())));
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = FACTORY->NewScript(source);
- i::Parser parser(script, i::kAllowLazy | i::EXTENDED_MODE, NULL, NULL,
- i::Isolate::Current()->zone());
- i::CompilationInfo info(script);
+ i::CompilationInfoWithZone info(script);
+ i::Parser parser(&info, i::kAllowLazy | i::EXTENDED_MODE, NULL, NULL);
info.MarkAsGlobal();
info.SetLanguageMode(source_data[i].language_mode);
- i::FunctionLiteral* function = parser.ParseProgram(&info);
+ i::FunctionLiteral* function = parser.ParseProgram();
CHECK(function != NULL);
// Check scope types and positions.
@@ -1061,10 +1061,10 @@ void TestParserSync(i::Handle<i::String> source, int flags) {
i::Handle<i::Script> script = FACTORY->NewScript(source);
bool save_harmony_scoping = i::FLAG_harmony_scoping;
i::FLAG_harmony_scoping = harmony_scoping;
- i::Parser parser(script, flags, NULL, NULL, i::Isolate::Current()->zone());
- i::CompilationInfo info(script);
+ i::CompilationInfoWithZone info(script);
+ i::Parser parser(&info, flags, NULL, NULL);
info.MarkAsGlobal();
- i::FunctionLiteral* function = parser.ParseProgram(&info);
+ i::FunctionLiteral* function = parser.ParseProgram();
i::FLAG_harmony_scoping = save_harmony_scoping;
i::String* type_string = NULL;
@@ -1148,6 +1148,7 @@ TEST(ParserSync) {
{ "with ({})", "" },
{ "switch (12) { case 12: ", "}" },
{ "switch (12) { default: ", "}" },
+ { "switch (12) { ", "case 12: }" },
{ "label2: ", "" },
{ NULL, NULL }
};
@@ -1237,3 +1238,26 @@ TEST(ParserSync) {
}
}
}
+
+
+TEST(PreparserStrictOctal) {
+  // Test that a syntax error caused by an octal literal is reported
+  // correctly as such (issue 2220).
+ v8::internal::FLAG_min_preparse_length = 1; // Force preparsing.
+ v8::V8::Initialize();
+ v8::HandleScope scope;
+ v8::Context::Scope context_scope(v8::Context::New());
+ v8::TryCatch try_catch;
+ const char* script =
+ "\"use strict\"; \n"
+ "a = function() { \n"
+ " b = function() { \n"
+ " 01; \n"
+ " }; \n"
+ "}; \n";
+ v8::Script::Compile(v8::String::New(script));
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value exception(try_catch.Exception());
+ CHECK_EQ("SyntaxError: Octal literals are not allowed in strict mode.",
+ *exception);
+}
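The parsing tests track a parser API change: a Parser is now constructed from a CompilationInfo (CompilationInfoWithZone owns the zone that used to be passed explicitly), and ParseProgram() no longer takes the info as an argument. A condensed sketch of the new setup, mirroring the hunks above and assuming script is an existing i::Handle<i::Script>:

    // Sketch: parser construction after the CompilationInfoWithZone change.
    i::CompilationInfoWithZone info(script);
    i::Parser parser(&info, i::kAllowLazy, NULL, NULL);
    info.MarkAsGlobal();
    i::FunctionLiteral* function = parser.ParseProgram();
    CHECK(function != NULL);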
diff --git a/deps/v8/test/cctest/test-platform-linux.cc b/deps/v8/test/cctest/test-platform-linux.cc
index 2a8d497850..47b99f084b 100644
--- a/deps/v8/test/cctest/test-platform-linux.cc
+++ b/deps/v8/test/cctest/test-platform-linux.cc
@@ -79,3 +79,9 @@ TEST(VirtualMemory) {
CHECK(vm->Uncommit(block_addr, block_size));
delete vm;
}
+
+
+TEST(GetCurrentProcessId) {
+ OS::SetUp();
+ CHECK_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
+}
diff --git a/deps/v8/test/cctest/test-platform-win32.cc b/deps/v8/test/cctest/test-platform-win32.cc
index 36b30aaceb..668ccdb0ea 100644
--- a/deps/v8/test/cctest/test-platform-win32.cc
+++ b/deps/v8/test/cctest/test-platform-win32.cc
@@ -25,3 +25,10 @@ TEST(VirtualMemory) {
CHECK(vm->Uncommit(block_addr, block_size));
delete vm;
}
+
+
+TEST(GetCurrentProcessId) {
+ OS::SetUp();
+ CHECK_EQ(static_cast<int>(::GetCurrentProcessId()),
+ OS::GetCurrentProcessId());
+}
diff --git a/deps/v8/test/cctest/test-random.cc b/deps/v8/test/cctest/test-random.cc
index a1f49318f5..86d6d8c144 100644
--- a/deps/v8/test/cctest/test-random.cc
+++ b/deps/v8/test/cctest/test-random.cc
@@ -52,7 +52,7 @@ void TestSeeds(Handle<JSFunction> fun,
uint32_t state0,
uint32_t state1) {
bool has_pending_exception;
- Handle<JSObject> global(context->global());
+ Handle<JSObject> global(context->global_object());
Handle<ByteArray> seeds(context->random_seed());
SetSeeds(seeds, state0, state1);
@@ -77,7 +77,7 @@ TEST(CrankshaftRandom) {
env->Enter();
Handle<Context> context(Isolate::Current()->context());
- Handle<JSObject> global(context->global());
+ Handle<JSObject> global(context->global_object());
Handle<ByteArray> seeds(context->random_seed());
bool has_pending_exception;
@@ -85,7 +85,7 @@ TEST(CrankshaftRandom) {
Object* symbol = FACTORY->LookupAsciiSymbol("f")->ToObjectChecked();
MaybeObject* fun_object =
- context->global()->GetProperty(String::cast(symbol));
+ context->global_object()->GetProperty(String::cast(symbol));
Handle<JSFunction> fun(JSFunction::cast(fun_object->ToObjectChecked()));
// Optimize function.
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 325c686063..e433b925e8 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -72,24 +72,26 @@ using namespace v8::internal;
static bool CheckParse(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
- return v8::internal::RegExpParser::ParseRegExp(&reader, false, &result);
+ return v8::internal::RegExpParser::ParseRegExp(
+ &reader, false, &result, Isolate::Current()->runtime_zone());
}
static SmartArrayPointer<const char> Parse(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
- CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
+ CHECK(v8::internal::RegExpParser::ParseRegExp(
+ &reader, false, &result, Isolate::Current()->runtime_zone()));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
SmartArrayPointer<const char> output =
- result.tree->ToString(Isolate::Current()->zone());
+ result.tree->ToString(Isolate::Current()->runtime_zone());
return output;
}
@@ -97,10 +99,11 @@ static bool CheckSimple(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
- CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
+ CHECK(v8::internal::RegExpParser::ParseRegExp(
+ &reader, false, &result, Isolate::Current()->runtime_zone()));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
return result.simple;
@@ -115,10 +118,11 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
- CHECK(v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
+ CHECK(v8::internal::RegExpParser::ParseRegExp(
+ &reader, false, &result, Isolate::Current()->runtime_zone()));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
int min_match = result.tree->min_match();
@@ -263,6 +267,7 @@ TEST(Parser) {
CHECK_PARSE_EQ("\\u003z", "'u003z'");
CHECK_PARSE_EQ("foo[z]*", "(: 'foo' (# 0 - g [z]))");
+ CHECK_SIMPLE("", false);
CHECK_SIMPLE("a", true);
CHECK_SIMPLE("a|b", false);
CHECK_SIMPLE("a\\n", false);
@@ -386,10 +391,11 @@ static void ExpectError(const char* input,
const char* expected) {
V8::Initialize(NULL);
v8::HandleScope scope;
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
- CHECK(!v8::internal::RegExpParser::ParseRegExp(&reader, false, &result));
+ CHECK(!v8::internal::RegExpParser::ParseRegExp(
+ &reader, false, &result, Isolate::Current()->runtime_zone()));
CHECK(result.tree == NULL);
CHECK(!result.error.is_null());
SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
@@ -469,8 +475,8 @@ static bool NotWord(uc16 c) {
static void TestCharacterClassEscapes(uc16 c, bool (pred)(uc16 c)) {
- ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
- Zone* zone = Isolate::Current()->zone();
+ ZoneScope scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ Zone* zone = Isolate::Current()->runtime_zone();
ZoneList<CharacterRange>* ranges =
new(zone) ZoneList<CharacterRange>(2, zone);
CharacterRange::AddClassEscape(c, ranges, zone);
@@ -503,7 +509,8 @@ static RegExpNode* Compile(const char* input, bool multiline, bool is_ascii) {
FlatStringReader reader(isolate, CStrVector(input));
RegExpCompileData compile_data;
if (!v8::internal::RegExpParser::ParseRegExp(&reader, multiline,
- &compile_data))
+ &compile_data,
+ isolate->runtime_zone()))
return NULL;
Handle<String> pattern = isolate->factory()->
NewStringFromUtf8(CStrVector(input));
@@ -516,7 +523,7 @@ static RegExpNode* Compile(const char* input, bool multiline, bool is_ascii) {
pattern,
sample_subject,
is_ascii,
- isolate->zone());
+ isolate->runtime_zone());
return compile_data.node;
}
@@ -526,7 +533,7 @@ static void Execute(const char* input,
bool is_ascii,
bool dot_output = false) {
v8::HandleScope scope;
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
RegExpNode* node = Compile(input, multiline, is_ascii);
USE(node);
#ifdef DEBUG
@@ -566,8 +573,8 @@ static unsigned PseudoRandom(int i, int j) {
TEST(SplayTreeSimple) {
v8::internal::V8::Initialize(NULL);
static const unsigned kLimit = 1000;
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- ZoneSplayTree<TestConfig> tree(Isolate::Current()->zone());
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ ZoneSplayTree<TestConfig> tree(Isolate::Current()->runtime_zone());
bool seen[kLimit];
for (unsigned i = 0; i < kLimit; i++) seen[i] = false;
#define CHECK_MAPS_EQUAL() do { \
@@ -634,13 +641,13 @@ TEST(DispatchTableConstruction) {
}
}
// Enter test data into dispatch table.
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- DispatchTable table(Isolate::Current()->zone());
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ DispatchTable table(Isolate::Current()->runtime_zone());
for (int i = 0; i < kRangeCount; i++) {
uc16* range = ranges[i];
for (int j = 0; j < 2 * kRangeSize; j += 2)
table.AddRange(CharacterRange(range[j], range[j + 1]), i,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
}
// Check that the table looks as we would expect
for (int p = 0; p < kLimit; p++) {
@@ -702,7 +709,8 @@ typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
class ContextInitializer {
public:
ContextInitializer()
- : env_(), scope_(), zone_(Isolate::Current(), DELETE_ON_EXIT) {
+ : env_(), scope_(), zone_(Isolate::Current()->runtime_zone(),
+ DELETE_ON_EXIT) {
env_ = v8::Context::New();
env_->Enter();
}
@@ -741,7 +749,7 @@ TEST(MacroAssemblerNativeSuccess) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
m.Succeed();
@@ -777,7 +785,7 @@ TEST(MacroAssemblerNativeSimple) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
@@ -835,7 +843,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
@@ -898,7 +906,7 @@ TEST(MacroAssemblerNativeBacktrack) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
Label fail;
Label backtrack;
@@ -937,7 +945,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -985,7 +993,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -1036,7 +1044,7 @@ TEST(MacroAssemblernativeAtStart) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
Label not_at_start, newline, fail;
m.CheckNotAtStart(&not_at_start);
@@ -1094,7 +1102,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
Label fail, succ;
@@ -1152,7 +1160,7 @@ TEST(MacroAssemblerNativeRegisters) {
Factory* factory = Isolate::Current()->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 6,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
@@ -1255,7 +1263,7 @@ TEST(MacroAssemblerStackOverflow) {
Factory* factory = isolate->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
Label loop;
m.Bind(&loop);
@@ -1294,7 +1302,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
Factory* factory = isolate->factory();
ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 2,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
// At least 2048, to ensure the allocated space for registers
  // spans one full page.
@@ -1341,7 +1349,8 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
TEST(MacroAssembler) {
V8::Initialize(NULL);
byte codes[1024];
- RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024));
+ RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024),
+ Isolate::Current()->runtime_zone());
// ^f(o)o.
Label fail, fail2, start;
uc16 foo_chars[3];
@@ -1411,8 +1420,8 @@ TEST(AddInverseToTable) {
static const int kLimit = 1000;
static const int kRangeCount = 16;
for (int t = 0; t < 10; t++) {
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- Zone* zone = Isolate::Current()->zone();
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ Zone* zone = Isolate::Current()->runtime_zone();
ZoneList<CharacterRange>* ranges =
new(zone)
ZoneList<CharacterRange>(kRangeCount, zone);
@@ -1423,7 +1432,8 @@ TEST(AddInverseToTable) {
ranges->Add(CharacterRange(from, to), zone);
}
DispatchTable table(zone);
- DispatchTableConstructor cons(&table, false, Isolate::Current()->zone());
+ DispatchTableConstructor cons(&table, false,
+ Isolate::Current()->runtime_zone());
cons.set_choice_index(0);
cons.AddInverse(ranges);
for (int i = 0; i < kLimit; i++) {
@@ -1434,13 +1444,14 @@ TEST(AddInverseToTable) {
CHECK_EQ(is_on, set->Get(0) == false);
}
}
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- Zone* zone = Isolate::Current()->zone();
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ Zone* zone = Isolate::Current()->runtime_zone();
ZoneList<CharacterRange>* ranges =
new(zone) ZoneList<CharacterRange>(1, zone);
ranges->Add(CharacterRange(0xFFF0, 0xFFFE), zone);
DispatchTable table(zone);
- DispatchTableConstructor cons(&table, false, Isolate::Current()->zone());
+ DispatchTableConstructor cons(&table, false,
+ Isolate::Current()->runtime_zone());
cons.set_choice_index(0);
cons.AddInverse(ranges);
CHECK(!table.Get(0xFFFE)->Get(0));
@@ -1548,8 +1559,8 @@ TEST(UncanonicalizeEquivalence) {
static void TestRangeCaseIndependence(CharacterRange input,
Vector<CharacterRange> expected) {
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- Zone* zone = Isolate::Current()->zone();
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ Zone* zone = Isolate::Current()->runtime_zone();
int count = expected.length();
ZoneList<CharacterRange>* list =
new(zone) ZoneList<CharacterRange>(count, zone);
@@ -1614,8 +1625,8 @@ static bool InClass(uc16 c, ZoneList<CharacterRange>* ranges) {
TEST(CharClassDifference) {
v8::internal::V8::Initialize(NULL);
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- Zone* zone = Isolate::Current()->zone();
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ Zone* zone = Isolate::Current()->runtime_zone();
ZoneList<CharacterRange>* base =
new(zone) ZoneList<CharacterRange>(1, zone);
base->Add(CharacterRange::Everything(), zone);
@@ -1623,7 +1634,7 @@ TEST(CharClassDifference) {
ZoneList<CharacterRange>* included = NULL;
ZoneList<CharacterRange>* excluded = NULL;
CharacterRange::Split(base, overlay, &included, &excluded,
- Isolate::Current()->zone());
+ Isolate::Current()->runtime_zone());
for (int i = 0; i < (1 << 16); i++) {
bool in_base = InClass(i, base);
if (in_base) {
@@ -1644,8 +1655,8 @@ TEST(CharClassDifference) {
TEST(CanonicalizeCharacterSets) {
v8::internal::V8::Initialize(NULL);
- ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
- Zone* zone = Isolate::Current()->zone();
+ ZoneScope scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ Zone* zone = Isolate::Current()->runtime_zone();
ZoneList<CharacterRange>* list =
new(zone) ZoneList<CharacterRange>(4, zone);
CharacterSet set(list);
@@ -1707,10 +1718,10 @@ TEST(CanonicalizeCharacterSets) {
TEST(CharacterRangeMerge) {
v8::internal::V8::Initialize(NULL);
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
- ZoneList<CharacterRange> l1(4, Isolate::Current()->zone());
- ZoneList<CharacterRange> l2(4, Isolate::Current()->zone());
- Zone* zone = Isolate::Current()->zone();
+ ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ ZoneList<CharacterRange> l1(4, Isolate::Current()->runtime_zone());
+ ZoneList<CharacterRange> l2(4, Isolate::Current()->runtime_zone());
+ Zone* zone = Isolate::Current()->runtime_zone();
// Create all combinations of intersections of ranges, both singletons and
// longer.
@@ -1788,9 +1799,9 @@ TEST(CharacterRangeMerge) {
ASSERT(CharacterRange::IsCanonical(&l1));
ASSERT(CharacterRange::IsCanonical(&l2));
- ZoneList<CharacterRange> first_only(4, Isolate::Current()->zone());
- ZoneList<CharacterRange> second_only(4, Isolate::Current()->zone());
- ZoneList<CharacterRange> both(4, Isolate::Current()->zone());
+ ZoneList<CharacterRange> first_only(4, Isolate::Current()->runtime_zone());
+ ZoneList<CharacterRange> second_only(4, Isolate::Current()->runtime_zone());
+ ZoneList<CharacterRange> both(4, Isolate::Current()->runtime_zone());
}
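Every regexp-test hunk makes the same pair of substitutions: the isolate's anonymous zone() becomes runtime_zone(), and RegExpParser::ParseRegExp() takes the zone as an explicit final argument. A sketch of a parse call in the new style, assuming the 3.13 internal headers; the pattern string is arbitrary:

    // Sketch: regexp parsing with the zone threaded explicitly.
    ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
    FlatStringReader reader(Isolate::Current(), CStrVector("a|b"));
    RegExpCompileData result;
    CHECK(v8::internal::RegExpParser::ParseRegExp(
        &reader, false, &result, Isolate::Current()->runtime_zone()));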
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index e426e7bd21..c4654868aa 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -250,18 +250,22 @@ static void Serialize() {
// Test that the whole heap can be serialized.
TEST(Serialize) {
- Serializer::Enable();
- v8::V8::Initialize();
- Serialize();
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
+ Serializer::Enable();
+ v8::V8::Initialize();
+ Serialize();
+ }
}
// Test that heap serialization is non-destructive.
TEST(SerializeTwice) {
- Serializer::Enable();
- v8::V8::Initialize();
- Serialize();
- Serialize();
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
+ Serializer::Enable();
+ v8::V8::Initialize();
+ Serialize();
+ Serialize();
+ }
}
@@ -278,8 +282,8 @@ static void SanityCheck() {
#ifdef DEBUG
HEAP->Verify();
#endif
- CHECK(Isolate::Current()->global()->IsJSObject());
- CHECK(Isolate::Current()->global_context()->IsContext());
+ CHECK(Isolate::Current()->global_object()->IsJSObject());
+ CHECK(Isolate::Current()->native_context()->IsContext());
CHECK(HEAP->symbol_table()->IsSymbolTable());
CHECK(!FACTORY->LookupAsciiSymbol("Empty")->IsFailure());
}
@@ -289,7 +293,7 @@ DEPENDENT_TEST(Deserialize, Serialize) {
// The serialize-deserialize tests only work if the VM is built without
// serialization. That doesn't matter. We don't need to be able to
// serialize a snapshot in a VM that is booted from a snapshot.
- if (!Snapshot::IsEnabled()) {
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
v8::HandleScope scope;
Deserialize();
@@ -302,7 +306,7 @@ DEPENDENT_TEST(Deserialize, Serialize) {
DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
- if (!Snapshot::IsEnabled()) {
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
v8::HandleScope scope;
Deserialize();
@@ -315,7 +319,7 @@ DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
- if (!Snapshot::IsEnabled()) {
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
v8::HandleScope scope;
Deserialize();
@@ -332,7 +336,7 @@ DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
SerializeTwice) {
- if (!Snapshot::IsEnabled()) {
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
v8::HandleScope scope;
Deserialize();
@@ -348,52 +352,55 @@ DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
TEST(PartialSerialization) {
- Serializer::Enable();
- v8::V8::Initialize();
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
+ Serializer::Enable();
+ v8::V8::Initialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
- ASSERT(!env.IsEmpty());
- env->Enter();
- // Make sure all builtin scripts are cached.
- { HandleScope scope;
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ ASSERT(!env.IsEmpty());
+ env->Enter();
+ // Make sure all builtin scripts are cached.
+ { HandleScope scope;
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ }
}
- }
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- Object* raw_foo;
- {
- v8::HandleScope handle_scope;
- v8::Local<v8::String> foo = v8::String::New("foo");
- ASSERT(!foo.IsEmpty());
- raw_foo = *(v8::Utils::OpenHandle(*foo));
- }
+ Object* raw_foo;
+ {
+ v8::HandleScope handle_scope;
+ v8::Local<v8::String> foo = v8::String::New("foo");
+ ASSERT(!foo.IsEmpty());
+ raw_foo = *(v8::Utils::OpenHandle(*foo));
+ }
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- env->Exit();
- env.Dispose();
+ env->Exit();
+ env.Dispose();
- FileByteSink startup_sink(startup_name.start());
- startup_name.Dispose();
- StartupSerializer startup_serializer(&startup_sink);
- startup_serializer.SerializeStrongReferences();
-
- FileByteSink partial_sink(FLAG_testing_serialization_file);
- PartialSerializer p_ser(&startup_serializer, &partial_sink);
- p_ser.Serialize(&raw_foo);
- startup_serializer.SerializeWeakReferences();
- partial_sink.WriteSpaceUsed(p_ser.CurrentAllocationAddress(NEW_SPACE),
- p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
- p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
- p_ser.CurrentAllocationAddress(CODE_SPACE),
- p_ser.CurrentAllocationAddress(MAP_SPACE),
- p_ser.CurrentAllocationAddress(CELL_SPACE),
- p_ser.CurrentAllocationAddress(LO_SPACE));
+ FileByteSink startup_sink(startup_name.start());
+ startup_name.Dispose();
+ StartupSerializer startup_serializer(&startup_sink);
+ startup_serializer.SerializeStrongReferences();
+
+ FileByteSink partial_sink(FLAG_testing_serialization_file);
+ PartialSerializer p_ser(&startup_serializer, &partial_sink);
+ p_ser.Serialize(&raw_foo);
+ startup_serializer.SerializeWeakReferences();
+ partial_sink.WriteSpaceUsed(
+ p_ser.CurrentAllocationAddress(NEW_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ p_ser.CurrentAllocationAddress(CODE_SPACE),
+ p_ser.CurrentAllocationAddress(MAP_SPACE),
+ p_ser.CurrentAllocationAddress(CELL_SPACE),
+ p_ser.CurrentAllocationAddress(LO_SPACE));
+ }
}
@@ -471,53 +478,56 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
TEST(ContextSerialization) {
- Serializer::Enable();
- v8::V8::Initialize();
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
+ Serializer::Enable();
+ v8::V8::Initialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
- ASSERT(!env.IsEmpty());
- env->Enter();
- // Make sure all builtin scripts are cached.
- { HandleScope scope;
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ ASSERT(!env.IsEmpty());
+ env->Enter();
+ // Make sure all builtin scripts are cached.
+ { HandleScope scope;
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ }
}
- }
- // If we don't do this then we end up with a stray root pointing at the
- // context even after we have disposed of env.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ // If we don't do this then we end up with a stray root pointing at the
+ // context even after we have disposed of env.
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- env->Exit();
+ env->Exit();
- Object* raw_context = *(v8::Utils::OpenHandle(*env));
+ Object* raw_context = *(v8::Utils::OpenHandle(*env));
- env.Dispose();
+ env.Dispose();
- FileByteSink startup_sink(startup_name.start());
- startup_name.Dispose();
- StartupSerializer startup_serializer(&startup_sink);
- startup_serializer.SerializeStrongReferences();
-
- FileByteSink partial_sink(FLAG_testing_serialization_file);
- PartialSerializer p_ser(&startup_serializer, &partial_sink);
- p_ser.Serialize(&raw_context);
- startup_serializer.SerializeWeakReferences();
- partial_sink.WriteSpaceUsed(p_ser.CurrentAllocationAddress(NEW_SPACE),
- p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
- p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
- p_ser.CurrentAllocationAddress(CODE_SPACE),
- p_ser.CurrentAllocationAddress(MAP_SPACE),
- p_ser.CurrentAllocationAddress(CELL_SPACE),
- p_ser.CurrentAllocationAddress(LO_SPACE));
+ FileByteSink startup_sink(startup_name.start());
+ startup_name.Dispose();
+ StartupSerializer startup_serializer(&startup_sink);
+ startup_serializer.SerializeStrongReferences();
+
+ FileByteSink partial_sink(FLAG_testing_serialization_file);
+ PartialSerializer p_ser(&startup_serializer, &partial_sink);
+ p_ser.Serialize(&raw_context);
+ startup_serializer.SerializeWeakReferences();
+ partial_sink.WriteSpaceUsed(
+ p_ser.CurrentAllocationAddress(NEW_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ p_ser.CurrentAllocationAddress(CODE_SPACE),
+ p_ser.CurrentAllocationAddress(MAP_SPACE),
+ p_ser.CurrentAllocationAddress(CELL_SPACE),
+ p_ser.CurrentAllocationAddress(LO_SPACE));
+ }
}
DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
- if (!Snapshot::IsEnabled()) {
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
@@ -574,6 +584,7 @@ TEST(LinearAllocation) {
HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
size); // Large object space.
LinearAllocationScope linear_allocation_scope;
+ DisallowAllocationFailure disallow_allocation_failure;
const int kSmallFixedArrayLength = 4;
const int kSmallFixedArraySize =
FixedArray::kHeaderSize + kSmallFixedArrayLength * kPointerSize;
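The serialization tests gain a common guard: they run only when the VM was not booted from a snapshot, since such a build cannot re-serialize the heap, and the more precise Snapshot::HaveASnapshotToStartFrom() replaces Snapshot::IsEnabled() in the dependent tests. The resulting skeleton, as used by the rewritten tests above (a sketch, reusing the Serialize() helper defined earlier in this file):

    TEST(SketchGuardedSerialize) {
      if (!Snapshot::HaveASnapshotToStartFrom()) {
        Serializer::Enable();
        v8::V8::Initialize();
        Serialize();
      }
    }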
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 7cddff3309..5a9ccbb579 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Check that we can traverse very deep stacks of ConsStrings using
// StringInputBuffer. Check that Get(int) works on very deep stacks
@@ -11,6 +11,7 @@
#include "api.h"
#include "factory.h"
+#include "objects.h"
#include "cctest.h"
#include "zone-inl.h"
@@ -82,7 +83,7 @@ static void InitializeBuildingBlocks(
Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
// A list of pointers that we don't have any interest in cleaning up.
// If they are reachable from a root then leak detection won't complain.
- Zone* zone = Isolate::Current()->zone();
+ Zone* zone = Isolate::Current()->runtime_zone();
for (int i = 0; i < NUMBER_OF_BUILDING_BLOCKS; i++) {
int len = gen() % 16;
if (len > 14) {
@@ -234,7 +235,7 @@ TEST(Traverse) {
InitializeVM();
v8::HandleScope scope;
Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS];
- ZoneScope zone(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
InitializeBuildingBlocks(building_blocks);
Handle<String> flat = ConstructBalanced(building_blocks);
FlattenString(flat);
@@ -349,11 +350,11 @@ TEST(Utf8Conversion) {
TEST(ExternalShortStringAdd) {
- ZoneScope zonescope(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zonescope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
InitializeVM();
v8::HandleScope handle_scope;
- Zone* zone = Isolate::Current()->zone();
+ Zone* zone = Isolate::Current()->runtime_zone();
// Make sure we cover all always-flat lengths and at least one above.
static const int kMaxLength = 20;
@@ -440,7 +441,7 @@ TEST(CachedHashOverflow) {
// We incorrectly allowed strings to be tagged as array indices even if their
// values didn't fit in the hash field.
// See http://code.google.com/p/v8/issues/detail?id=728
- ZoneScope zone(Isolate::Current(), DELETE_ON_EXIT);
+ ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
InitializeVM();
v8::HandleScope handle_scope;
@@ -691,3 +692,26 @@ TEST(RegExpOverflow) {
CHECK(result.IsEmpty());
CHECK(context->HasOutOfMemoryException());
}
+
+
+TEST(StringReplaceAtomTwoByteResult) {
+ InitializeVM();
+ HandleScope scope;
+ LocalContext context;
+ v8::Local<v8::Value> result = CompileRun(
+ "var subject = 'ascii~only~string~'; "
+ "var replace = '\x80'; "
+ "subject.replace(/~/g, replace); ");
+ CHECK(result->IsString());
+ Handle<String> string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+ CHECK(string->IsSeqTwoByteString());
+
+ v8::Local<v8::String> expected = v8_str("ascii\x80only\x80string\x80");
+ CHECK(expected->Equals(result));
+}
+
+
+TEST(IsAscii) {
+ CHECK(String::IsAscii(static_cast<char*>(NULL), 0));
+ CHECK(String::IsAscii(static_cast<uc16*>(NULL), 0));
+}
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index df8ff72e4f..c83acb909a 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -55,6 +55,22 @@ TEST(Utils1) {
CHECK_EQ(-2, -8 >> 2);
CHECK_EQ(-2, static_cast<int8_t>(-8) >> 2);
CHECK_EQ(-2, static_cast<int>(static_cast<intptr_t>(-8) >> 2));
+
+ CHECK_EQ(-1000000, FastD2IChecked(-1000000.0));
+ CHECK_EQ(-1, FastD2IChecked(-1.0));
+ CHECK_EQ(0, FastD2IChecked(0.0));
+ CHECK_EQ(1, FastD2IChecked(1.0));
+ CHECK_EQ(1000000, FastD2IChecked(1000000.0));
+
+ CHECK_EQ(-1000000, FastD2IChecked(-1000000.123));
+ CHECK_EQ(-1, FastD2IChecked(-1.234));
+ CHECK_EQ(0, FastD2IChecked(0.345));
+ CHECK_EQ(1, FastD2IChecked(1.234));
+ CHECK_EQ(1000000, FastD2IChecked(1000000.123));
+
+ CHECK_EQ(INT_MAX, FastD2IChecked(1.0e100));
+ CHECK_EQ(INT_MIN, FastD2IChecked(-1.0e100));
+ CHECK_EQ(INT_MIN, FastD2IChecked(OS::nan_value()));
}
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index f1387e8a4f..532edfc26d 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -93,7 +93,8 @@ class CcTestConfiguration(test.TestConfiguration):
if utils.IsWindows():
executable += '.exe'
executable = join(self.context.buildspace, executable)
- output = test.Execute([executable, '--list'], self.context)
+ full_command = self.context.processor([executable, '--list'])
+ output = test.Execute(full_command, self.context)
if output.exit_code != 0:
print output.stdout
print output.stderr
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index af467e699e..9cb58264e0 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -41,9 +41,10 @@ class MessageTestCase(test.TestCase):
self.config = config
def IgnoreLine(self, str):
- """Ignore empty lines and valgrind output."""
+ """Ignore empty lines, valgrind output and Android output."""
if not str: return True
- else: return str.startswith('==') or str.startswith('**')
+ return (str.startswith('==') or str.startswith('**') or
+ str.startswith('ANDROID'))
def IsFailureOutput(self, output):
f = file(self.expected)
@@ -62,7 +63,7 @@ class MessageTestCase(test.TestCase):
pattern = '^%s$' % pattern
patterns.append(pattern)
# Compare actual output with the expected
- raw_lines = output.stdout.split('\n')
+ raw_lines = output.stdout.splitlines()
outlines = [ s for s in raw_lines if not self.IgnoreLine(s) ]
if len(outlines) != len(patterns):
return True
@@ -80,9 +81,9 @@ class MessageTestCase(test.TestCase):
def GetCommand(self):
result = self.config.context.GetVmCommand(self, self.mode)
source = open(self.file).read()
- flags_match = FLAGS_PATTERN.search(source)
- if flags_match:
- result += flags_match.group(1).strip().split()
+ flags_match = re.findall(FLAGS_PATTERN, source)
+ for match in flags_match:
+ result += match.strip().split()
result.append(self.file)
return result
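
Switching from FLAGS_PATTERN.search to re.findall means every '// Flags:' line in a test contributes to the command line, not just the first. Several tests in this commit depend on that, e.g. the reflowed header of alloc-object-huge.js:

    // Flags: --allow-natives-syntax --inline-construct
    // Flags: --max-inlined-source-size=999999 --max-inlined-nodes=999999
    // Flags: --max-inlined-nodes-cumulative=999999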
diff --git a/deps/v8/test/mjsunit/accessor-map-sharing.js b/deps/v8/test/mjsunit/accessor-map-sharing.js
index ab45afab05..3afce37d92 100644
--- a/deps/v8/test/mjsunit/accessor-map-sharing.js
+++ b/deps/v8/test/mjsunit/accessor-map-sharing.js
@@ -35,7 +35,7 @@ function getter() { return 111; }
function setter(x) { print(222); }
function anotherGetter() { return 333; }
function anotherSetter(x) { print(444); }
-var obj1, obj2;
+var obj1, obj2, obj3, obj4;
// Two objects with the same getter.
obj1 = {};
@@ -174,3 +174,19 @@ assertEquals(getter, gop(obj1, "papa").get);
assertEquals(setter, gop(obj1, "papa").set);
assertTrue(gop(obj1, "papa").configurable);
assertFalse(gop(obj1, "papa").enumerable);
+
+// Two objects with the same getter on the prototype chain.
+obj1 = {};
+dp(obj1, "quebec", { get: getter });
+obj2 = Object.create(obj1);
+obj3 = Object.create(obj2);
+obj4 = Object.create(obj2);
+assertTrue(%HaveSameMap(obj3, obj4));
+
+// Two objects with the same setter on the prototype chain.
+obj1 = {};
+dp(obj1, "romeo", { set: setter });
+obj2 = Object.create(obj1);
+obj3 = Object.create(obj2);
+obj4 = Object.create(obj2);
+assertTrue(%HaveSameMap(obj3, obj4));
diff --git a/deps/v8/test/mjsunit/array-bounds-check-removal.js b/deps/v8/test/mjsunit/array-bounds-check-removal.js
index 81064aa237..0ab3096326 100644
--- a/deps/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/deps/v8/test/mjsunit/array-bounds-check-removal.js
@@ -123,7 +123,7 @@ check_test_minus(7,false);
// ALWAYS: 3
// NEVER: 4
-if (false) {
+// Test that we still deopt on failed bounds checks.
test_base(5,true);
test_base(6,true);
test_base(5,false);
@@ -139,7 +139,21 @@ test_base(6,false);
%OptimizeFunctionOnNextCall(test_base);
test_base(2048,true);
assertTrue(%GetOptimizationStatus(test_base) != 1);
+
+// Specific test on negative offsets
+var short_a = new Array(100);
+for (var i = 0; i < short_a.length; i++) short_a[i] = 0;
+function short_test(a, i) {
+ a[i + 9] = 0;
+ a[i - 10] = 0;
}
+short_test(short_a, 50);
+short_test(short_a, 50);
+%OptimizeFunctionOnNextCall(short_test);
+short_a.length = 10;
+short_test(short_a, 0);
+assertTrue(%GetOptimizationStatus(short_test) != 1);
+
gc();
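
The arithmetic behind the new negative-offset case:

    // After short_a.length = 10, short_test(short_a, 0) stores to
    //   short_a[0 + 9]   -> index 9, the last in-bounds element, and
    //   short_a[0 - 10]  -> a negative index, so the bounds check fails
    // and the optimized code deoptimizes (optimization status != 1).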
diff --git a/deps/v8/test/mjsunit/array-iteration.js b/deps/v8/test/mjsunit/array-iteration.js
index 0ee2e6e9ac..033bb5443a 100644
--- a/deps/v8/test/mjsunit/array-iteration.js
+++ b/deps/v8/test/mjsunit/array-iteration.js
@@ -40,7 +40,7 @@
// Simple use.
var a = [0,1];
assertArrayEquals([0], a.filter(function(n) { return n == 0; }));
- assertArrayEquals(a, a);
+ assertArrayEquals([0,1], a);
// Use specified object as this object when calling the function.
var o = { value: 42 }
diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js
index a96719d448..d4c0c305fc 100644
--- a/deps/v8/test/mjsunit/array-literal-transitions.js
+++ b/deps/v8/test/mjsunit/array-literal-transitions.js
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --noparallel-recompilation
+
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
// time sticks if built with snapshot. If --smi-only-arrays is deactivated
diff --git a/deps/v8/test/mjsunit/assert-opt-and-deopt.js b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
index 51cb99adc3..c79d92349e 100644
--- a/deps/v8/test/mjsunit/assert-opt-and-deopt.js
+++ b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --noparallel-recompilation
/**
* This class shows how to use %GetOptimizationCount() and
diff --git a/deps/v8/test/mjsunit/compiler/alloc-object-huge.js b/deps/v8/test/mjsunit/compiler/alloc-object-huge.js
index 0b202f7580..b0a981d6c2 100644
--- a/deps/v8/test/mjsunit/compiler/alloc-object-huge.js
+++ b/deps/v8/test/mjsunit/compiler/alloc-object-huge.js
@@ -25,7 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --inline-construct --max-inlined-source-size=999999 --max-inlined-nodes=999999 --max-inlined-nodes-cumulative=999999
+// Flags: --allow-natives-syntax --inline-construct
+// Flags: --max-inlined-source-size=999999 --max-inlined-nodes=999999
+// Flags: --max-inlined-nodes-cumulative=999999
// Test that huge constructors (more than 256 this assignments) are
// handled correctly.
diff --git a/deps/v8/test/mjsunit/compiler/inline-accessors.js b/deps/v8/test/mjsunit/compiler/inline-accessors.js
new file mode 100644
index 0000000000..a4cf7ae8c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/inline-accessors.js
@@ -0,0 +1,368 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --inline-accessors --max-opt-count=100
+
+var accessorCallCount, setterArgument, setterValue, obj, forceDeopt;
+
+// -----------------------------------------------------------------------------
+// Helpers for testing inlining of getters.
+
+function TestInlinedGetter(context, obj, expected) {
+ forceDeopt = { deopt: 0 };
+ accessorCallCount = 0;
+
+ assertEquals(expected, context(obj));
+ assertEquals(1, accessorCallCount);
+
+ assertEquals(expected, context(obj));
+ assertEquals(2, accessorCallCount);
+
+ %OptimizeFunctionOnNextCall(context);
+ assertEquals(expected, context(obj));
+ assertEquals(3, accessorCallCount);
+
+ forceDeopt = { /* empty*/ };
+ assertEquals(expected, context(obj));
+ assertEquals(4, accessorCallCount);
+}
+
+
+function value_context_for_getter(obj) {
+ return obj.getterProperty;
+}
+
+function test_context_for_getter(obj) {
+ if (obj.getterProperty) {
+ return 111;
+ } else {
+ return 222;
+ }
+}
+
+function effect_context_for_getter(obj) {
+ obj.getterProperty;
+ return 5678;
+}
+
+function TryGetter(context, getter, obj, expected, expectException) {
+ try {
+ TestInlinedGetter(context, obj, expected);
+ assertFalse(expectException);
+ } catch (exception) {
+ assertTrue(expectException);
+ assertEquals(7, exception.stack.split('\n').length);
+ }
+ %DeoptimizeFunction(context);
+ %ClearFunctionTypeFeedback(context);
+ %ClearFunctionTypeFeedback(getter);
+}
+
+function TestGetterInAllContexts(getter, obj, expected, expectException) {
+ TryGetter(value_context_for_getter, getter, obj, expected, expectException);
+ TryGetter(test_context_for_getter, getter, obj, expected ? 111 : 222,
+ expectException);
+ TryGetter(effect_context_for_getter, getter, obj, 5678, expectException);
+}
+
+// -----------------------------------------------------------------------------
+// Test getter returning something 'true'ish in all contexts.
+
+function getter1() {
+ assertSame(obj, this);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ return 1234;
+}
+
+function ConstrG1() { }
+obj = Object.defineProperty(new ConstrG1(), "getterProperty", { get: getter1 });
+TestGetterInAllContexts(getter1, obj, 1234, false);
+obj = Object.create(obj);
+TestGetterInAllContexts(getter1, obj, 1234, false);
+
+// -----------------------------------------------------------------------------
+// Test getter returning false in all contexts.
+
+function getter2() {
+ assertSame(obj, this);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ return false;
+}
+
+function ConstrG2() { }
+obj = Object.defineProperty(new ConstrG2(), "getterProperty", { get: getter2 });
+TestGetterInAllContexts(getter2, obj, false, false);
+obj = Object.create(obj);
+TestGetterInAllContexts(getter2, obj, false, false);
+
+// -----------------------------------------------------------------------------
+// Test getter without a return in all contexts.
+
+function getter3() {
+ assertSame(obj, this);
+ accessorCallCount++;
+ forceDeopt.deopt;
+}
+
+function ConstrG3() { }
+obj = Object.defineProperty(new ConstrG3(), "getterProperty", { get: getter3 });
+TestGetterInAllContexts(getter3, obj, undefined, false);
+obj = Object.create(obj);
+TestGetterInAllContexts(getter3, obj, undefined, false);
+
+// -----------------------------------------------------------------------------
+// Test getter with too many arguments without a return in all contexts.
+
+function getter4(a) {
+ assertSame(obj, this);
+ assertEquals(undefined, a);
+ accessorCallCount++;
+ forceDeopt.deopt;
+}
+
+function ConstrG4() { }
+obj = Object.defineProperty(new ConstrG4(), "getterProperty", { get: getter4 });
+TestGetterInAllContexts(getter4, obj, undefined, false);
+obj = Object.create(obj);
+TestGetterInAllContexts(getter4, obj, undefined, false);
+
+// -----------------------------------------------------------------------------
+// Test getter with too many arguments with a return in all contexts.
+
+function getter5(a) {
+ assertSame(obj, this);
+ assertEquals(undefined, a);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ return 9876;
+}
+
+function ConstrG5() { }
+obj = Object.defineProperty(new ConstrG5(), "getterProperty", { get: getter5 });
+TestGetterInAllContexts(getter5, obj, 9876, false);
+obj = Object.create(obj);
+TestGetterInAllContexts(getter5, obj, 9876, false);
+
+// -----------------------------------------------------------------------------
+// Test getter which throws from optimized code.
+
+function getter6() {
+ assertSame(obj, this);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ if (accessorCallCount == 4) { 123 in null; }
+ return 13579;
+}
+
+function ConstrG6() { }
+obj = Object.defineProperty(new ConstrG6(), "getterProperty", { get: getter6 });
+TestGetterInAllContexts(getter6, obj, 13579, true);
+obj = Object.create(obj);
+TestGetterInAllContexts(getter6, obj, 13579, true);
+
+// -----------------------------------------------------------------------------
+// Helpers for testing inlining of setters.
+
+function TestInlinedSetter(context, obj, value, expected) {
+ forceDeopt = { deopt: 0 };
+ accessorCallCount = 0;
+ setterArgument = value;
+
+ assertEquals(expected, context(obj, value));
+ assertEquals(value, setterValue);
+ assertEquals(1, accessorCallCount);
+
+ assertEquals(expected, context(obj, value));
+ assertEquals(value, setterValue);
+ assertEquals(2, accessorCallCount);
+
+ %OptimizeFunctionOnNextCall(context);
+ assertEquals(expected, context(obj, value));
+ assertEquals(value, setterValue);
+ assertEquals(3, accessorCallCount);
+
+ forceDeopt = { /* empty*/ };
+ assertEquals(expected, context(obj, value));
+ assertEquals(value, setterValue);
+ assertEquals(4, accessorCallCount);
+}
+
+function value_context_for_setter(obj, value) {
+ return obj.setterProperty = value;
+}
+
+function test_context_for_setter(obj, value) {
+ if (obj.setterProperty = value) {
+ return 333;
+ } else {
+ return 444;
+ }
+}
+
+function effect_context_for_setter(obj, value) {
+ obj.setterProperty = value;
+ return 666;
+}
+
+function TrySetter(context, setter, obj, expectException, value, expected) {
+ try {
+ TestInlinedSetter(context, obj, value, expected);
+ assertFalse(expectException);
+ } catch (exception) {
+ assertTrue(expectException);
+ assertEquals(7, exception.stack.split('\n').length);
+ }
+ %DeoptimizeFunction(context);
+ %ClearFunctionTypeFeedback(context);
+ %ClearFunctionTypeFeedback(setter);
+}
+
+function TestSetterInAllContexts(setter, obj, expectException) {
+ TrySetter(value_context_for_setter, setter, obj, expectException, 111, 111);
+ TrySetter(test_context_for_setter, setter, obj, expectException, true, 333);
+ TrySetter(test_context_for_setter, setter, obj, expectException, false, 444);
+ TrySetter(effect_context_for_setter, setter, obj, expectException, 555, 666);
+}
+
+// -----------------------------------------------------------------------------
+// Test setter without a return in all contexts.
+
+function setter1(value) {
+ assertSame(obj, this);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ setterValue = value;
+}
+
+function ConstrS1() { }
+obj = Object.defineProperty(new ConstrS1(), "setterProperty", { set: setter1 });
+TestSetterInAllContexts(setter1, obj, false);
+obj = Object.create(obj);
+TestSetterInAllContexts(setter1, obj, false);
+
+// -----------------------------------------------------------------------------
+// Test setter returning something different than the RHS in all contexts.
+
+function setter2(value) {
+ assertSame(obj, this);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ setterValue = value;
+ return 1000000;
+}
+
+function ConstrS2() { }
+obj = Object.defineProperty(new ConstrS2(), "setterProperty", { set: setter2 });
+TestSetterInAllContexts(setter2, obj, false);
+obj = Object.create(obj);
+TestSetterInAllContexts(setter2, obj, false);
+
+// -----------------------------------------------------------------------------
+// Test setter with too few arguments without a return in all contexts.
+
+function setter3() {
+ assertSame(obj, this);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ setterValue = setterArgument;
+}
+
+function ConstrS3() { }
+obj = Object.defineProperty(new ConstrS3(), "setterProperty", { set: setter3 });
+TestSetterInAllContexts(setter3, obj, false);
+obj = Object.create(obj);
+TestSetterInAllContexts(setter3, obj, false);
+
+// -----------------------------------------------------------------------------
+// Test setter with too few arguments with a return in all contexts.
+
+function setter4() {
+ assertSame(obj, this);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ setterValue = setterArgument;
+ return 2000000;
+}
+
+function ConstrS4() { }
+obj = Object.defineProperty(new ConstrS4(), "setterProperty", { set: setter4 });
+TestSetterInAllContexts(setter4, obj, false);
+obj = Object.create(obj);
+TestSetterInAllContexts(setter4, obj, false);
+
+// -----------------------------------------------------------------------------
+// Test setter with too many arguments without a return in all contexts.
+
+function setter5(value, foo) {
+ assertSame(obj, this);
+ assertEquals(undefined, foo);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ setterValue = value;
+}
+
+function ConstrS5() { }
+obj = Object.defineProperty(new ConstrS5(), "setterProperty", { set: setter5 });
+TestSetterInAllContexts(setter5, obj, false);
+obj = Object.create(obj);
+TestSetterInAllContexts(setter5, obj, false);
+
+// -----------------------------------------------------------------------------
+// Test setter with too many arguments with a return in all contexts.
+
+function setter6(value, foo) {
+ assertSame(obj, this);
+ assertEquals(undefined, foo);
+ accessorCallCount++;
+ forceDeopt.deopt;
+ setterValue = value;
+ return 3000000;
+}
+
+function ConstrS6() { }
+obj = Object.defineProperty(new ConstrS6(), "setterProperty", { set: setter6 });
+TestSetterInAllContexts(setter6, obj, false);
+obj = Object.create(obj);
+TestSetterInAllContexts(setter6, obj, false);
+
+// -----------------------------------------------------------------------------
+// Test setter which throws from optimized code.
+
+function setter7(value) {
+ accessorCallCount++;
+ forceDeopt.deopt;
+ if (accessorCallCount == 4) { 123 in null; }
+ setterValue = value;
+}
+
+function ConstrS7() { }
+obj = Object.defineProperty(new ConstrS7(), "setterProperty", { set: setter7 });
+TestSetterInAllContexts(setter7, obj, true);
+obj = Object.create(obj);
+TestSetterInAllContexts(setter7, obj, true);
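
A note on the forceDeopt idiom used throughout this file: every accessor loads forceDeopt.deopt, so reassigning forceDeopt to an object without that property invalidates the map assumption baked into the optimized caller. A minimal standalone version of the trick (a sketch; requires --allow-natives-syntax):

    var dep = { deopt: 0 };
    function hot() { return dep.deopt; }  // monomorphic load on dep's map
    hot();
    hot();
    %OptimizeFunctionOnNextCall(hot);
    hot();     // runs optimized, map check on dep succeeds
    dep = {};  // different map: the specialized load's assumption breaks
    hot();     // map check fails, caller deoptimizes, still returns undefined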
diff --git a/deps/v8/test/mjsunit/compiler/inline-arguments.js b/deps/v8/test/mjsunit/compiler/inline-arguments.js
index f8a247608b..572340ab6b 100644
--- a/deps/v8/test/mjsunit/compiler/inline-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/inline-arguments.js
@@ -158,6 +158,30 @@ test_toarr(toarr1);
test_toarr(toarr2);
// Test that arguments access from inlined function uses correct values.
+// TODO(mstarzinger): Tests disabled, see bug 2261
+/*
+(function () {
+ function inner(x, y) {
+ "use strict";
+ x = 10;
+ y = 20;
+ for (var i = 0; i < 1; i++) {
+ for (var j = 1; j <= arguments.length; j++) {
+ return arguments[arguments.length - j];
+ }
+ }
+ }
+
+ function outer(x, y) {
+ return inner(x, y);
+ }
+
+ %OptimizeFunctionOnNextCall(outer);
+ %OptimizeFunctionOnNextCall(inner);
+ assertEquals(2, outer(1, 2));
+})();
+
+
(function () {
function inner(x, y) {
"use strict";
@@ -180,3 +204,4 @@ test_toarr(toarr2);
%OptimizeFunctionOnNextCall(outer);
assertEquals(2, outer(1, 2));
})();
+*/
diff --git a/deps/v8/test/mjsunit/compiler/inline-construct.js b/deps/v8/test/mjsunit/compiler/inline-construct.js
index 7a3f1e44bd..fa784cfc99 100644
--- a/deps/v8/test/mjsunit/compiler/inline-construct.js
+++ b/deps/v8/test/mjsunit/compiler/inline-construct.js
@@ -29,63 +29,72 @@
// Test inlining of constructor calls.
-function TestInlinedConstructor(closure) {
+function TestInlinedConstructor(constructor, closure) {
var result;
var counter = { value:0 };
- result = closure(11, 12, counter);
- assertEquals(23, result);
+ var noDeopt = { deopt:0 };
+ var forceDeopt = { /*empty*/ };
+
+ result = closure(constructor, 11, noDeopt, counter);
+ assertEquals(11, result);
assertEquals(1, counter.value);
- result = closure(23, 19, counter);
- assertEquals(42, result);
+
+ result = closure(constructor, 23, noDeopt, counter);
+ assertEquals(23, result);
assertEquals(2, counter.value);
+
%OptimizeFunctionOnNextCall(closure);
- result = closure(1, 42, counter)
- assertEquals(43, result);
+ result = closure(constructor, 42, noDeopt, counter);
+ assertEquals(42, result);
assertEquals(3, counter.value);
- result = closure("foo", "bar", counter)
- assertEquals("foobar", result)
+
+ result = closure(constructor, 127, forceDeopt, counter);
+ assertEquals(127, result)
assertEquals(4, counter.value);
+
+ %DeoptimizeFunction(closure);
+ %ClearFunctionTypeFeedback(closure);
+ %ClearFunctionTypeFeedback(constructor);
}
-function TestInAllContexts(constructor) {
- function value_context(a, b, counter) {
- var obj = new constructor(a, b, counter);
- return obj.x;
- }
- function test_context(a, b, counter) {
- if (!new constructor(a, b, counter)) {
- assertUnreachable("should not happen");
- }
- return a + b;
- }
- function effect_context(a, b, counter) {
- new constructor(a, b, counter);
- return a + b;
+function value_context(constructor, val, deopt, counter) {
+ var obj = new constructor(val, deopt, counter);
+ return obj.x;
+}
+
+function test_context(constructor, val, deopt, counter) {
+ if (!new constructor(val, deopt, counter)) {
+ assertUnreachable("should not happen");
}
- TestInlinedConstructor(value_context);
- TestInlinedConstructor(test_context);
- TestInlinedConstructor(effect_context);
- %DeoptimizeFunction(value_context);
- %DeoptimizeFunction(test_context);
- %DeoptimizeFunction(effect_context);
- %ClearFunctionTypeFeedback(value_context);
- %ClearFunctionTypeFeedback(test_context);
- %ClearFunctionTypeFeedback(effect_context);
+ return val;
+}
+
+function effect_context(constructor, val, deopt, counter) {
+ new constructor(val, deopt, counter);
+ return val;
+}
+
+function TestInAllContexts(constructor) {
+ TestInlinedConstructor(constructor, value_context);
+ TestInlinedConstructor(constructor, test_context);
+ TestInlinedConstructor(constructor, effect_context);
}
// Test constructor returning nothing in all contexts.
-function c1(a, b, counter) {
- this.x = a + b;
+function c1(val, deopt, counter) {
+ deopt.deopt;
+ this.x = val;
counter.value++;
}
TestInAllContexts(c1);
// Test constructor returning an object in all contexts.
-function c2(a, b, counter) {
- var obj = new Object();
- obj.x = a + b;
+function c2(val, deopt, counter) {
+ var obj = {};
+ deopt.deopt;
+ obj.x = val;
counter.value++;
return obj;
}
@@ -93,8 +102,9 @@ TestInAllContexts(c2);
// Test constructor returning a primitive value in all contexts.
-function c3(a, b, counter) {
- this.x = a + b;
+function c3(val, deopt, counter) {
+ deopt.deopt;
+ this.x = val;
counter.value++;
return "not an object";
}
@@ -133,9 +143,10 @@ assertEquals("foo1", f_too_few("foo"))
// Test constructor that cannot be inlined.
-function c_unsupported_syntax(a, b, counter) {
+function c_unsupported_syntax(val, deopt, counter) {
try {
- this.x = a + b;
+ deopt.deopt;
+ this.x = val;
counter.value++;
} catch(e) {
throw new Error();
@@ -146,9 +157,10 @@ TestInAllContexts(c_unsupported_syntax);
// Regression test: Inlined constructors called as functions do not get their
// implicit receiver object set to undefined, even in strict mode.
-function c_strict(a, b, counter) {
+function c_strict(val, deopt, counter) {
"use strict";
- this.x = a + b;
+ deopt.deopt;
+ this.x = val;
counter.value++;
}
TestInAllContexts(c_strict);
diff --git a/deps/v8/test/mjsunit/compiler/inline-literals.js b/deps/v8/test/mjsunit/compiler/inline-literals.js
index f78abe82d1..1422586912 100644
--- a/deps/v8/test/mjsunit/compiler/inline-literals.js
+++ b/deps/v8/test/mjsunit/compiler/inline-literals.js
@@ -29,6 +29,26 @@
// Test that we can inline functions containing materialized literals.
+function a2(b, c) {
+ return [b, c, b + c];
+}
+
+function a1(a, b, c) {
+ return [a, a2(b, c)];
+}
+
+function TestArrayLiteral(a, b, c) {
+ var expected = [a, [b, c, b + c]];
+ var result = a1(a, b, c);
+ assertEquals(expected, result, "TestArrayLiteral");
+}
+
+TestArrayLiteral(1, 2, 3);
+TestArrayLiteral(1, 2, 3);
+%OptimizeFunctionOnNextCall(TestArrayLiteral);
+TestArrayLiteral(1, 2, 3);
+TestArrayLiteral('a', 'b', 'c');
+
function o2(b, c) {
return { 'b':b, 'c':c, 'y':b + c };
}
@@ -48,3 +68,22 @@ TestObjectLiteral(1, 2, 3);
%OptimizeFunctionOnNextCall(TestObjectLiteral);
TestObjectLiteral(1, 2, 3);
TestObjectLiteral('a', 'b', 'c');
+
+function r2(s, x, y) {
+ return s.replace(/a/, x + y);
+}
+
+function r1(s, x, y) {
+ return r2(s, x, y).replace(/b/, y + x);
+}
+
+function TestRegExpLiteral(s, x, y, expected) {
+ var result = r1(s, x, y);
+ assertEquals(expected, result, "TestRegExpLiteral");
+}
+
+TestRegExpLiteral("a-", "reg", "exp", "regexp-");
+TestRegExpLiteral("-b", "reg", "exp", "-expreg");
+%OptimizeFunctionOnNextCall(TestRegExpLiteral);
+TestRegExpLiteral("ab", "reg", "exp", "regexpexpreg");
+TestRegExpLiteral("ab", 12345, 54321, "6666666666");
diff --git a/deps/v8/test/mjsunit/compiler/optimized-closures.js b/deps/v8/test/mjsunit/compiler/optimized-closures.js
new file mode 100644
index 0000000000..eaf75f8d00
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/optimized-closures.js
@@ -0,0 +1,57 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test optimized closures.
+
+var a = new Array(100);
+
+function f() {
+ var x=0;
+ for (var i=0; i<100; i++) {
+ var g = function goo(y) {
+ function h() {
+ if (goo.arguments[0] == 23) return -42;
+ return 42;
+ }
+ return x + y + h(y);
+ }
+ g(0);
+ %OptimizeFunctionOnNextCall(g);
+ a[i] = g(i);
+ }
+}
+
+f();
+assertEquals(42, a[0]);
+assertEquals(49, a[7]);
+assertEquals(-19, a[23]);
+
+
+
+
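
The asserted values fall out of g's body, where a[i] = x + i + h(i) with x = 0 and h returning 42 unless goo.arguments[0] is 23:

    // a[0]  = 0 + 0  + 42 =  42
    // a[7]  = 0 + 7  + 42 =  49
    // a[23] = 0 + 23 - 42 = -19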
diff --git a/deps/v8/test/mjsunit/compiler/uint32.js b/deps/v8/test/mjsunit/compiler/uint32.js
new file mode 100644
index 0000000000..abed285830
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/uint32.js
@@ -0,0 +1,173 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+// Test uint32 handling in optimized frames.
+
+var K1 = 0x7fffffff;
+var K2 = 0xffffffff;
+
+var uint32_array = new Uint32Array(2);
+uint32_array[0] = K1;
+uint32_array[1] = K2;
+
+function ChangeI2T(arr, i) {
+ return uint32_array[i];
+}
+
+assertEquals(K1, ChangeI2T(uint32_array, 0));
+assertEquals(K2, ChangeI2T(uint32_array, 1));
+%OptimizeFunctionOnNextCall(ChangeI2T);
+assertEquals(K1, ChangeI2T(uint32_array, 0));
+// Loop to force inline allocation failure and a call into runtime.
+for (var i = 0; i < 80000; i++) {
+ assertEquals(K2, ChangeI2T(uint32_array, 1));
+}
+
+function SideEffect() {
+ with ({}) { } // not inlinable
+}
+
+function Deopt(obj, arr, i) {
+ var x = arr[i];
+ SideEffect(); // x will be used by HSimulate.
+ obj.x;
+ return x;
+}
+
+assertEquals(K1, Deopt({x: 0}, uint32_array, 0));
+assertEquals(K2, Deopt({x: 0}, uint32_array, 1));
+%OptimizeFunctionOnNextCall(Deopt);
+assertEquals(K2, Deopt({}, uint32_array, 1));
+
+function ChangeI2D(arr) {
+ // This addition will have a double type feedback so ChangeI2D will
+ // be generated for its operands.
+ return arr[0] + arr[1];
+}
+
+assertEquals(K1 + K2, ChangeI2D(uint32_array));
+assertEquals(K1 + K2, ChangeI2D(uint32_array));
+%OptimizeFunctionOnNextCall(ChangeI2D);
+assertEquals(K1 + K2, ChangeI2D(uint32_array));
+
+function ShrShr(val) {
+ return (val >>> 0) >>> 1;
+}
+
+assertEquals(K1, ShrShr(K2 | 0));
+assertEquals(K1, ShrShr(K2 | 0));
+%OptimizeFunctionOnNextCall(ShrShr);
+assertEquals(K1, ShrShr(K2 | 0));
+
+function SarShr(val) {
+ return val >> (-2 >>> 0);
+}
+
+var K3 = 0x80000000;
+assertEquals(-2, SarShr(K3 | 0));
+assertEquals(-2, SarShr(K3 | 0));
+%OptimizeFunctionOnNextCall(SarShr);
+assertEquals(-2, SarShr(K3 | 0));
+
+function Uint32Phi(a, b, c) {
+ var i = a ? (b >>> 0) : (c >>> 0);
+ return (i | 0);
+}
+
+var K4 = 0x80000001;
+assertEquals(K3 | 0, Uint32Phi(true, K3, K4));
+assertEquals(K4 | 0, Uint32Phi(false, K3, K4));
+assertEquals(K3 | 0, Uint32Phi(true, K3, K4));
+assertEquals(K4 | 0, Uint32Phi(false, K3, K4));
+%OptimizeFunctionOnNextCall(Uint32Phi);
+assertEquals(K3 | 0, Uint32Phi(true, K3, K4));
+assertEquals(K4 | 0, Uint32Phi(false, K3, K4));
+
+function NonUint32Phi(a, b, c) {
+ var i = a ? (b >>> 0) : c;
+ return (i | 0);
+}
+
+assertEquals(K3 | 0, NonUint32Phi(true, K3, K4));
+assertEquals(K4 | 0, NonUint32Phi(false, K3, K4));
+assertEquals(K3 | 0, NonUint32Phi(true, K3, K4));
+assertEquals(K4 | 0, NonUint32Phi(false, K3, K4));
+%OptimizeFunctionOnNextCall(NonUint32Phi);
+assertEquals(K3 | 0, NonUint32Phi(true, K3, K4));
+assertEquals(K4 | 0, NonUint32Phi(false, K3, K4));
+
+function PhiOfPhi(x) {
+ var a = (x >>> 0);
+ for (var i = 0; i < 2; i++) {
+ for (var j = 0; j < 2; j++) {
+ a = (a >>> 0);
+ }
+ }
+ return (a | 0);
+}
+
+assertEquals(1, PhiOfPhi(1));
+assertEquals(1, PhiOfPhi(1));
+%OptimizeFunctionOnNextCall(PhiOfPhi);
+assertEquals(K3 | 0, PhiOfPhi(K3));
+
+function PhiOfPhiUnsafe(x) {
+ var a = x >>> 0;
+ for (var i = 0; i < 2; i++) {
+ for (var j = 0; j < 2; j++) {
+ a = (a >>> 0);
+ }
+ }
+ return a + a;
+}
+
+assertEquals(2, PhiOfPhiUnsafe(1));
+assertEquals(2, PhiOfPhiUnsafe(1));
+%OptimizeFunctionOnNextCall(PhiOfPhiUnsafe);
+assertEquals(2 * K3, PhiOfPhiUnsafe(K3));
+
+var old_array = new Array(1000);
+
+for (var i = 0; i < old_array.length; i++) old_array[i] = null;
+
+// Force promotion.
+gc();
+gc();
+
+function FillOldArrayWithHeapNumbers(N) {
+ for (var i = 0; i < N; i++) {
+ old_array[i] = uint32_array[1];
+ }
+}
+
+FillOldArrayWithHeapNumbers(1);
+FillOldArrayWithHeapNumbers(1);
+%OptimizeFunctionOnNextCall(FillOldArrayWithHeapNumbers);
+FillOldArrayWithHeapNumbers(old_array.length);
+gc();
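
The shift tests rely on two corners of the spec worth spelling out: val >>> 0 produces a uint32 that may exceed the int32 range, and shift counts are masked to five bits:

    // ShrShr(K2 | 0): (-1 >>> 0) is 4294967295, and 4294967295 >>> 1 is
    //   2147483647, i.e. K1.
    // SarShr(K3 | 0): the count (-2 >>> 0) = 4294967294 is masked to
    //   4294967294 & 31 = 30, and -0x80000000 >> 30 = -2.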
diff --git a/deps/v8/test/mjsunit/count-based-osr.js b/deps/v8/test/mjsunit/count-based-osr.js
index 125c4e26d5..fbff91e4a2 100644
--- a/deps/v8/test/mjsunit/count-based-osr.js
+++ b/deps/v8/test/mjsunit/count-based-osr.js
@@ -25,7 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --count-based-interrupts --interrupt-budget=10 --weighted-back-edges --allow-natives-syntax
+// Flags: --count-based-interrupts --interrupt-budget=10 --weighted-back-edges
+// Flags: --allow-natives-syntax --noparallel-recompilation
// Test that OSR works properly when using count-based interrupting/profiling.
diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js
index 3e153aba7f..5aaa3bb94e 100644
--- a/deps/v8/test/mjsunit/date.js
+++ b/deps/v8/test/mjsunit/date.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
// Test date construction from other dates.
var date0 = new Date(1111);
var date1 = new Date(date0);
@@ -319,3 +321,23 @@ for (var i = 0; i < 24; i++) {
assertEquals(70674603500 - ms, Date.parse(string), string);
}
}
+
+assertThrows('Date.prototype.setTime.call("", 1);', TypeError);
+assertThrows('Date.prototype.setYear.call("", 1);', TypeError);
+assertThrows('Date.prototype.setHours.call("", 1, 2, 3, 4);', TypeError);
+assertThrows('Date.prototype.getDate.call("");', TypeError);
+assertThrows('Date.prototype.getUTCDate.call("");', TypeError);
+
+var date = new Date();
+date.getTime();
+date.getTime();
+%OptimizeFunctionOnNextCall(Date.prototype.getTime);
+assertThrows(function() { Date.prototype.getTime.call(""); }, TypeError);
+assertTrue(%GetOptimizationStatus(Date.prototype.getTime) != 1);
+
+date.getYear();
+date.getYear();
+%OptimizeFunctionOnNextCall(Date.prototype.getYear);
+assertThrows(function() { Date.prototype.getYear.call(""); }, TypeError);
+opt_status = %GetOptimizationStatus(Date.prototype.getYear);
+assertTrue(opt_status != 1); \ No newline at end of file
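
The pattern under test: optimized code for a Date builtin must keep its receiver type check, throw a TypeError for a non-Date receiver, and survive the resulting deopt. A condensed sketch of the check (assumes --allow-natives-syntax, which this file already requires):

    function throwsTypeErrorWhenOptimized(method) {
      var d = new Date();
      method.call(d);  // warm up with a valid receiver
      method.call(d);
      %OptimizeFunctionOnNextCall(method);
      try {
        method.call('');  // non-Date receiver must throw, not crash
      } catch (e) {
        return e instanceof TypeError;
      }
      return false;
    }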
diff --git a/deps/v8/test/mjsunit/debug-break-inline.js b/deps/v8/test/mjsunit/debug-break-inline.js
index 4418fa8d1b..464cb73637 100644
--- a/deps/v8/test/mjsunit/debug-break-inline.js
+++ b/deps/v8/test/mjsunit/debug-break-inline.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --noparallel-recompilation
// This test tests that deoptimization due to debug breaks works for
// inlined functions where the full-code is generated before the
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
index efbb2cc8ca..8d91b973ce 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
@@ -25,7 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --expose-gc --allow-natives-syntax --inline-construct
+// Flags: --expose-debug-as debug --expose-gc --allow-natives-syntax
+// Flags: --inline-construct
+
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
@@ -43,13 +45,17 @@ var input = [
];
var expected = [
- { locals: {a0: 1.01, b0: 2.02}, args: { names: ["i", "x0", "y0"], values: [0, 3.03, 4.04] } },
- { locals: {a1: 3.03, b1: 4.04}, args: { names: ["i", "x1", "y1"], values: [1, 5.05, 6.06] } },
- { locals: {a2: 5.05, b2: 6.06}, args: { names: ["i"], values: [2] } },
- { locals: {a3: 7.07, b3: 8.08}, args: { names: ["i", "x3", "y3", "z3"],
- values: [3, 9.09, 10.10, undefined] }
- },
- { locals: {a4: 9.09, b4: 10.10}, args: { names: ["i", "x4", "y4"], values: [4, 11.11, 12.12] } }
+ { locals: {a0: 1.01, b0: 2.02},
+ args: { names: ["i", "x0", "y0"], values: [0, 3.03, 4.04] } },
+ { locals: {a1: 3.03, b1: 4.04},
+ args: { names: ["i", "x1", "y1"], values: [1, 5.05, 6.06] } },
+ { locals: {a2: 5.05, b2: 6.06},
+ args: { names: ["i"], values: [2] } },
+ { locals: {a3: 7.07, b3: 8.08},
+ args: { names: ["i", "x3", "y3", "z3"],
+ values: [3, 9.09, 10.10, undefined] } },
+ { locals: {a4: 9.09, b4: 10.10},
+ args: { names: ["i", "x4", "y4"], values: [4, 11.11, 12.12] } }
];
function arraySum(arr) {
@@ -78,7 +84,8 @@ function listener(event, exec_state, event_data, data) {
// All frames except the bottom one have expected arguments.
for (var j = 0; j < expected_args.names.length; j++) {
assertEquals(expected_args.names[j], frame.argumentName(j));
- assertEquals(expected_args.values[j], frame.argumentValue(j).value());
+ assertEquals(expected_args.values[j],
+ frame.argumentValue(j).value());
}
// All frames except the bottom one have two scopes.
@@ -87,13 +94,15 @@ function listener(event, exec_state, event_data, data) {
assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
Object.keys(expected_locals).forEach(function (name) {
- assertEquals(expected_locals[name], frame.scope(0).scopeObject().value()[name]);
+ assertEquals(expected_locals[name],
+ frame.scope(0).scopeObject().value()[name]);
});
for (var j = 0; j < expected_args.names.length; j++) {
var arg_name = expected_args.names[j];
var arg_value = expected_args.values[j];
- assertEquals(arg_value, frame.scope(0).scopeObject().value()[arg_name]);
+ assertEquals(arg_value,
+ frame.scope(0).scopeObject().value()[arg_name]);
}
// Evaluate in the inlined frame.
@@ -114,7 +123,8 @@ function listener(event, exec_state, event_data, data) {
map(function (k) { return expected_locals[k]; }));
assertEquals(expected_locals_sum + expected_args_sum,
- frame.evaluate(Object.keys(expected_locals).join('+') + ' + ' +
+ frame.evaluate(Object.keys(expected_locals).join('+') +
+ ' + ' +
expected_args.names.join('+')).value());
var arguments_sum = expected_args.names.map(function(_, idx) {
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
index 9c56a12be2..f66291288e 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
@@ -25,7 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --expose-gc --allow-natives-syntax --inline-construct
+// Flags: --expose-debug-as debug --expose-gc --allow-natives-syntax
+// Flags: --inline-construct
+
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
@@ -35,11 +37,16 @@ var exception = false;
var testingConstructCall = false;
var expected = [
- { locals: {a0: 1, b0: 2}, args: { names: ["i", "x0", "y0"], values: [0, 3, 4] } },
- { locals: {a1: 3, b1: 4}, args: { names: ["i", "x1", "y1"], values: [1, 5, 6] } },
- { locals: {a2: 5, b2: 6}, args: { names: ["i"], values: [2] } },
- { locals: {a3: 7, b3: 8}, args: { names: ["i", "x3", "y3", "z3"], values: [3, 9, 10, undefined] } },
- { locals: {a4: 9, b4: 10}, args: { names: ["i", "x4", "y4"], values: [4, 11, 12] } }
+ { locals: {a0: 1, b0: 2},
+ args: { names: ["i", "x0", "y0"], values: [0, 3, 4] } },
+ { locals: {a1: 3, b1: 4},
+ args: { names: ["i", "x1", "y1"], values: [1, 5, 6] } },
+ { locals: {a2: 5, b2: 6},
+ args: { names: ["i"], values: [2] } },
+ { locals: {a3: 7, b3: 8},
+ args: { names: ["i", "x3", "y3", "z3"], values: [3, 9, 10, undefined] } },
+ { locals: {a4: 9, b4: 10},
+ args: { names: ["i", "x4", "y4"], values: [4, 11, 12] } }
];
function arraySum(arr) {
@@ -68,7 +75,8 @@ function listener(event, exec_state, event_data, data) {
// All frames except the bottom one have expected arguments.
for (var j = 0; j < expected_args.names.length; j++) {
assertEquals(expected_args.names[j], frame.argumentName(j));
- assertEquals(expected_args.values[j], frame.argumentValue(j).value());
+ assertEquals(expected_args.values[j],
+ frame.argumentValue(j).value());
}
// All frames except the bottom one have two scopes.
@@ -77,13 +85,15 @@ function listener(event, exec_state, event_data, data) {
assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
Object.keys(expected_locals).forEach(function (name) {
- assertEquals(expected_locals[name], frame.scope(0).scopeObject().value()[name]);
+ assertEquals(expected_locals[name],
+ frame.scope(0).scopeObject().value()[name]);
});
for (var j = 0; j < expected_args.names.length; j++) {
var arg_name = expected_args.names[j];
var arg_value = expected_args.values[j];
- assertEquals(arg_value, frame.scope(0).scopeObject().value()[arg_name]);
+ assertEquals(arg_value,
+ frame.scope(0).scopeObject().value()[arg_name]);
}
// Evaluate in the inlined frame.
@@ -104,7 +114,8 @@ function listener(event, exec_state, event_data, data) {
map(function (k) { return expected_locals[k]; }));
assertEquals(expected_locals_sum + expected_args_sum,
- frame.evaluate(Object.keys(expected_locals).join('+') + ' + ' +
+ frame.evaluate(Object.keys(expected_locals).join('+') +
+ ' + ' +
expected_args.names.join('+')).value());
var arguments_sum = expected_args.names.map(function(_, idx) {
diff --git a/deps/v8/test/mjsunit/debug-liveedit-double-call.js b/deps/v8/test/mjsunit/debug-liveedit-double-call.js
new file mode 100644
index 0000000000..1df806ab75
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-liveedit-double-call.js
@@ -0,0 +1,142 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+Debug = debug.Debug
+
+
+function TestCase(test_scenario, expected_output) {
+ // Global variable, accessed from eval'd script.
+ test_output = "";
+
+ var script_text_generator = (function() {
+ var variables = { a: 1, b: 1, c: 1, d: 1, e: 1, f: 1 };
+
+ return {
+ get: function() {
+ return "(function() {\n " +
+ " function A() {\n " +
+ " test_output += 'a' + " + variables.a + ";\n " +
+ " test_output += '=';\n " +
+ " debugger;\n " +
+ " return 'Capybara';\n " +
+ " }\n " +
+ " function B(p1, p2) {\n " +
+ " test_output += 'b' + " + variables.b + ";\n " +
+ " return A();\n " +
+ " }\n " +
+ " function C() {\n " +
+ " test_output += 'c' + " + variables.c + ";\n " +
+ " // Function call with argument adaptor is intentional.\n " +
+ " return B();\n " +
+ " }\n " +
+ " function D() {\n " +
+ " test_output += 'd' + " + variables.d + ";\n " +
+ " // Function call with argument adaptor is intentional.\n " +
+ " return C(1, 2);\n " +
+ " }\n " +
+ " function E() {\n " +
+ " test_output += 'e' + " + variables.e + ";\n " +
+ " return D();\n " +
+ " }\n " +
+ " function F() {\n " +
+ " test_output += 'f' + " + variables.f + ";\n " +
+ " return E();\n " +
+ " }\n " +
+ " return F();\n " +
+ "})\n";
+ },
+ change: function(var_name) {
+ variables[var_name]++;
+ }
+ };
+ })();
+
+ var test_fun = eval(script_text_generator.get());
+
+ var script = Debug.findScript(test_fun);
+
+ var scenario_pos = 0;
+
+ function DebuggerStatementHandler() {
+ while (true) {
+ assertTrue(scenario_pos < test_scenario.length);
+ var change_var = test_scenario[scenario_pos++];
+ if (change_var == '=') {
+ // Continue.
+ return;
+ }
+ script_text_generator.change(change_var);
+ try {
+ Debug.LiveEdit.SetScriptSource(script, script_text_generator.get(),
+ false, []);
+ } catch (e) {
+ print("LiveEdit exception: " + e);
+ throw e;
+ }
+ }
+ }
+
+ var saved_exception = null;
+
+ function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ try {
+ DebuggerStatementHandler();
+ } catch (e) {
+ saved_exception = e;
+ }
+ } else {
+ print("Other: " + event);
+ }
+ }
+
+ Debug.setListener(listener);
+ assertEquals("Capybara", test_fun());
+ Debug.setListener(null);
+
+ if (saved_exception) {
+ print("Exception: " + saved_exception);
+ assertUnreachable();
+ }
+
+ print(test_output);
+
+ assertEquals(expected_output, test_output);
+}
+
+TestCase(['='], "f1e1d1c1b1a1=");
+
+TestCase(['c', '=', '='], "f1e1d1c1b1a1=c2b1a1=");
+
+TestCase(['b', 'c', 'd', 'e', '=', '='], "f1e1d1c1b1a1=e2d2c2b2a1=");
+
+TestCase(['b', 'c', '=', 'b', 'c', 'd', 'e', '=', '='], "f1e1d1c1b1a1=c2b2a1=e2d2c3b3a1=");
+
+TestCase(['e', 'f', '=', '='], "f1e1d1c1b1a1=f2e2d1c1b1a1=");
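
The expected strings decode one pair per executed function (its letter plus the version of its source), with '=' logged by A just before its debugger statement. For example:

    // TestCase(['c', '=', '=']) -> 'f1e1d1c1b1a1=c2b1a1='
    //   f1 e1 d1 c1 b1 a1 =   first run, all functions at version 1
    //   c2 b1 a1 =            editing C re-runs the frames from C down;
    //                         only C's source is now at version 2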
diff --git a/deps/v8/test/mjsunit/debug-liveedit-restart-frame.js b/deps/v8/test/mjsunit/debug-liveedit-restart-frame.js
new file mode 100644
index 0000000000..d978a9709f
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-liveedit-restart-frame.js
@@ -0,0 +1,153 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+Debug = debug.Debug
+
+function FindCallFrame(exec_state, frame_code) {
+ var number = Number(frame_code);
+ if (number >= 0) {
+ return exec_state.frame(number);
+ } else {
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ var frame = exec_state.frame(i);
+ var func_mirror = frame.func();
+ if (frame_code == func_mirror.name()) {
+ return frame;
+ }
+ }
+ }
+ throw new Error("Failed to find function name " + frame_code);
+}
+
+function TestCase(test_scenario, expected_output) {
+ // Global variable, accessed from eval'd script.
+ test_output = "";
+
+ function TestCode() {
+ function A() {
+ // Extra stack variable. To make function not slim.
+ // Restarter doesn't work on slim function when stopped on 'debugger'
+ // statement. (There is no padding for 'debugger' statement).
+ var o = {};
+ test_output += 'A';
+ test_output += '=';
+ debugger;
+ return 'Capybara';
+ }
+ function B(p1, p2) {
+ test_output += 'B';
+ return A();
+ }
+ function C() {
+ test_output += 'C';
+ // Function call with argument adaptor is intentional.
+ return B();
+ }
+ function D() {
+ test_output += 'D';
+ // Function call with argument adaptor is intentional.
+ return C(1, 2);
+ }
+ function E() {
+ test_output += 'E';
+ return D();
+ }
+ function F() {
+ test_output += 'F';
+ return E();
+ }
+ return F();
+ }
+
+ var scenario_pos = 0;
+
+ function DebuggerStatementHandler(exec_state) {
+ while (true) {
+ assertTrue(scenario_pos < test_scenario.length);
+ var change_code = test_scenario[scenario_pos++];
+ if (change_code == '=') {
+ // Continue.
+ return;
+ }
+ var frame = FindCallFrame(exec_state, change_code);
+ // Throws if fails.
+ Debug.LiveEdit.RestartFrame(frame);
+ }
+ }
+
+ var saved_exception = null;
+
+ function listener(event, exec_state, event_data, data) {
+ if (saved_exception != null) {
+ return;
+ }
+ if (event == Debug.DebugEvent.Break) {
+ try {
+ DebuggerStatementHandler(exec_state);
+ } catch (e) {
+ saved_exception = e;
+ }
+ } else {
+ print("Other: " + event);
+ }
+ }
+
+ Debug.setListener(listener);
+ assertEquals("Capybara", TestCode());
+ Debug.setListener(null);
+
+ if (saved_exception) {
+ print("Exception: " + saved_exception);
+ print("Stack: " + saved_exception.stack);
+ assertUnreachable();
+ }
+
+ print(test_output);
+
+ assertEquals(expected_output, test_output);
+}
+
+TestCase('0==', "FEDCBA=A=");
+TestCase('1==', "FEDCBA=BA=");
+TestCase('2==', "FEDCBA=CBA=");
+TestCase('3==', "FEDCBA=DCBA=");
+TestCase('4==', "FEDCBA=EDCBA=");
+TestCase('5==', "FEDCBA=FEDCBA=");
+
+TestCase('=', "FEDCBA=");
+
+TestCase('C==', "FEDCBA=CBA=");
+
+TestCase('B=C=A=D==', "FEDCBA=BA=CBA=A=DCBA=");
+
+// Successive restarts don't work now and require additional fix.
+//TestCase('BCDE==', "FEDCBA=EDCBA=");
+//TestCase('BC=BCDE==', "FEDCBA=CBA=EDCBA=");
+//TestCase('EF==', "FEDCBA=FEDCBA=");
diff --git a/deps/v8/test/mjsunit/debug-script-breakpoints-closure.js b/deps/v8/test/mjsunit/debug-script-breakpoints-closure.js
new file mode 100644
index 0000000000..7c89718f08
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-script-breakpoints-closure.js
@@ -0,0 +1,67 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// Simple debug event handler which just counts the number of break points hit.
+var break_point_hit_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ break_point_hit_count++;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function makeClosure() {
+ var x;
+ return function() {
+ return x; // Breakpoint line ( #47 )
+ };
+}
+
+// Create closure before break point is set.
+var closure = makeClosure();
+
+// The debugger triggers re-compilation.
+assertEquals(0, Debug.scriptBreakPoints().length);
+var scr = Debug.findScript(makeClosure);
+var sbp = Debug.setScriptBreakPointById(scr.id, 47);
+assertEquals(1, Debug.scriptBreakPoints().length);
+
+// Ensure the closure actually triggers a break point hit.
+closure();
+assertEquals(1, break_point_hit_count);
+
+// Remove script break point.
+assertEquals(1, Debug.scriptBreakPoints().length);
+Debug.clearBreakPoint(sbp);
+assertEquals(0, Debug.scriptBreakPoints().length);
diff --git a/deps/v8/test/mjsunit/debug-script-breakpoints-nested.js b/deps/v8/test/mjsunit/debug-script-breakpoints-nested.js
new file mode 100644
index 0000000000..ce25c17814
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-script-breakpoints-nested.js
@@ -0,0 +1,82 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// Simple debug event handler which just counts the number of break points hit.
+var break_point_hit_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ break_point_hit_count++;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+eval(
+ "var inner;\n" +
+ "function outer() {\n" + // Non-trivial outer closure.
+ " var x = 5;\n" +
+ " function a() {\n" +
+ " var foo = 0, y = 7;\n" +
+ " function b() {\n" +
+ " var bar = 0, baz = 0, z = 11;\n" +
+ " function c() {\n" +
+ " return x + y + z;\n" + // Breakpoint line ( #8 )
+ " }\n" +
+ " inner = c;\n" +
+ " return c();\n" +
+ " }\n" +
+ " return b();\n" +
+ " }\n" +
+ " return a();\n" +
+ "}"
+);
+
+var script = Debug.findScript(outer);
+
+// The debugger triggers compilation of inner closures.
+assertEquals(0, Debug.scriptBreakPoints().length);
+var sbp = Debug.setScriptBreakPointById(script.id, 8);
+assertEquals(1, Debug.scriptBreakPoints().length);
+
+// The compiled outer closure should behave correctly.
+assertEquals(23, outer());
+assertEquals(1, break_point_hit_count);
+
+// The compiled inner closure should behave correctly.
+assertEquals(23, inner());
+assertEquals(2, break_point_hit_count);
+
+// Remove script break point.
+assertEquals(1, Debug.scriptBreakPoints().length);
+Debug.clearBreakPoint(sbp);
+assertEquals(0, Debug.scriptBreakPoints().length);
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index 9767888f7c..d7ffb56958 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --expose-gc
+// Flags: --expose-debug-as debug --expose-gc --noparallel-recompilation
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index 508a6b3cee..b74a212437 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -143,7 +143,7 @@ assertKind(elements_kind.external_int, new Int32Array(0xF));
assertKind(elements_kind.external_unsigned_int, new Uint32Array(23));
assertKind(elements_kind.external_float, new Float32Array(7));
assertKind(elements_kind.external_double, new Float64Array(0));
-assertKind(elements_kind.external_pixel, new PixelArray(512));
+assertKind(elements_kind.external_pixel, new Uint8ClampedArray(512));
// Crankshaft support for smi-only array elements.
function monomorphic(array) {
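
For reference, a minimal sketch of the store semantics of Uint8ClampedArray, which
replaces the non-standard PixelArray here (behavior per the typed-array spec, not
specific to this patch):

var c = new Uint8ClampedArray(2);
c[0] = -5;    // clamped to 0
c[1] = 300;   // clamped to 255
var u = new Uint8Array(1);
u[0] = 300;   // wraps modulo 256 to 44 -- no clamping
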
diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js
index 9ffb67ecf0..5fb3889c6e 100644
--- a/deps/v8/test/mjsunit/elements-transition-hoisting.js
+++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --noparallel-recompilation
// Ensure that ElementsKind transitions in various situations are hoisted (or
// not hoisted) correctly, don't change the semantics of programs, and don't trigger
diff --git a/deps/v8/test/mjsunit/eval-stack-trace.js b/deps/v8/test/mjsunit/eval-stack-trace.js
new file mode 100644
index 0000000000..723d522c78
--- /dev/null
+++ b/deps/v8/test/mjsunit/eval-stack-trace.js
@@ -0,0 +1,203 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Return the stack frames of an Error object.
+Error.prototype.getFrames = function() {
+ Error.prepareStackTrace = function(error, frames) {
+ return frames;
+ }
+ var frames = this.stack;
+ Error.prepareStackTrace = undefined;
+ return frames;
+}
+
+String.prototype.contains = function(pattern) {
+ return this.indexOf(pattern) > -1;
+}
+
+// Check that a given method returns the expected value
+// for every frame.
+Array.prototype.verifyEquals = function(frames, func_name) {
+ this.forEach(
+ function(element, index) {
+ var frame = frames[index];
+ if (element === null) return;
+ assertEquals(element, (frame[func_name])());
+ }
+ );
+}
+
+// Check that a given method's return value contains the
+// expected pattern for every frame.
+Array.prototype.verifyContains = function(frames, func_name) {
+ this.forEach(
+ function(element, index) {
+ var frame = frames[index];
+ if (element === null) return;
+ assertTrue((frame[func_name])().contains(element));
+ }
+ );
+}
+
+// Check for each frame whether a given method returns undefined,
+// matching the expected pattern of booleans.
+Array.prototype.verifyUndefined = function(frames, func_name) {
+ this.forEach(
+ function(element, index) {
+ var frame = frames[index];
+ if (element === null) return;
+ assertEquals(element, (frame[func_name])() === undefined);
+ }
+ );
+}
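
These helpers use V8's structured stack-trace API: while Error.prepareStackTrace is
installed, reading .stack hands the callback an array of CallSite objects instead of
a formatted string. A minimal sketch of that contract (V8-specific; the CallSite
methods are the ones exercised below):

Error.prepareStackTrace = function(error, frames) {
  return frames;                      // frames: array of CallSite objects
};
var top = new Error().stack[0];       // topmost CallSite
top.getFunctionName();                // null for top-level code
top.getLineNumber();                  // 1-based line within the script
Error.prepareStackTrace = undefined;  // restore default formatting
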
+
+
+// Simple eval.
+var code1 = "function f() { \n" +
+ " throw new Error(3); \n" + // Line 2
+ "} \n" +
+ "f(); \n"; // Line 4
+
+function g() {
+ eval(code1);
+}
+
+try {
+ g();
+} catch (e) {
+ // We expect something like
+ // f (eval at g (eval-stack.js:87:8), <anonymous>:2:9)
+ // eval (eval at g (eval-stack.js:87:8), <anonymous>:4:1)
+ // g (eval-stack.js:87:3)
+ // eval-stack.js:94:3
+ var frames = e.getFrames();
+ assertEquals(4, frames.length);
+ ["f", "eval", "g"]
+ .verifyEquals(frames, "getFunctionName");
+ [2, 4]
+ .verifyEquals(frames, "getLineNumber");
+ ["<anonymous>:2:", "<anonymous>:4:"]
+ .verifyContains(frames, "toString");
+ [true, true, false, false]
+ .verifyUndefined(frames, "getFileName");
+ ["eval at g", "eval at g"]
+ .verifyContains(frames, "getEvalOrigin");
+}
+
+
+// Nested eval.
+var code2 = "function h() { \n" +
+ " // Empty \n" +
+ " eval(code1); \n" + // Line 3
+ "} \n" +
+ "h(); \n"; // Line 5
+
+try {
+ eval(code2);
+} catch (e) {
+ // We expect something like
+ // f (eval at h (eval at <anonymous> (eval-stack.js:116:8)),
+ // <anonymous>:2:9)
+ // eval (eval at h (eval at <anonymous> (eval-stack.js:116:8)),
+ // <anonymous>:4:1)
+ // h (eval at <anonymous> (eval-stack.js:116:8), <anonymous>:3:3)
+ // eval (eval at <anonymous> (eval-stack.js:116:8), <anonymous>:5:1)
+ // eval-stack.js:116:3
+ var frames = e.getFrames();
+ assertEquals(5, frames.length);
+ ["f", "eval", "h", "eval"]
+ .verifyEquals(frames, "getFunctionName");
+ [2, 4, 3, 5]
+ .verifyEquals(frames, "getLineNumber");
+ ["<anonymous>:2:", "<anonymous>:4:", "<anonymous>:3:", "<anonymous>:5:"]
+ .verifyContains(frames, "toString");
+ [true, true, true, true, false]
+ .verifyUndefined(frames, "getFileName");
+ ["eval at h (eval at <anonymous> (",
+ "eval at h (eval at <anonymous> (",
+ "eval at <anonymous> (",
+ "eval at <anonymous> ("]
+ .verifyContains(frames, "getEvalOrigin");
+}
+
+
+// Nested eval calling through non-eval defined function.
+var code3 = "function h() { \n" +
+ " // Empty \n" +
+ " g(); \n" + // Line 3
+ "} \n" +
+ "h(); \n"; // Line 5
+
+try {
+ eval(code3);
+} catch (e) {
+ // We expect something like
+ // f (eval at g (test.js:83:8), <anonymous>:2:9)
+ // eval (eval at g (test.js:83:8), <anonymous>:4:1)
+ // g (test.js:83:3)
+ // h (eval at <anonymous> (test.js:149:8), <anonymous>:3:3)
+ // eval (eval at <anonymous> (test.js:149:8), <anonymous>:5:1)
+ // test.js:149:3
+ var frames = e.getFrames();
+ assertEquals(6, frames.length);
+ ["f", "eval", "g", "h", "eval"]
+ .verifyEquals(frames, "getFunctionName");
+ [2, 4, null, 3, 5]
+ .verifyEquals(frames, "getLineNumber");
+ ["<anonymous>:2:", "<anonymous>:4:", null, "<anonymous>:3:", "<anonymous>:5:"]
+ .verifyContains(frames, "toString");
+ [true, true, false, true, true, false]
+ .verifyUndefined(frames, "getFileName");
+ ["eval at g (",
+ "eval at g (",
+ null,
+ "eval at <anonymous> (",
+ "eval at <anonymous> ("]
+ .verifyContains(frames, "getEvalOrigin");
+}
+
+
+// Calling function defined in eval.
+eval("function f() { \n" +
+ " throw new Error(3); \n" +
+ "} \n");
+
+try {
+ f();
+} catch (e) {
+ // We expect something like
+ // f (eval at <anonymous> (test.js:182:40), <anonymous>:2:9)
+ // test.js:186:3
+ var frames = e.getFrames();
+ assertEquals(2, frames.length);
+ ["f"].verifyEquals(frames, "getFunctionName");
+ [2].verifyEquals(frames, "getLineNumber");
+ ["<anonymous>:2:"].verifyContains(frames, "toString");
+ [true, false].verifyUndefined(frames, "getFileName");
+ ["eval at <anonymous> ("].verifyContains(frames, "getEvalOrigin");
+}
+
diff --git a/deps/v8/test/mjsunit/external-array.js b/deps/v8/test/mjsunit/external-array.js
index d02922006a..85a8cc5847 100644
--- a/deps/v8/test/mjsunit/external-array.js
+++ b/deps/v8/test/mjsunit/external-array.js
@@ -27,6 +27,12 @@
// Flags: --allow-natives-syntax --expose-gc
+// Helper
+function assertInstance(o, f) {
+ assertSame(o.constructor, f);
+ assertInstanceof(o, f);
+}
+
// This is a regression test for overlapping key and value registers.
function f(a) {
a[0] = 0;
@@ -51,49 +57,58 @@ assertThrows(abfunc1);
// Test derivation from an ArrayBuffer
var ab = new ArrayBuffer(12);
+assertInstance(ab, ArrayBuffer);
var derived_uint8 = new Uint8Array(ab);
+assertInstance(derived_uint8, Uint8Array);
assertSame(ab, derived_uint8.buffer);
assertEquals(12, derived_uint8.length);
assertEquals(12, derived_uint8.byteLength);
assertEquals(0, derived_uint8.byteOffset);
assertEquals(1, derived_uint8.BYTES_PER_ELEMENT);
var derived_uint8_2 = new Uint8Array(ab,7);
+assertInstance(derived_uint8_2, Uint8Array);
assertSame(ab, derived_uint8_2.buffer);
assertEquals(5, derived_uint8_2.length);
assertEquals(5, derived_uint8_2.byteLength);
assertEquals(7, derived_uint8_2.byteOffset);
assertEquals(1, derived_uint8_2.BYTES_PER_ELEMENT);
var derived_int16 = new Int16Array(ab);
+assertInstance(derived_int16, Int16Array);
assertSame(ab, derived_int16.buffer);
assertEquals(6, derived_int16.length);
assertEquals(12, derived_int16.byteLength);
assertEquals(0, derived_int16.byteOffset);
assertEquals(2, derived_int16.BYTES_PER_ELEMENT);
var derived_int16_2 = new Int16Array(ab,6);
+assertInstance(derived_int16_2, Int16Array);
assertSame(ab, derived_int16_2.buffer);
assertEquals(3, derived_int16_2.length);
assertEquals(6, derived_int16_2.byteLength);
assertEquals(6, derived_int16_2.byteOffset);
assertEquals(2, derived_int16_2.BYTES_PER_ELEMENT);
var derived_uint32 = new Uint32Array(ab);
+assertInstance(derived_uint32, Uint32Array);
assertSame(ab, derived_uint32.buffer);
assertEquals(3, derived_uint32.length);
assertEquals(12, derived_uint32.byteLength);
assertEquals(0, derived_uint32.byteOffset);
assertEquals(4, derived_uint32.BYTES_PER_ELEMENT);
var derived_uint32_2 = new Uint32Array(ab,4);
+assertInstance(derived_uint32_2, Uint32Array);
assertSame(ab, derived_uint32_2.buffer);
assertEquals(2, derived_uint32_2.length);
assertEquals(8, derived_uint32_2.byteLength);
assertEquals(4, derived_uint32_2.byteOffset);
assertEquals(4, derived_uint32_2.BYTES_PER_ELEMENT);
var derived_uint32_3 = new Uint32Array(ab,4,1);
+assertInstance(derived_uint32_3, Uint32Array);
assertSame(ab, derived_uint32_3.buffer);
assertEquals(1, derived_uint32_3.length);
assertEquals(4, derived_uint32_3.byteLength);
assertEquals(4, derived_uint32_3.byteOffset);
assertEquals(4, derived_uint32_3.BYTES_PER_ELEMENT);
var derived_float64 = new Float64Array(ab,0,1);
+assertInstance(derived_float64, Float64Array);
assertSame(ab, derived_float64.buffer);
assertEquals(1, derived_float64.length);
assertEquals(8, derived_float64.byteLength);
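
All of the length/byteLength assertions above follow from one relation: for a view
constructed as new T(buffer, byteOffset), the length in elements is
(buffer.byteLength - byteOffset) / T.BYTES_PER_ELEMENT. For instance:

var buf = new ArrayBuffer(12);
var view = new Int16Array(buf, 6);
// (12 - 6) / 2 === 3
assertEquals(3, view.length);
assertEquals(6, view.byteLength);
assertEquals(6, view.byteOffset);
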
@@ -144,6 +159,7 @@ a = new Float64Array(7);
assertSame(a.buffer, (new Uint16Array(a.buffer)).buffer);
assertSame(a.buffer, (new Float32Array(a.buffer,4)).buffer);
assertSame(a.buffer, (new Int8Array(a.buffer,3,51)).buffer);
+assertInstance(a.buffer, ArrayBuffer);
// Test the correct behavior of the |BYTES_PER_ELEMENT| property (which is
// "constant", but not read-only).
@@ -198,7 +214,7 @@ assertEquals(4, array_with_length_from_non_number.length);
// Test loads and stores.
types = [Array, Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array,
- Uint32Array, PixelArray, Float32Array, Float64Array];
+ Uint32Array, Uint8ClampedArray, Float32Array, Float64Array];
test_result_nan = [NaN, 0, 0, 0, 0, 0, 0, 0, NaN, NaN];
test_result_low_int = [-1, -1, 255, -1, 65535, -1, 0xFFFFFFFF, 0, -1, -1];
@@ -412,22 +428,289 @@ assertTrue(isNaN(float64_array[0]));
// Check handling of 0-sized buffers and arrays.
-
ab = new ArrayBuffer(0);
+assertInstance(ab, ArrayBuffer);
assertEquals(0, ab.byteLength);
a = new Int8Array(ab);
+assertInstance(a, Int8Array);
assertEquals(0, a.byteLength);
assertEquals(0, a.length);
a[0] = 1;
-assertEquals(undefined, a[0])
+assertEquals(undefined, a[0]);
ab = new ArrayBuffer(16);
+assertInstance(ab, ArrayBuffer);
a = new Float32Array(ab,4,0);
+assertInstance(a, Float32Array);
assertEquals(0, a.byteLength);
assertEquals(0, a.length);
a[0] = 1;
-assertEquals(undefined, a[0])
+assertEquals(undefined, a[0]);
a = new Uint16Array(0);
+assertInstance(a, Uint16Array);
assertEquals(0, a.byteLength);
assertEquals(0, a.length);
a[0] = 1;
-assertEquals(undefined, a[0])
+assertEquals(undefined, a[0]);
+
+
+// Check construction from arrays.
+a = new Uint32Array([]);
+assertInstance(a, Uint32Array);
+assertEquals(0, a.length);
+assertEquals(0, a.byteLength);
+assertEquals(0, a.buffer.byteLength);
+assertEquals(4, a.BYTES_PER_ELEMENT);
+assertInstance(a.buffer, ArrayBuffer);
+a = new Uint16Array([1,2,3]);
+assertInstance(a, Uint16Array);
+assertEquals(3, a.length);
+assertEquals(6, a.byteLength);
+assertEquals(6, a.buffer.byteLength);
+assertEquals(2, a.BYTES_PER_ELEMENT);
+assertEquals(1, a[0]);
+assertEquals(3, a[2]);
+assertInstance(a.buffer, ArrayBuffer);
+a = new Uint32Array(a);
+assertInstance(a, Uint32Array);
+assertEquals(3, a.length);
+assertEquals(12, a.byteLength);
+assertEquals(12, a.buffer.byteLength);
+assertEquals(4, a.BYTES_PER_ELEMENT);
+assertEquals(1, a[0]);
+assertEquals(3, a[2]);
+assertInstance(a.buffer, ArrayBuffer);
+
+// Check subarrays.
+a = new Uint16Array([1,2,3,4,5,6]);
+aa = a.subarray(3);
+assertInstance(aa, Uint16Array);
+assertEquals(3, aa.length);
+assertEquals(6, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(3,5);
+assertInstance(aa, Uint16Array);
+assertEquals(2, aa.length);
+assertEquals(4, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(4,8);
+assertInstance(aa, Uint16Array);
+assertEquals(2, aa.length);
+assertEquals(4, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(9);
+assertInstance(aa, Uint16Array);
+assertEquals(0, aa.length);
+assertEquals(0, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(-4);
+assertInstance(aa, Uint16Array);
+assertEquals(4, aa.length);
+assertEquals(8, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(-3,-1);
+assertInstance(aa, Uint16Array);
+assertEquals(2, aa.length);
+assertEquals(4, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(3,2);
+assertInstance(aa, Uint16Array);
+assertEquals(0, aa.length);
+assertEquals(0, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(-3,-4);
+assertInstance(aa, Uint16Array);
+assertEquals(0, aa.length);
+assertEquals(0, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(0,-8);
+assertInstance(aa, Uint16Array);
+assertEquals(0, aa.length);
+assertEquals(0, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+
+assertThrows(function(){ a.subarray.call({}, 0) });
+assertThrows(function(){ a.subarray.call([], 0) });
+assertThrows(function(){ a.subarray.call(a) });
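
For reference, subarray follows the index rules of Array.prototype.slice: negative
indices count from the end, out-of-range indices clamp, and begin >= end yields an
empty view. The new view always shares the backing buffer; no bytes are copied.
Recapping the cases above:

var a = new Uint16Array([1, 2, 3, 4, 5, 6]);
a.subarray(-4);     // same as a.subarray(2): views [3, 4, 5, 6]
a.subarray(4, 8);   // end clamps to 6: views [5, 6]
a.subarray(3, 2);   // begin > end: empty view of length 0
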
+
+
+// Call constructors directly as functions, and through .call and .apply
+
+b = ArrayBuffer(100)
+a = Int8Array(b, 5, 77)
+assertInstance(b, ArrayBuffer)
+assertInstance(a, Int8Array)
+assertSame(b, a.buffer)
+assertEquals(5, a.byteOffset)
+assertEquals(77, a.byteLength)
+b = ArrayBuffer.call(null, 10)
+a = Uint16Array.call(null, b, 2, 4)
+assertInstance(b, ArrayBuffer)
+assertInstance(a, Uint16Array)
+assertSame(b, a.buffer)
+assertEquals(2, a.byteOffset)
+assertEquals(8, a.byteLength)
+b = ArrayBuffer.apply(null, [1000])
+a = Float32Array.apply(null, [b, 128, 1])
+assertInstance(b, ArrayBuffer)
+assertInstance(a, Float32Array)
+assertSame(b, a.buffer)
+assertEquals(128, a.byteOffset)
+assertEquals(4, a.byteLength)
+
+
+// Test array.set in different combinations.
+
+function assertArrayPrefix(expected, array) {
+ for (var i = 0; i < expected.length; ++i) {
+ assertEquals(expected[i], array[i]);
+ }
+}
+
+var a11 = new Int16Array([1, 2, 3, 4, 0, -1])
+var a12 = new Uint16Array(15)
+a12.set(a11, 3)
+assertArrayPrefix([0, 0, 0, 1, 2, 3, 4, 0, 0xffff, 0, 0], a12)
+assertThrows(function(){ a11.set(a12) })
+
+var a21 = [1, undefined, 10, NaN, 0, -1, {valueOf: function() {return 3}}]
+var a22 = new Int32Array(12)
+a22.set(a21, 2)
+assertArrayPrefix([0, 0, 1, 0, 10, 0, 0, -1, 3, 0], a22)
+
+var a31 = new Float32Array([2, 4, 6, 8, 11, NaN, 1/0, -3])
+var a32 = a31.subarray(2, 6)
+a31.set(a32, 4)
+assertArrayPrefix([2, 4, 6, 8, 6, 8, 11, NaN], a31)
+assertArrayPrefix([6, 8, 6, 8], a32)
+
+var a4 = new Uint8ClampedArray([3,2,5,6])
+a4.set(a4)
+assertArrayPrefix([3, 2, 5, 6], a4)
+
+// Cases with overlapping backing store but different element sizes.
+var b = new ArrayBuffer(4)
+var a5 = new Int16Array(b)
+var a50 = new Int8Array(b)
+var a51 = new Int8Array(b, 0, 2)
+var a52 = new Int8Array(b, 1, 2)
+var a53 = new Int8Array(b, 2, 2)
+
+a5.set([0x5050, 0x0a0a])
+assertArrayPrefix([0x50, 0x50, 0x0a, 0x0a], a50)
+assertArrayPrefix([0x50, 0x50], a51)
+assertArrayPrefix([0x50, 0x0a], a52)
+assertArrayPrefix([0x0a, 0x0a], a53)
+
+a50.set([0x50, 0x50, 0x0a, 0x0a])
+a51.set(a5)
+assertArrayPrefix([0x50, 0x0a, 0x0a, 0x0a], a50)
+
+a50.set([0x50, 0x50, 0x0a, 0x0a])
+a52.set(a5)
+assertArrayPrefix([0x50, 0x50, 0x0a, 0x0a], a50)
+
+a50.set([0x50, 0x50, 0x0a, 0x0a])
+a53.set(a5)
+assertArrayPrefix([0x50, 0x50, 0x50, 0x0a], a50)
+
+a50.set([0x50, 0x51, 0x0a, 0x0b])
+a5.set(a51)
+assertArrayPrefix([0x0050, 0x0051], a5)
+
+a50.set([0x50, 0x51, 0x0a, 0x0b])
+a5.set(a52)
+assertArrayPrefix([0x0051, 0x000a], a5)
+
+a50.set([0x50, 0x51, 0x0a, 0x0b])
+a5.set(a53)
+assertArrayPrefix([0x000a, 0x000b], a5)
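
A sketch of the byte layout behind these overlap cases, assuming a little-endian
host (true of the ia32/x64/ARM targets this suite runs on):

// backing bytes:            [b0, b1, b2, b3]
// Int16Array over them  ->  elements b0|b1<<8 and b2|b3<<8
var b2 = new ArrayBuffer(4);
var wide = new Int16Array(b2);
var narrow = new Int8Array(b2, 1, 2);    // views b1 and b2
wide.set([0x5050, 0x0a0a]);              // bytes become 50 50 0a 0a
// narrow now reads [0x50, 0x0a], matching the a52 assertion above;
// set() between overlapping views must behave as if it copied
// through a temporary buffer.
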
+
+// Mixed types of same size.
+var a61 = new Float32Array([1.2, 12.3])
+var a62 = new Int32Array(2)
+a62.set(a61)
+assertArrayPrefix([1, 12], a62)
+a61.set(a62)
+assertArrayPrefix([1, 12], a61)
+
+// Invalid source
+assertThrows(function() { a.set(0) })
+assertThrows(function() { a.set({}) })
+
+
+// Test arraybuffer.slice
+
+var a0 = new Int8Array([1, 2, 3, 4, 5, 6])
+var b0 = a0.buffer
+
+var b1 = b0.slice(0)
+assertEquals(b0.byteLength, b1.byteLength)
+assertArrayPrefix([1, 2, 3, 4, 5, 6], Int8Array(b1))
+
+var b2 = b0.slice(3)
+assertEquals(b0.byteLength - 3, b2.byteLength)
+assertArrayPrefix([4, 5, 6], Int8Array(b2))
+
+var b3 = b0.slice(2, 4)
+assertEquals(2, b3.byteLength)
+assertArrayPrefix([3, 4], Int8Array(b3))
+
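Unlike subarray, ArrayBuffer.prototype.slice copies: the result is a fresh buffer,
so writes through one do not show through the other. For example:

var src = new Int8Array([1, 2, 3, 4, 5, 6]).buffer;
var copy = src.slice(2, 4);              // new 2-byte buffer holding bytes [3, 4]
new Int8Array(copy)[0] = 9;
assertEquals(3, new Int8Array(src)[2]);  // the original is untouched
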
+function goo(a, i) {
+ return a[i];
+}
+
+function boo(a, i, v) {
+ return a[i] = v;
+}
+
+function do_tagged_index_external_array_test(constructor) {
+ var t_array = new constructor([1, 2, 3, 4, 5, 6]);
+ assertEquals(1, goo(t_array, 0));
+ assertEquals(1, goo(t_array, 0));
+ boo(t_array, 0, 13);
+ assertEquals(13, goo(t_array, 0));
+ %OptimizeFunctionOnNextCall(goo);
+ %OptimizeFunctionOnNextCall(boo);
+ boo(t_array, 0, 15);
+ assertEquals(15, goo(t_array, 0));
+ %ClearFunctionTypeFeedback(goo);
+ %ClearFunctionTypeFeedback(boo);
+}
+
+do_tagged_index_external_array_test(Int8Array);
+do_tagged_index_external_array_test(Uint8Array);
+do_tagged_index_external_array_test(Int16Array);
+do_tagged_index_external_array_test(Uint16Array);
+do_tagged_index_external_array_test(Int32Array);
+do_tagged_index_external_array_test(Uint32Array);
+do_tagged_index_external_array_test(Float32Array);
+do_tagged_index_external_array_test(Float64Array);
+
+var built_in_array = new Array(1, 2, 3, 4, 5, 6);
+assertEquals(1, goo(built_in_array, 0));
+assertEquals(1, goo(built_in_array, 0));
+%OptimizeFunctionOnNextCall(goo);
+%OptimizeFunctionOnNextCall(boo);
+boo(built_in_array, 0, 11);
+assertEquals(11, goo(built_in_array, 0));
+%ClearFunctionTypeFeedback(goo);
+%ClearFunctionTypeFeedback(boo);
+
+built_in_array = new Array(1.5, 2, 3, 4, 5, 6);
+assertEquals(1.5, goo(built_in_array, 0));
+assertEquals(1.5, goo(built_in_array, 0));
+%OptimizeFunctionOnNextCall(goo);
+%OptimizeFunctionOnNextCall(boo);
+boo(built_in_array, 0, 2.5);
+assertEquals(2.5, goo(built_in_array, 0));
+%ClearFunctionTypeFeedback(goo);
+%ClearFunctionTypeFeedback(boo);
diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js
index 2965e74573..225a44d8a5 100644
--- a/deps/v8/test/mjsunit/fuzz-natives.js
+++ b/deps/v8/test/mjsunit/fuzz-natives.js
@@ -149,6 +149,7 @@ var knownProblems = {
"PushBlockContext": true,
"LazyCompile": true,
"LazyRecompile": true,
+ "ParallelRecompile": true,
"NotifyDeoptimized": true,
"NotifyOSR": true,
"CreateObjectLiteralBoilerplate": true,
diff --git a/deps/v8/test/mjsunit/harmony/block-conflicts.js b/deps/v8/test/mjsunit/harmony/block-conflicts.js
index 8388504bcd..3aa9d22223 100644
--- a/deps/v8/test/mjsunit/harmony/block-conflicts.js
+++ b/deps/v8/test/mjsunit/harmony/block-conflicts.js
@@ -35,7 +35,8 @@
function CheckException(e) {
var string = e.toString();
assertTrue(string.indexOf("has already been declared") >= 0 ||
- string.indexOf("redeclaration") >= 0); return 'Conflict';
+ string.indexOf("redeclaration") >= 0);
+ return 'Conflict';
}
diff --git a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js b/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
index 1db1792ea6..d01e5c08ab 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping --allow-natives-syntax
+// Flags: --harmony-scoping --allow-natives-syntax --noparallel-recompilation
// TODO(ES6): properly activate extended mode
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/collections.js b/deps/v8/test/mjsunit/harmony/collections.js
index 412e6f14c3..f3db7ea2b7 100644
--- a/deps/v8/test/mjsunit/harmony/collections.js
+++ b/deps/v8/test/mjsunit/harmony/collections.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -65,9 +65,11 @@ TestInvalidCalls(new WeakMap);
// Test expected behavior for Sets
function TestSet(set, key) {
assertFalse(set.has(key));
- set.add(key);
+ assertSame(undefined, set.add(key));
assertTrue(set.has(key));
- set.delete(key);
+ assertTrue(set.delete(key));
+ assertFalse(set.has(key));
+ assertFalse(set.delete(key));
assertFalse(set.has(key));
}
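
The tightened assertions pin down return values as specified in the then-current
ES6 draft (later drafts changed add to return the set itself):

var s = new Set;
assertSame(undefined, s.add(1));  // add returns undefined in this version
assertTrue(s.delete(1));          // true: the key was present
assertFalse(s.delete(1));         // false: already gone
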
function TestSetBehavior(set) {
@@ -87,7 +89,7 @@ TestSetBehavior(new Set);
// Test expected mapping behavior for Maps and WeakMaps
function TestMapping(map, key, value) {
- map.set(key, value);
+ assertSame(undefined, map.set(key, value));
assertSame(value, map.get(key));
}
function TestMapBehavior1(m) {
@@ -117,12 +119,12 @@ TestMapBehavior2(new Map);
// Test expected querying behavior of Maps and WeakMaps
function TestQuery(m) {
var key = new Object;
- TestMapping(m, key, 'to-be-present');
- assertTrue(m.has(key));
- assertFalse(m.has(new Object));
- TestMapping(m, key, undefined);
- assertFalse(m.has(key));
- assertFalse(m.has(new Object));
+ var values = [ 'x', 0, +Infinity, -Infinity, true, false, null, undefined ];
+ for (var i = 0; i < values.length; i++) {
+ TestMapping(m, key, values[i]);
+ assertTrue(m.has(key));
+ assertFalse(m.has(new Object));
+ }
}
TestQuery(new Map);
TestQuery(new WeakMap);
diff --git a/deps/v8/test/mjsunit/harmony/module-linking.js b/deps/v8/test/mjsunit/harmony/module-linking.js
index 13ca6f782f..a4b272f468 100644
--- a/deps/v8/test/mjsunit/harmony/module-linking.js
+++ b/deps/v8/test/mjsunit/harmony/module-linking.js
@@ -27,10 +27,188 @@
// Flags: --harmony-modules --harmony-scoping
-// Test basic module linking.
+// Test basic module linking and initialization.
"use strict";
+module R {
+ // At this point, only functions and modules are initialized.
+ assertEquals(undefined, v)
+ assertEquals(undefined, vv)
+ assertEquals(undefined, R.v)
+ assertEquals(undefined, M.v)
+ assertEquals(undefined, MM.v)
+ assertEquals(undefined, F.v)
+ assertEquals(undefined, G.v)
+ assertThrows(function() { l }, ReferenceError)
+ assertThrows(function() { ll }, ReferenceError)
+ assertThrows(function() { R.l }, ReferenceError)
+ assertThrows(function() { M.l }, ReferenceError)
+ assertThrows(function() { MM.l }, ReferenceError)
+ assertThrows(function() { F.l }, ReferenceError)
+ assertThrows(function() { G.l }, ReferenceError)
+ assertThrows(function() { c }, ReferenceError)
+ assertThrows(function() { cc }, ReferenceError)
+ assertThrows(function() { R.c }, ReferenceError)
+ assertThrows(function() { M.c }, ReferenceError)
+ assertThrows(function() { MM.c }, ReferenceError)
+ assertThrows(function() { F.c }, ReferenceError)
+ assertThrows(function() { G.c }, ReferenceError)
+ assertEquals(4, f())
+ assertEquals(24, ff())
+ assertEquals(4, R.f())
+ assertEquals(14, M.f())
+ assertEquals(34, MM.f())
+ assertEquals(44, F.f())
+ assertEquals(14, G.f())
+
+ // All properties should already exist on the instance objects, though.
+ assertTrue("v" in R)
+ assertTrue("v" in RR)
+ assertTrue("v" in M)
+ assertTrue("v" in MM)
+ assertTrue("v" in F)
+ assertTrue("v" in G)
+ assertTrue("l" in R)
+ assertTrue("l" in RR)
+ assertTrue("l" in M)
+ assertTrue("l" in MM)
+ assertTrue("l" in F)
+ assertTrue("l" in G)
+ assertTrue("c" in R)
+ assertTrue("c" in RR)
+ assertTrue("c" in M)
+ assertTrue("c" in MM)
+ assertTrue("c" in F)
+ assertTrue("c" in G)
+ assertTrue("f" in R)
+ assertTrue("f" in RR)
+ assertTrue("f" in M)
+ assertTrue("f" in MM)
+ assertTrue("f" in F)
+ assertTrue("f" in G)
+ assertTrue("M" in R)
+ assertTrue("M" in RR)
+ assertTrue("RR" in R)
+ assertTrue("RR" in RR)
+
+ // And aliases should be identical.
+ assertSame(R, RR)
+ assertSame(R, R.RR)
+ assertSame(M, R.M)
+ assertSame(M, G)
+
+ // We can only assign to var.
+ assertEquals(-1, v = -1)
+ assertEquals(-2, R.v = -2)
+ assertEquals(-2, v)
+ assertEquals(-2, R.v)
+
+ assertThrows(function() { l = -1 }, ReferenceError)
+ assertThrows(function() { R.l = -2 }, ReferenceError)
+ assertThrows(function() { l }, ReferenceError)
+ assertThrows(function() { R.l }, ReferenceError)
+
+ assertThrows(function() { eval("c = -1") }, SyntaxError)
+ assertThrows(function() { R.c = -2 }, TypeError)
+
+ // Initialize the first bunch of variables.
+ export var v = 1
+ export let l = 2
+ export const c = 3
+ export function f() { return 4 }
+
+ assertEquals(1, v)
+ assertEquals(1, R.v)
+ assertEquals(2, l)
+ assertEquals(2, R.l)
+ assertEquals(3, c)
+ assertEquals(3, R.c)
+
+ assertEquals(-3, v = -3)
+ assertEquals(-4, R.v = -4)
+ assertEquals(-3, l = -3)
+ assertEquals(-4, R.l = -4)
+ assertThrows(function() { eval("c = -3") }, SyntaxError)
+ assertThrows(function() { R.c = -4 }, TypeError)
+
+ assertEquals(-4, v)
+ assertEquals(-4, R.v)
+ assertEquals(-4, l)
+ assertEquals(-4, R.l)
+ assertEquals(3, c)
+ assertEquals(3, R.c)
+
+ // Initialize nested module.
+ export module M {
+ export var v = 11
+ export let l = 12
+ export const c = 13
+ export function f() { return 14 }
+ }
+
+ assertEquals(11, M.v)
+ assertEquals(11, G.v)
+ assertEquals(12, M.l)
+ assertEquals(12, G.l)
+ assertEquals(13, M.c)
+ assertEquals(13, G.c)
+
+ // Initialize non-exported variables.
+ var vv = 21
+ let ll = 22
+ const cc = 23
+ function ff() { return 24 }
+
+ assertEquals(21, vv)
+ assertEquals(22, ll)
+ assertEquals(23, cc)
+
+ // Initialize non-exported module.
+ module MM {
+ export var v = 31
+ export let l = 32
+ export const c = 33
+ export function f() { return 34 }
+ }
+
+ assertEquals(31, MM.v)
+ assertEquals(32, MM.l)
+ assertEquals(33, MM.c)
+
+ // Recursive self reference.
+ export module RR = R
+}
+
+// Initialize sibling module that was forward-used.
+module F {
+ assertEquals(undefined, v)
+ assertEquals(undefined, F.v)
+ assertThrows(function() { l }, ReferenceError)
+ assertThrows(function() { F.l }, ReferenceError)
+ assertThrows(function() { c }, ReferenceError)
+ assertThrows(function() { F.c }, ReferenceError)
+
+ export var v = 41
+ export let l = 42
+ export const c = 43
+ export function f() { return 44 }
+
+ assertEquals(41, v)
+ assertEquals(41, F.v)
+ assertEquals(42, l)
+ assertEquals(42, F.l)
+ assertEquals(43, c)
+ assertEquals(43, F.c)
+}
+
+// Define recursive module alias.
+module G = R.M
+
+
+
+// Second test with side effects and more module nesting.
+
let log = "";
export let x = (log += "1");
@@ -117,5 +295,4 @@ assertSame(M2, M1.A2);
assertSame(M1, M1.A2.A1);
assertSame(M2, M2.A1.A2);
-// TODO(rossberg): inner declarations are not executed yet.
-// assertEquals("1234567890", log);
+assertEquals("1234567890", log);
diff --git a/deps/v8/test/mjsunit/harmony/module-parsing.js b/deps/v8/test/mjsunit/harmony/module-parsing.js
index cdd0a2e00d..03948e31b9 100644
--- a/deps/v8/test/mjsunit/harmony/module-parsing.js
+++ b/deps/v8/test/mjsunit/harmony/module-parsing.js
@@ -116,6 +116,11 @@ x
,
y
+var
+x
+,
+y
+
export
var
v1 = 1
diff --git a/deps/v8/test/mjsunit/harmony/module-recompile.js b/deps/v8/test/mjsunit/harmony/module-recompile.js
new file mode 100644
index 0000000000..23f5bfc4d4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/module-recompile.js
@@ -0,0 +1,87 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-modules
+
+// Test that potential recompilation of the global scope does not screw up.
+
+"use strict";
+
+var N = 1e5; // Number of loop iterations that trigger optimization.
+
+module A {
+ export var x = 1
+ export function f() { return x }
+}
+var f = A.f
+
+assertEquals(1, A.x)
+assertEquals(1, A.f())
+assertEquals(1, f())
+
+A.x = 2
+
+assertEquals(2, A.x)
+assertEquals(2, A.f())
+assertEquals(2, f())
+
+for (var i = 0; i < N; i++) {
+ if (i > N) print("impossible");
+}
+
+assertEquals(2, A.x)
+assertEquals(2, A.f())
+assertEquals(2, f())
+
+
+// Same test with loop inside a module.
+
+module B {
+ module A {
+ export var x = 1
+ export function f() { return x }
+ }
+ var f = A.f
+
+ assertEquals(1, A.x)
+ assertEquals(1, A.f())
+ assertEquals(1, f())
+
+ A.x = 2
+
+ assertEquals(2, A.x)
+ assertEquals(2, A.f())
+ assertEquals(2, f())
+
+ for (var i = 0; i < N; i++) {
+ if (i > N) print("impossible");
+ }
+
+ assertEquals(2, A.x)
+ assertEquals(2, A.f())
+ assertEquals(2, f())
+}
diff --git a/deps/v8/test/mjsunit/harmony/module-resolution.js b/deps/v8/test/mjsunit/harmony/module-resolution.js
index a1b991749c..1a95347d14 100644
--- a/deps/v8/test/mjsunit/harmony/module-resolution.js
+++ b/deps/v8/test/mjsunit/harmony/module-resolution.js
@@ -33,6 +33,7 @@
print("begin.")
+
export let x = print("0")
export module B = A.B
@@ -44,15 +45,25 @@ export module A {
module BB = B
export BB, x
let x = print("2")
- let y = print("3")
+ var y = print("3")
let Ax = A.x
+ try { A.y } catch (e) {} // throws
+ let Az = A.z // undefined
+ let Az2 = z // undefined
+ A.g() // hoisted
+ g() // hoisted
let ABx = A.B.x
- let Ay = A.y
+ let ABy = A.B.y
+ let Bx = B.x
+ let By = B.y
let BBx = BB.x
+ let BBy = BB.y
let Af = A.f
function f(x,y) { return x }
}
export let y = print("4")
+ export var z = print("4.1")
+ export function g() {}
let Ax = A.x
let Bx = B.x
let ABx = A.B.x
@@ -92,6 +103,8 @@ export module E {
let Bx = B.x
// TODO(rossberg): Handle import *.
// import A.*
+ module B = A.B
+ let y = A.y
}
export module M1 {
diff --git a/deps/v8/test/mjsunit/math-floor-negative.js b/deps/v8/test/mjsunit/math-floor-negative.js
new file mode 100644
index 0000000000..4cabff577e
--- /dev/null
+++ b/deps/v8/test/mjsunit/math-floor-negative.js
@@ -0,0 +1,59 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --noenable_sse4_1 --allow-natives-syntax
+
+function test1() {
+ // Trigger overflow when converting/truncating double to integer.
+ // Divide by 10 to avoid overflow when smi-tagging at the end.
+ return Math.floor(-100000000000.5) / 10;
+}
+
+function test2() {
+ // Trigger no overflow.
+ return Math.floor(-100.2);
+}
+
+function test3() {
+ // Trigger overflow when compensating by subtracting after compare.
+ // Divide by 10 to avoid overflow when smi-tagging at the end.
+ return Math.floor(-2147483648.1) / 10;
+}
+
+test1();
+test1();
+%OptimizeFunctionOnNextCall(test1);
+test2();
+test2();
+%OptimizeFunctionOnNextCall(test2);
+test3();
+test3();
+%OptimizeFunctionOnNextCall(test3);
+
+assertEquals(-10000000000.1, test1());
+assertEquals(-101, test2());
+assertEquals(-214748364.9, test3());
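
The constants sit just around the int32 boundary: Math.floor(-2147483648.1) is
-2147483649, one below INT32_MIN, so an optimized integer-truncating floor cannot
represent the result and must deoptimize to a double. Concretely:

// -2147483648 === -Math.pow(2, 31), the smallest int32.
assertEquals(-2147483649, Math.floor(-2147483648.1));  // below int32 range
assertEquals(-2147483648, Math.floor(-2147483647.9));  // still an int32
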
diff --git a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
new file mode 100644
index 0000000000..2743490847
--- /dev/null
+++ b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
@@ -0,0 +1,40 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nouse_inlining --noparallel-recompilation
+
+// Test for negative zero that does not need a bailout.
+
+function test_div_no_deopt_minus_zero() {
+ var zero_in_array = [0];
+ assertTrue(0 === (Math.floor((zero_in_array[0] | 0) / -1) | 0));
+}
+
+test_div_no_deopt_minus_zero();
+%OptimizeFunctionOnNextCall(test_div_no_deopt_minus_zero);
+test_div_no_deopt_minus_zero();
+assertTrue(2 != %GetOptimizationStatus(test_div_no_deopt_minus_zero));
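
Why no bailout is needed: (0 | 0) / -1 produces -0, but the result immediately
feeds a bitwise-or, which truncates to int32 and cannot represent -0, so the
optimizer may legally skip the minus-zero check. The distinction, made visible
with division:

var x = (0 | 0) / -1;                 // -0
assertEquals(-Infinity, 1 / x);       // 1/-0 reveals the sign
assertEquals(Infinity, 1 / (x | 0));  // |0 folds -0 into +0
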
diff --git a/deps/v8/test/mjsunit/mirror-object.js b/deps/v8/test/mjsunit/mirror-object.js
index d4d228cf07..8bf8a2d4f8 100644
--- a/deps/v8/test/mjsunit/mirror-object.js
+++ b/deps/v8/test/mjsunit/mirror-object.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,19 +49,19 @@ function testObjectMirror(obj, cls_name, ctor_name, hasSpecialProperties) {
JSON.stringify(serializer.serializeReferencedObjects()));
// Check the mirror hierarchy.
- assertTrue(mirror instanceof debug.Mirror, 'Unexpected mirror hierachy');
- assertTrue(mirror instanceof debug.ValueMirror, 'Unexpected mirror hierachy');
- assertTrue(mirror instanceof debug.ObjectMirror, 'Unexpected mirror hierachy');
+ assertTrue(mirror instanceof debug.Mirror, 'Unexpected mirror hierarchy');
+ assertTrue(mirror instanceof debug.ValueMirror, 'Unexpected mirror hierarchy');
+ assertTrue(mirror instanceof debug.ObjectMirror, 'Unexpected mirror hierarchy');
// Check the mirror properties.
assertTrue(mirror.isObject(), 'Unexpected mirror');
assertEquals('object', mirror.type(), 'Unexpected mirror type');
assertFalse(mirror.isPrimitive(), 'Unexpected primitive mirror');
assertEquals(cls_name, mirror.className(), 'Unexpected mirror class name');
- assertTrue(mirror.constructorFunction() instanceof debug.ObjectMirror, 'Unexpected mirror hierachy');
+ assertTrue(mirror.constructorFunction() instanceof debug.ObjectMirror, 'Unexpected mirror hierarchy');
assertEquals(ctor_name, mirror.constructorFunction().name(), 'Unexpected constructor function name');
- assertTrue(mirror.protoObject() instanceof debug.Mirror, 'Unexpected mirror hierachy');
- assertTrue(mirror.prototypeObject() instanceof debug.Mirror, 'Unexpected mirror hierachy');
+ assertTrue(mirror.protoObject() instanceof debug.Mirror, 'Unexpected mirror hierarchy');
+ assertTrue(mirror.prototypeObject() instanceof debug.Mirror, 'Unexpected mirror hierarchy');
assertFalse(mirror.hasNamedInterceptor(), 'No named interceptor expected');
assertFalse(mirror.hasIndexedInterceptor(), 'No indexed interceptor expected');
@@ -69,12 +69,19 @@ function testObjectMirror(obj, cls_name, ctor_name, hasSpecialProperties) {
var properties = mirror.properties();
assertEquals(names.length, properties.length);
for (var i = 0; i < properties.length; i++) {
- assertTrue(properties[i] instanceof debug.Mirror, 'Unexpected mirror hierachy');
- assertTrue(properties[i] instanceof debug.PropertyMirror, 'Unexpected mirror hierachy');
+ assertTrue(properties[i] instanceof debug.Mirror, 'Unexpected mirror hierarchy');
+ assertTrue(properties[i] instanceof debug.PropertyMirror, 'Unexpected mirror hierarchy');
assertEquals('property', properties[i].type(), 'Unexpected mirror type');
assertEquals(names[i], properties[i].name(), 'Unexpected property name');
}
+ var internalProperties = mirror.internalProperties();
+ for (var i = 0; i < internalProperties.length; i++) {
+ assertTrue(internalProperties[i] instanceof debug.Mirror, 'Unexpected mirror hierarchy');
+ assertTrue(internalProperties[i] instanceof debug.InternalPropertyMirror, 'Unexpected mirror hierarchy');
+ assertEquals('internalProperty', internalProperties[i].type(), 'Unexpected mirror type');
+ }
+
for (var p in obj) {
var property_mirror = mirror.property(p);
assertTrue(property_mirror instanceof debug.PropertyMirror);
@@ -172,6 +179,7 @@ testObjectMirror(this, 'global', '', true); // Global object has special proper
testObjectMirror(this.__proto__, 'Object', '');
testObjectMirror([], 'Array', 'Array');
testObjectMirror([1,2], 'Array', 'Array');
+testObjectMirror(Object(17), 'Number', 'Number');
// Test circular references.
o = {};
@@ -230,3 +238,29 @@ assertTrue(mirror.property('length').isNative());
assertEquals('a', mirror.property(0).value().value());
assertEquals('b', mirror.property(1).value().value());
assertEquals('c', mirror.property(2).value().value());
+
+// Test value wrapper internal properties.
+mirror = debug.MakeMirror(Object("Capybara"));
+var ip = mirror.internalProperties();
+assertEquals(1, ip.length);
+assertEquals("[[PrimitiveValue]]", ip[0].name());
+assertEquals("string", ip[0].value().type());
+assertEquals("Capybara", ip[0].value().value());
+
+// Test bound function internal properties.
+mirror = debug.MakeMirror(Number.bind(Array, 2));
+ip = mirror.internalProperties();
+assertEquals(3, ip.length);
+var property_map = {};
+for (var i = 0; i < ip.length; i++) {
+ property_map[ip[i].name()] = ip[i];
+}
+assertTrue("[[BoundThis]]" in property_map);
+assertEquals("function", property_map["[[BoundThis]]"].value().type());
+assertEquals(Array, property_map["[[BoundThis]]"].value().value());
+assertTrue("[[TargetFunction]]" in property_map);
+assertEquals("function", property_map["[[TargetFunction]]"].value().type());
+assertEquals(Number, property_map["[[TargetFunction]]"].value().value());
+assertTrue("[[BoundArgs]]" in property_map);
+assertEquals("object", property_map["[[BoundArgs]]"].value().type());
+assertEquals(1, property_map["[[BoundArgs]]"].value().value().length);
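
For orientation, the three internal properties mirror exactly what
Function.prototype.bind captures:

var bound = Number.bind(Array, 2);
// [[TargetFunction]] = Number, [[BoundThis]] = Array, [[BoundArgs]] = [2]
assertEquals(2, bound());    // invokes Number.call(Array, 2)
assertEquals(2, bound(3));   // extra args append: Number.call(Array, 2, 3)
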
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 65fb301b44..25d7c00432 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -321,7 +321,7 @@ var assertUnreachable;
assertInstanceof = function assertInstanceof(obj, type) {
if (!(obj instanceof type)) {
var actualTypeName = null;
- var actualConstructor = Object.prototypeOf(obj).constructor;
+ var actualConstructor = Object.getPrototypeOf(obj).constructor;
if (typeof actualConstructor == "function") {
actualTypeName = actualConstructor.name || String(actualConstructor);
}
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index e311ffbcba..357b33bf08 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -34,9 +34,6 @@ bugs/*: FAIL
# Fails.
regress/regress-1119: FAIL
-# Issue 2177: Debugger on ARM broken due to variable literal pool size.
-debug-liveedit-breakpoints: PASS, SKIP if ($arch == arm)
-
# Issue 1719: Slow to collect arrays over several contexts.
regress/regress-524: SKIP
# When that bug is fixed, revert the expectation to:
@@ -52,33 +49,28 @@ regress/regress-create-exception: PASS, SKIP if $mode == debug
##############################################################################
# This one uses a built-in that's only present in debug mode. It takes
# too long to run in debug mode on ARM and MIPS.
-fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm || $arch == mips)
+fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm || $arch == android_arm || $arch == mipsel)
-big-object-literal: PASS, SKIP if ($arch == arm)
+big-object-literal: PASS, SKIP if ($arch == arm || $arch == android_arm)
# Issue 488: this test sometimes times out.
array-constructor: PASS || TIMEOUT
# Very slow on ARM and MIPS, contains no architecture dependent code.
-unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == mips)
-
-# Stack manipulations in LiveEdit are buggy - see bug 915
-debug-liveedit-check-stack: SKIP
-debug-liveedit-patch-positions-replace: SKIP
-debug-liveedit-stack-padding: SKIP
+unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == android_arm || $arch == mipsel)
# Test Crankshaft compilation time. Expected to take too long in debug mode.
-regress/regress-1969: PASS, SKIP if $mode == debug
+regress/regress-1969: PASS, SKIP if ($mode == debug || $arch == android_arm)
##############################################################################
-[ $isolates ]
-
# This test sets the umask on a per-process basis and hence cannot be
# used in multi-threaded runs.
-d8-os: SKIP
+# On Android there is no /tmp directory.
+d8-os: PASS, SKIP if ($isolates || $arch == android_arm || $arch == android_ia32)
+tools/tickprocessor: PASS, SKIP if ($arch == android_arm || $arch == android_ia32)
##############################################################################
-[ $arch == arm ]
+[ $arch == arm || $arch == android_arm ]
# Slow tests which time out in debug mode.
try: PASS, SKIP if $mode == debug
@@ -93,8 +85,8 @@ compiler/regress-stacktrace-methods: PASS, PASS || TIMEOUT if $mode == release
array-splice: PASS || TIMEOUT
# Long running test.
-mirror-object: PASS || TIMEOUT
string-indexof-2: PASS || TIMEOUT
+mirror-object: PASS || TIMEOUT
# BUG(3251035): Timeouts in long looping crankshaft optimization
# tests. Skipping because having them timeout takes too long on the
@@ -129,8 +121,17 @@ regress/regress-3247124: SKIP
# should be platform-independent.
regress/regress-1132: SKIP
+# Stack manipulations in LiveEdit are not implemented for this arch.
+debug-liveedit-check-stack: SKIP
+debug-liveedit-stack-padding: SKIP
+debug-liveedit-restart-frame: SKIP
+debug-liveedit-double-call: SKIP
+
+# Currently always deopt on minus zero
+math-floor-of-div-minus-zero: SKIP
+
##############################################################################
-[ $arch == mips ]
+[ $arch == mipsel ]
# Slow tests which time out in debug mode.
try: PASS, SKIP if $mode == debug
@@ -177,3 +178,10 @@ regress/regress-3247124: SKIP
# the test requires too much time to run. However, the problem test covers
# should be platform-independent.
regress/regress-1132: SKIP
+
+# Stack manipulations in LiveEdit are not implemented for this arch.
+debug-liveedit-check-stack: SKIP
+debug-liveedit-stack-padding: SKIP
+debug-liveedit-restart-frame: SKIP
+debug-liveedit-double-call: SKIP
+
diff --git a/deps/v8/test/mjsunit/object-define-property.js b/deps/v8/test/mjsunit/object-define-property.js
index fdaf82d105..bd104aaf0b 100644
--- a/deps/v8/test/mjsunit/object-define-property.js
+++ b/deps/v8/test/mjsunit/object-define-property.js
@@ -27,7 +27,7 @@
// Tests the Object.defineProperty method - ES 15.2.3.6
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --es5-readonly
// Check that an exception is thrown when null is passed as object.
var exception = false;
@@ -1057,6 +1057,8 @@ assertEquals(999, o[999]);
// Regression test: Bizarre behavior on non-strict arguments object.
+// TODO(mstarzinger): Tests disabled, see bug 2261
+/*
(function test(arg0) {
// Here arguments[0] is a fast alias on arg0.
Object.defineProperty(arguments, "0", {
@@ -1075,7 +1077,7 @@ assertEquals(999, o[999]);
assertEquals(2, arg0);
assertEquals(3, arguments[0]);
})(0);
-
+*/
// Regression test: We should never observe the hole value.
var objectWithGetter = {};
@@ -1085,3 +1087,106 @@ assertEquals(undefined, objectWithGetter.__lookupSetter__('foo'));
var objectWithSetter = {};
objectWithSetter.__defineSetter__('foo', function(x) {});
assertEquals(undefined, objectWithSetter.__lookupGetter__('foo'));
+
+// An object with a getter on the prototype chain.
+function getter() { return 111; }
+function anotherGetter() { return 222; }
+
+function testGetterOnProto(expected, o) {
+ assertEquals(expected, o.quebec);
+}
+
+obj1 = {};
+Object.defineProperty(obj1, "quebec", { get: getter, configurable: true });
+obj2 = Object.create(obj1);
+obj3 = Object.create(obj2);
+
+testGetterOnProto(111, obj3);
+testGetterOnProto(111, obj3);
+%OptimizeFunctionOnNextCall(testGetterOnProto);
+testGetterOnProto(111, obj3);
+testGetterOnProto(111, obj3);
+
+Object.defineProperty(obj1, "quebec", { get: anotherGetter });
+
+testGetterOnProto(222, obj3);
+testGetterOnProto(222, obj3);
+%OptimizeFunctionOnNextCall(testGetterOnProto);
+testGetterOnProto(222, obj3);
+testGetterOnProto(222, obj3);
+
+// An object with a setter on the prototype chain.
+var modifyMe;
+function setter(x) { modifyMe = x+1; }
+function anotherSetter(x) { modifyMe = x+2; }
+
+function testSetterOnProto(expected, o) {
+ modifyMe = 333;
+ o.romeo = 444;
+ assertEquals(expected, modifyMe);
+}
+
+obj1 = {};
+Object.defineProperty(obj1, "romeo", { set: setter, configurable: true });
+obj2 = Object.create(obj1);
+obj3 = Object.create(obj2);
+
+testSetterOnProto(445, obj3);
+testSetterOnProto(445, obj3);
+%OptimizeFunctionOnNextCall(testSetterOnProto);
+testSetterOnProto(445, obj3);
+testSetterOnProto(445, obj3);
+
+Object.defineProperty(obj1, "romeo", { set: anotherSetter });
+
+testSetterOnProto(446, obj3);
+testSetterOnProto(446, obj3);
+%OptimizeFunctionOnNextCall(testSetterOnProto);
+testSetterOnProto(446, obj3);
+testSetterOnProto(446, obj3);
+
+// Removing a setter on the prototype chain.
+function testSetterOnProtoStrict(o) {
+ "use strict";
+ o.sierra = 12345;
+}
+
+obj1 = {};
+Object.defineProperty(obj1, "sierra",
+ { get: getter, set: setter, configurable: true });
+obj2 = Object.create(obj1);
+obj3 = Object.create(obj2);
+
+testSetterOnProtoStrict(obj3);
+testSetterOnProtoStrict(obj3);
+%OptimizeFunctionOnNextCall(testSetterOnProtoStrict);
+testSetterOnProtoStrict(obj3);
+testSetterOnProtoStrict(obj3);
+
+Object.defineProperty(obj1, "sierra",
+ { get: getter, set: undefined, configurable: true });
+
+exception = false;
+try {
+ testSetterOnProtoStrict(obj3);
+} catch (e) {
+ exception = true;
+ assertTrue(/which has only a getter/.test(e));
+}
+assertTrue(exception);
+
+// Test assignment to a getter-only property on the prototype chain. This makes
+// sure that Crankshaft re-checks its assumptions and doesn't rely only on type
+// feedback (which would be monomorphic here).
+
+function Assign(o) {
+ o.blubb = 123;
+}
+
+function C() {}
+
+Assign(new C);
+Assign(new C);
+%OptimizeFunctionOnNextCall(Assign);
+Object.defineProperty(C.prototype, "blubb", {get: function() { return -42; }});
+Assign(new C);
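
The shape of this test is the point: Assign is warmed up with plain data-property
stores (monomorphic type feedback), optimized, and only then does C.prototype gain
a getter-only 'blubb'. The optimized store must re-check the prototype chain rather
than trust the stale feedback; in sloppy mode the assignment is then silently
dropped:

var c = new C;
c.blubb = 123;               // no setter anywhere: assignment is a no-op
assertEquals(-42, c.blubb);  // the prototype getter still answers
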
diff --git a/deps/v8/test/mjsunit/packed-elements.js b/deps/v8/test/mjsunit/packed-elements.js
index 7f333e56e5..cfcdf8031f 100644
--- a/deps/v8/test/mjsunit/packed-elements.js
+++ b/deps/v8/test/mjsunit/packed-elements.js
@@ -96,9 +96,9 @@ function test6() {
function test_with_optimization(f) {
// Run tests in a loop to make sure that the inlined Array() constructor runs
// out of new-space memory and must fall back on the runtime implementation.
- for (i = 0; i < 250000; ++i) f();
+ for (i = 0; i < 25000; ++i) f();
%OptimizeFunctionOnNextCall(f);
- for (i = 0; i < 250000; ++i) f(); // Make sure GC happens
+ for (i = 0; i < 25000; ++i) f(); // Make sure GC happens
}
if (has_packed_elements) {
diff --git a/deps/v8/test/mjsunit/parse-int-float.js b/deps/v8/test/mjsunit/parse-int-float.js
index 2e4f648437..5a9b6f33cc 100644
--- a/deps/v8/test/mjsunit/parse-int-float.js
+++ b/deps/v8/test/mjsunit/parse-int-float.js
@@ -29,10 +29,10 @@ assertEquals(0, parseInt('0'));
assertEquals(0, parseInt(' 0'));
assertEquals(0, parseInt(' 0 '));
-assertEquals(63, parseInt('077'));
-assertEquals(63, parseInt(' 077'));
-assertEquals(63, parseInt(' 077 '));
-assertEquals(-63, parseInt(' -077'));
+assertEquals(77, parseInt('077'));
+assertEquals(77, parseInt(' 077'));
+assertEquals(77, parseInt(' 077 '));
+assertEquals(-77, parseInt(' -077'));
assertEquals(3, parseInt('11', 2));
assertEquals(4, parseInt('11', 3));
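
The updated expectations follow ES5, which removed the legacy auto-detection of
octal from parseInt; a leading 0 now means decimal unless a radix is given:

parseInt('077');     // 77: decimal, per ES5
parseInt('077', 8);  // 63: octal only with an explicit radix
parseInt('0x1f');    // 31: the hex prefix is still auto-detected
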
diff --git a/deps/v8/test/mjsunit/pixel-array-rounding.js b/deps/v8/test/mjsunit/pixel-array-rounding.js
index ef5a10bdff..0c307e62e5 100644
--- a/deps/v8/test/mjsunit/pixel-array-rounding.js
+++ b/deps/v8/test/mjsunit/pixel-array-rounding.js
@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax
-var pixels = new PixelArray(8);
+var pixels = new Uint8ClampedArray(8);
function f() {
for (var i = 0; i < 8; i++) {
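
Besides clamping, Uint8ClampedArray rounds ties to even (round-half-to-even),
unlike Math.round's round-half-up; that is the rounding behavior this test pins
down:

var p = new Uint8ClampedArray(4);
p[0] = 0.5;   // stores 0
p[1] = 1.5;   // stores 2
p[2] = 2.5;   // stores 2 (Math.round would give 3)
p[3] = 3.5;   // stores 4
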
diff --git a/deps/v8/test/mjsunit/regexp-global.js b/deps/v8/test/mjsunit/regexp-global.js
index cc360d3ce0..093dba17c1 100644
--- a/deps/v8/test/mjsunit/regexp-global.js
+++ b/deps/v8/test/mjsunit/regexp-global.js
@@ -139,3 +139,116 @@ str = str.replace(/\b(?=u(p))/g, function(match, capture) {
});
assertEquals("1up 1up 1up 1up", str);
+
+
+// Create regexp that has a *lot* of captures.
+var re_string = "(a)";
+for (var i = 0; i < 500; i++) {
+ re_string = "(" + re_string + ")";
+}
+re_string = re_string + "1";
+// re_string = "(((...((a))...)))1"
+
+var regexps = new Array();
+var last_match_expectations = new Array();
+var first_capture_expectations = new Array();
+
+// Atomic regexp.
+regexps.push(/a1/g);
+last_match_expectations.push("a1");
+first_capture_expectations.push("");
+// Small regexp (no capture).
+regexps.push(/\w1/g);
+last_match_expectations.push("a1");
+first_capture_expectations.push("");
+// Small regexp (one capture).
+regexps.push(/(a)1/g);
+last_match_expectations.push("a1");
+first_capture_expectations.push("a");
+// Large regexp (a lot of captures).
+regexps.push(new RegExp(re_string, "g"));
+last_match_expectations.push("a1");
+first_capture_expectations.push("a");
+
+function test_replace(result_expectation,
+ subject,
+ regexp,
+ replacement) {
+ for (var i = 0; i < regexps.length; i++) {
+ // Overwrite last match info.
+ "deadbeef".replace(/(dead)beef/, "$1holeycow");
+ // Conduct tests.
+ assertEquals(result_expectation, subject.replace(regexps[i], replacement));
+ if (subject.length == 0) {
+ assertEquals("deadbeef", RegExp.lastMatch);
+ assertEquals("dead", RegExp["$1"]);
+ } else {
+ assertEquals(last_match_expectations[i], RegExp.lastMatch);
+ assertEquals(first_capture_expectations[i], RegExp["$1"]);
+ }
+ }
+}
+
+
+function test_match(result_expectation,
+ subject,
+ regexp) {
+ for (var i = 0; i < regexps.length; i++) {
+ // Overwrite last match info.
+ "deadbeef".replace(/(dead)beef/, "$1holeycow");
+ // Conduct tests.
+ if (result_expectation == null) {
+ assertNull(subject.match(regexps[i]));
+ } else {
+ assertArrayEquals(result_expectation, subject.match(regexps[i]));
+ }
+ if (subject.length == 0) {
+ assertEquals("deadbeef", RegExp.lastMatch);
+ assertEquals("dead", RegExp["$1"]);
+ } else {
+ assertEquals(last_match_expectations[i], RegExp.lastMatch);
+ assertEquals(first_capture_expectations[i], RegExp["$1"]);
+ }
+ }
+}
+
+
+// Test for different number of matches.
+for (var m = 0; m < 200; m++) {
+ // Create string that matches m times.
+ var subject = "";
+ var test_1_expectation = "";
+ var test_2_expectation = "";
+ var test_3_expectation = (m == 0) ? null : new Array();
+ for (var i = 0; i < m; i++) {
+ subject += "a11";
+ test_1_expectation += "x1";
+ test_2_expectation += "1";
+ test_3_expectation.push("a1");
+ }
+
+ // Test 1a: String.replace with string.
+ test_replace(test_1_expectation, subject, /a1/g, "x");
+
+ // Test 1b: String.replace with function.
+ function f() { return "x"; }
+ test_replace(test_1_expectation, subject, /a1/g, f);
+
+ // Test 2a: String.replace with empty string.
+ test_replace(test_2_expectation, subject, /a1/g, "");
+
+ // Test 3a: String.match.
+ test_match(test_3_expectation, subject, /a1/g);
+}
+
+
+// Test String hashing (compiling regular expression includes hashing).
+var crosscheck = "\x80";
+for (var i = 0; i < 12; i++) crosscheck += crosscheck;
+new RegExp(crosscheck);
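+// At this point crosscheck is a 4096-character cons string; compiling it as a
+// RegExp presumably forces it to be flattened and hashed in one pass.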
+
+var subject = "ascii~only~string~here~";
+var replacement = "\x80";
+var result = subject.replace(/~/g, replacement);
+for (var i = 0; i < 5; i++) result += result;
+new RegExp(result);
diff --git a/deps/v8/test/mjsunit/regexp-results-cache.js b/deps/v8/test/mjsunit/regexp-results-cache.js
new file mode 100644
index 0000000000..7ee8c3fac4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-results-cache.js
@@ -0,0 +1,78 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Long string to trigger caching.
+var string =
+"Friends, Romans, countrymen, lend me your ears! \
+ I come to bury Caesar, not to praise him. \
+ The evil that men do lives after them, \
+ The good is oft interred with their bones; \
+ So let it be with Caesar. The noble Brutus \
+ Hath told you Caesar was ambitious; \
+ If it were so, it was a grievous fault, \
+ And grievously hath Caesar answer'd it. \
+ Here, under leave of Brutus and the rest- \
+ For Brutus is an honorable man; \
+ So are they all, all honorable men- \
+ Come I to speak in Caesar's funeral. \
+ He was my friend, faithful and just to me; \
+ But Brutus says he was ambitious, \
+ And Brutus is an honorable man. \
+ He hath brought many captives home to Rome, \
+ Whose ransoms did the general coffers fill. \
+ Did this in Caesar seem ambitious? \
+ When that the poor have cried, Caesar hath wept; \
+ Ambition should be made of sterner stuff: \
+ Yet Brutus says he was ambitious, \
+ And Brutus is an honorable man. \
+ You all did see that on the Lupercal \
+ I thrice presented him a kingly crown, \
+ Which he did thrice refuse. Was this ambition? \
+ Yet Brutus says he was ambitious, \
+ And sure he is an honorable man. \
+ I speak not to disprove what Brutus spoke, \
+ But here I am to speak what I do know. \
+ You all did love him once, not without cause; \
+ What cause withholds you then to mourn for him? \
+ O judgement, thou art fled to brutish beasts, \
+ And men have lost their reason. Bear with me; \
+ My heart is in the coffin there with Caesar, \
+ And I must pause till it come back to me.";
+
+var replaced = string.replace(/\b\w+\b/g, function() { return "foo"; });
+for (var i = 0; i < 3; i++) {
+ assertEquals(replaced,
+ string.replace(/\b\w+\b/g, function() { return "foo"; }));
+}
+
+// Check that the result is in a COW array.
+var words = string.split(" ");
+assertEquals("Friends,", words[0]);
+words[0] = "Enemies,";
+words = string.split(" ");
+assertEquals("Friends,", words[0]);
+
diff --git a/deps/v8/test/mjsunit/regress-2286.js b/deps/v8/test/mjsunit/regress-2286.js
new file mode 100644
index 0000000000..372451ec44
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-2286.js
@@ -0,0 +1,32 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+assertThrows("f()", ReferenceError);
+assertThrows("%f()", TypeError);
+assertThrows("%_f()", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1118.js b/deps/v8/test/mjsunit/regress/regress-1118.js
index 7e0461db4d..3e3920f3dc 100644
--- a/deps/v8/test/mjsunit/regress/regress-1118.js
+++ b/deps/v8/test/mjsunit/regress/regress-1118.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --noparallel-recompilation
// An exception thrown in a function optimized by on-stack replacement (OSR)
// should be able to construct a receiver from all optimized stack frames.
diff --git a/deps/v8/test/mjsunit/regress/regress-131994.js b/deps/v8/test/mjsunit/regress/regress-131994.js
new file mode 100644
index 0000000000..8347653a94
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-131994.js
@@ -0,0 +1,70 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Test that a variable in the local scope that shadows a context-allocated
+// variable is correctly resolved when being evaluated in the debugger.
+
+Debug = debug.Debug;
+
+var exception = false;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ var breakpoint = exec_state.frame(0);
+ try {
+ // Assert correct break point.
+ assertTrue(breakpoint.sourceLineText().indexOf("// Break") > -1);
+ // Assert correct value.
+ assertEquals(3, breakpoint.evaluate('x').value());
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+function h() {
+ var x; // Context-allocated due to g().
+
+ var g = function g() {
+ x = -7;
+ };
+
+ var f = function f() {
+ var x = 3; // Allocated in the local scope.
+ debugger; // Break.
+ };
+
+ f();
+}
+
+h();
+
+assertFalse(exception);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-136048.js b/deps/v8/test/mjsunit/regress/regress-136048.js
new file mode 100644
index 0000000000..c9972e96fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-136048.js
@@ -0,0 +1,34 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
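+// A regexp literal whose flags are written with a unicode escape must be
+// rejected: \u0069 may not stand in for the literal flag "i".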
+try {
+ /foo/\u0069
+} catch (e) {
+ assertEquals(
+ "SyntaxError: Invalid flags supplied to RegExp constructor '\\u0069'",
+ e.toString());
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-137768.js b/deps/v8/test/mjsunit/regress/regress-137768.js
new file mode 100644
index 0000000000..9fbd7f30ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-137768.js
@@ -0,0 +1,73 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Create elements in a constructor function to ensure map sharing.
+function TestConstructor() {
+ this[0] = 1;
+ this[1] = 2;
+ this[2] = 3;
+}
+
+function bad_func(o,a) {
+ var s = 0;
+ for (var i = 0; i < 1; ++i) {
+ o.newFileToChangeMap = undefined;
+ var x = a[0];
+ s += x;
+ }
+ return s;
+}
+
+o = new Object();
+a = new TestConstructor();
+bad_func(o, a);
+
+// Make sure that we're out of the pre-monomorphic state for the member add
+// of 'newFileToChangeMap', which causes a map transition.
+o = new Object();
+a = new TestConstructor();
+bad_func(o, a);
+
+// Optimize. Before the fix, the element load and the subsequent tagged-to-i
+// were hoisted above the map check, even though the map check cannot be
+// hoisted past the map-changing store.
+o = new Object();
+a = new TestConstructor();
+%OptimizeFunctionOnNextCall(bad_func);
+bad_func(o, a);
+
+// Pass in an array of doubles. Before the fix, the optimized load and
+// tagged-to-i would treat part of a double value as a pointer and dereference
+// it before the map check that should have caused a deopt was executed.
+o = new Object();
+// Pass in an elements buffer where the bit representation of each double is
+// two adjacent small 32-bit values with the lowest bit set to one, causing
+// tagged-to-i to SIGSEGV.
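+// 2.122e-314 is chosen so that its raw 64-bit pattern is (presumably)
+// 0x00000001_00000001: each 32-bit half has the low bit set, which 32-bit V8
+// would misread as a tagged heap-object pointer.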
+a = [2.122e-314, 2.122e-314, 2.122e-314];
+bad_func(o, a);
diff --git a/deps/v8/test/mjsunit/regress/regress-148378.js b/deps/v8/test/mjsunit/regress/regress-148378.js
new file mode 100644
index 0000000000..d37cea1cf7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-148378.js
@@ -0,0 +1,38 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"a".replace(/a/g, function() { return "c"; });
+
+function test() {
+ try {
+ test();
+ } catch(e) {
+ "b".replace(/(b)/g, function() { return "c"; });
+ }
+}
+
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-1563.js b/deps/v8/test/mjsunit/regress/regress-1563.js
index c25b6c7f63..884b12595a 100644
--- a/deps/v8/test/mjsunit/regress/regress-1563.js
+++ b/deps/v8/test/mjsunit/regress/regress-1563.js
@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax
-obj = new PixelArray(10);
+obj = new Uint8ClampedArray(10);
// Test that undefined gets properly clamped in Crankshafted pixel array
// assignments.
diff --git a/deps/v8/test/mjsunit/regress/regress-1591.js b/deps/v8/test/mjsunit/regress/regress-1591.js
new file mode 100644
index 0000000000..69efd0bd87
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1591.js
@@ -0,0 +1,48 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var stack;
+var used_custom_lookup = false;
+
+({
+ __lookupGetter__ : function() {
+ used_custom_lookup = true;
+ },
+
+ test : function() {
+ try {
+ f();
+ } catch (err) {
+ stack = err.stack;
+ }
+ }
+}).test();
+
+var expected_message = "ReferenceError: f is not defined";
+assertTrue(stack.indexOf(expected_message) >= 0);
+assertFalse(used_custom_lookup);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-2119.js b/deps/v8/test/mjsunit/regress/regress-2119.js
new file mode 100644
index 0000000000..54840c238b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2119.js
@@ -0,0 +1,36 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --nouse-ic
+
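+// In strict mode, assigning to an undeclared variable must throw a
+// ReferenceError even with inline caches disabled.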
+function strict_function() {
+ "use strict"
+ undeclared = 1;
+}
+
+assertThrows(strict_function);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-2172.js b/deps/v8/test/mjsunit/regress/regress-2172.js
new file mode 100644
index 0000000000..5d06f4eef4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2172.js
@@ -0,0 +1,35 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+for (var i = 0; i < 10000; i++) {
+ (i + "\0").split(/(.)\1/i);
+}
+
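+// The same split, but with a two-byte character in the subject so that the
+// UC16 path of the case-insensitive back-reference code is presumably
+// exercised as well.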
+for (var i = 0; i < 10000; i++) {
+ (i + "\u1234\0").split(/(.)\1/i);
+}
+
diff --git a/deps/v8/test/mjsunit/regress/regress-2185-2.js b/deps/v8/test/mjsunit/regress/regress-2185-2.js
new file mode 100644
index 0000000000..b1eedb9335
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2185-2.js
@@ -0,0 +1,145 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// These tests used to time out before this was fixed.
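+// The inputs below are random, presorted, reverse-sorted, and sawtooth
+// patterns that presumably hit the slow paths of Array.prototype.sort.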
+
+var LEN = 2e4;
+
+function short() {
+ var sum = 0;
+ for (var i = 0; i < 1000; i++) {
+ var a = [1, 4, 34, 23, 6, 123, 3, 2, 11, 515, 4, 33, 22, 2, 2, 1, 0, 123,
+ 23, 42, 43, 1002, 44, 43, 101, 23, 55, 11, 101, 102, 45, 11, 404,
+ 31415, 34, 53, 453, 45, 34, 5, 2, 35, 5, 345, 36, 45, 345, 3, 45,
+ 3, 5, 5, 2, 2342344, 2234, 23, 2718, 1500, 2, 19, 22, 43, 41, 0,
+ -1, 33, 45, 78];
+ a.sort(function(a, b) { return a - b; });
+ sum += a[0];
+ }
+ return sum;
+}
+
+function short_bench() {
+ var start = new Date();
+ short();
+ var end = new Date();
+ var ms = end - start;
+ print("Short " + Math.floor(ms) + "ms");
+}
+
+function sawseq(a, tooth) {
+ var count = 0;
+ while (true) {
+ for (var i = 0; i < tooth; i++) {
+ a.push(i);
+ if (++count >= LEN) return a;
+ }
+ }
+}
+
+function sawseq2(a, tooth) {
+ var count = 0;
+ while (true) {
+ for (var i = 0; i < tooth; i++) {
+ a.push(i);
+ if (++count >= LEN) return a;
+ }
+ for (var i = 0; i < tooth; i++) {
+ a.push(tooth - i);
+ if (++count >= LEN) return a;
+ }
+ }
+}
+
+function sawseq3(a, tooth) {
+ var count = 0;
+ while (true) {
+ for (var i = 0; i < tooth; i++) {
+ a.push(tooth - i);
+ if (++count >= LEN) return a;
+ }
+ }
+}
+
+function up(a) {
+ for (var i = 0; i < LEN; i++) {
+ a.push(i);
+ }
+ return a;
+}
+
+function down(a) {
+ for (var i = 0; i < LEN; i++) {
+ a.push(LEN - i);
+ }
+ return a;
+}
+
+function ran(a) {
+ for (var i = 0; i < LEN; i++) {
+ a.push(Math.floor(Math.random() * LEN));
+ }
+ return a;
+}
+
+var random = ran([]);
+var asc = up([]);
+var desc = down([]);
+var asc_desc = down(up([]));
+var desc_asc = up(down([]));
+var asc_asc = up(up([]));
+var desc_desc = down(down([]));
+var saw1 = sawseq([], 1000);
+var saw2 = sawseq([], 500);
+var saw3 = sawseq([], 200);
+var saw4 = sawseq2([], 200);
+var saw5 = sawseq3([], 200);
+
+function bench(name, array) {
+ var start = new Date();
+ array.sort(function(a, b) { return a - b; });
+ var end = new Date();
+ for (var i = 0; i < array.length - 1; i++) {
+ if (array[i] > array[i + 1]) throw name + " " + i;
+ }
+ var ms = end - start;
+ print(name + " " + Math.floor(ms) + "ms");
+}
+
+short_bench();
+bench("random", random);
+bench("up", asc);
+bench("down", desc);
+bench("saw 1000", saw1);
+bench("saw 500", saw2);
+bench("saw 200", saw3);
+bench("saw 200 symmetric", saw4);
+bench("saw 200 down", saw4);
+bench("up, down", asc_desc);
+bench("up, up", asc_asc);
+bench("down, down", desc_desc);
+bench("down, up", desc_asc);
diff --git a/deps/v8/test/mjsunit/regress/regress-2185.js b/deps/v8/test/mjsunit/regress/regress-2185.js
new file mode 100644
index 0000000000..895f322fc6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2185.js
@@ -0,0 +1,36 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var a = [];
+
+for (var i = 0; i < 2; i++) {
+ for (var j = 0; j < 30000; j++) {
+ a.push(j);
+ }
+}
+
+a.sort(function(a, b) { return a - b; });
diff --git a/deps/v8/test/mjsunit/regress/regress-2186.js b/deps/v8/test/mjsunit/regress/regress-2186.js
new file mode 100644
index 0000000000..0921dceadb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2186.js
@@ -0,0 +1,49 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-collections
+
+function heapify(i) {
+ return 2.0 * (i / 2);
+}
+heapify(1);
+
+var ONE = 1;
+var ANOTHER_ONE = heapify(ONE);
+assertSame(ONE, ANOTHER_ONE);
+assertEquals("number", typeof ONE);
+assertEquals("number", typeof ANOTHER_ONE);
+
+var set = new Set;
+set.add(ONE);
+assertTrue(set.has(ONE));
+assertTrue(set.has(ANOTHER_ONE));
+
+var map = new Map;
+map.set(ONE, 23);
+assertSame(23, map.get(ONE));
+assertSame(23, map.get(ANOTHER_ONE));
diff --git a/deps/v8/test/mjsunit/regress/regress-2193.js b/deps/v8/test/mjsunit/regress/regress-2193.js
new file mode 100644
index 0000000000..50509bfcbd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2193.js
@@ -0,0 +1,58 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --cache-optimized-code
+
+function bozo() {}
+function MakeClosure() {
+ return function f(use_literals) {
+ if (use_literals) {
+ return [1,2,3,3,4,5,6,7,8,9,bozo];
+ } else {
+ return 0;
+ }
+ }
+}
+
+// Create two closures that share the same literal boilerplates.
+var closure1 = MakeClosure();
+var closure2 = MakeClosure();
+var expected = [1,2,3,3,4,5,6,7,8,9,bozo];
+
+// Make sure we generate optimized code for the first closure after
+// warming it up properly, so that the literals boilerplate is generated
+// and the optimized code uses the CreateArrayLiteralShallow runtime call.
+assertEquals(0, closure1(false));
+assertEquals(expected, closure1(true));
+%OptimizeFunctionOnNextCall(closure1);
+assertEquals(expected, closure1(true));
+
+// Optimize the second closure, which should reuse the optimized code
+// from the first closure with the same literal boilerplates.
+assertEquals(0, closure2(false));
+%OptimizeFunctionOnNextCall(closure2);
+assertEquals(expected, closure2(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-2219.js b/deps/v8/test/mjsunit/regress/regress-2219.js
new file mode 100644
index 0000000000..946c75bd80
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2219.js
@@ -0,0 +1,32 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies --expose-gc
+
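+// Storing through a prototype chain that contains a proxy must survive a GC
+// triggered from inside the proxy's trap.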
+var p = Proxy.create({getPropertyDescriptor: function() { gc() }});
+var o = Object.create(p);
+assertSame(23, o.x = 23);
diff --git a/deps/v8/test/mjsunit/regress/regress-2225.js b/deps/v8/test/mjsunit/regress/regress-2225.js
new file mode 100644
index 0000000000..9957d8d463
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2225.js
@@ -0,0 +1,65 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies
+
+var proxy_has_x = false;
+var proxy = Proxy.create({ getPropertyDescriptor:function(key) {
+ assertSame('x', key);
+ if (proxy_has_x) {
+ return { configurable:true, writable:false, value:19 };
+ }
+}});
+
+// Test __lookupGetter__/__lookupSetter__ with proxy.
+assertSame(undefined, Object.prototype.__lookupGetter__.call(proxy, 'foo'));
+assertSame(undefined, Object.prototype.__lookupSetter__.call(proxy, 'bar'));
+assertSame(undefined, Object.prototype.__lookupGetter__.call(proxy, '123'));
+assertSame(undefined, Object.prototype.__lookupSetter__.call(proxy, '456'));
+
+// Test __lookupGetter__/__lookupSetter__ with proxy in prototype chain.
+var object = Object.create(proxy);
+assertSame(undefined, Object.prototype.__lookupGetter__.call(object, 'foo'));
+assertSame(undefined, Object.prototype.__lookupSetter__.call(object, 'bar'));
+assertSame(undefined, Object.prototype.__lookupGetter__.call(object, '123'));
+assertSame(undefined, Object.prototype.__lookupSetter__.call(object, '456'));
+
+// Test inline constructors with proxy as prototype.
+function f() { this.x = 23; }
+f.prototype = proxy;
+proxy_has_x = false;
+assertSame(23, new f().x);
+proxy_has_x = true;
+assertSame(19, new f().x);
+
+// Test inline constructors with proxy in prototype chain.
+function g() { this.x = 42; }
+g.prototype.__proto__ = proxy;
+proxy_has_x = false;
+assertSame(42, new g().x);
+proxy_has_x = true;
+assertSame(19, new g().x);
diff --git a/deps/v8/test/mjsunit/regress/regress-2226.js b/deps/v8/test/mjsunit/regress/regress-2226.js
new file mode 100644
index 0000000000..1ac3d3062a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2226.js
@@ -0,0 +1,36 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var foo = function() { 0; /* foo function */ };
+var bar = function() { 1; /* bar function */ };
+var baz = function() { 2; /* baz function */ };
+
+var test = foo.test = bar.test = baz;
+
+assertEquals(baz, test);
+assertEquals(baz, foo.test);
+assertEquals(baz, bar.test);
diff --git a/deps/v8/test/mjsunit/regress/regress-2234.js b/deps/v8/test/mjsunit/regress/regress-2234.js
new file mode 100644
index 0000000000..8da513e30e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2234.js
@@ -0,0 +1,41 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function test(i) {
+ // Overwrite random parts of the transcendental cache.
+ Math.sin(i / 1779 * Math.PI);
+ // Check whether the first cache line has been accidentally overwritten
+ // with an incorrect key.
+ assertEquals(0, Math.sin(0));
+}
+
+for (i = 0; i < 10000; ++i) {
+ test(i);
+ if (i == 0) %OptimizeFunctionOnNextCall(test);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-2249.js b/deps/v8/test/mjsunit/regress/regress-2249.js
new file mode 100644
index 0000000000..07d687d819
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2249.js
@@ -0,0 +1,33 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --gc-interval=10 --stress-compaction
+
+var o = {};
+o[Math.pow(2,30)-1] = 0;
+o[Math.pow(2,31)-1] = 0;
+o[1] = 0;
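+// Element indices around 2^30 and 2^31 are far beyond the fast-elements
+// limits, presumably forcing dictionary elements while --gc-interval=10
+// keeps collections frequent.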
diff --git a/deps/v8/test/mjsunit/regress/regress-2250.js b/deps/v8/test/mjsunit/regress/regress-2250.js
new file mode 100644
index 0000000000..b3b0db3fc3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2250.js
@@ -0,0 +1,68 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// The original problem from the bug: in the example below, the SMI check for b
+// generated when inlining the equals invocation (marked with (*)) is hoisted
+// out of the loop across the typeof b === "object" condition and causes an
+// immediate deopt. Another problem is that no matter how many times we deopt
+// and reopt, we continue to produce the wrong code.
+//
+// The fix is to notice when a deopt and the subsequent reopt fail to find
+// additional type information, indicating that optimistic LICM should be
+// disabled during compilation.
+
+function eq(a, b) {
+ if (typeof b === "object") {
+ return b.equals(a); // (*)
+ }
+ return a === b;
+}
+
+Object.prototype.equals = function (other) {
+ return (this === other);
+};
+
+function test() {
+ for (var i = 0; !eq(i, 10); i++)
+ ;
+}
+
+eq({}, {});
+eq({}, {});
+eq(1, 1);
+eq(1, 1);
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
+%OptimizeFunctionOnNextCall(test);
+// The second compilation should have noticed that LICM wasn't a good idea,
+// and the function should no longer deopt when called.
+test();
+assertTrue(2 != %GetOptimizationStatus(test));
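+// (In this V8 version a %GetOptimizationStatus result of 2 is taken to mean
+// "not optimized".)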
+
diff --git a/deps/v8/test/mjsunit/regress/regress-2284.js b/deps/v8/test/mjsunit/regress/regress-2284.js
new file mode 100644
index 0000000000..561401998a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2284.js
@@ -0,0 +1,32 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+assertThrows("%foobar();", TypeError);
+assertThrows("%constructor();", TypeError);
+assertThrows("%constructor(23);", TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-2285.js b/deps/v8/test/mjsunit/regress/regress-2285.js
new file mode 100644
index 0000000000..efda4cde32
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2285.js
@@ -0,0 +1,32 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+assertThrows(function() { %_CallFunction(null, 0, ""); });
+assertThrows(function() { %_CallFunction(null, 0, 1); });
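+// %_CallFunction takes the callee as its last argument; a non-function there
+// must throw instead of crashing.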
+
diff --git a/deps/v8/test/mjsunit/regress/regress-2289.js b/deps/v8/test/mjsunit/regress/regress-2289.js
new file mode 100644
index 0000000000..e89ec6e143
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2289.js
@@ -0,0 +1,34 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
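+// Build a very long string by repeated doubling, then run a replace on
+// it; this exercises String.prototype.replace on long (cons) strings.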
+var foo = "a";
+for (var i = 0; i < 12; i++) foo += foo;
+foo = foo + 'b' + foo;
+
+foo.replace(/b/, "a");
+
+
diff --git a/deps/v8/test/mjsunit/regress/regress-2294.js b/deps/v8/test/mjsunit/regress/regress-2294.js
new file mode 100644
index 0000000000..43ba10df03
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2294.js
@@ -0,0 +1,70 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
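+// Stores into a Uint8ClampedArray must clamp to [0, 255] and round
+// half-way cases to even; the optimized code below must agree with the
+// generic path.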
+var clampedArray = new Uint8ClampedArray(10);
+
+function test() {
+ clampedArray[0] = 0.499;
+ assertEquals(0, clampedArray[0]);
+ clampedArray[0] = 0.5;
+ assertEquals(0, clampedArray[0]);
+ clampedArray[0] = 0.501;
+ assertEquals(1, clampedArray[0]);
+ clampedArray[0] = 1.499;
+ assertEquals(1, clampedArray[0]);
+ clampedArray[0] = 1.5;
+ assertEquals(2, clampedArray[0]);
+ clampedArray[0] = 1.501;
+ assertEquals(2, clampedArray[0]);
+ clampedArray[0] = 2.5;
+ assertEquals(2, clampedArray[0]);
+ clampedArray[0] = 3.5;
+ assertEquals(4, clampedArray[0]);
+ clampedArray[0] = 252.5;
+ assertEquals(252, clampedArray[0]);
+ clampedArray[0] = 253.5;
+ assertEquals(254, clampedArray[0]);
+ clampedArray[0] = 254.5;
+ assertEquals(254, clampedArray[0]);
+ clampedArray[0] = 256.5;
+ assertEquals(255, clampedArray[0]);
+ clampedArray[0] = -0.5;
+ assertEquals(0, clampedArray[0]);
+ clampedArray[0] = -1.5;
+ assertEquals(0, clampedArray[0]);
+ clampedArray[0] = 1000000000000;
+ assertEquals(255, clampedArray[0]);
+ clampedArray[0] = -1000000000000;
+ assertEquals(0, clampedArray[0]);
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-2296.js b/deps/v8/test/mjsunit/regress/regress-2296.js
new file mode 100644
index 0000000000..c00f14f172
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2296.js
@@ -0,0 +1,40 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
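+// Calling script().setSource() with a non-string argument from within a
+// debug event listener should be handled gracefully.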
+Debug = debug.Debug
+
+function listener(event, exec_state, event_data, data) {
+ event_data.script().setSource(1);
+};
+
+Debug.setListener(listener);
+
+eval('0');
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-125148.js b/deps/v8/test/mjsunit/regress/regress-crbug-125148.js
index 025f9a5a4b..0f7bcd8cab 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-125148.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-125148.js
@@ -27,26 +27,64 @@
// Flags: --allow-natives-syntax
-var A = {
- foo: function() { assertUnreachable(); }
+function ToDictionaryMode(x) {
+ %OptimizeObjectForAddingMultipleProperties(x, 100);
}
-var B = {
- b: 2,
- foo: function() { return 1; }
-}
-B.__proto__ = A;
+var A, B, C;
-var C = {};
-C.__proto__ = B;
+// The initial bug report was about calling a known function...
+A = {};
+Object.defineProperty(A, "foo", { value: function() { assertUnreachable(); }});
-function bar(x) {
- return x.foo();
-}
+B = Object.create(A);
+Object.defineProperty(B, "foo", { value: function() { return 111; }});
-for (var i = 0; i < 3; i++) {
- assertEquals(1, bar(C));
-}
-%OptimizeObjectForAddingMultipleProperties(B, 100); // Force dictionary mode.
+C = Object.create(B);
+
+function bar(x) { return x.foo(); }
+
+assertEquals(111, bar(C));
+assertEquals(111, bar(C));
+ToDictionaryMode(B);
%OptimizeFunctionOnNextCall(bar);
-assertEquals(1, bar(C));
+assertEquals(111, bar(C));
+
+// Although this was not in the initial bug report: The same for getters...
+A = {};
+Object.defineProperty(A, "baz", { get: function() { assertUnreachable(); }});
+
+B = Object.create(A);
+Object.defineProperty(B, "baz", { get: function() { return 111; }});
+
+C = Object.create(B);
+
+function boo(x) { return x.baz; }
+
+assertEquals(111, boo(C));
+assertEquals(111, boo(C));
+ToDictionaryMode(B);
+%OptimizeFunctionOnNextCall(boo);
+assertEquals(111, boo(C));
+
+// And once more for setters...
+A = {};
+Object.defineProperty(A, "huh", { set: function(x) { assertUnreachable(); }});
+
+B = Object.create(A);
+var setterValue;
+Object.defineProperty(B, "huh", { set: function(x) { setterValue = x; }});
+
+C = Object.create(B);
+
+function fuu(x) {
+ setterValue = 222;
+ x.huh = 111;
+ return setterValue;
+}
+
+assertEquals(111, fuu(C));
+assertEquals(111, fuu(C));
+ToDictionaryMode(B);
+%OptimizeFunctionOnNextCall(fuu);
+assertEquals(111, fuu(C));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-134609.js b/deps/v8/test/mjsunit/regress/regress-crbug-134609.js
new file mode 100644
index 0000000000..da7d85dcb6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-134609.js
@@ -0,0 +1,59 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --inline-accessors
+
+var forceDeopt = {x:0};
+
+var objectWithGetterProperty = (function (value) {
+ var obj = {};
+ Object.defineProperty(obj, "getterProperty", {
+ get: function foo() {
+ forceDeopt.x;
+ return value;
+ },
+ });
+ return obj;
+})("bad");
+
+function test() {
+ var iAmContextAllocated = "good";
+ objectWithGetterProperty.getterProperty;
+ return iAmContextAllocated;
+
+ // Make sure that the local variable is context allocated.
+ function unused() { iAmContextAllocated; }
+}
+
+assertEquals("good", test());
+assertEquals("good", test());
+%OptimizeFunctionOnNextCall(test);
+assertEquals("good", test());
+
+// At this point, foo should have been inlined into test. Let's deopt...
+delete forceDeopt.x;
+assertEquals("good", test());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-135008.js b/deps/v8/test/mjsunit/regress/regress-crbug-135008.js
new file mode 100644
index 0000000000..2be396e805
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-135008.js
@@ -0,0 +1,45 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Filler long enough to trigger lazy parsing.
+var filler = "//" + new Array(1024).join('x');
+
+var scope = { x:23 };
+
+with(scope) {
+ eval(
+ "scope.f = (function outer() {" +
+ " function inner() {" +
+ " return x;" +
+ " }" +
+ " return inner;" +
+ "})();" +
+ filler
+ );
+};
+
+assertSame(23, scope.f());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-135066.js b/deps/v8/test/mjsunit/regress/regress-crbug-135066.js
new file mode 100644
index 0000000000..1aeca8b1a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-135066.js
@@ -0,0 +1,53 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Filler long enough to trigger lazy parsing.
+var filler = "//" + new Array(1024).join('x');
+
+// Test strict eval in global context.
+eval(
+ "'use strict';" +
+ "var x = 23;" +
+ "var f = function bozo1() {" +
+ " return x;" +
+ "};" +
+ "assertSame(23, f());" +
+ filler
+);
+
+// Test default eval in strict context.
+(function() {
+ "use strict";
+ eval(
+ "var y = 42;" +
+ "var g = function bozo2() {" +
+ " return y;" +
+ "};" +
+ "assertSame(42, g());" +
+ filler
+ );
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-137689.js b/deps/v8/test/mjsunit/regress/regress-crbug-137689.js
new file mode 100644
index 0000000000..ef79d240f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-137689.js
@@ -0,0 +1,47 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
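+// Objects that receive identical accessor property definitions should
+// end up sharing the same map.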
+function getter() { return 10; }
+function setter(v) { }
+function getter2() { return 20; }
+
+var o = {};
+var o2 = {};
+
+Object.defineProperty(o, "foo", { get: getter, configurable: true });
+Object.defineProperty(o2, "foo", { get: getter, configurable: true });
+assertTrue(%HaveSameMap(o, o2));
+
+Object.defineProperty(o, "bar", { get: getter2 });
+Object.defineProperty(o2, "bar", { get: getter2 });
+assertTrue(%HaveSameMap(o, o2));
+
+Object.defineProperty(o, "foo", { set: setter, configurable: true });
+Object.defineProperty(o2, "foo", { set: setter, configurable: true });
+assertTrue(%HaveSameMap(o, o2));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-138887.js b/deps/v8/test/mjsunit/regress/regress-crbug-138887.js
new file mode 100644
index 0000000000..8d8e1694b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-138887.js
@@ -0,0 +1,48 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
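+// f1 and f2 are closures produced by the same factory; optimizing one of
+// them must not change the results computed by the other.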
+function worker1(ignored) {
+ return 100;
+}
+
+function factory(worker) {
+ return function(call_depth) {
+ if (call_depth == 0) return 10;
+ return 1 + worker(call_depth - 1);
+ }
+}
+
+var f1 = factory(worker1);
+var f2 = factory(f1);
+assertEquals(11, f2(1)); // Result: 1 + f1(0) == 1 + 10.
+assertEquals(11, f2(1));
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(10, f1(0)); // Terminates immediately -> returns 10.
+%OptimizeFunctionOnNextCall(f2);
+assertEquals(102, f2(1000)); // 1 + f1(999) == 1 + 1 + worker1(998) == 102
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-140083.js b/deps/v8/test/mjsunit/regress/regress-crbug-140083.js
new file mode 100644
index 0000000000..e38192cd8a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-140083.js
@@ -0,0 +1,44 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test that the absence of a setter in a compound/count operation works.
+
+Object.defineProperty(Object.prototype, "foo",
+ { get: function() { return 123; } });
+
+function bar(o) {
+ o.foo += 42;
+ o.foo++;
+}
+
+var baz = {};
+bar(baz);
+bar(baz);
+%OptimizeFunctionOnNextCall(bar)
+bar(baz);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-142087.js b/deps/v8/test/mjsunit/regress/regress-crbug-142087.js
new file mode 100644
index 0000000000..881ca60fba
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-142087.js
@@ -0,0 +1,38 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
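+// The regexp below can match the empty string at every position, so a
+// global match must yield one empty string per position, including the end.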
+var string = "What are you looking for?";
+
+var expected_match = [""];
+for (var i = 0; i < string.length; i++) {
+ expected_match.push("");
+}
+
+string.replace(/(_)|(_|)/g, "");
+assertArrayEquals(expected_match, string.match(/(_)|(_|)/g, ""));
+
+'***************************************'.match(/((\\)|(\*)|(\$))/g, ".");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-142218.js b/deps/v8/test/mjsunit/regress/regress-crbug-142218.js
new file mode 100644
index 0000000000..373f83bca3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-142218.js
@@ -0,0 +1,44 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
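+// After insert_element has been optimized for smi keys, a store with an
+// object key must not corrupt the array: exactly one element may be set.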
+length = 1 << 16;
+a = new Array(length);
+
+function insert_element(key) {
+ a[key] = 42;
+}
+
+insert_element(1);
+%OptimizeFunctionOnNextCall(insert_element);
+insert_element(new Object());
+count = 0;
+for (var i = 0; i < length; i++) {
+ if (a[i] != undefined) count++;
+}
+assertEquals(1, count);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-145961.js b/deps/v8/test/mjsunit/regress/regress-crbug-145961.js
new file mode 100644
index 0000000000..eb88945e0f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-145961.js
@@ -0,0 +1,39 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This test causes the operands to be passed in as Integer32 registers.
+// Flags: --allow-natives-syntax
+function test() {
+ var a = new Int32Array(2);
+ var x = a[0];
+ return Math.min(x, x);
+}
+
+assertEquals(0, test());
+assertEquals(0, test());
+%OptimizeFunctionOnNextCall(test);
+assertEquals(0, test());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-147475.js b/deps/v8/test/mjsunit/regress/regress-crbug-147475.js
new file mode 100644
index 0000000000..180744c730
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-147475.js
@@ -0,0 +1,48 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
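+// Same setup as regress-crbug-138887: recursive closures from a shared
+// factory; the optimized versions must agree with the unoptimized results.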
+function worker1(ignored) {
+ return 100;
+}
+
+function factory(worker) {
+ return function(call_depth) {
+ if (call_depth == 0) return 10;
+ return 1 + worker(call_depth - 1);
+ }
+}
+
+var f1 = factory(worker1);
+var f2 = factory(f1);
+assertEquals(11, f2(1));
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(10, f1(0));
+%OptimizeFunctionOnNextCall(f2);
+assertEquals(102, f2(2));
+assertEquals(102, f2(2));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-148376.js b/deps/v8/test/mjsunit/regress/regress-crbug-148376.js
new file mode 100644
index 0000000000..55bb5f16fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-148376.js
@@ -0,0 +1,35 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
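+// Defining a setter on both Object.prototype and the global object and
+// then enumerating the global object's keys should not crash.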
+function defineSetter(o) {
+ o.__defineSetter__('property', function() {});
+}
+
+defineSetter(Object.prototype);
+property = 0;
+defineSetter(this);
+var keys = Object.keys(this);
diff --git a/deps/v8/test/mjsunit/regress/regress-debug-code-recompilation.js b/deps/v8/test/mjsunit/regress/regress-debug-code-recompilation.js
index 1a608b14a3..4723ec1307 100644
--- a/deps/v8/test/mjsunit/regress/regress-debug-code-recompilation.js
+++ b/deps/v8/test/mjsunit/regress/regress-debug-code-recompilation.js
@@ -25,7 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --hydrogen-filter=Debug.setBreakPoint --expose-debug-as debug
+// Flags: --allow-natives-syntax --hydrogen-filter=Debug.setBreakPoint
+// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/regress/regress-load-elements.js b/deps/v8/test/mjsunit/regress/regress-load-elements.js
new file mode 100644
index 0000000000..68cdc8e8a1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-load-elements.js
@@ -0,0 +1,49 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function bad_func(o,a) {
+ for (var i = 0; i < 1; ++i) {
+ o.prop = 0;
+ var x = a[0];
+ }
+}
+
+o = new Object();
+a = {};
+a[0] = 1;
+bad_func(o, a);
+
+o = new Object();
+bad_func(o, a);
+
+// Optimize. Before the fix, the elements load and the subsequent
+// fixed-array-length load were hoisted above the map check. This is
+// invalid since not all types necessarily have elements.
+%OptimizeFunctionOnNextCall(bad_func);
+bad_func(o, "");
diff --git a/deps/v8/test/mjsunit/str-to-num.js b/deps/v8/test/mjsunit/str-to-num.js
index bbfa7d33a0..cbec87fab9 100644
--- a/deps/v8/test/mjsunit/str-to-num.js
+++ b/deps/v8/test/mjsunit/str-to-num.js
@@ -147,7 +147,6 @@ assertEquals(15, toNumber("0Xf"));
assertEquals(15, toNumber("0XF"));
assertEquals(0, toNumber("0x000"));
-assertEquals(-Infinity, 1 / toNumber("-0x000"));
assertEquals(0, toNumber("0x000" + repeat('0', 1000)));
assertEquals(9, toNumber("0x009"));
assertEquals(10, toNumber("0x00a"));
@@ -157,7 +156,6 @@ assertEquals(15, toNumber("0x00F"));
assertEquals(15, toNumber("0x00F "));
assertEquals(Infinity, toNumber("0x" + repeat('0', 1000) + '1'
+ repeat('0', 1000)));
-assertEquals(-Infinity, toNumber("-0x1" + repeat('0', 1000)));
assertEquals(0x1000000 * 0x10000000, toNumber("0x10000000000000"));
assertEquals(0x1000000 * 0x10000000 + 1, toNumber("0x10000000000001"));
@@ -207,3 +205,10 @@ assertTrue(isNaN(toNumber("1" + repeat('0', 1000) + 'junk')), "1e1000 junk");
for (var i = 1; i < 12; i++) {
assertEquals(toNumber('1' + repeat('0', i)), Math.pow(10.0, i));
}
+
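+// A sign prefix is not allowed on hex string literals (ES5.1, 9.3.1), so
+// these conversions must yield NaN.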
+assertTrue(isNaN(toNumber("+0x0")));
+assertTrue(isNaN(toNumber("+0xFF")));
+assertTrue(isNaN(toNumber("+0x012")));
+assertTrue(isNaN(toNumber("-0x0")));
+assertTrue(isNaN(toNumber("-0xFF")));
+assertTrue(isNaN(toNumber("-0x012"))); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/string-charcodeat.js b/deps/v8/test/mjsunit/string-charcodeat.js
index 8be6a092e6..72dc8190af 100644
--- a/deps/v8/test/mjsunit/string-charcodeat.js
+++ b/deps/v8/test/mjsunit/string-charcodeat.js
@@ -231,3 +231,6 @@ for (var i = 0; i < 5; i++) {
}
%OptimizeFunctionOnNextCall(directlyOnPrototype);
directlyOnPrototype();
+
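+// %_StringCharCodeAt with an out-of-bounds index yields NaN.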
+assertTrue(isNaN(%_StringCharCodeAt("ABC", -1)));
+assertTrue(isNaN(%_StringCharCodeAt("ABC", 4)));
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index 87ed4fae8f..5a2f31481e 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -56,9 +56,9 @@ class MjsunitTestCase(test.TestCase):
def GetVmCommand(self, source):
result = self.config.context.GetVmCommand(self, self.mode)
- flags_match = FLAGS_PATTERN.search(source)
- if flags_match:
- result += flags_match.group(1).strip().split()
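+ # A test file may contain several "// Flags:" lines; collect them all.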
+ flags_match = re.findall(FLAGS_PATTERN, source)
+ for match in flags_match:
+ result += match.strip().split()
return result
def GetVmArguments(self, source):
diff --git a/deps/v8/test/mjsunit/typed-array-slice.js b/deps/v8/test/mjsunit/typed-array-slice.js
new file mode 100644
index 0000000000..c6e7e9415a
--- /dev/null
+++ b/deps/v8/test/mjsunit/typed-array-slice.js
@@ -0,0 +1,61 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// This is a regression test for overlapping key and value registers.
+
+var types = [Array, Int8Array, Uint8Array, Int16Array, Uint16Array,
+ Int32Array, Uint32Array, Uint8ClampedArray, Float32Array,
+ Float64Array];
+
+var results1 = [-2, -2, 254, -2, 65534, -2, 4294967294, 0, -2, -2];
+var results2 = [undefined, -1, 255, -1, 65535, -1, 4294967295, 0, -1, -1];
+var results3 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
+var results4 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
+
+const kElementCount = 40;
+
+function do_slice(a) {
+ return Array.prototype.slice.call(a, 4, 8);
+}
+
+for (var t = 0; t < types.length; t++) {
+ var type = types[t];
+ var a = new type(kElementCount);
+ for (var i = 0; i < kElementCount; ++i ) {
+ a[i] = i-6;
+ }
+ delete a[5];
+ var sliced = do_slice(a);
+
+ %ClearFunctionTypeFeedback(do_slice);
+ assertEquals(results1[t], sliced[0]);
+ assertEquals(results2[t], sliced[1]);
+ assertEquals(results3[t], sliced[2]);
+ assertEquals(results4[t], sliced[3]);
+}
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index ac039930c3..5d061ae8c4 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -28,6 +28,8 @@
// Test dictionary -> double elements -> dictionary elements round trip
// Flags: --allow-natives-syntax --unbox-double-arrays --expose-gc
+// Flags: --noparallel-recompilation
+
var large_array_size = 100000;
var approx_dict_to_elements_threshold = 70000;
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 87d7bd2908..4f2fbdea5a 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -126,13 +126,13 @@ ecma/Date/15.9.2.2-5: PASS || FAIL
ecma/Date/15.9.2.2-6: PASS || FAIL
# 1026139: These date tests fail on arm and mips
-ecma/Date/15.9.5.29-1: PASS || FAIL if ($arch == arm || $arch == mips)
-ecma/Date/15.9.5.34-1: PASS || FAIL if ($arch == arm || $arch == mips)
-ecma/Date/15.9.5.28-1: PASS || FAIL if ($arch == arm || $arch == mips)
+ecma/Date/15.9.5.29-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
+ecma/Date/15.9.5.34-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
+ecma/Date/15.9.5.28-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
# 1050186: Arm/MIPS vm is broken; probably unrelated to dates
-ecma/Array/15.4.4.5-3: PASS || FAIL if ($arch == arm || $arch == mips)
-ecma/Date/15.9.5.22-2: PASS || FAIL if ($arch == arm || $arch == mips)
+ecma/Array/15.4.4.5-3: PASS || FAIL if ($arch == arm || $arch == mipsel)
+ecma/Date/15.9.5.22-2: PASS || FAIL if ($arch == arm || $arch == mipsel)
# Flaky test that fails due to what appears to be a bug in the test.
# Occurs depending on current time
@@ -368,6 +368,10 @@ ecma/GlobalObject/15.1.2.6: FAIL_OK
ecma/GlobalObject/15.1.2.7: FAIL_OK
+# Leading zeros no longer signal octal numbers (ECMA-262 Annex E 15.1.2.2).
+ecma/GlobalObject/15.1.2.2-2: FAIL_OK
+
+
# Tests that rely on specific details of function decompilation or
# print strings for errors. Non-ECMA behavior.
js1_2/function/tostring-2: FAIL_OK
@@ -603,6 +607,10 @@ ecma_2/RegExp/function-001: FAIL_OK
ecma_2/RegExp/properties-001: FAIL_OK
+# Negative hexadecimal literals are parsed as NaN. This test is outdated.
+ecma/TypeConversion/9.3.1-3: FAIL_OK
+
+
##################### FAILING TESTS #####################
# This section is for tests that fail in V8 and pass in JSC.
@@ -633,7 +641,7 @@ js1_5/Expressions/regress-394673: FAIL
# Bug 762: http://code.google.com/p/v8/issues/detail?id=762
# We do not correctly handle assignments within "with"
-/ecma_3/Statements/12.10-01: FAIL
+ecma_3/Statements/12.10-01: FAIL
# We do not throw an exception when a const is redeclared.
# (We only fail section 1 of the test.)
@@ -818,12 +826,6 @@ js1_5/decompilation/regress-383721: PASS || FAIL
js1_5/decompilation/regress-406555: PASS || FAIL
js1_5/decompilation/regress-460870: PASS || FAIL
-# These tests take an unreasonable amount of time so we skip them
-# in fast mode.
-
-js1_5/Regress/regress-312588: TIMEOUT || SKIP if $FAST == yes
-js1_5/Regress/regress-271716-n: PASS || SKIP if $FAST == yes
-
[ $arch == arm ]
@@ -848,40 +850,7 @@ js1_5/Regress/regress-451322: SKIP
js1_5/GC/regress-203278-2: PASS || TIMEOUT
-[ $fast == yes && $arch == arm ]
-
-# In fast mode on arm we try to skip all tests that would time out,
-# since running the tests takes so long in the first place.
-
-js1_5/Regress/regress-280769-2: SKIP
-js1_5/Regress/regress-280769-3: SKIP
-js1_5/Regress/regress-244470: SKIP
-js1_5/Regress/regress-203278-1: SKIP
-js1_5/Regress/regress-290575: SKIP
-js1_5/Regress/regress-159334: SKIP
-js1_5/Regress/regress-321971: SKIP
-js1_5/Regress/regress-347306-01: SKIP
-js1_5/Regress/regress-280769-1: SKIP
-js1_5/Regress/regress-280769-5: SKIP
-js1_5/GC/regress-306788: SKIP
-js1_5/GC/regress-278725: SKIP
-js1_5/GC/regress-203278-3: SKIP
-js1_5/GC/regress-311497: SKIP
-js1_5/Array/regress-99120-02: SKIP
-ecma/Date/15.9.5.22-1: SKIP
-ecma/Date/15.9.5.20: SKIP
-ecma/Date/15.9.5.12-2: SKIP
-ecma/Date/15.9.5.8: SKIP
-ecma/Date/15.9.5.9: SKIP
-ecma/Date/15.9.5.11-2: SKIP
-ecma/Expressions/11.7.2: SKIP
-ecma/Expressions/11.10-2: SKIP
-ecma/Expressions/11.7.3: SKIP
-ecma/Expressions/11.10-3: SKIP
-ecma/Expressions/11.7.1: SKIP
-ecma_3/RegExp/regress-209067: SKIP
-
-[ $arch == mips ]
+[ $arch == mipsel ]
# Times out and print so much output that we need to skip it to not
# hang the builder.
@@ -902,37 +871,3 @@ js1_5/Regress/regress-451322: SKIP
# BUG(1040): Allow this test to timeout.
js1_5/GC/regress-203278-2: PASS || TIMEOUT
-
-
-[ $fast == yes && $arch == mips ]
-
-# In fast mode on mips we try to skip all tests that would time out,
-# since running the tests takes so long in the first place.
-
-js1_5/Regress/regress-280769-2: SKIP
-js1_5/Regress/regress-280769-3: SKIP
-js1_5/Regress/regress-244470: SKIP
-js1_5/Regress/regress-203278-1: SKIP
-js1_5/Regress/regress-290575: SKIP
-js1_5/Regress/regress-159334: SKIP
-js1_5/Regress/regress-321971: SKIP
-js1_5/Regress/regress-347306-01: SKIP
-js1_5/Regress/regress-280769-1: SKIP
-js1_5/Regress/regress-280769-5: SKIP
-js1_5/GC/regress-306788: SKIP
-js1_5/GC/regress-278725: SKIP
-js1_5/GC/regress-203278-3: SKIP
-js1_5/GC/regress-311497: SKIP
-js1_5/Array/regress-99120-02: SKIP
-ecma/Date/15.9.5.22-1: SKIP
-ecma/Date/15.9.5.20: SKIP
-ecma/Date/15.9.5.12-2: SKIP
-ecma/Date/15.9.5.8: SKIP
-ecma/Date/15.9.5.9: SKIP
-ecma/Date/15.9.5.11-2: SKIP
-ecma/Expressions/11.7.2: SKIP
-ecma/Expressions/11.10-2: SKIP
-ecma/Expressions/11.7.3: SKIP
-ecma/Expressions/11.10-3: SKIP
-ecma/Expressions/11.7.1: SKIP
-ecma_3/RegExp/regress-209067: SKIP
diff --git a/deps/v8/test/preparser/strict-identifiers.pyt b/deps/v8/test/preparser/strict-identifiers.pyt
index aa3d5210d8..f979088689 100644
--- a/deps/v8/test/preparser/strict-identifiers.pyt
+++ b/deps/v8/test/preparser/strict-identifiers.pyt
@@ -285,4 +285,4 @@ for reserved_word in reserved_words + strict_reserved_words:
# Future reserved words in strict mode behave like normal identifiers
# in a non strict context.
for reserved_word in strict_reserved_words:
- non_strict_use({"id": id}, None)
+ non_strict_use({"id": reserved_word}, None)
diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status
index 52d126e65b..67d1c75fe1 100644
--- a/deps/v8/test/sputnik/sputnik.status
+++ b/deps/v8/test/sputnik/sputnik.status
@@ -216,7 +216,7 @@ S15.1.3.4_A2.3_T1: SKIP
S15.1.3.1_A2.5_T1: SKIP
S15.1.3.2_A2.5_T1: SKIP
-[ $arch == mips ]
+[ $arch == mipsel ]
# BUG(3251225): Tests that timeout with --nocrankshaft.
S15.1.3.1_A2.5_T1: SKIP
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 567a78ec84..06b43c717e 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -41,10 +41,6 @@ S15.12.2_A1: FAIL
##################### DELIBERATE INCOMPATIBILITIES #####################
-# We deliberately treat arguments to parseInt() with a leading zero as
-# octal numbers in order to not break the web.
-S15.1.2.2_A5.1_T1: FAIL_OK
-
# This tests precision of Math.tan and Math.sin. The implementation for those
# trigonometric functions are platform/compiler dependent. Furthermore, the
# expectation values by far deviates from the actual result given by an
@@ -75,33 +71,19 @@ S15.9.3.1_A5_T6: PASS || FAIL_OK
############################ SKIPPED TESTS #############################
# These tests take a looong time to run in debug mode.
-S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug
S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug
+S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug
-[ $arch == arm ]
+[ $arch == arm || $arch == mipsel ]
-# BUG(3251225): Tests that timeout with --nocrankshaft.
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
-S15.1.3.1_A2.4_T1: SKIP
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.4_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
-S15.1.3.3_A2.3_T1: SKIP
-S15.1.3.4_A2.3_T1: SKIP
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
-
-[ $arch == mips ]
+# TODO(mstarzinger): Causes stack overflow on simulators due to eager
+# compilation of parenthesized function literals. Needs investigation.
+S13.2.1_A1_T1: SKIP
# BUG(3251225): Tests that timeout with --nocrankshaft.
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
S15.1.3.1_A2.4_T1: SKIP
S15.1.3.1_A2.5_T1: SKIP
S15.1.3.2_A2.4_T1: SKIP
S15.1.3.2_A2.5_T1: SKIP
S15.1.3.3_A2.3_T1: SKIP
S15.1.3.4_A2.3_T1: SKIP
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
diff --git a/deps/v8/tools/android-build.sh b/deps/v8/tools/android-build.sh
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/tools/android-build.sh
diff --git a/deps/v8/tools/android-ll-prof.sh b/deps/v8/tools/android-ll-prof.sh
new file mode 100644
index 0000000000..78790ecb62
--- /dev/null
+++ b/deps/v8/tools/android-ll-prof.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Runs d8 with the given arguments on the device under 'perf' and
+# processes the profiler trace and v8 logs using ll_prof.py.
+#
+# Usage:
+# > ./tools/android-ll-prof.sh (debug|release) "args to d8" "args to ll_prof.py"
+#
+# The script creates the deploy directory deploy/data/local/tmp/v8, copies
+# the d8 binary there from either out/android_arm.release or
+# out/android_arm.debug, and then syncs the deploy directory with
+# /data/local/tmp/v8 on the device.
+# You can put JS files in the deploy directory before running the script.
+# Note: $ANDROID_NDK_ROOT must be set.
+
+MODE=$1
+RUN_ARGS=$2
+LL_PROF_ARGS=$3
+
+BASE=`cd $(dirname "$0")/..; pwd`
+DEPLOY="$BASE/deploy"
+
+set +e
+mkdir -p "$DEPLOY/data/local/tmp/v8"
+
+cp "$BASE/out/android_arm.$MODE/d8" "$DEPLOY/data/local/tmp/v8/d8"
+
+adb -p "$DEPLOY" sync data
+
+adb shell "cd /data/local/tmp/v8;\
+ perf record -R -e cycles -c 10000 -f -i \
+ ./d8 --ll_prof --gc-fake-mmap=/data/local/tmp/__v8_gc__ $RUN_ARGS"
+
+adb pull /data/local/tmp/v8/v8.log .
+adb pull /data/local/tmp/v8/v8.log.ll .
+adb pull /data/perf.data .
+
+ARCH=arm-linux-androideabi-4.4.3
+TOOLCHAIN="${ANDROID_NDK_ROOT}/toolchains/$ARCH/prebuilt/linux-x86/bin"
+
+$BASE/tools/ll_prof.py --host-root="$BASE/deploy" \
+ --gc-fake-mmap=/data/local/tmp/__v8_gc__ \
+ --objdump="$TOOLCHAIN/arm-linux-androideabi-objdump" \
+ $LL_PROF_ARGS
diff --git a/deps/v8/tools/android-run.py b/deps/v8/tools/android-run.py
new file mode 100644
index 0000000000..1693c5b064
--- /dev/null
+++ b/deps/v8/tools/android-run.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script executes the passed command line on an Android device
+# using the 'adb shell' command. Unfortunately, 'adb shell' always
+# returns exit code 0, ignoring the exit code of executed command.
+# Since we need to return non-zero exit code if the command failed,
+# we augment the passed command line with exit code checking statement
+# and output special error string in case of non-zero exit code.
+# Then we parse the output of 'adb shell' and look for that error string.
+
+import os
+from os.path import join, dirname, abspath
+import subprocess
+import sys
+import tempfile
+
+def Check(output, errors):
+ failed = any([s.startswith('/system/bin/sh:') or s.startswith('ANDROID')
+ for s in output.split('\n')])
+ return 1 if failed else 0
+
+def Execute(cmdline):
+ (fd_out, outname) = tempfile.mkstemp()
+ (fd_err, errname) = tempfile.mkstemp()
+ process = subprocess.Popen(
+ args=cmdline,
+ shell=True,
+ stdout=fd_out,
+ stderr=fd_err,
+ )
+ exit_code = process.wait()
+ os.close(fd_out)
+ os.close(fd_err)
+ output = file(outname).read()
+ errors = file(errname).read()
+ os.unlink(outname)
+ os.unlink(errname)
+ sys.stdout.write(output)
+ sys.stderr.write(errors)
+ return exit_code or Check(output, errors)
+
+def Escape(arg):
+ def ShouldEscape():
+ for x in arg:
+ if not x.isalnum() and x != '-' and x != '_':
+ return True
+ return False
+
+ return arg if not ShouldEscape() else '"%s"' % (arg.replace('"', '\\"'))
+
+def WriteToTemporaryFile(data):
+ (fd, fname) = tempfile.mkstemp()
+ os.close(fd)
+ tmp_file = open(fname, "w")
+ tmp_file.write(data)
+ tmp_file.close()
+ return fname
+
+def Main():
+ if (len(sys.argv) == 1):
+ print("Usage: %s <command-to-run-on-device>" % sys.argv[0])
+ return 1
+ workspace = abspath(join(dirname(sys.argv[0]), '..'))
+ android_workspace = os.getenv("ANDROID_V8", "/data/local/v8")
+ args = [Escape(arg) for arg in sys.argv[1:]]
+ script = (" ".join(args) + "\n"
+ "case $? in\n"
+ " 0) ;;\n"
+ " *) echo \"ANDROID: Error returned by test\";;\n"
+ "esac\n")
+ script = script.replace(workspace, android_workspace)
+ script_file = WriteToTemporaryFile(script)
+ android_script_file = android_workspace + "/" + script_file
+ command = ("adb push '%s' %s;" % (script_file, android_script_file) +
+ "adb shell 'sh %s';" % android_script_file +
+ "adb shell 'rm %s'" % android_script_file)
+ error_code = Execute(command)
+ os.unlink(script_file)
+ return error_code
+
+if __name__ == '__main__':
+ sys.exit(Main())
diff --git a/deps/v8/tools/android-sync.sh b/deps/v8/tools/android-sync.sh
new file mode 100644
index 0000000000..5d4ef2effd
--- /dev/null
+++ b/deps/v8/tools/android-sync.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script pushes Android binaries and test data to the device.
+# The first argument can be either "android.release" or "android.debug".
+# The second argument is a relative path to the output directory with binaries.
+# The third argument is the absolute path to the V8 directory on the host.
+# The fourth argument is the absolute path to the V8 directory on the device.
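+#
+# A hypothetical invocation (paths are illustrative only):
+#   tools/android-sync.sh android.release out /host/src/v8 /data/local/v8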
+
+if [ ${#@} -lt 4 ] ; then
+ echo "$0: Error: need 4 arguments"
+ exit 1
+fi
+
+ARCH_MODE=$1
+OUTDIR=$2
+HOST_V8=$3
+ANDROID_V8=$4
+
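+# Linux 'md5sum' prints "<hash>  <file>" while Darwin 'md5' prints
+# "MD5 (<file>) = <hash>"; each helper below extracts just the hash.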
+function LINUX_MD5 {
+ local HASH=$(md5sum $1)
+ echo ${HASH%% *}
+}
+
+function DARWIN_MD5 {
+ local HASH=$(md5 $1)
+ echo ${HASH} | cut -f2 -d "=" | cut -f2 -d " "
+}
+
+host_os=$(uname -s)
+case "${host_os}" in
+ "Linux")
+ MD5=LINUX_MD5
+ ;;
+ "Darwin")
+ MD5=DARWIN_MD5
+ ;;
+ *)
+ echo "$0: Host platform ${host_os} is not supported" >& 2
+ exit 1
+esac
+
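+# Push a file to the device only if its md5 there differs from the host
+# copy; a "." is printed per file as a progress indicator.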
+function sync_file {
+ local FILE=$1
+ local ANDROID_HASH=$(adb shell "md5 \"$ANDROID_V8/$FILE\"")
+ local HOST_HASH=$($MD5 "$HOST_V8/$FILE")
+ if [ "${ANDROID_HASH%% *}" != "${HOST_HASH}" ]; then
+ adb push "$HOST_V8/$FILE" "$ANDROID_V8/$FILE" &> /dev/null
+ fi
+ echo -n "."
+}
+
+function sync_dir {
+ local DIR=$1
+ echo -n "sync to $ANDROID_V8/$DIR"
+ for FILE in $(find "$HOST_V8/$DIR" -not -path "*.svn*" -type f); do
+ local RELATIVE_FILE=${FILE:${#HOST_V8}}
+ sync_file "$RELATIVE_FILE"
+ done
+ echo ""
+}
+
+echo -n "sync to $ANDROID_V8/$OUTDIR/$ARCH_MODE"
+sync_file "$OUTDIR/$ARCH_MODE/cctest"
+sync_file "$OUTDIR/$ARCH_MODE/d8"
+sync_file "$OUTDIR/$ARCH_MODE/preparser"
+echo ""
+echo -n "sync to $ANDROID_V8/tools"
+sync_file tools/consarray.js
+sync_file tools/codemap.js
+sync_file tools/csvparser.js
+sync_file tools/profile.js
+sync_file tools/splaytree.js
+sync_file tools/profile_view.js
+sync_file tools/logreader.js
+sync_file tools/tickprocessor.js
+echo ""
+sync_dir test/message
+sync_dir test/mjsunit
+sync_dir test/preparser
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index a9f0cb9ddc..5d9a053afd 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -1051,12 +1051,30 @@ class ConsString(String):
class Oddball(HeapObject):
+ # Should match declarations in objects.h
+ KINDS = [
+ "False",
+ "True",
+ "TheHole",
+ "Null",
+ "ArgumentMarker",
+ "Undefined",
+ "Other"
+ ]
+
def ToStringOffset(self):
return self.heap.PointerSize()
+ def ToNumberOffset(self):
+ return self.ToStringOffset() + self.heap.PointerSize()
+
+ def KindOffset(self):
+ return self.ToNumberOffset() + self.heap.PointerSize()
+
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.to_string = self.ObjectField(self.ToStringOffset())
+ self.kind = self.SmiField(self.KindOffset())
def Print(self, p):
p.Print(str(self))
@@ -1065,7 +1083,10 @@ class Oddball(HeapObject):
if self.to_string:
return "Oddball(%08x, <%s>)" % (self.address, self.to_string.GetChars())
else:
- return "Oddball(%08x, kind=%s)" % (self.address, "???")
+ kind = "???"
+ if 0 <= self.kind < len(Oddball.KINDS):
+ kind = Oddball.KINDS[self.kind]
+ return "Oddball(%08x, kind=%s)" % (self.address, kind)
class FixedArray(HeapObject):
@@ -1086,7 +1107,13 @@ class FixedArray(HeapObject):
base_offset = self.ElementsOffset()
for i in xrange(self.length):
offset = base_offset + 4 * i
- p.Print("[%08d] = %s" % (i, self.ObjectField(offset)))
+ try:
+ p.Print("[%08d] = %s" % (i, self.ObjectField(offset)))
+ except TypeError:
+ p.Dedent()
+ p.Print("...")
+ p.Print("}")
+ return
p.Dedent()
p.Print("}")
@@ -1394,7 +1421,7 @@ class InspectionPadawan(object):
if known_map:
return known_map
found_obj = self.heap.FindObject(tagged_address)
- if found_obj: return found_ob
+ if found_obj: return found_obj
address = tagged_address - 1
if self.reader.IsValidAddress(address):
map_tagged_address = self.reader.ReadUIntPtr(address)
@@ -1451,6 +1478,24 @@ class InspectionShell(cmd.Cmd):
self.padawan = InspectionPadawan(reader, heap)
self.prompt = "(grok) "
+ def do_da(self, address):
+ """
+    Print the ASCII string starting at the specified address.
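+    Usage: da 0x<address>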
+ """
+ address = int(address, 16)
+ string = ""
+ while self.reader.IsValidAddress(address):
+ code = self.reader.ReadU8(address)
+ if code < 128:
+ string += chr(code)
+ else:
+ break
+ address += 1
+ if string == "":
+ print "Not an ASCII string at %s" % self.reader.FormatIntPtr(address)
+ else:
+ print "%s\n" % string
+
def do_dd(self, address):
"""
Interpret memory at the given address (if available) as a sequence
@@ -1510,23 +1555,23 @@ class InspectionShell(cmd.Cmd):
"""
self.padawan.PrintKnowledge()
- def do_km(self, address):
+ def do_kd(self, address):
"""
Teach V8 heap layout information to the inspector. Set the first
- map-space page by passing any pointer into that page.
+ data-space page by passing any pointer into that page.
"""
address = int(address, 16)
page_address = address & ~self.heap.PageAlignmentMask()
- self.padawan.known_first_map_page = page_address
+ self.padawan.known_first_data_page = page_address
- def do_kd(self, address):
+ def do_km(self, address):
"""
Teach V8 heap layout information to the inspector. Set the first
- data-space page by passing any pointer into that page.
+ map-space page by passing any pointer into that page.
"""
address = int(address, 16)
page_address = address & ~self.heap.PageAlignmentMask()
- self.padawan.known_first_data_page = page_address
+ self.padawan.known_first_map_page = page_address
def do_kp(self, address):
"""
@@ -1537,6 +1582,17 @@ class InspectionShell(cmd.Cmd):
page_address = address & ~self.heap.PageAlignmentMask()
self.padawan.known_first_pointer_page = page_address
+ def do_list(self, smth):
+ """
+ List all available memory regions.
+ """
+ def print_region(reader, start, size, location):
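+      # 'location' is unused; only the address range and size are printed.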
+ print " %s - %s (%d bytes)" % (reader.FormatIntPtr(start),
+ reader.FormatIntPtr(start + size),
+ size)
+ print "Available memory regions:"
+ self.reader.ForEachMemoryRegion(print_region)
+
def do_s(self, word):
"""
Search for a given word in available memory regions. The given word
@@ -1560,23 +1616,25 @@ class InspectionShell(cmd.Cmd):
"""
raise NotImplementedError
- def do_list(self, smth):
+ def do_u(self, args):
"""
- List all available memory regions.
+ u 0x<address> 0x<size>
+ Unassemble memory in the region [address, address + size)
"""
- def print_region(reader, start, size, location):
- print " %s - %s (%d bytes)" % (reader.FormatIntPtr(start),
- reader.FormatIntPtr(start + size),
- size)
- print "Available memory regions:"
- self.reader.ForEachMemoryRegion(print_region)
-
+ args = args.split(' ')
+ start = int(args[0], 16)
+ size = int(args[1], 16)
+ lines = self.reader.GetDisasmLines(start, size)
+ for line in lines:
+ print FormatDisasmLine(start, self.heap, line)
+ print
EIP_PROXIMITY = 64
CONTEXT_FOR_ARCH = {
MD_CPU_ARCHITECTURE_AMD64:
- ['rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi', 'rbp', 'rsp', 'rip'],
+ ['rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi', 'rbp', 'rsp', 'rip',
+ 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15'],
MD_CPU_ARCHITECTURE_X86:
['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
}
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index f742cc6f3c..b646567b57 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -234,10 +234,10 @@
'../../src/ast.h',
'../../src/atomicops.h',
'../../src/atomicops_internals_x86_gcc.cc',
- '../../src/bignum.cc',
- '../../src/bignum.h',
'../../src/bignum-dtoa.cc',
'../../src/bignum-dtoa.h',
+ '../../src/bignum.cc',
+ '../../src/bignum.h',
'../../src/bootstrapper.cc',
'../../src/bootstrapper.h',
'../../src/builtins.cc',
@@ -268,21 +268,21 @@
'../../src/conversions.h',
'../../src/counters.cc',
'../../src/counters.h',
- '../../src/cpu.h',
'../../src/cpu-profiler-inl.h',
'../../src/cpu-profiler.cc',
'../../src/cpu-profiler.h',
+ '../../src/cpu.h',
'../../src/data-flow.cc',
'../../src/data-flow.h',
'../../src/date.cc',
'../../src/date.h',
+ '../../src/dateparser-inl.h',
'../../src/dateparser.cc',
'../../src/dateparser.h',
- '../../src/dateparser-inl.h',
- '../../src/debug.cc',
- '../../src/debug.h',
'../../src/debug-agent.cc',
'../../src/debug-agent.h',
+ '../../src/debug.cc',
+ '../../src/debug.h',
'../../src/deoptimizer.cc',
'../../src/deoptimizer.h',
'../../src/disasm.h',
@@ -293,19 +293,25 @@
'../../src/double.h',
'../../src/dtoa.cc',
'../../src/dtoa.h',
- '../../src/elements.cc',
- '../../src/elements.h',
'../../src/elements-kind.cc',
'../../src/elements-kind.h',
+ '../../src/elements.cc',
+ '../../src/elements.h',
'../../src/execution.cc',
'../../src/execution.h',
+ '../../src/extensions/externalize-string-extension.cc',
+ '../../src/extensions/externalize-string-extension.h',
+ '../../src/extensions/gc-extension.cc',
+ '../../src/extensions/gc-extension.h',
+ '../../src/extensions/statistics-extension.cc',
+ '../../src/extensions/statistics-extension.h',
'../../src/factory.cc',
'../../src/factory.h',
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
- '../../src/flag-definitions.h',
'../../src/fixed-dtoa.cc',
'../../src/fixed-dtoa.h',
+ '../../src/flag-definitions.h',
'../../src/flags.cc',
'../../src/flags.h',
'../../src/frames-inl.h',
@@ -323,14 +329,14 @@
'../../src/handles.h',
'../../src/hashmap.h',
'../../src/heap-inl.h',
- '../../src/heap.cc',
- '../../src/heap.h',
'../../src/heap-profiler.cc',
'../../src/heap-profiler.h',
- '../../src/hydrogen.cc',
- '../../src/hydrogen.h',
+ '../../src/heap.cc',
+ '../../src/heap.h',
'../../src/hydrogen-instructions.cc',
'../../src/hydrogen-instructions.h',
+ '../../src/hydrogen.cc',
+ '../../src/hydrogen.h',
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
@@ -342,19 +348,19 @@
'../../src/interface.h',
'../../src/interpreter-irregexp.cc',
'../../src/interpreter-irregexp.h',
+ '../../src/isolate.cc',
+ '../../src/isolate.h',
'../../src/json-parser.h',
'../../src/jsregexp.cc',
'../../src/jsregexp.h',
- '../../src/isolate.cc',
- '../../src/isolate.h',
'../../src/lazy-instance.h',
'../../src/list-inl.h',
'../../src/list.h',
- '../../src/lithium.cc',
- '../../src/lithium.h',
+ '../../src/lithium-allocator-inl.h',
'../../src/lithium-allocator.cc',
'../../src/lithium-allocator.h',
- '../../src/lithium-allocator-inl.h',
+ '../../src/lithium.cc',
+ '../../src/lithium.h',
'../../src/liveedit.cc',
'../../src/liveedit.h',
'../../src/liveobjectlist-inl.h',
@@ -372,14 +378,16 @@
'../../src/messages.h',
'../../src/natives.h',
'../../src/objects-debug.cc',
- '../../src/objects-printer.cc',
'../../src/objects-inl.h',
+ '../../src/objects-printer.cc',
'../../src/objects-visiting.cc',
'../../src/objects-visiting.h',
'../../src/objects.cc',
'../../src/objects.h',
'../../src/once.cc',
'../../src/once.h',
+ '../../src/optimizing-compiler-thread.h',
+ '../../src/optimizing-compiler-thread.cc',
'../../src/parser.cc',
'../../src/parser.h',
'../../src/platform-posix.h',
@@ -394,12 +402,12 @@
'../../src/preparser.h',
'../../src/prettyprinter.cc',
'../../src/prettyprinter.h',
- '../../src/property.cc',
- '../../src/property.h',
- '../../src/property-details.h',
'../../src/profile-generator-inl.h',
'../../src/profile-generator.cc',
'../../src/profile-generator.h',
+ '../../src/property-details.h',
+ '../../src/property.cc',
+ '../../src/property.h',
'../../src/regexp-macro-assembler-irregexp-inl.h',
'../../src/regexp-macro-assembler-irregexp.cc',
'../../src/regexp-macro-assembler-irregexp.h',
@@ -411,16 +419,16 @@
'../../src/regexp-stack.h',
'../../src/rewriter.cc',
'../../src/rewriter.h',
- '../../src/runtime.cc',
- '../../src/runtime.h',
'../../src/runtime-profiler.cc',
'../../src/runtime-profiler.h',
+ '../../src/runtime.cc',
+ '../../src/runtime.h',
'../../src/safepoint-table.cc',
'../../src/safepoint-table.h',
- '../../src/scanner.cc',
- '../../src/scanner.h',
'../../src/scanner-character-streams.cc',
'../../src/scanner-character-streams.h',
+ '../../src/scanner.cc',
+ '../../src/scanner.h',
'../../src/scopeinfo.cc',
'../../src/scopeinfo.h',
'../../src/scopes.cc',
@@ -428,7 +436,7 @@
'../../src/serialize.cc',
'../../src/serialize.h',
'../../src/small-pointer-list.h',
- '../../src/smart-array-pointer.h',
+ '../../src/smart-pointers.h',
'../../src/snapshot-common.cc',
'../../src/snapshot.h',
'../../src/spaces-inl.h',
@@ -447,6 +455,9 @@
'../../src/stub-cache.h',
'../../src/token.cc',
'../../src/token.h',
+ '../../src/transitions-inl.h',
+ '../../src/transitions.cc',
+ '../../src/transitions.h',
'../../src/type-info.cc',
'../../src/type-info.h',
'../../src/unbound-queue-inl.h',
@@ -479,10 +490,6 @@
'../../src/zone-inl.h',
'../../src/zone.cc',
'../../src/zone.h',
- '../../src/extensions/externalize-string-extension.cc',
- '../../src/extensions/externalize-string-extension.h',
- '../../src/extensions/gc-extension.cc',
- '../../src/extensions/gc-extension.h',
],
'conditions': [
['want_separate_host_toolset==1', {
@@ -555,7 +562,7 @@
'../../src/ia32/stub-cache-ia32.cc',
],
}],
- ['v8_target_arch=="mips"', {
+ ['v8_target_arch=="mipsel"', {
'sources': [
'../../src/mips/assembler-mips.cc',
'../../src/mips/assembler-mips.h',
@@ -721,9 +728,6 @@
'../../src/win32-math.h',
],
'msvs_disabled_warnings': [4351, 4355, 4800],
- 'direct_dependent_settings': {
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- },
'link_settings': {
'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
},
diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor
index 7070ce6fcc..93f143f9a9 100755
--- a/deps/v8/tools/linux-tick-processor
+++ b/deps/v8/tools/linux-tick-processor
@@ -12,21 +12,21 @@ done
tools_path=`cd $(dirname "$0");pwd`
if [ ! "$D8_PATH" ]; then
d8_public=`which d8`
- if [ -x $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
+ if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
fi
-[ "$D8_PATH" ] || D8_PATH=$tools_path/..
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
d8_exec=$D8_PATH/d8
-if [ ! -x $d8_exec ]; then
+if [ ! -x "$d8_exec" ]; then
D8_PATH=`pwd`/out/native
d8_exec=$D8_PATH/d8
fi
-if [ ! -x $d8_exec ]; then
+if [ ! -x "$d8_exec" ]; then
d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
fi
-if [ ! -x $d8_exec ]; then
+if [ ! -x "$d8_exec" ]; then
echo "d8 shell not found in $D8_PATH"
echo "To build, execute 'make native' from the V8 directory"
exit 1
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index 51ba672aca..3afe179d2f 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -68,15 +68,9 @@ Examples:
"""
-# Must match kGcFakeMmap.
-V8_GC_FAKE_MMAP = "/tmp/__v8_gc__"
-
JS_ORIGIN = "js"
JS_SNAPSHOT_ORIGIN = "js-snapshot"
-OBJDUMP_BIN = disasm.OBJDUMP_BIN
-
-
class Code(object):
"""Code object."""
@@ -639,7 +633,7 @@ class TraceReader(object):
# Read null-terminated filename.
filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
offset + header.size]
- mmap_info.filename = filename[:filename.find(chr(0))]
+ mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
return mmap_info
def ReadSample(self, header, offset):
@@ -858,6 +852,15 @@ if __name__ == "__main__":
default=False,
action="store_true",
help="no auxiliary messages [default: %default]")
+ parser.add_option("--gc-fake-mmap",
+ default="/tmp/__v8_gc__",
+ help="gc fake mmap file [default: %default]")
+ parser.add_option("--objdump",
+ default="/usr/bin/objdump",
+ help="objdump tool to use [default: %default]")
+ parser.add_option("--host-root",
+ default="",
+ help="Path to the host root [default: %default]")
options, args = parser.parse_args()
if not options.quiet:
@@ -869,6 +872,14 @@ if __name__ == "__main__":
print "V8 log: %s, %s.ll (no snapshot)" % (options.log, options.log)
print "Perf trace file: %s" % options.trace
+ V8_GC_FAKE_MMAP = options.gc_fake_mmap
+ HOST_ROOT = options.host_root
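+  # HOST_ROOT is prepended to file names from the trace so that libraries
+  # recorded on the target can be found on the host (see ReadMmap above).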
+ if os.path.exists(options.objdump):
+ disasm.OBJDUMP_BIN = options.objdump
+ OBJDUMP_BIN = options.objdump
+ else:
+ print "Cannot find %s, falling back to default objdump" % options.objdump
+
# Stats.
events = 0
ticks = 0
@@ -905,7 +916,7 @@ if __name__ == "__main__":
if header.type == PERF_RECORD_MMAP:
start = time.time()
mmap_info = trace_reader.ReadMmap(header, offset)
- if mmap_info.filename == V8_GC_FAKE_MMAP:
+ if mmap_info.filename == HOST_ROOT + V8_GC_FAKE_MMAP:
log_reader.ReadUpToGC()
else:
library_repo.Load(mmap_info, code_map, options)
diff --git a/deps/v8/tools/push-to-trunk.sh b/deps/v8/tools/push-to-trunk.sh
index ff6dd1d776..a193d57384 100755
--- a/deps/v8/tools/push-to-trunk.sh
+++ b/deps/v8/tools/push-to-trunk.sh
@@ -304,11 +304,22 @@ fi
let CURRENT_STEP+=1
if [ $START_STEP -le $CURRENT_STEP ] ; then
echo ">>> Step $CURRENT_STEP: Commit to SVN."
- git svn dcommit | tee >(grep -E "^Committed r[0-9]+" \
- | sed -e 's/^Committed r\([0-9]\+\)/\1/' \
- > "$TRUNK_REVISION_FILE") \
+ git svn dcommit 2>&1 | tee >(grep -E "^Committed r[0-9]+" \
+ | sed -e 's/^Committed r\([0-9]\+\)/\1/' \
+ > "$TRUNK_REVISION_FILE") \
|| die "'git svn dcommit' failed."
TRUNK_REVISION=$(cat "$TRUNK_REVISION_FILE")
+ # Sometimes grepping for the revision fails. No idea why. If you figure
+ # out why it is flaky, please do fix it properly.
+ if [ -z "$TRUNK_REVISION" ] ; then
+ echo "Sorry, grepping for the SVN revision failed. Please look for it in \
+the last command's output above and provide it manually (just the number, \
+without the leading \"r\")."
+ while [ -z "$TRUNK_REVISION" ] ; do
+ echo -n "> "
+ read TRUNK_REVISION
+ done
+ fi
persist "TRUNK_REVISION"
rm -f "$TRUNK_REVISION_FILE"
fi
diff --git a/deps/v8/tools/run-valgrind.py b/deps/v8/tools/run-valgrind.py
index f8c23da6d1..49c1b70312 100755
--- a/deps/v8/tools/run-valgrind.py
+++ b/deps/v8/tools/run-valgrind.py
@@ -30,36 +30,48 @@
# Simple wrapper for running valgrind and checking the output on
# stderr for memory leaks.
-import os
-import socket
import subprocess
import sys
import re
-VALGRIND = os.environ.get('VALGRIND', 'valgrind')
-
VALGRIND_ARGUMENTS = [
- VALGRIND,
- '--log-socket=127.0.0.1:15151',
- '--error-exitcode=247',
- '--leak-check=no',
- '--smc-check=all',
+ 'valgrind',
+ '--error-exitcode=1',
+ '--leak-check=full',
+ '--smc-check=all'
]
-server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-server.bind(('127.0.0.1', 15151))
-server.listen(1)
-
+# Compute the command line.
command = VALGRIND_ARGUMENTS + sys.argv[1:]
+
+# Run valgrind.
process = subprocess.Popen(command, stderr=subprocess.PIPE)
+code = process.wait()
+errors = process.stderr.readlines()
+
+# If valgrind produced an error, we report that to the user.
+if code != 0:
+ sys.stderr.writelines(errors)
+ sys.exit(code)
+
+# Look through the leak details and make sure that we don't
+# have any definitely, indirectly, and possibly lost bytes.
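+# A typical valgrind summary line looks like:
+#   ==1234==    definitely lost: 0 bytes in 0 blocks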
+LEAK_RE = r"(?:definitely|indirectly|possibly) lost: "
+LEAK_LINE_MATCHER = re.compile(LEAK_RE)
+LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks")
+leaks = []
+for line in errors:
+ if LEAK_LINE_MATCHER.search(line):
+ leaks.append(line)
+ if not LEAK_OKAY_MATCHER.search(line):
+ sys.stderr.writelines(errors)
+ sys.exit(1)
-errors = ''
-conn, addr = server.accept()
-while True:
- data = conn.recv(8192)
- if not data: break
- errors += data
+# Make sure we found between 2 and 3 leak lines.
+if len(leaks) < 2 or len(leaks) > 3:
+ sys.stderr.writelines(errors)
+ sys.stderr.write('\n\n#### Malformed valgrind output.\n#### Exiting.\n')
+ sys.exit(1)
-code = process.wait()
-if code == 247: sys.stderr.writelines(errors)
-sys.exit(code)
+# No leaks found.
+sys.exit(0)
diff --git a/deps/v8/tools/test-wrapper-gypbuild.py b/deps/v8/tools/test-wrapper-gypbuild.py
index d99d055e50..4dd6338dc9 100755
--- a/deps/v8/tools/test-wrapper-gypbuild.py
+++ b/deps/v8/tools/test-wrapper-gypbuild.py
@@ -95,11 +95,14 @@ def BuildOptions():
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
- result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
- dest="suppress_dialogs", default=True, action="store_true")
- result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
- dest="suppress_dialogs", action="store_false")
- result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
+ result.add_option("--suppress-dialogs",
+ help="Suppress Windows dialogs for crashing tests",
+ dest="suppress_dialogs", default=True, action="store_true")
+ result.add_option("--no-suppress-dialogs",
+ help="Display Windows dialogs for crashing tests",
+ dest="suppress_dialogs", action="store_false")
+ result.add_option("--isolates", help="Whether to test isolates",
+ default=False, action="store_true")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
@@ -148,7 +151,8 @@ def ProcessOptions(options):
print "Unknown mode %s" % mode
return False
for arch in options.arch:
- if not arch in ['ia32', 'x64', 'arm', 'mips']:
+ if not arch in ['ia32', 'x64', 'arm', 'mipsel', 'android_arm',
+ 'android_ia32']:
print "Unknown architecture %s" % arch
return False
if options.buildbot:
@@ -217,9 +221,10 @@ def Main():
if not options.no_presubmit:
print ">>> running presubmit tests"
- returncodes += subprocess.call([workspace + '/tools/presubmit.py'])
+ returncodes += subprocess.call([sys.executable,
+ workspace + '/tools/presubmit.py'])
- args_for_children = ['python']
+ args_for_children = [sys.executable]
args_for_children += [workspace + '/tools/test.py'] + PassOnOptions(options)
args_for_children += ['--no-build', '--build-system=gyp']
for arg in args:
diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py
index 5131ad7617..c361f93737 100755
--- a/deps/v8/tools/test.py
+++ b/deps/v8/tools/test.py
@@ -140,9 +140,9 @@ def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
- # Escape spaces. We may need to escape more characters for this
- # to work properly.
- parts.append('"%s"' % part)
+ # Escape spaces and double quotes. We may need to escape more characters
+ # for this to work properly.
+ parts.append('"%s"' % part.replace('"', '\\"'))
else:
parts.append(part)
return " ".join(parts)
@@ -299,8 +299,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
- 'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
- 'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
@@ -1283,7 +1281,7 @@ def ProcessOptions(options):
options.scons_flags.append("arch=" + options.arch)
# Simulators are slow, therefore allow a longer default timeout.
if options.timeout == -1:
- if options.arch == 'arm' or options.arch == 'mips':
+ if options.arch in ['android', 'arm', 'mipsel']:
options.timeout = 2 * TIMEOUT_DEFAULT;
else:
options.timeout = TIMEOUT_DEFAULT;
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index 9af5ab6c79..313c6d4c94 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -50,7 +50,7 @@ if (params.snapshotLogFileName) {
snapshotLogProcessor.processLogFile(params.snapshotLogFileName);
}
var tickProcessor = new TickProcessor(
- new (entriesProviders[params.platform])(params.nm),
+ new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
params.separateIc,
params.callGraphSize,
params.ignoreUnknown,
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 05a3369255..4c4886d878 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -608,10 +608,11 @@ CppEntriesProvider.prototype.parseNextLine = function() {
};
-function UnixCppEntriesProvider(nmExec) {
+function UnixCppEntriesProvider(nmExec, targetRootFS) {
this.symbols = [];
this.parsePos = 0;
this.nmExec = nmExec;
+ this.targetRootFS = targetRootFS;
this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
};
inherits(UnixCppEntriesProvider, CppEntriesProvider);
@@ -619,6 +620,7 @@ inherits(UnixCppEntriesProvider, CppEntriesProvider);
UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
this.parsePos = 0;
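+  // Look up the library under the cross-compilation target root when
+  // --target is given (targetRootFS defaults to the empty string).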
+ libName = this.targetRootFS + libName;
try {
this.symbols = [
os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
@@ -656,8 +658,8 @@ UnixCppEntriesProvider.prototype.parseNextLine = function() {
};
-function MacCppEntriesProvider(nmExec) {
- UnixCppEntriesProvider.call(this, nmExec);
+function MacCppEntriesProvider(nmExec, targetRootFS) {
+ UnixCppEntriesProvider.call(this, nmExec, targetRootFS);
// Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ()[iItT] (.*)$/;
};
@@ -666,6 +668,7 @@ inherits(MacCppEntriesProvider, UnixCppEntriesProvider);
MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
this.parsePos = 0;
+ libName = this.targetRootFS + libName;
try {
this.symbols = [os.system(this.nmExec, ['-n', '-f', libName], -1, -1), ''];
} catch (e) {
@@ -675,7 +678,8 @@ MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
};
-function WindowsCppEntriesProvider() {
+function WindowsCppEntriesProvider(_ignored_nmExec, targetRootFS) {
+ this.targetRootFS = targetRootFS;
this.symbols = '';
this.parsePos = 0;
};
@@ -698,6 +702,7 @@ WindowsCppEntriesProvider.EXE_IMAGE_BASE = 0x00400000;
WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
+ libName = this.targetRootFS + libName;
var fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
if (!fileNameFields) return;
var mapFileName = fileNameFields[1] + '.map';
@@ -785,6 +790,8 @@ function ArgumentsProcessor(args) {
'Specify that we are running on Mac OS X platform'],
'--nm': ['nm', 'nm',
'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
+ '--target': ['targetRootFS', '',
+ 'Specify the target root directory for cross environment'],
'--snapshot-log': ['snapshotLogFileName', 'snapshot.log',
'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)']
};
@@ -804,6 +811,7 @@ ArgumentsProcessor.DEFAULTS = {
callGraphSize: 5,
ignoreUnknown: false,
separateIc: false,
+ targetRootFS: '',
nm: 'nm'
};