Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/ChangeLog | 58
-rw-r--r--  deps/v8/DEPS | 2
-rw-r--r--  deps/v8/Makefile | 15
-rw-r--r--  deps/v8/Makefile.android | 10
-rw-r--r--  deps/v8/Makefile.nacl | 4
-rw-r--r--  deps/v8/PRESUBMIT.py | 3
-rw-r--r--  deps/v8/build/features.gypi | 111
-rwxr-xr-x  deps/v8/build/gyp_v8 | 4
-rw-r--r--  deps/v8/build/standalone.gypi | 15
-rw-r--r--  deps/v8/build/toolchain.gypi (renamed from deps/v8/build/common.gypi) | 125
-rwxr-xr-x  deps/v8/include/v8-debug.h | 10
-rw-r--r--  deps/v8/include/v8-profiler.h | 15
-rw-r--r--  deps/v8/include/v8.h | 20
-rw-r--r--  deps/v8/preparser/preparser.gyp | 2
-rw-r--r--  deps/v8/samples/lineprocessor.cc | 3
-rw-r--r--  deps/v8/samples/process.cc | 4
-rw-r--r--  deps/v8/samples/samples.gyp | 16
-rw-r--r--  deps/v8/samples/shell.cc | 1
-rw-r--r--  deps/v8/src/accessors.cc | 40
-rw-r--r--  deps/v8/src/accessors.h | 6
-rw-r--r--  deps/v8/src/api.cc | 56
-rw-r--r--  deps/v8/src/apinatives.js | 2
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 251
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 113
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 1
-rw-r--r-- [-rwxr-xr-x]  deps/v8/src/arm/code-stubs-arm.cc | 580
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 81
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 261
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 43
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 39
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 234
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 22
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 27
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 72
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 89
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 218
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 1
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.cc | 5
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 28
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 8
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 393
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 25
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 203
-rw-r--r--  deps/v8/src/array-iterator.js | 127
-rw-r--r--  deps/v8/src/assembler.cc | 17
-rw-r--r--  deps/v8/src/assembler.h | 5
-rw-r--r--  deps/v8/src/ast.cc | 34
-rw-r--r--  deps/v8/src/ast.h | 22
-rw-r--r--  deps/v8/src/atomicops.h | 9
-rw-r--r--  deps/v8/src/atomicops_internals_x86_gcc.cc | 1
-rw-r--r--  deps/v8/src/bignum.cc | 1
-rw-r--r--  deps/v8/src/bootstrapper.cc | 30
-rw-r--r--  deps/v8/src/builtins.cc | 18
-rw-r--r--  deps/v8/src/char-predicates-inl.h | 12
-rw-r--r--  deps/v8/src/char-predicates.h | 2
-rw-r--r--  deps/v8/src/checks.h | 6
-rw-r--r--  deps/v8/src/circular-queue-inl.h | 13
-rw-r--r--  deps/v8/src/circular-queue.cc | 37
-rw-r--r--  deps/v8/src/circular-queue.h | 25
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc | 240
-rw-r--r--  deps/v8/src/code-stubs.cc | 200
-rw-r--r--  deps/v8/src/code-stubs.h | 383
-rw-r--r--  deps/v8/src/codegen.h | 4
-rw-r--r--  deps/v8/src/collection.js | 92
-rw-r--r--  deps/v8/src/compilation-cache.cc | 1
-rw-r--r--  deps/v8/src/compiler.cc | 11
-rw-r--r--  deps/v8/src/compiler.h | 2
-rw-r--r--  deps/v8/src/contexts.cc | 6
-rw-r--r--  deps/v8/src/conversions-inl.h | 28
-rw-r--r--  deps/v8/src/conversions.h | 11
-rw-r--r--  deps/v8/src/counters.cc | 2
-rw-r--r--  deps/v8/src/cpu-profiler-inl.h | 2
-rw-r--r--  deps/v8/src/cpu-profiler.cc | 98
-rw-r--r--  deps/v8/src/cpu-profiler.h | 32
-rw-r--r--  deps/v8/src/d8-debug.cc | 8
-rw-r--r--  deps/v8/src/d8-debug.h | 5
-rw-r--r--  deps/v8/src/d8.cc | 4
-rw-r--r--  deps/v8/src/d8.gyp | 14
-rw-r--r--  deps/v8/src/dateparser.cc | 1
-rw-r--r--  deps/v8/src/debug.cc | 21
-rw-r--r--  deps/v8/src/deoptimizer.cc | 39
-rw-r--r--  deps/v8/src/disassembler.cc | 2
-rw-r--r--  deps/v8/src/elements-kind.cc | 1
-rw-r--r--  deps/v8/src/execution.cc | 1
-rw-r--r--  deps/v8/src/extensions/i18n/break-iterator.cc | 2
-rw-r--r--  deps/v8/src/extensions/i18n/collator.cc | 3
-rw-r--r--  deps/v8/src/extensions/i18n/i18n-extension.cc | 1
-rw-r--r--  deps/v8/src/extensions/i18n/i18n-utils.cc | 7
-rw-r--r--  deps/v8/src/extensions/i18n/locale.cc | 3
-rw-r--r--  deps/v8/src/extensions/i18n/number-format.cc | 4
-rw-r--r--  deps/v8/src/factory.cc | 13
-rw-r--r--  deps/v8/src/factory.h | 78
-rw-r--r--  deps/v8/src/flag-definitions.h | 18
-rw-r--r--  deps/v8/src/frames-inl.h | 6
-rw-r--r--  deps/v8/src/frames.cc | 130
-rw-r--r--  deps/v8/src/frames.h | 2
-rw-r--r--  deps/v8/src/full-codegen.cc | 7
-rw-r--r--  deps/v8/src/full-codegen.h | 4
-rw-r--r--  deps/v8/src/gdb-jit.cc | 1
-rw-r--r--  deps/v8/src/global-handles.cc | 11
-rw-r--r--  deps/v8/src/globals.h | 31
-rw-r--r--  deps/v8/src/handles.cc | 12
-rw-r--r--  deps/v8/src/handles.h | 10
-rw-r--r--  deps/v8/src/heap-inl.h | 5
-rw-r--r--  deps/v8/src/heap-profiler.cc | 1
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc | 91
-rw-r--r--  deps/v8/src/heap-snapshot-generator.h | 3
-rw-r--r--  deps/v8/src/heap.cc | 171
-rw-r--r--  deps/v8/src/heap.h | 43
-rw-r--r--  deps/v8/src/hydrogen-bce.cc | 390
-rw-r--r--  deps/v8/src/hydrogen-bce.h | 72
-rw-r--r--  deps/v8/src/hydrogen-canonicalize.cc | 59
-rw-r--r--  deps/v8/src/hydrogen-canonicalize.h | 51
-rw-r--r--  deps/v8/src/hydrogen-dce.cc | 125
-rw-r--r--  deps/v8/src/hydrogen-dce.h | 56
-rw-r--r--  deps/v8/src/hydrogen-dehoist.cc | 80
-rw-r--r--  deps/v8/src/hydrogen-dehoist.h | 51
-rw-r--r--  deps/v8/src/hydrogen-deoptimizing-mark.cc | 126
-rw-r--r--  deps/v8/src/hydrogen-deoptimizing-mark.h | 56
-rw-r--r--  deps/v8/src/hydrogen-escape-analysis.cc | 2
-rw-r--r--  deps/v8/src/hydrogen-gvn.cc | 65
-rw-r--r--  deps/v8/src/hydrogen-gvn.h | 42
-rw-r--r--  deps/v8/src/hydrogen-infer-types.cc | 77
-rw-r--r--  deps/v8/src/hydrogen-infer-types.h | 59
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 239
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 313
-rw-r--r--  deps/v8/src/hydrogen-minus-zero.cc | 83
-rw-r--r--  deps/v8/src/hydrogen-minus-zero.h | 56
-rw-r--r--  deps/v8/src/hydrogen-osr.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-range-analysis.cc | 85
-rw-r--r--  deps/v8/src/hydrogen-range-analysis.h | 8
-rw-r--r--  deps/v8/src/hydrogen-redundant-phi.cc | 76
-rw-r--r--  deps/v8/src/hydrogen-redundant-phi.h | 53
-rw-r--r--  deps/v8/src/hydrogen-removable-simulates.cc | 94
-rw-r--r--  deps/v8/src/hydrogen-removable-simulates.h | 51
-rw-r--r--  deps/v8/src/hydrogen-representation-changes.cc | 167
-rw-r--r--  deps/v8/src/hydrogen-representation-changes.h | 55
-rw-r--r--  deps/v8/src/hydrogen-sce.cc | 62
-rw-r--r--  deps/v8/src/hydrogen-sce.h | 48
-rw-r--r--  deps/v8/src/hydrogen.cc | 2308
-rw-r--r--  deps/v8/src/hydrogen.h | 192
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 15
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 31
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 843
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h | 89
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 12
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 20
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 8
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 28
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 685
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 40
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc | 33
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 135
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 114
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 30
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 8
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 220
-rw-r--r--  deps/v8/src/ic.cc | 329
-rw-r--r--  deps/v8/src/ic.h | 79
-rw-r--r--  deps/v8/src/icu_util.cc | 62
-rw-r--r--  deps/v8/src/icu_util.h | 42
-rw-r--r--  deps/v8/src/incremental-marking.cc | 8
-rw-r--r--  deps/v8/src/isolate.cc | 34
-rw-r--r--  deps/v8/src/isolate.h | 17
-rw-r--r--  deps/v8/src/json-stringifier.h | 6
-rw-r--r--  deps/v8/src/jsregexp.cc | 28
-rw-r--r--  deps/v8/src/jsregexp.h | 6
-rw-r--r--  deps/v8/src/lithium.cc | 1
-rw-r--r--  deps/v8/src/liveedit.cc | 7
-rw-r--r--  deps/v8/src/log-utils.cc | 114
-rw-r--r--  deps/v8/src/log-utils.h | 91
-rw-r--r--  deps/v8/src/log.cc | 1363
-rw-r--r--  deps/v8/src/log.h | 88
-rw-r--r--  deps/v8/src/macros.py | 2
-rw-r--r--  deps/v8/src/mark-compact.cc | 380
-rw-r--r--  deps/v8/src/mark-compact.h | 67
-rw-r--r--  deps/v8/src/messages.js | 5
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 9
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 3
-rw-r--r-- [-rwxr-xr-x]  deps/v8/src/mips/builtins-mips.cc | 1
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 600
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h | 81
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 16
-rw-r--r--  deps/v8/src/mips/constants-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 41
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 22
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 27
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 216
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h | 1
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.cc | 5
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 73
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 89
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 34
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 12
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 203
-rw-r--r--  deps/v8/src/mirror-debugger.js | 30
-rw-r--r--  deps/v8/src/mksnapshot.cc | 2
-rw-r--r--  deps/v8/src/object-observe.js | 91
-rw-r--r--  deps/v8/src/objects-debug.cc | 27
-rw-r--r--  deps/v8/src/objects-inl.h | 56
-rw-r--r--  deps/v8/src/objects-printer.cc | 57
-rw-r--r--  deps/v8/src/objects-visiting-inl.h | 23
-rw-r--r--  deps/v8/src/objects-visiting.cc | 7
-rw-r--r--  deps/v8/src/objects-visiting.h | 2
-rw-r--r--  deps/v8/src/objects.cc | 679
-rw-r--r--  deps/v8/src/objects.h | 353
-rw-r--r--  deps/v8/src/parser.cc | 32
-rw-r--r--  deps/v8/src/parser.h | 6
-rw-r--r--  deps/v8/src/platform-cygwin.cc | 52
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 97
-rw-r--r--  deps/v8/src/platform-linux.cc | 151
-rw-r--r--  deps/v8/src/platform-macos.cc | 75
-rw-r--r--  deps/v8/src/platform-nullos.cc | 5
-rw-r--r--  deps/v8/src/platform-openbsd.cc | 51
-rw-r--r--  deps/v8/src/platform-posix.cc | 104
-rw-r--r--  deps/v8/src/platform-posix.h | 70
-rw-r--r--  deps/v8/src/platform-solaris.cc | 41
-rw-r--r--  deps/v8/src/platform-win32.cc | 9
-rw-r--r--  deps/v8/src/platform.h | 40
-rw-r--r--  deps/v8/src/preparse-data.cc | 1
-rw-r--r--  deps/v8/src/preparser.cc | 5
-rw-r--r--  deps/v8/src/preparser.h | 6
-rw-r--r--  deps/v8/src/profile-generator-inl.h | 2
-rw-r--r--  deps/v8/src/profile-generator.cc | 255
-rw-r--r--  deps/v8/src/profile-generator.h | 52
-rw-r--r--  deps/v8/src/property-details.h | 6
-rw-r--r--  deps/v8/src/proxy.js | 8
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 2
-rw-r--r--  deps/v8/src/runtime.cc | 837
-rw-r--r--  deps/v8/src/runtime.h | 18
-rw-r--r--  deps/v8/src/runtime.js | 2
-rw-r--r--  deps/v8/src/sampler.cc | 11
-rw-r--r--  deps/v8/src/sampler.h | 13
-rw-r--r--  deps/v8/src/scanner-character-streams.cc | 2
-rw-r--r--  deps/v8/src/scanner.cc | 32
-rw-r--r--  deps/v8/src/scanner.h | 9
-rw-r--r--  deps/v8/src/scopes.cc | 8
-rw-r--r--  deps/v8/src/serialize.cc | 36
-rw-r--r--  deps/v8/src/serialize.h | 4
-rw-r--r--  deps/v8/src/spaces.cc | 4
-rw-r--r--  deps/v8/src/store-buffer-inl.h | 8
-rw-r--r--  deps/v8/src/store-buffer.cc | 57
-rw-r--r--  deps/v8/src/store-buffer.h | 30
-rw-r--r--  deps/v8/src/string.js | 6
-rw-r--r--  deps/v8/src/strtod.cc | 4
-rw-r--r--  deps/v8/src/stub-cache.cc | 358
-rw-r--r--  deps/v8/src/stub-cache.h | 319
-rw-r--r--  deps/v8/src/third_party/vtune/v8vtune.gyp | 2
-rw-r--r--  deps/v8/src/type-info.cc | 60
-rw-r--r--  deps/v8/src/type-info.h | 10
-rw-r--r--  deps/v8/src/typedarray.js | 70
-rw-r--r--  deps/v8/src/types.cc | 57
-rw-r--r--  deps/v8/src/types.h | 182
-rw-r--r--  deps/v8/src/typing.cc | 322
-rw-r--r--  deps/v8/src/typing.h | 9
-rw-r--r--  deps/v8/src/unicode.cc | 12
-rw-r--r--  deps/v8/src/v8-counters.cc | 3
-rw-r--r--  deps/v8/src/v8.cc | 2
-rw-r--r--  deps/v8/src/v8globals.h | 12
-rw-r--r--  deps/v8/src/v8threads.cc | 1
-rw-r--r--  deps/v8/src/v8utils.h | 104
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/win32-headers.h | 2
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 2
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 7
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 674
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h | 87
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 12
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 21
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 5
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 11
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 27
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 229
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 1
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc | 10
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 72
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 87
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 95
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 8
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 219
-rw-r--r--  deps/v8/src/zone.cc | 11
-rw-r--r--  deps/v8/test/cctest/cctest.gyp | 8
-rw-r--r--  deps/v8/test/cctest/cctest.status | 7
-rw-r--r--  deps/v8/test/cctest/test-accessors.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 308
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 183
-rw-r--r--  deps/v8/test/cctest/test-assembler-ia32.cc | 89
-rw-r--r--  deps/v8/test/cctest/test-assembler-x64.cc | 93
-rw-r--r--  deps/v8/test/cctest/test-circular-queue.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-ia32.cc | 181
-rw-r--r--  deps/v8/test/cctest/test-code-stubs-x64.cc | 149
-rw-r--r--  deps/v8/test/cctest/test-code-stubs.cc | 130
-rw-r--r--  deps/v8/test/cctest/test-code-stubs.h | 48
-rw-r--r--  deps/v8/test/cctest/test-compare-nil-ic-stub.cc | 87
-rw-r--r--  deps/v8/test/cctest/test-conversions.cc | 130
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 263
-rw-r--r--  deps/v8/test/cctest/test-date.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 43
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm.cc | 33
-rw-r--r--  deps/v8/test/cctest/test-double.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 178
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 61
-rw-r--r--  deps/v8/test/cctest/test-list.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-lockers.cc | 12
-rw-r--r--  deps/v8/test/cctest/test-log-stack-tracer.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 3
-rw-r--r-- [-rwxr-xr-x]  deps/v8/test/cctest/test-macro-assembler-x64.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-mark-compact.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-object-observe.cc | 17
-rw-r--r--  deps/v8/test/cctest/test-parsing.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-platform-tls.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-platform.cc | 63
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc | 209
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-thread-termination.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-threads.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-types.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-weaksets.cc | 250
-rw-r--r--  deps/v8/test/intl/assert.js | 184
-rw-r--r--  deps/v8/test/intl/break-iterator/default-locale.js | 48
-rw-r--r--  deps/v8/test/intl/break-iterator/en-break.js | 61
-rw-r--r--  deps/v8/test/intl/break-iterator/property-override.js | 64
-rw-r--r--  deps/v8/test/intl/break-iterator/protected-icu-internals.js | 49
-rw-r--r--  deps/v8/test/intl/break-iterator/resolved-options-is-method.js | 40
-rw-r--r--  deps/v8/test/intl/break-iterator/wellformed-unsupported-locale.js | 32
-rw-r--r--  deps/v8/test/intl/break-iterator/zh-break.js | 63
-rw-r--r--  deps/v8/test/intl/collator/de-sort.js | 44
-rw-r--r--  deps/v8/test/intl/collator/default-locale.js | 52
-rw-r--r--  deps/v8/test/intl/collator/en-sort.js | 39
-rw-r--r--  deps/v8/test/intl/collator/normalization.js | 56
-rw-r--r--  deps/v8/test/intl/collator/property-override.js | 65
-rw-r--r--  deps/v8/test/intl/collator/protected-icu-internals.js | 49
-rw-r--r--  deps/v8/test/intl/collator/resolved-options-is-method.js | 40
-rw-r--r--  deps/v8/test/intl/collator/sr-sort.js | 46
-rw-r--r--  deps/v8/test/intl/collator/wellformed-unsupported-locale.js | 32
-rw-r--r--  deps/v8/test/intl/date-format/default-locale.js | 44
-rw-r--r--  deps/v8/test/intl/date-format/format-is-bound.js | 39
-rw-r--r--  deps/v8/test/intl/date-format/format-test.js | 46
-rw-r--r--  deps/v8/test/intl/date-format/parse-MMMdy.js | 48
-rw-r--r--  deps/v8/test/intl/date-format/parse-invalid-input.js | 35
-rw-r--r--  deps/v8/test/intl/date-format/parse-mdy.js | 49
-rw-r--r--  deps/v8/test/intl/date-format/parse-mdyhms.js | 51
-rw-r--r--  deps/v8/test/intl/date-format/property-override.js | 70
-rw-r--r--  deps/v8/test/intl/date-format/protected-icu-internals.js | 49
-rw-r--r--  deps/v8/test/intl/date-format/resolved-options-is-method.js | 40
-rw-r--r--  deps/v8/test/intl/date-format/resolved-options.js | 107
-rw-r--r--  deps/v8/test/intl/date-format/timezone.js | 65
-rw-r--r--  deps/v8/test/intl/date-format/utils.js | 36
-rw-r--r--  deps/v8/test/intl/date-format/wellformed-unsupported-locale.js | 32
-rw-r--r--  deps/v8/test/intl/general/empty-handle.js | 48
-rw-r--r--  deps/v8/test/intl/general/mapped-locale.js | 52
-rw-r--r--  deps/v8/test/intl/general/supported-locales-of.js | 43
-rw-r--r--  deps/v8/test/intl/general/v8Intl-exists.js | 36
-rw-r--r--  deps/v8/test/intl/intl.status | 41
-rw-r--r--  deps/v8/test/intl/number-format/check-digit-ranges.js | 56
-rw-r--r--  deps/v8/test/intl/number-format/default-locale.js | 44
-rw-r--r--  deps/v8/test/intl/number-format/format-is-bound.js | 39
-rw-r--r--  deps/v8/test/intl/number-format/parse-currency.js | 33
-rw-r--r--  deps/v8/test/intl/number-format/parse-decimal.js | 39
-rw-r--r--  deps/v8/test/intl/number-format/parse-invalid-input.js | 38
-rw-r--r--  deps/v8/test/intl/number-format/parse-percent.js | 36
-rw-r--r--  deps/v8/test/intl/number-format/property-override.js | 78
-rw-r--r--  deps/v8/test/intl/number-format/protected-icu-internals.js | 49
-rw-r--r--  deps/v8/test/intl/number-format/resolved-options-is-method.js | 40
-rw-r--r--  deps/v8/test/intl/number-format/wellformed-unsupported-locale.js | 32
-rw-r--r--  deps/v8/test/intl/overrides/caching.js | 60
-rw-r--r--  deps/v8/test/intl/overrides/date.js | 65
-rw-r--r--  deps/v8/test/intl/overrides/number.js | 53
-rw-r--r--  deps/v8/test/intl/overrides/security.js | 50
-rw-r--r--  deps/v8/test/intl/overrides/string.js | 69
-rw-r--r--  deps/v8/test/intl/overrides/webkit-tests.js | 32
-rw-r--r--  deps/v8/test/intl/testcfg.py | 72
-rw-r--r--  deps/v8/test/intl/utils.js | 40
-rw-r--r--  deps/v8/test/mjsunit/allocation-folding.js | 46
-rw-r--r--  deps/v8/test/mjsunit/allocation-site-info.js | 73
-rw-r--r--  deps/v8/test/mjsunit/array-bounds-check-removal.js | 6
-rw-r--r--  deps/v8/test/mjsunit/array-constructor-feedback.js | 25
-rw-r--r--  deps/v8/test/mjsunit/array-feedback.js | 7
-rw-r--r--  deps/v8/test/mjsunit/array-literal-feedback.js | 6
-rw-r--r--  deps/v8/test/mjsunit/array-literal-transitions.js | 17
-rw-r--r--  deps/v8/test/mjsunit/array-natives-elements.js | 1
-rw-r--r--  deps/v8/test/mjsunit/assert-opt-and-deopt.js | 2
-rw-r--r--  deps/v8/test/mjsunit/bugs/bug-2758.js | 49
-rw-r--r--  deps/v8/test/mjsunit/compiler/inline-arguments.js | 2
-rw-r--r--  deps/v8/test/mjsunit/compiler/minus-zero.js | 37
-rw-r--r--  deps/v8/test/mjsunit/compiler/parallel-proto-change.js | 15
-rw-r--r--  deps/v8/test/mjsunit/compiler/phi-representations.js | 56
-rw-r--r--  deps/v8/test/mjsunit/constant-folding-2.js | 2
-rw-r--r--  deps/v8/test/mjsunit/count-based-osr.js | 4
-rw-r--r--  deps/v8/test/mjsunit/date.js | 5
-rw-r--r--  deps/v8/test/mjsunit/debug-break-inline.js | 1
-rw-r--r--  deps/v8/test/mjsunit/debug-evaluate-closure.js | 91
-rw-r--r--  deps/v8/test/mjsunit/debug-script.js | 3
-rw-r--r--  deps/v8/test/mjsunit/deopt-minus-zero.js | 15
-rw-r--r--  deps/v8/test/mjsunit/double-truncation.js | 78
-rw-r--r--  deps/v8/test/mjsunit/elements-kind.js | 12
-rw-r--r--  deps/v8/test/mjsunit/elements-transition-and-store.js | 41
-rw-r--r--  deps/v8/test/mjsunit/elements-transition-hoisting.js | 17
-rw-r--r--  deps/v8/test/mjsunit/elide-double-hole-check-9.js | 2
-rw-r--r--  deps/v8/test/mjsunit/external-array-no-sse2.js | 6
-rw-r--r--  deps/v8/test/mjsunit/external-array.js | 6
-rw-r--r--  deps/v8/test/mjsunit/function-call.js | 4
-rw-r--r--  deps/v8/test/mjsunit/generated-transition-stub.js | 340
-rw-r--r--  deps/v8/test/mjsunit/harmony/array-iterator.js | 195
-rw-r--r--  deps/v8/test/mjsunit/harmony/block-let-crankshaft.js | 4
-rw-r--r--  deps/v8/test/mjsunit/harmony/collections.js | 65
-rw-r--r--  deps/v8/test/mjsunit/harmony/dataview-accessors.js | 107
-rw-r--r--  deps/v8/test/mjsunit/harmony/numeric-literals-off.js | 41
-rw-r--r--  deps/v8/test/mjsunit/harmony/numeric-literals.js | 87
-rw-r--r--  deps/v8/test/mjsunit/harmony/object-observe.js | 3
-rw-r--r--  deps/v8/test/mjsunit/harmony/proxies-for.js | 20
-rw-r--r--  deps/v8/test/mjsunit/harmony/proxies-with.js | 446
-rw-r--r--  deps/v8/test/mjsunit/harmony/typedarrays.js | 57
-rw-r--r--  deps/v8/test/mjsunit/manual-parallel-recompile.js | 19
-rw-r--r--  deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js | 4
-rw-r--r--  deps/v8/test/mjsunit/md5.js | 211
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.js | 29
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 4
-rw-r--r--  deps/v8/test/mjsunit/never-optimize.js | 63
-rw-r--r--  deps/v8/test/mjsunit/opt-elements-kind.js | 6
-rw-r--r--  deps/v8/test/mjsunit/osr-elements-kind.js | 6
-rw-r--r--  deps/v8/test/mjsunit/parallel-initial-prototype-change.js | 15
-rw-r--r--  deps/v8/test/mjsunit/parallel-invalidate-transition-map.js | 18
-rw-r--r--  deps/v8/test/mjsunit/parallel-optimize-disabled.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/poly_count_operation.js | 155
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1118.js | 4
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1713b.js | 126
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-173361.js | 33
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2132.js | 4
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2250.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2315.js | 3
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2339.js | 16
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2451.js | 3
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-252797.js | 57
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2537.js | 3
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2618.js | 11
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2711.js | 33
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-97116b.js | 50
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-150545.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-173907b.js | 88
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-259300.js | 49
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-260345.js | 59
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-deopt-gcb.js | 49
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js | 82
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js | 20
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js | 52
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js | 45
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js | 5
-rw-r--r--  deps/v8/test/mjsunit/tools/profviz-test.default | 1566
-rw-r--r--  deps/v8/test/mjsunit/tools/profviz.js | 83
-rw-r--r--  deps/v8/test/mjsunit/transition-elements-kind.js | 48
-rw-r--r--  deps/v8/test/mjsunit/unbox-double-arrays.js | 3
-rw-r--r--  deps/v8/tools/blink_tests/TestExpectations | 8
-rwxr-xr-x  deps/v8/tools/grokdump.py | 5
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 37
-rw-r--r--  deps/v8/tools/oom_dump/oom_dump.cc | 1
-rwxr-xr-x  deps/v8/tools/presubmit.py | 36
-rw-r--r--  deps/v8/tools/profviz/composer.js | 4
463 files changed, 22829 insertions, 11396 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 3c113b2f75..e05d039949 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,61 @@
+2013-07-22: Version 3.20.7
+
+ Deprecated some debugger methods.
+
+ Fixed wrong bailout id in polymorphic stores (Chromium issue 259787).
+
+ Fixed data race in SamplingCircularQueue (Chromium issue 251218).
+
+ Fixed type feedback in presence of negative lookups
+ (Chromium issue 252797).
+
+ Do not materialize context-allocated values for debug-evaluate
+ (Chromium issue 259300).
+
+ Synchronized Compare-Literal behavior in FullCodegen and Hydrogen
+ (Chromium issue 260345).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-07-17: Version 3.20.6
+
+ Try to remove invalidated stubs before falling back to checking the
+ constant state (Chromium issue 260585).
+
+ Fixed gyp_v8 to work with use_system_icu=1 (issue 2475).
+
+ Fixed sloppy-mode 'const' under Harmony flag (Chromium issue 173361).
+
+ Use internal array as API function cache (Chromium issue 260106).
+
+ Fixed possible stack overflow in range analysis
+ (Chromium issue 259452).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-07-15: Version 3.20.5
+
+ Ensured that the length of frozen arrays is immutable
+ (issue 2711, Chromium issue 259548).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-07-10: Version 3.20.4
+
+ Fixed garbage-collection issue that causes a crash on ARM
+ (Chromium issue 254570)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-07-08: Version 3.20.3
+
+ Performance and stability improvements on all platforms.
+
+
2013-07-05: Version 3.20.2
Remove deprecated heap profiler methods from V8 public API
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 4f1a5cfb26..ccbaccbe22 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,7 +8,7 @@ deps = {
"http://gyp.googlecode.com/svn/trunk@1656",
"v8/third_party/icu":
- "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@205936",
+ "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@210659",
}
deps_os = {
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 16c004f32d..1b0b7f4d7d 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -34,6 +34,7 @@ TESTJOBS ?=
GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
+ANDROID_NDK_HOST_ARCH ?=
ANDROID_TOOLCHAIN ?=
ANDROID_V8 ?= /data/local/tmp/v8
NACL_SDK_ROOT ?=
@@ -91,6 +92,10 @@ endif
ifeq ($(vtunejit), on)
GYPFLAGS += -Dv8_enable_vtunejit=1
endif
+# optdebug=on
+ifeq ($(optdebug), on)
+ GYPFLAGS += -Dv8_optimized_debug=1
+endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
GYPFLAGS += -Dv8_enable_debugger_support=0
@@ -210,9 +215,9 @@ ANDROID_ARCHES = android_ia32 android_arm android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
-GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
- preparser/preparser.gyp samples/samples.gyp src/d8.gyp \
- test/cctest/cctest.gyp tools/gyp/v8.gyp
+GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
+ build/toolchain.gypi preparser/preparser.gyp samples/samples.gyp \
+ src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
@@ -352,6 +357,7 @@ clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.cl
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
+ PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
@@ -359,6 +365,7 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
-S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
+ PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
@@ -401,4 +408,4 @@ dependencies:
--revision 1656
svn checkout --force \
https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
- third_party/icu --revision 205936
+ third_party/icu --revision 210659
diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android
index aeff01c665..2d45d3bb12 100644
--- a/deps/v8/Makefile.android
+++ b/deps/v8/Makefile.android
@@ -35,11 +35,12 @@ ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
+ANDROID_NDK_HOST_ARCH ?= $(shell uname -m | sed -e 's/i[3456]86/x86/')
ifeq ($(HOST_OS), linux)
- TOOLCHAIN_DIR = linux-x86
+ TOOLCHAIN_DIR = linux-$(ANDROID_NDK_HOST_ARCH)
else
ifeq ($(HOST_OS), mac)
- TOOLCHAIN_DIR = darwin-x86
+ TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
else
$(error Host platform "${HOST_OS}" is not supported)
endif
@@ -67,7 +68,9 @@ endif
TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
- $(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}")
+ $(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
+ check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
+ correctly)
endif
# For mksnapshot host generation.
@@ -93,6 +96,7 @@ $(ANDROID_MAKEFILES):
GYP_DEFINES="${DEFINES}" \
CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
+ PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.${ARCH} ${GYPFLAGS}
diff --git a/deps/v8/Makefile.nacl b/deps/v8/Makefile.nacl
index 0c98021ed1..02e83ef2bc 100644
--- a/deps/v8/Makefile.nacl
+++ b/deps/v8/Makefile.nacl
@@ -91,6 +91,8 @@ $(NACL_MAKEFILES):
GYP_DEFINES="${GYPENV}" \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
+ PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
- -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
+ -S.$(subst .,,$(suffix $@)) $(GYPFLAGS) \
+ -Dwno_array_bounds=-Wno-array-bounds
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 7d66203845..1f176e08bd 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -44,7 +44,8 @@ def _V8PresubmitChecks(input_api, output_api):
results.append(output_api.PresubmitError("C++ lint check failed"))
if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
- "Copyright header and trailing whitespaces check failed"))
+ "Copyright header, trailing whitespaces and two empty lines " \
+ "between declarations check failed"))
return results
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
new file mode 100644
index 0000000000..3c6d25f758
--- /dev/null
+++ b/deps/v8/build/features.gypi
@@ -0,0 +1,111 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Compile time controlled V8 features.
+
+{
+ 'variables': {
+ 'v8_compress_startup_data%': 'off',
+
+ 'v8_enable_debugger_support%': 1,
+
+ 'v8_enable_disassembler%': 0,
+
+ 'v8_enable_gdbjit%': 0,
+
+ 'v8_object_print%': 0,
+
+ 'v8_enable_verify_heap%': 0,
+
+ 'v8_use_snapshot%': 'true',
+
+ # With post mortem support enabled, metadata is embedded into libv8 that
+ # describes various parameters of the VM for use by debuggers. See
+ # tools/gen-postmortem-metadata.py for details.
+ 'v8_postmortem_support%': 'false',
+
+ # Interpreted regexp engine exists as platform-independent alternative
+ # based where the regular expression is compiled to a bytecode.
+ 'v8_interpreted_regexp%': 0,
+
+ # Enable ECMAScript Internationalization API. Enabling this feature will
+ # add a dependency on the ICU library.
+ 'v8_enable_i18n_support%': 0,
+ },
+ 'target_defaults': {
+ 'conditions': [
+ ['v8_enable_debugger_support==1', {
+ 'defines': ['ENABLE_DEBUGGER_SUPPORT',],
+ }],
+ ['v8_enable_disassembler==1', {
+ 'defines': ['ENABLE_DISASSEMBLER',],
+ }],
+ ['v8_enable_gdbjit==1', {
+ 'defines': ['ENABLE_GDB_JIT_INTERFACE',],
+ }],
+ ['v8_object_print==1', {
+ 'defines': ['OBJECT_PRINT',],
+ }],
+ ['v8_enable_verify_heap==1', {
+ 'defines': ['VERIFY_HEAP',],
+ }],
+ ['v8_interpreted_regexp==1', {
+ 'defines': ['V8_INTERPRETED_REGEXP',],
+ }],
+ ['v8_enable_i18n_support==1', {
+ 'defines': ['V8_I18N_SUPPORT',],
+ }],
+ ['v8_compress_startup_data=="bz2"', {
+ 'defines': [
+ 'COMPRESS_STARTUP_DATA_BZ2',
+ ],
+ }],
+ ], # conditions
+ 'configurations': {
+ 'Debug': {
+ 'variables': {
+ 'v8_enable_extra_checks%': 1,
+ },
+ 'conditions': [
+ ['v8_enable_extra_checks==1', {
+ 'defines': ['ENABLE_EXTRA_CHECKS',],
+ }],
+ ],
+ }, # Debug
+ 'Release': {
+ 'variables': {
+ 'v8_enable_extra_checks%': 0,
+ },
+ 'conditions': [
+ ['v8_enable_extra_checks==1', {
+ 'defines': ['ENABLE_EXTRA_CHECKS',],
+ }],
+ ], # conditions
+ }, # Release
+ }, # configurations
+ }, # target_defaults
+}
diff --git a/deps/v8/build/gyp_v8 b/deps/v8/build/gyp_v8
index 73a66a72fd..92e6503925 100755
--- a/deps/v8/build/gyp_v8
+++ b/deps/v8/build/gyp_v8
@@ -47,6 +47,10 @@ if __name__ == '__main__':
sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
import gyp
+# Add paths so that pymod_do_main(...) can import files.
+sys.path.insert(
+ 1, os.path.abspath(os.path.join(v8_root, 'tools', 'generate_shim_headers')))
+
def apply_gyp_environment(file_path=None):
"""
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index b1303c8122..ab2dfd528e 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -28,11 +28,15 @@
# Definitions to be used when building stand-alone V8 binaries.
{
+ # We need to include toolchain.gypi here for third-party sources that don't
+ # directly include it themselves.
+ 'includes': ['toolchain.gypi'],
'variables': {
'component%': 'static_library',
'clang%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
+ 'v8_enable_i18n_support%': 0,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@@ -128,6 +132,9 @@
},
}],
['OS == "win"', {
+ 'defines!': [
+ 'DEBUG',
+ ],
'msvs_settings': {
'VCCLCompilerTool': {
'WarnAsError': 'false',
@@ -218,6 +225,14 @@
# 1 == /SUBSYSTEM:CONSOLE
# 2 == /SUBSYSTEM:WINDOWS
'SubSystem': '1',
+
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'AdditionalDependencies': [
+ 'advapi32.lib',
+ ],
+ }],
+ ],
},
},
},
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/toolchain.gypi
index dbb33a867b..95e2cd2ef5 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -1,4 +1,4 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
+# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -32,7 +32,6 @@
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command.
- 'v8_compress_startup_data%': 'off',
'v8_target_arch%': '<(target_arch)',
# Native Client builds currently use the V8 ARM JIT and
# arm/simulator-arm.cc to defer the significant effort required
@@ -42,14 +41,6 @@
# NaCl V8 builds stop using the ARM simulator
'nacl_target_arch%': 'none', # must be set externally
- # Setting 'v8_can_use_unaligned_accesses' to 'true' will allow the code
- # generated by V8 to do unaligned memory access, and setting it to 'false'
- # will ensure that the generated code will always do aligned memory
- # accesses. The default value of 'default' will try to determine the correct
- # setting. Note that for Intel architectures (ia32 and x64) unaligned memory
- # access is allowed for all CPUs.
- 'v8_can_use_unaligned_accesses%': 'default',
-
# Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
# registers d16-d31 in the generated code, both in the snapshot and for the
# ARM target. Leaving the default value of 'false' will avoid the use of
@@ -67,21 +58,14 @@
# Default arch variant for MIPS.
'mips_arch_variant%': 'mips32r2',
- 'v8_enable_debugger_support%': 1,
-
'v8_enable_backtrace%': 0,
- 'v8_enable_disassembler%': 0,
-
- 'v8_enable_gdbjit%': 0,
-
- 'v8_object_print%': 0,
+ # Turns on compiler optimizations in Debug builds (#defines are unaffected).
+ 'v8_optimized_debug%': 0,
# Enable profiling support. Only required on Windows.
'v8_enable_prof%': 0,
- 'v8_enable_verify_heap%': 0,
-
# Some versions of GCC 4.5 seem to need -fno-strict-aliasing.
'v8_no_strict_aliasing%': 0,
@@ -89,49 +73,16 @@
# it's handled in build/standalone.gypi.
'want_separate_host_toolset%': 1,
- 'v8_use_snapshot%': 'true',
'host_os%': '<(OS)',
'werror%': '-Werror',
-
- # With post mortem support enabled, metadata is embedded into libv8 that
- # describes various parameters of the VM for use by debuggers. See
- # tools/gen-postmortem-metadata.py for details.
- 'v8_postmortem_support%': 'false',
-
# For a shared library build, results in "libv8-<(soname_version).so".
'soname_version%': '',
- # Interpreted regexp engine exists as platform-independent alternative
- # based where the regular expression is compiled to a bytecode.
- 'v8_interpreted_regexp%': 0,
-
- # Enable ECMAScript Internationalization API. Enabling this feature will
- # add a dependency on the ICU library.
- 'v8_enable_i18n_support%': 0,
+ # Allow to suppress the array bounds warning (default is no suppression).
+ 'wno_array_bounds%': '',
},
'target_defaults': {
'conditions': [
- ['v8_enable_debugger_support==1', {
- 'defines': ['ENABLE_DEBUGGER_SUPPORT',],
- }],
- ['v8_enable_disassembler==1', {
- 'defines': ['ENABLE_DISASSEMBLER',],
- }],
- ['v8_enable_gdbjit==1', {
- 'defines': ['ENABLE_GDB_JIT_INTERFACE',],
- }],
- ['v8_object_print==1', {
- 'defines': ['OBJECT_PRINT',],
- }],
- ['v8_enable_verify_heap==1', {
- 'defines': ['VERIFY_HEAP',],
- }],
- ['v8_interpreted_regexp==1', {
- 'defines': ['V8_INTERPRETED_REGEXP',],
- }],
- ['v8_enable_i18n_support==1', {
- 'defines': ['V8_I18N_SUPPORT',],
- }],
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
@@ -391,11 +342,6 @@
},
'msvs_configuration_platform': 'x64',
}], # v8_target_arch=="x64"
- ['v8_compress_startup_data=="bz2"', {
- 'defines': [
- 'COMPRESS_STARTUP_DATA_BZ2',
- ],
- }],
['OS=="win"', {
'defines': [
'WIN32',
@@ -488,9 +434,6 @@
], # conditions
'configurations': {
'Debug': {
- 'variables': {
- 'v8_enable_extra_checks%': 1,
- },
'defines': [
'DEBUG',
'ENABLE_DISASSEMBLER',
@@ -500,14 +443,22 @@
],
'msvs_settings': {
'VCCLCompilerTool': {
- 'Optimization': '0',
-
'conditions': [
- ['OS=="win" and component=="shared_library"', {
+ ['component=="shared_library"', {
'RuntimeLibrary': '3', # /MDd
}, {
'RuntimeLibrary': '1', # /MTd
}],
+ ['v8_optimized_debug==1', {
+ 'Optimization': '1',
+ 'InlineFunctionExpansion': '2',
+ 'EnableIntrinsicFunctions': 'true',
+ 'FavorSizeOrSpeed': '0',
+ 'StringPooling': 'true',
+ 'BasicRuntimeChecks': '0',
+ }, {
+ 'Optimization': '0',
+ }],
],
},
'VCLinkerTool': {
@@ -515,12 +466,30 @@
},
},
'conditions': [
- ['v8_enable_extra_checks==1', {
- 'defines': ['ENABLE_EXTRA_CHECKS',],
- }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
- '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
+ '-Wnon-virtual-dtor', '-Woverloaded-virtual',
+ '<(wno_array_bounds)' ],
+ 'conditions': [
+ ['v8_optimized_debug==1', {
+ 'cflags!': [
+ '-O0',
+ '-O2',
+ '-Os',
+ ],
+ 'cflags': [
+ '-fdata-sections',
+ '-ffunction-sections',
+ '-O1',
+ ],
+ }],
+ ['v8_optimized_debug==1 and gcc_version==44 and clang==0', {
+ 'cflags': [
+ # Avoid crashes with gcc 4.4 in the v8 test suite.
+ '-fno-tree-vrp',
+ ],
+ }],
+ ],
}],
['OS=="linux" and v8_enable_backtrace==1', {
# Support for backtrace_symbols.
@@ -542,19 +511,20 @@
}],
['OS=="mac"', {
'xcode_settings': {
- 'GCC_OPTIMIZATION_LEVEL': '0', # -O0
+ 'conditions': [
+ ['v8_optimized_debug==1', {
+ 'GCC_OPTIMIZATION_LEVEL': '1', # -O1
+ 'GCC_STRICT_ALIASING': 'YES',
+ }, {
+ 'GCC_OPTIMIZATION_LEVEL': '0', # -O0
+ }],
+ ],
},
}],
],
}, # Debug
'Release': {
- 'variables': {
- 'v8_enable_extra_checks%': 0,
- },
'conditions': [
- ['v8_enable_extra_checks==1', {
- 'defines': ['ENABLE_EXTRA_CHECKS',],
- }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags!': [
'-O2',
@@ -564,6 +534,7 @@
'-fdata-sections',
'-ffunction-sections',
'-O3',
+ '<(wno_array_bounds)',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
@@ -613,7 +584,7 @@
'FavorSizeOrSpeed': '0',
'StringPooling': 'true',
'conditions': [
- ['OS=="win" and component=="shared_library"', {
+ ['component=="shared_library"', {
'RuntimeLibrary': '2', #/MD
}, {
'RuntimeLibrary': '0', #/MT
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index f432de0be8..e488aaa889 100755
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -245,8 +245,9 @@ class EXPORT Debug {
typedef void (*DebugMessageDispatchHandler)();
// Set a C debug event listener.
- static bool SetDebugEventListener(EventCallback that,
- Handle<Value> data = Handle<Value>());
+ V8_DEPRECATED(static bool SetDebugEventListener(
+ EventCallback that,
+ Handle<Value> data = Handle<Value>()));
static bool SetDebugEventListener2(EventCallback2 that,
Handle<Value> data = Handle<Value>());
@@ -274,8 +275,9 @@ class EXPORT Debug {
// Message based interface. The message protocol is JSON. NOTE the message
// handler thread is not supported any more parameter must be false.
- static void SetMessageHandler(MessageHandler handler,
- bool message_handler_thread = false);
+ V8_DEPRECATED(static void SetMessageHandler(
+ MessageHandler handler,
+ bool message_handler_thread = false));
static void SetMessageHandler2(MessageHandler2 handler);
// If no isolate is provided the default isolate is
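For embedders affected by the deprecations above, the non-deprecated "2"-suffixed setters declared in this header accept the same optional data argument. A minimal migration sketch follows; the callback signatures are an assumption based on the 3.20-era EventCallback2 and MessageHandler2 typedefs (not shown in this hunk), and the handler bodies are placeholders:

    // Hedged sketch: registering debug hooks via the non-deprecated setters.
    // Assumes EventCallback2 takes const v8::Debug::EventDetails& and
    // MessageHandler2 takes const v8::Debug::Message& (3.20-era typedefs).
    #include <v8-debug.h>

    static void OnDebugEvent(const v8::Debug::EventDetails& details) {
      // Inspect break/exception events here (placeholder).
    }

    static void OnDebugMessage(const v8::Debug::Message& message) {
      // Handle JSON debugger protocol messages here (placeholder).
    }

    void InstallDebugHooks() {
      // Replaces the deprecated SetDebugEventListener(...).
      v8::Debug::SetDebugEventListener2(&OnDebugEvent);
      // Replaces the deprecated SetMessageHandler(handler, false).
      v8::Debug::SetMessageHandler2(&OnDebugMessage);
    }
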
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index df3bd9ff6b..cf28341300 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -181,18 +181,9 @@ class V8EXPORT CpuProfiler {
*/
int GetProfileCount();
- /** Deprecated. Use GetCpuProfile with single parameter. */
- V8_DEPRECATED(const CpuProfile* GetCpuProfile(
- int index,
- Handle<Value> security_token));
/** Returns a profile by index. */
const CpuProfile* GetCpuProfile(int index);
- /** Returns a profile by uid. */
- V8_DEPRECATED(const CpuProfile* FindCpuProfile(
- unsigned uid,
- Handle<Value> security_token = Handle<Value>()));
-
/**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
@@ -207,12 +198,6 @@ class V8EXPORT CpuProfiler {
void StartCpuProfiling(Handle<String> title, bool record_samples = false);
/**
- * Deprecated. Use StopCpuProfiling with one parameter instead.
- */
- V8_DEPRECATED(const CpuProfile* StopCpuProfiling(
- Handle<String> title,
- Handle<Value> security_token));
- /**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
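With the security-token and uid overloads removed, profile collection reduces to the single-parameter methods kept above. A hedged sketch of the replacement calls, assuming the embedder has already obtained a v8::CpuProfiler for its isolate:

    // Hedged sketch: collect and enumerate profiles with the surviving
    // single-parameter CpuProfiler methods from this header.
    const v8::CpuProfile* CollectProfile(v8::CpuProfiler* profiler,
                                         v8::Handle<v8::String> title) {
      profiler->StartCpuProfiling(title, true /* record_samples */);
      // ... run the JavaScript workload being measured ...
      const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
      // Enumeration is now by index only; FindCpuProfile(uid, token) and
      // the two-argument GetCpuProfile are gone.
      for (int i = 0; i < profiler->GetProfileCount(); ++i) {
        const v8::CpuProfile* p = profiler->GetCpuProfile(i);
        (void)p;  // inspect or report as needed
      }
      return profile;
    }
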
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 3afb83572a..9ce05831b4 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -2377,6 +2377,7 @@ class V8EXPORT Function : public Object {
};
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
+// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
#endif
@@ -2489,6 +2490,12 @@ class V8EXPORT ArrayBuffer : public Object {
};
+#ifndef V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT
+// The number of required internal fields can be defined by embedder.
+#define V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT 2
+#endif
+
+
/**
* A base class for an instance of one of "views" over ArrayBuffer,
* including TypedArrays and DataView (ES6 draft 15.13).
@@ -2516,6 +2523,9 @@ class V8EXPORT ArrayBufferView : public Object {
V8_INLINE(static ArrayBufferView* Cast(Value* obj));
+ static const int kInternalFieldCount =
+ V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
+
private:
ArrayBufferView();
static void CheckCast(Value* obj);
@@ -4689,6 +4699,12 @@ class V8EXPORT V8 {
*/
static int ContextDisposedNotification();
+ /**
+ * Initialize the ICU library bundled with V8. The embedder should only
+ * invoke this method when using the bundled ICU. Returns true on success.
+ */
+ static bool InitializeICU();
+
private:
V8();
@@ -5383,7 +5399,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 131;
+ static const int kEmptyStringRootIndex = 132;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5393,7 +5409,7 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xb0;
+ static const int kJSObjectType = 0xb1;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x88;
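The sample programs updated in this commit (lineprocessor.cc, process.cc, shell.cc) show the intended call site for the new InitializeICU(): once at process startup, before any other V8 API use. A minimal embedder sketch mirroring those samples:

    // Hedged sketch of an embedder's startup sequence. InitializeICU()
    // returns true on success and is only meaningful when V8 was built
    // with the bundled ICU (e.g. v8_enable_i18n_support=1 in features.gypi).
    #include <v8.h>

    int main(int argc, char* argv[]) {
      v8::V8::InitializeICU();
      v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
      // ... create an isolate/context and run scripts ...
      v8::V8::Dispose();
      return 0;
    }
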
diff --git a/deps/v8/preparser/preparser.gyp b/deps/v8/preparser/preparser.gyp
index 598f0a511e..23cbfff644 100644
--- a/deps/v8/preparser/preparser.gyp
+++ b/deps/v8/preparser/preparser.gyp
@@ -29,7 +29,7 @@
'variables': {
'v8_code': 1,
},
- 'includes': ['../build/common.gypi'],
+ 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
{
'target_name': 'preparser',
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 214af057db..42048202fd 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -322,7 +322,9 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
return true;
}
+
int main(int argc, char* argv[]) {
+ v8::V8::InitializeICU();
int result = RunMain(argc, argv);
v8::V8::Dispose();
return result;
@@ -421,6 +423,7 @@ void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(ReadLine());
}
+
v8::Handle<v8::String> ReadLine() {
const int kBufferSize = 1024 + 1;
char buffer[kBufferSize];
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index 97eec14dc3..844aee3d45 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -54,6 +54,7 @@ class HttpRequest {
virtual const string& UserAgent() = 0;
};
+
/**
* The abstract superclass of http request processors.
*/
@@ -72,6 +73,7 @@ class HttpRequestProcessor {
static void Log(const char* event);
};
+
/**
* An http request processor that is scriptable using JavaScript.
*/
@@ -135,6 +137,7 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
static Persistent<ObjectTemplate> map_template_;
};
+
// -------------------------
// --- P r o c e s s o r ---
// -------------------------
@@ -624,6 +627,7 @@ void PrintMap(map<string, string>* m) {
int main(int argc, char* argv[]) {
+ v8::V8::InitializeICU();
map<string, string> options;
string file;
ParseOptions(argc, argv, options, &file);
diff --git a/deps/v8/samples/samples.gyp b/deps/v8/samples/samples.gyp
index 8b1de7eb42..be7b9ea696 100644
--- a/deps/v8/samples/samples.gyp
+++ b/deps/v8/samples/samples.gyp
@@ -28,8 +28,9 @@
{
'variables': {
'v8_code': 1,
+ 'v8_enable_i18n_support%': 0,
},
- 'includes': ['../build/common.gypi'],
+ 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'target_defaults': {
'type': 'executable',
'dependencies': [
@@ -38,6 +39,19 @@
'include_dirs': [
'../include',
],
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
+ '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ ],
+ }],
+ ['OS=="win" and v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ ],
+ }],
+ ],
},
'targets': [
{
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index a0af931b23..710547c341 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -66,6 +66,7 @@ static bool run_shell;
int main(int argc, char* argv[]) {
+ v8::V8::InitializeICU();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
run_shell = (argc == 1);
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index e441de47ee..51db3615c3 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -450,26 +450,23 @@ Handle<Object> Accessors::FunctionGetPrototype(Handle<Object> object) {
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
Isolate* isolate = Isolate::Current();
- JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
- if (function == NULL) return isolate->heap()->undefined_value();
- while (!function->should_have_prototype()) {
- function = FindInstanceOf<JSFunction>(isolate, function->GetPrototype());
+ JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
+ if (function_raw == NULL) return isolate->heap()->undefined_value();
+ while (!function_raw->should_have_prototype()) {
+ function_raw = FindInstanceOf<JSFunction>(isolate,
+ function_raw->GetPrototype());
// There has to be one because we hit the getter.
- ASSERT(function != NULL);
+ ASSERT(function_raw != NULL);
}
- if (!function->has_prototype()) {
- Object* prototype;
- { MaybeObject* maybe_prototype
- = isolate->heap()->AllocateFunctionPrototype(function);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
- Object* result;
- { MaybeObject* maybe_result = function->SetPrototype(prototype);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ if (!function_raw->has_prototype()) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> function(function_raw);
+ Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function);
+ JSFunction::SetPrototype(function, proto);
+ function_raw = *function;
}
- return function->prototype();
+ return function_raw->prototype();
}
@@ -503,9 +500,7 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
old_value = isolate->factory()->NewFunctionPrototype(function);
}
- Handle<Object> result;
- MaybeObject* maybe_result = function->SetPrototype(*value);
- if (!maybe_result->ToHandle(&result, isolate)) return maybe_result;
+ JSFunction::SetPrototype(function, value);
ASSERT(function->prototype() == *value);
if (is_observed && !old_value->SameValue(*value)) {
@@ -581,6 +576,13 @@ const AccessorDescriptor Accessors::FunctionName = {
//
+Handle<Object> Accessors::FunctionGetArguments(Handle<Object> object) {
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(
+ isolate, Accessors::FunctionGetArguments(*object, 0), Object);
+}
+
+
static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 9a83ab8a85..ae56a3d444 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -77,14 +77,12 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
- MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
- void*);
static Handle<Object> FunctionGetPrototype(Handle<Object> object);
+ static Handle<Object> FunctionGetArguments(Handle<Object> object);
MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
Object* value,
void*);
- static MaybeObject* FunctionGetArguments(Object* object, void*);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
@@ -92,8 +90,10 @@ class Accessors : public AllStatic {
private:
// Accessor functions only used through the descriptor.
+ static MaybeObject* FunctionGetPrototype(Object* object, void*);
static MaybeObject* FunctionGetLength(Object* object, void*);
static MaybeObject* FunctionGetName(Object* object, void*);
+ static MaybeObject* FunctionGetArguments(Object* object, void*);
static MaybeObject* FunctionGetCaller(Object* object, void*);
MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
Object* value, void*);
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 638a25f317..c93b23c471 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -45,6 +45,7 @@
#include "global-handles.h"
#include "heap-profiler.h"
#include "heap-snapshot-generator-inl.h"
+#include "icu_util.h"
#include "messages.h"
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include "natives.h"
@@ -298,6 +299,7 @@ static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
return (obj == 0) ? ReportEmptyHandle(location) : false;
}
+
// --- S t a t i c s ---
@@ -322,6 +324,7 @@ static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
return ApiCheck(InitializeHelper(isolate), location, "Error initializing V8");
}
+
// Some initializing API functions are called early and may be
// called on a thread different from static initializer thread.
// If Isolate API is used, Isolate::Enter() will initialize TLS so
@@ -401,6 +404,7 @@ enum CompressedStartupDataItems {
kCompressedStartupDataCount
};
+
int V8::GetCompressedStartupDataCount() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
return kCompressedStartupDataCount;
@@ -670,6 +674,7 @@ void V8::DisposeGlobal(i::Object** obj) {
i::GlobalHandles::Destroy(obj);
}
+
// --- H a n d l e s ---
@@ -4422,6 +4427,7 @@ bool String::IsOneByte() const {
return str->HasOnlyOneByteChars();
}
+
// Helpers for ContainsOnlyOneByteHelper
template<size_t size> struct OneByteMask;
template<> struct OneByteMask<4> {
@@ -4435,6 +4441,8 @@ static const uintptr_t kAlignmentMask = sizeof(uintptr_t) - 1;
static inline bool Unaligned(const uint16_t* chars) {
return reinterpret_cast<const uintptr_t>(chars) & kAlignmentMask;
}
+
+
static inline const uint16_t* Align(const uint16_t* chars) {
return reinterpret_cast<uint16_t*>(
reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
@@ -5419,6 +5427,11 @@ int v8::V8::ContextDisposedNotification() {
}
+bool v8::V8::InitializeICU() {
+ return i::InitializeICU();
+}
+
+
const char* v8::V8::GetVersion() {
return i::Version::GetVersion();
}
@@ -6281,6 +6294,7 @@ bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
+
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
ApiCheck(!obj->is_external(),
@@ -6581,6 +6595,7 @@ v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) {
disallow_heap_allocation_ = new i::DisallowHeapAllocation();
}
+
v8::AssertNoGCScope::~AssertNoGCScope() {
delete static_cast<i::DisallowHeapAllocation*>(disallow_heap_allocation_);
}
@@ -6644,6 +6659,7 @@ void V8::SetCounterFunction(CounterLookupCallback callback) {
isolate->stats_table()->SetCounterFunction(callback);
}
+
void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
@@ -6652,6 +6668,7 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
isolate->counters()->ResetHistograms();
}
+
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
@@ -6999,6 +7016,7 @@ String::Value::~Value() {
i::DeleteArray(str_);
}
+
Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "RangeError");
@@ -7015,6 +7033,7 @@ Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
return Utils::ToLocal(result);
}
+
Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "ReferenceError");
@@ -7032,6 +7051,7 @@ Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
return Utils::ToLocal(result);
}
+
Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "SyntaxError");
@@ -7048,6 +7068,7 @@ Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
return Utils::ToLocal(result);
}
+
Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "TypeError");
@@ -7064,6 +7085,7 @@ Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
return Utils::ToLocal(result);
}
+
Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "Error");
@@ -7437,7 +7459,7 @@ void CpuProfile::Delete() {
i::CpuProfiler* profiler = isolate->cpu_profiler();
ASSERT(profiler != NULL);
profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
- if (profiler->GetProfilesCount() == 0 && !profiler->HasDetachedProfiles()) {
+ if (profiler->GetProfilesCount() == 0) {
// If this was the last profile, clean up all accessory data as well.
profiler->DeleteAllProfiles();
}
@@ -7484,27 +7506,9 @@ int CpuProfiler::GetProfileCount() {
}
-const CpuProfile* CpuProfiler::GetCpuProfile(int index,
- Handle<Value> security_token) {
- return reinterpret_cast<const CpuProfile*>(
- reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- index));
-}
-
-
const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
return reinterpret_cast<const CpuProfile*>(
- reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(NULL, index));
-}
-
-
-const CpuProfile* CpuProfiler::FindCpuProfile(unsigned uid,
- Handle<Value> security_token) {
- return reinterpret_cast<const CpuProfile*>(
- reinterpret_cast<i::CpuProfiler*>(this)->FindProfile(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- uid));
+ reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(index));
}
@@ -7514,19 +7518,9 @@ void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
}
-const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title,
- Handle<Value> security_token) {
- return reinterpret_cast<const CpuProfile*>(
- reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
- security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
- *Utils::OpenHandle(*title)));
-}
-
-
const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
- NULL,
*Utils::OpenHandle(*title)));
}
@@ -7562,6 +7556,7 @@ Handle<Value> HeapGraphEdge::GetName() const {
isolate->factory()->InternalizeUtf8String(edge->name()));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
+ case i::HeapGraphEdge::kWeak:
return ToApiHandle<Number>(
isolate->factory()->NewNumberFromInt(edge->index()));
default: UNREACHABLE();
@@ -7808,6 +7803,7 @@ void Testing::SetStressRunType(Testing::StressType type) {
internal::Testing::set_stress_type(type);
}
+
int Testing::GetStressRuns() {
if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
#ifdef DEBUG
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
index ad1d869415..ccbedd6d39 100644
--- a/deps/v8/src/apinatives.js
+++ b/deps/v8/src/apinatives.js
@@ -37,7 +37,7 @@ function CreateDate(time) {
}
-var kApiFunctionCache = {};
+var kApiFunctionCache = new InternalArray();
var functionCache = kApiFunctionCache;
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 89c0a3b3cd..d95946e964 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -49,6 +49,7 @@ bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cache_line_size_ = 64;
ExternalReference ExternalReference::cpu_features() {
@@ -56,6 +57,7 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
+
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
@@ -124,6 +126,9 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << VFP3 |
static_cast<uint64_t>(1) << ARMv7;
}
+ if (FLAG_enable_neon) {
+ supported_ |= 1u << NEON;
+ }
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= static_cast<uint64_t>(1) << ARMv7;
@@ -156,6 +161,10 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << ARMv7;
}
+ if (!IsSupported(NEON) && FLAG_enable_neon && OS::ArmCpuHasFeature(NEON)) {
+ found_by_runtime_probing_only_ |= 1u << NEON;
+ }
+
if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
}
@@ -170,12 +179,18 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
- if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
+ CpuImplementer implementer = OS::GetCpuImplementer();
+ if (implementer == QUALCOMM_IMPLEMENTER &&
FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
}
+ CpuPart part = OS::GetCpuPart(implementer);
+ if ((part == CORTEX_A9) || (part == CORTEX_A5)) {
+ cache_line_size_ = 32;
+ }
+
if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
&& OS::ArmCpuHasFeature(VFP32DREGS)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
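The probed cache_line_size_ (defaulting to 64, dropped to 32 for Cortex-A5/A9) feeds prefetch placement in the new memcpy helpers added to codegen-arm.cc below. A sketch of the relationship, with illustrative numbers only:

    #include <cstddef>

    // With 64-byte lines one PLD covers a 64-byte block; the 32-byte-line
    // parts need a second PLD at +32 (see the paired pld() calls guarded
    // by cache_line_size() == 32 in the memcpy helpers).
    size_t PrefetchesPerBlock(size_t cache_line_size, size_t block = 64) {
      return block / cache_line_size;  // 1 for 64-byte lines, 2 for 32-byte
    }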
@@ -246,11 +261,12 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
printf(
- "ARMv7=%d VFP3=%d VFP32DREGS=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
+ "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
"MOVW_MOVT_IMMEDIATE_LOADS=%d",
CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3),
CpuFeatures::IsSupported(VFP32DREGS),
+ CpuFeatures::IsSupported(NEON),
CpuFeatures::IsSupported(SUDIV),
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
@@ -356,6 +372,7 @@ MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
am_ = am;
}
+
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
rn_ = rn;
rm_ = rm;
@@ -376,6 +393,66 @@ MemOperand::MemOperand(Register rn, Register rm,
}
+NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
+ ASSERT((am == Offset) || (am == PostIndex));
+ rn_ = rn;
+ rm_ = (am == Offset) ? pc : sp;
+ SetAlignment(align);
+}
+
+
+NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
+ rn_ = rn;
+ rm_ = rm;
+ SetAlignment(align);
+}
+
+
+void NeonMemOperand::SetAlignment(int align) {
+ switch (align) {
+ case 0:
+ align_ = 0;
+ break;
+ case 64:
+ align_ = 1;
+ break;
+ case 128:
+ align_ = 2;
+ break;
+ case 256:
+ align_ = 3;
+ break;
+ default:
+ UNREACHABLE();
+ align_ = 0;
+ break;
+ }
+}
+
+
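SetAlignment() maps the architectural :64/:128/:256 address-alignment hints onto the 2-bit align(5-4) field consumed by the vld1/vst1 encodings below; a standalone restatement of the mapping (illustrative sketch, not the V8 class):

    #include <cassert>

    // 0 means "no alignment hint"; anything else must be one of the three
    // encodable hints, exactly as the UNREACHABLE() default enforces.
    int EncodeNeonAlign(int align) {
      switch (align) {
        case 0:   return 0;
        case 64:  return 1;
        case 128: return 2;
        case 256: return 3;
        default:  assert(false); return 0;
      }
    }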
+NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
+ base_ = base;
+ switch (registers_count) {
+ case 1:
+ type_ = nlt_1;
+ break;
+ case 2:
+ type_ = nlt_2;
+ break;
+ case 3:
+ type_ = nlt_3;
+ break;
+ case 4:
+ type_ = nlt_4;
+ break;
+ default:
+ UNREACHABLE();
+ type_ = nlt_1;
+ break;
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -677,6 +754,7 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
return instr & kOff12Mask;
}
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -1543,6 +1621,107 @@ void Assembler::bfi(Register dst,
}
+void Assembler::pkhbt(Register dst,
+ Register src1,
+ const Operand& src2,
+                      Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.125.
+ // cond(31-28) | 01101000(27-20) | Rn(19-16) |
+ // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src1.is(pc));
+ ASSERT(!src2.rm().is(pc));
+ ASSERT(!src2.rm().is(no_reg));
+ ASSERT(src2.rs().is(no_reg));
+ ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
+ ASSERT(src2.shift_op() == LSL);
+ emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
+ src2.shift_imm_*B7 | B4 | src2.rm().code());
+}
+
+
+void Assembler::pkhtb(Register dst,
+ Register src1,
+ const Operand& src2,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.125.
+ // cond(31-28) | 01101000(27-20) | Rn(19-16) |
+ // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src1.is(pc));
+ ASSERT(!src2.rm().is(pc));
+ ASSERT(!src2.rm().is(no_reg));
+ ASSERT(src2.rs().is(no_reg));
+ ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
+ ASSERT(src2.shift_op() == ASR);
+ int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
+ emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
+ asr*B7 | B6 | B4 | src2.rm().code());
+}
+
+
+void Assembler::uxtb(Register dst,
+ const Operand& src,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.274.
+ // cond(31-28) | 01101110(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src.rm().is(pc));
+ ASSERT(!src.rm().is(no_reg));
+ ASSERT(src.rs().is(no_reg));
+ ASSERT((src.shift_imm_ == 0) ||
+ (src.shift_imm_ == 8) ||
+ (src.shift_imm_ == 16) ||
+ (src.shift_imm_ == 24));
+ ASSERT(src.shift_op() == ROR);
+ emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
+ ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+}
+
+
+void Assembler::uxtab(Register dst,
+ Register src1,
+ const Operand& src2,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.271.
+ // cond(31-28) | 01101110(27-20) | Rn(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src1.is(pc));
+ ASSERT(!src2.rm().is(pc));
+ ASSERT(!src2.rm().is(no_reg));
+ ASSERT(src2.rs().is(no_reg));
+ ASSERT((src2.shift_imm_ == 0) ||
+ (src2.shift_imm_ == 8) ||
+ (src2.shift_imm_ == 16) ||
+ (src2.shift_imm_ == 24));
+ ASSERT(src2.shift_op() == ROR);
+ emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
+ ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
+}
+
+
+void Assembler::uxtb16(Register dst,
+ const Operand& src,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.275.
+ // cond(31-28) | 01101100(27-20) | 1111(19-16) |
+ // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+ ASSERT(!dst.is(pc));
+ ASSERT(!src.rm().is(pc));
+ ASSERT(!src.rm().is(no_reg));
+ ASSERT(src.rs().is(no_reg));
+ ASSERT((src.shift_imm_ == 0) ||
+ (src.shift_imm_ == 8) ||
+ (src.shift_imm_ == 16) ||
+ (src.shift_imm_ == 24));
+ ASSERT(src.shift_op() == ROR);
+ emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
+ ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+}
+
+
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
@@ -1640,6 +1819,26 @@ void Assembler::strd(Register src1, Register src2,
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
+
+// Preload instructions.
+void Assembler::pld(const MemOperand& address) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.128.
+ // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
+ // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
+ ASSERT(address.rm().is(no_reg));
+ ASSERT(address.am() == Offset);
+ int U = B23;
+ int offset = address.offset();
+ if (offset < 0) {
+ offset = -offset;
+ U = 0;
+ }
+ ASSERT(offset < 4096);
+ emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
+ 0xf*B12 | offset);
+}
+
+
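The emitted word can be checked against the ARM ARM by hand; a standalone sketch that re-derives the encoding (bit positions mirror the B-constants used above):

    #include <cstdint>
    #include <cstdio>

    // Re-derivation of the PLD encoding (ARM DDI 0406C.b, A8.8.128),
    // for illustration only. R (bit 22) is set: preload for read.
    uint32_t EncodePld(int rn, int32_t offset) {
      uint32_t u = 1u << 23;
      if (offset < 0) { offset = -offset; u = 0; }
      return (0xFu << 28) | (1u << 26) | (1u << 24) | u | (1u << 22) |
             (1u << 20) | (static_cast<uint32_t>(rn) << 16) |
             (0xFu << 12) | static_cast<uint32_t>(offset);
    }

    int main() {
      std::printf("pld [r1, #64] -> 0x%08X\n", EncodePld(1, 64));  // 0xF5D1F040
    }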
// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,
@@ -2074,6 +2273,7 @@ void Assembler::vstm(BlockAddrMode am,
0xA*B8 | count);
}
+
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
OS::MemCopy(&i, &d, 8);
@@ -2082,6 +2282,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
*hi = i >> 32;
}
+
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
@@ -2701,6 +2902,50 @@ void Assembler::vsqrt(const DwVfpRegister dst,
}
+// Support for NEON.
+
+void Assembler::vld1(NeonSize size,
+ const NeonListOperand& dst,
+ const NeonMemOperand& src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.320.
+ // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
+ // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
+ ASSERT(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ dst.base().split_code(&vd, &d);
+ emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
+ dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
+}
+
+
+void Assembler::vst1(NeonSize size,
+ const NeonListOperand& src,
+ const NeonMemOperand& dst) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.404.
+ // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
+ // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
+ ASSERT(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ src.base().split_code(&vd, &d);
+ emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
+ size*B6 | dst.align()*B4 | dst.rm().code());
+}
+
+
+void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.346.
+ // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
+ // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsSupported(NEON));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
+ (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
+}
+
+
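Together these three emitters are exactly what the new memcpy helpers in codegen-arm.cc need; the canonical widening sequence used there is:

    // Usage pattern (MacroAssembler context, taken from the helpers below):
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);  // widen 8 bytes into 8 halfwords in q0
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));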
// Pseudo instructions.
void Assembler::nop(int type) {
// ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
@@ -2774,6 +3019,7 @@ void Assembler::RecordConstPool(int size) {
#endif
}
+
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -2894,6 +3140,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
+
void Assembler::RecordRelocInfo(double data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, data);
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 3000860ba4..62dd94c078 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -78,12 +78,15 @@ class CpuFeatures : public AllStatic {
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static unsigned cache_line_size() { return cache_line_size_; }
+
private:
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_only_;
+ static unsigned cache_line_size_;
friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
@@ -301,6 +304,36 @@ struct DwVfpRegister {
typedef DwVfpRegister DoubleRegister;
+// Quad word NEON register.
+struct QwNeonRegister {
+ static const int kMaxNumRegisters = 16;
+
+ static QwNeonRegister from_code(int code) {
+ QwNeonRegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const {
+ return (0 <= code_) && (code_ < kMaxNumRegisters);
+ }
+ bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ void split_code(int* vm, int* m) const {
+ ASSERT(is_valid());
+ *m = (code_ & 0x10) >> 4;
+ *vm = code_ & 0x0F;
+ }
+
+ int code_;
+};
+
+
+typedef QwNeonRegister QuadRegister;
+
+
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
const SwVfpRegister s0 = { 0 };
@@ -370,6 +403,23 @@ const DwVfpRegister d29 = { 29 };
const DwVfpRegister d30 = { 30 };
const DwVfpRegister d31 = { 31 };
+const QwNeonRegister q0 = { 0 };
+const QwNeonRegister q1 = { 1 };
+const QwNeonRegister q2 = { 2 };
+const QwNeonRegister q3 = { 3 };
+const QwNeonRegister q4 = { 4 };
+const QwNeonRegister q5 = { 5 };
+const QwNeonRegister q6 = { 6 };
+const QwNeonRegister q7 = { 7 };
+const QwNeonRegister q8 = { 8 };
+const QwNeonRegister q9 = { 9 };
+const QwNeonRegister q10 = { 10 };
+const QwNeonRegister q11 = { 11 };
+const QwNeonRegister q12 = { 12 };
+const QwNeonRegister q13 = { 13 };
+const QwNeonRegister q14 = { 14 };
+const QwNeonRegister q15 = { 15 };
+
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
@@ -562,6 +612,42 @@ class MemOperand BASE_EMBEDDED {
friend class Assembler;
};
+
+// Class NeonMemOperand represents a memory operand in load and
+// store NEON instructions.
+class NeonMemOperand BASE_EMBEDDED {
+ public:
+ // [rn {:align}] Offset
+ // [rn {:align}]! PostIndex
+ explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);
+
+ // [rn {:align}], rm PostIndex
+ explicit NeonMemOperand(Register rn, Register rm, int align = 0);
+
+ Register rn() const { return rn_; }
+ Register rm() const { return rm_; }
+ int align() const { return align_; }
+
+ private:
+ void SetAlignment(int align);
+
+ Register rn_; // base
+ Register rm_; // register increment
+ int align_;
+};
+
+
+// Class NeonListOperand represents a list of NEON registers.
+class NeonListOperand BASE_EMBEDDED {
+ public:
+ explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
+ DoubleRegister base() const { return base_; }
+ NeonListType type() const { return type_; }
+ private:
+ DoubleRegister base_;
+ NeonListType type_;
+};
+
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@@ -866,6 +952,19 @@ class Assembler : public AssemblerBase {
void bfi(Register dst, Register src, int lsb, int width,
Condition cond = al);
+ void pkhbt(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void pkhtb(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void uxtb(Register dst, const Operand& src, Condition cond = al);
+
+ void uxtab(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+
+ void uxtb16(Register dst, const Operand& src, Condition cond = al);
+
// Status register access instructions
void mrs(Register dst, SRegister s, Condition cond = al);
@@ -887,6 +986,9 @@ class Assembler : public AssemblerBase {
Register src2,
const MemOperand& dst, Condition cond = al);
+ // Preload instructions
+ void pld(const MemOperand& address);
+
// Load/Store multiple instructions
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
@@ -1097,6 +1199,17 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src,
const Condition cond = al);
+ // Support for NEON.
+ // All these APIs support D0 to D31 and Q0 to Q15.
+
+ void vld1(NeonSize size,
+ const NeonListOperand& dst,
+ const NeonMemOperand& src);
+ void vst1(NeonSize size,
+ const NeonListOperand& src,
+ const NeonMemOperand& dst);
+ void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
+
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 69ba00ac5c..eff47e2692 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -200,7 +200,6 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
r3, // Scratch.
r4, // Scratch.
r5, // Scratch.
- false, // Is it a Smi?
&not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 6af5ccea38..7773667b7e 100755..100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -60,6 +60,16 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
}
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -226,8 +236,42 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
+void UnaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r2, r0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0, r3, r1, r2 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
#define __ ACCESS_MASM(masm)
+
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cond);
@@ -892,17 +936,10 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Now that we have the types we might as well check for
// internalized-internalized.
- Label not_internalized;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(r2, r2, Operand(kIsNotStringMask | kIsInternalizedMask));
- __ cmp(r2, Operand(kInternalizedTag | kStringTag));
- __ b(ne, &not_internalized); // r2 (rhs) is not an internalized string
-
- __ and_(r3, r3, Operand(kIsNotStringMask | kIsInternalizedMask));
- __ cmp(r3, Operand(kInternalizedTag | kStringTag));
- __ b(eq, &return_not_equal); // both rhs and lhs are internalized strings
-
- __ bind(&not_internalized);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ orr(r2, r2, Operand(r3));
+ __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ b(eq, &return_not_equal);
}
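The shortened sequence relies on the retagged instance types (an internalized string now has both the string bit and the internalized bit clear). A scalar sketch of the predicate the orr/tst pair computes; the mask values are illustrative, not the real instance-type constants:

    #include <cstdint>

    const uint32_t kIsNotStringMask       = 0x80;  // illustrative values
    const uint32_t kIsNotInternalizedMask = 0x40;

    // Both operands are internalized strings iff neither sets a "not a
    // string" or "not internalized" bit, so one OR plus one TST suffices.
    bool BothInternalizedStrings(uint32_t lhs_type, uint32_t rhs_type) {
      return ((lhs_type | rhs_type) &
              (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
    }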
@@ -943,15 +980,15 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
// r2 is object type of rhs.
Label object_test;
- STATIC_ASSERT(kInternalizedTag != 0);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
- __ tst(r2, Operand(kIsInternalizedMask));
- __ b(eq, possible_strings);
+ __ tst(r2, Operand(kIsNotInternalizedMask));
+ __ b(ne, possible_strings);
__ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsInternalizedMask));
- __ b(eq, possible_strings);
+ __ tst(r3, Operand(kIsNotInternalizedMask));
+ __ b(ne, possible_strings);
// Both are internalized. We already checked they weren't the same pointer
// so they are not equal.
@@ -982,7 +1019,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- bool object_is_smi,
Label* not_found) {
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
@@ -1005,40 +1041,38 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
Label is_smi;
Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
+ __ JumpIfSmi(object, &is_smi);
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- }
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ __ eor(scratch1, scratch1, Operand(scratch2));
+ __ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ __ ldr(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ sub(scratch2, object, Operand(kHeapObjectTag));
+ __ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ __ sub(probe, probe, Operand(kHeapObjectTag));
+ __ vldr(d1, probe, HeapNumber::kValueOffset);
+ __ VFPCompareAndSetFlags(d0, d1);
+ __ b(ne, not_found); // The cache did not contain this value.
+ __ b(&load_result_from_cache);
__ bind(&is_smi);
Register scratch = scratch1;
@@ -1050,7 +1084,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Operand(scratch, LSL, kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
- Register probe = mask;
__ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ cmp(object, probe);
__ b(ne, not_found);
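The heap-number path hashes the double by XOR-ing its two 32-bit halves and masking with the cache size; a scalar sketch of the index computation performed by the ldm/eor/and sequence above:

    #include <cstdint>
    #include <cstring>

    // mask is the number of cache entries minus one; each entry is a
    // key/value pair, hence the "LSL kPointerSizeLog2 + 1" addressing.
    int NumberStringCacheIndex(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return static_cast<int>((lo ^ hi) & mask);
    }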
@@ -1072,7 +1105,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, 0));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
+ GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime);
__ add(sp, sp, Operand(1 * kPointerSize));
__ Ret();
@@ -1289,277 +1322,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ mov(r3, Operand(r0)); // the operand
- __ mov(r2, Operand(Smi::FromInt(op_)));
- __ mov(r1, Operand(Smi::FromInt(mode_)));
- __ mov(r0, Operand(Smi::FromInt(operand_type_)));
- __ Push(r3, r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // The result of negating zero or the smallest negative smi is not a smi.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, slow);
-
- // Return '0 - value'.
- __ rsb(r0, r0, Operand::Zero());
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // Flip bits and revert inverted smi-tag.
- __ mvn(r0, Operand(r0));
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (mode_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- }
-
- __ bind(&heapnumber_allocated);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
-
- // Convert the heap number in r0 to an untagged integer in r1.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ ECMAToInt32(r1, d0, r2, r3, r4, d1);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ cmn(r1, Operand(0x40000000));
- __ b(mi, &try_float);
-
- // Tag the result as a smi and we're done.
- __ SmiTag(r0, r1);
- __ Ret();
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(r0, r3, r4, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the lower bit of the result (left shifted to look like a smi).
- __ mov(r2, Operand(r1, LSL, 31));
- // Push the 31 high bits (bit 0 cleared to look like a smi).
- __ bic(r1, r1, Operand(1));
- __ Push(r2, r1);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ Pop(r2, r1); // Restore the result.
- __ orr(r1, r1, Operand(r2, LSR, 31));
- }
- __ bind(&heapnumber_allocated);
- }
-
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
// Generates code to call a C function to do a double operation.
// This code never falls through, but returns with a heap number containing
// the result in r0.
@@ -2135,8 +1897,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
- StringAddStub string_add_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
@@ -2494,8 +2256,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &left_not_string);
- StringAddStub string_add_left_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
@@ -2505,8 +2267,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
- StringAddStub string_add_right_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
@@ -3001,6 +2763,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -3640,7 +3403,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -3671,7 +3435,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
support_wrapper_);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -3741,7 +3506,8 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -4649,20 +4415,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// function without changing the state.
__ cmp(r3, r1);
__ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- __ JumpIfNotSmi(r3, &miss);
- if (FLAG_debug_code) {
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
- __ cmp(r3, Operand(terminal_kind_sentinel));
- __ Assert(le, "Array function sentinel is not an ElementsKind");
- }
+ // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the
+  // megamorphic sentinel, then we have in the cell either some other
+  // function or an AllocationSite. Do a map check on the object in r3.
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ ldr(r5, FieldMemOperand(r3, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
// Make sure the function is the Array() function
__ LoadArrayFunction(r3);
@@ -4691,14 +4454,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ cmp(r1, r3);
__ b(ne, &not_array_function);
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- GetInitialFastElementsKind());
- __ mov(r3, Operand(initial_kind_sentinel));
- __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
+  // The target function is the Array constructor; create an
+  // AllocationSite if we don't already have one, and store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ __ push(r0);
+ __ push(r1);
+ __ push(r2);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ pop(r2);
+ __ pop(r1);
+ __ pop(r0);
+ }
__ b(&done);
__ bind(&not_array_function);
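With this change the type-feedback cell for a call site can hold one of four things; a sketch of the classification the two hunks above implement (hypothetical helper, not a V8 type):

    // Hypothetical states of the feedback cell after this change; the
    // real code distinguishes them with compares and map checks.
    enum class FeedbackState {
      kUninitialized,        // cell holds undefined
      kMegamorphic,          // cell holds the megamorphic sentinel
      kMonomorphicFunction,  // cell holds the call target
      kMonomorphicArray      // cell holds an AllocationSite (Array() calls)
    };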
@@ -5723,7 +5494,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfEitherSmi(r0, r1, &call_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -5735,20 +5510,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
__ b(ne, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
// Both arguments are strings.
@@ -5796,7 +5567,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(ne, &longer_than_two);
// Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5844,7 +5615,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5927,7 +5698,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r6: sum of lengths.
Label first_prepared, second_prepared;
__ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -6015,7 +5786,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm);
// Build a frame
{
@@ -6030,7 +5801,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm);
// Build a frame
{
@@ -6082,7 +5853,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch2,
scratch3,
scratch4,
- false,
&not_cached);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
@@ -6223,14 +5993,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
-
- __ and_(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask));
- __ cmp(tmp1, Operand(kInternalizedTag | kStringTag));
- __ b(ne, &miss);
-
- __ and_(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask));
- __ cmp(tmp2, Operand(kInternalizedTag | kStringTag));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ orr(tmp1, tmp1, Operand(tmp2));
+ __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
__ b(ne, &miss);
// Internalized strings are compared by identity.
@@ -6264,7 +6029,6 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
__ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
@@ -6330,13 +6094,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// strings.
if (equality) {
ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp3, tmp1, Operand(tmp2));
- __ tst(tmp3, Operand(kIsInternalizedMask));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ orr(tmp3, tmp1, Operand(tmp2));
+ __ tst(tmp3, Operand(kIsNotInternalizedMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
- __ Ret(ne);
+ __ Ret(eq);
}
// Check that both strings are sequential ASCII.
@@ -6711,6 +6475,7 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
+
#define REG(Name) { kRegister_ ## Name ## _Code }
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
@@ -7181,10 +6946,6 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
ASSERT(FAST_DOUBLE_ELEMENTS == 4);
ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
Label normal_sequence;
@@ -7196,18 +6957,19 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ b(eq, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry
+ // Fix kind and retry (only if we have an allocation site in the cell).
__ add(r3, r3, Operand(1));
- __ cmp(r2, Operand(undefined_sentinel));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &normal_sequence);
-
- // The type cell may have gone megamorphic, don't overwrite if so
- __ ldr(r5, FieldMemOperand(r2, kPointerSize));
- __ JumpIfNotSmi(r5, &normal_sequence);
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ __ ldr(r5, FieldMemOperand(r5, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &normal_sequence);
// Save the resulting elements kind in type info
__ SmiTag(r3);
- __ str(r3, FieldMemOperand(r2, kPointerSize));
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
__ bind(&normal_sequence);
@@ -7236,7 +6998,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7277,10 +7039,6 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@@ -7296,7 +7054,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// We should either have undefined in r2 or a valid cell
Label okay_here;
Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ cmp(r2, Operand(undefined_sentinel));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &okay_here);
__ ldr(r3, FieldMemOperand(r2, 0));
__ cmp(r3, Operand(cell_map));
@@ -7306,10 +7064,20 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
- __ cmp(r2, Operand(undefined_sentinel));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
- __ JumpIfNotSmi(r3, &no_info);
+
+ // The type cell may have undefined in its value.
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &no_info);
+
+  // The type cell has either an AllocationSite or a JSFunction.
+ __ ldr(r4, FieldMemOperand(r3, 0));
+ __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &no_info);
+
+ __ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 1f663f52e9..6eab8d128e 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -80,71 +80,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
- void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -209,21 +144,6 @@ class StringHelper : public AllStatic {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 1 << 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
- // Stub needs a frame before calling the runtime
- ERECT_FRAME = 1 << 3,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
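The replacement flags now live in the shared code-stubs.h rather than in per-architecture headers; judging from the flag tests in StringAddStub::Generate() above, their shape is approximately:

    // Approximate sketch of the new shared StringAddFlags (values inferred
    // from the flag tests in this diff, not copied from code-stubs.h).
    enum StringAddFlags {
      STRING_ADD_CHECK_NONE  = 0,
      STRING_ADD_CHECK_LEFT  = 1 << 0,  // left operand needs a string check
      STRING_ADD_CHECK_RIGHT = 1 << 1,  // right operand needs a string check
      STRING_ADD_CHECK_BOTH  = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
      STRING_ADD_ERECT_FRAME = 1 << 2   // build a frame before runtime calls
    };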
class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -352,7 +272,6 @@ class NumberToStringStub: public PlatformCodeStub {
Register scratch1,
Register scratch2,
Register scratch3,
- bool object_is_smi,
Label* not_found);
private:
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 60de5fc4f7..7559373ee9 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -112,6 +112,252 @@ UnaryMathFunction CreateExpFunction() {
#endif
}
+#if defined(V8_HOST_ARCH_ARM)
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+ OS::MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ return stub;
+ }
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ Register dest = r0;
+ Register src = r1;
+ Register chars = r2;
+ Register temp1 = r3;
+ Label less_4;
+
+ if (CpuFeatures::IsSupported(NEON)) {
+ Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
+ Label size_less_than_8;
+ __ pld(MemOperand(src, 0));
+
+ __ cmp(chars, Operand(8));
+ __ b(lt, &size_less_than_8);
+ __ cmp(chars, Operand(32));
+ __ b(lt, &less_32);
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 32));
+ }
+ __ cmp(chars, Operand(64));
+ __ b(lt, &less_64);
+ __ pld(MemOperand(src, 64));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 96));
+ }
+ __ cmp(chars, Operand(128));
+ __ b(lt, &less_128);
+ __ pld(MemOperand(src, 128));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 160));
+ }
+ __ pld(MemOperand(src, 192));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 224));
+ }
+ __ cmp(chars, Operand(256));
+ __ b(lt, &less_256);
+ __ sub(chars, chars, Operand(256));
+
+ __ bind(&loop);
+ __ pld(MemOperand(src, 256));
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ if (CpuFeatures::cache_line_size() == 32) {
+ __ pld(MemOperand(src, 256));
+ }
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(64), SetCC);
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ b(ge, &loop);
+ __ add(chars, chars, Operand(256));
+
+ __ bind(&less_256);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(128));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+ __ cmp(chars, Operand(64));
+ __ b(lt, &less_64);
+
+ __ bind(&less_128);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
+ __ sub(chars, chars, Operand(64));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
+
+ __ bind(&less_64);
+ __ cmp(chars, Operand(32));
+ __ b(lt, &less_32);
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(32));
+
+ __ bind(&less_32);
+ __ cmp(chars, Operand(16));
+ __ b(le, &_16_or_less);
+ __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(16));
+
+ __ bind(&_16_or_less);
+ __ cmp(chars, Operand(8));
+ __ b(le, &_8_or_less);
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
+ __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
+ __ sub(chars, chars, Operand(8));
+
+ // Do a last copy which may overlap with the previous copy (up to 8 bytes).
+ __ bind(&_8_or_less);
+ __ rsb(chars, chars, Operand(8));
+ __ sub(src, src, Operand(chars));
+ __ sub(dest, dest, Operand(chars));
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
+ __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
+
+ __ Ret();
+
+ __ bind(&size_less_than_8);
+
+ __ bic(temp1, chars, Operand(0x3), SetCC);
+ __ b(&less_4, eq);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ str(temp1, MemOperand(dest, 4, PostIndex));
+ } else {
+ Register temp2 = ip;
+ Label loop;
+
+ __ bic(temp2, chars, Operand(0x3), SetCC);
+ __ b(&less_4, eq);
+ __ add(temp2, dest, temp2);
+
+ __ bind(&loop);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ str(temp1, MemOperand(dest, 4, PostIndex));
+ __ cmp(dest, temp2);
+ __ b(&loop, ne);
+ }
+
+ __ bind(&less_4);
+ __ mov(chars, Operand(chars, LSL, 31), SetCC);
+ // bit0 => Z (ne), bit1 => C (cs)
+ __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
+ __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
+ __ ldrb(temp1, MemOperand(src), ne);
+ __ strb(temp1, MemOperand(dest), ne);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
+#endif
+}
+
+
+// Widen 8-bit characters to 16-bit. The number of characters to copy must
+// be at least 8.
+OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+ OS::MemCopyUint16Uint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ return stub;
+ }
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ Register dest = r0;
+ Register src = r1;
+ Register chars = r2;
+ if (CpuFeatures::IsSupported(NEON)) {
+ Register temp = r3;
+ Label loop;
+
+ __ bic(temp, chars, Operand(0x7));
+ __ sub(chars, chars, Operand(temp));
+ __ add(temp, dest, Operand(temp, LSL, 1));
+
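+ // Per iteration: load 8 bytes into d0, zero-extend them to 8 halfwords
+ // in q0, and store 16 bytes; temp marks the end of the full 8-character
+ // blocks in dest.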
+ __ bind(&loop);
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
+ __ vmovl(NeonU8, q0, d0);
+ __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
+ __ cmp(dest, temp);
+ __ b(&loop, ne);
+
+ // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
+ __ rsb(chars, chars, Operand(8));
+ __ sub(src, src, Operand(chars));
+ __ sub(dest, dest, Operand(chars, LSL, 1));
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
+ __ vmovl(NeonU8, q0, d0);
+ __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
+ __ Ret();
+ } else {
+ Register temp1 = r3;
+ Register temp2 = ip;
+ Register temp3 = lr;
+ Register temp4 = r4;
+ Label loop;
+ Label not_two;
+
+ __ Push(lr, r4);
+ __ bic(temp2, chars, Operand(0x3));
+ __ add(temp2, dest, Operand(temp2, LSL, 1));
+
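+ // Per iteration: load 4 bytes; uxtb16 picks out the even and odd bytes,
+ // and pkhbt/pkhtb repack them into four zero-extended halfwords
+ // (8 output bytes).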
+ __ bind(&loop);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ uxtb16(temp3, Operand(temp1, ROR, 0));
+ __ uxtb16(temp4, Operand(temp1, ROR, 8));
+ __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
+ __ str(temp1, MemOperand(dest));
+ __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
+ __ str(temp1, MemOperand(dest, 4));
+ __ add(dest, dest, Operand(8));
+ __ cmp(dest, temp2);
+ __ b(&loop, ne);
+
+ __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit 1 => cs (halfword), bit 0 => ne (byte)
+ __ b(&not_two, cc);
+ __ ldrh(temp1, MemOperand(src, 2, PostIndex));
+ __ uxtb(temp3, Operand(temp1, ROR, 8));
+ __ mov(temp3, Operand(temp3, LSL, 16));
+ __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
+ __ str(temp3, MemOperand(dest, 4, PostIndex));
+ __ bind(&not_two);
+ __ ldrb(temp1, MemOperand(src), ne);
+ __ strh(temp1, MemOperand(dest), ne);
+ __ Pop(pc, r4);
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+ return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
+#endif
+}
+#endif
#undef __
@@ -120,6 +366,7 @@ UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -144,7 +391,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -154,9 +401,9 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- r4 : scratch (elements)
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
- __ b(eq, allocation_site_info_found);
+ ASSERT(allocation_memento_found != NULL);
+ __ TestJSArrayForAllocationMemento(r2, r4);
+ __ b(eq, allocation_memento_found);
}
// Set transitioned map.
@@ -185,7 +432,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map, done;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
+ __ TestJSArrayForAllocationMemento(r2, r4);
__ b(eq, fail);
}
@@ -311,7 +558,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
+ __ TestJSArrayForAllocationMemento(r2, r4);
__ b(eq, fail);
}
@@ -591,7 +838,7 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
PredictableCodeSizeScope scope(patcher.masm(), *length);
patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ patcher.masm()->nop(ip.code());
patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
initialized = true;
}
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index e21055fee4..9bfccf822b 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -33,22 +33,6 @@
#error ARM EABI support is required.
#endif
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
-#ifndef CAN_USE_VFP3_INSTRUCTIONS
-# define CAN_USE_VFP3_INSTRUCTIONS
-#endif
-#endif
-
-// Simulator should support unaligned access by default.
-#if !defined(__arm__)
-# ifndef CAN_USE_UNALIGNED_ACCESSES
-# define CAN_USE_UNALIGNED_ACCESSES 1
-# endif
-#endif
-
namespace v8 {
namespace internal {
@@ -331,6 +315,32 @@ enum LFlag {
};
+// NEON data type
+enum NeonDataType {
+ NeonS8 = 0x1, // U = 0, imm3 = 0b001
+ NeonS16 = 0x2, // U = 0, imm3 = 0b010
+ NeonS32 = 0x4, // U = 0, imm3 = 0b100
+ NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
+ NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
+ NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
+ NeonDataTypeSizeMask = 0x7,
+ NeonDataTypeUMask = 1 << 24
+};
+
+enum NeonListType {
+ nlt_1 = 0x7,
+ nlt_2 = 0xA,
+ nlt_3 = 0x6,
+ nlt_4 = 0x2
+};
+
+enum NeonSize {
+ Neon8 = 0x0,
+ Neon16 = 0x1,
+ Neon32 = 0x2,
+ Neon64 = 0x4
+};
+
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
@@ -573,6 +583,7 @@ class Instruction {
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
inline int TypeValue() const { return Bits(27, 25); }
+ inline int SpecialValue() const { return Bits(27, 23); }
inline int RnValue() const { return Bits(19, 16); }
DECLARE_STATIC_ACCESSOR(RnValue);
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index ea3287aa33..780bafb755 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 16;
+const int Deoptimizer::table_entry_size_ = 12;
int Deoptimizer::patch_size() {
@@ -465,22 +465,12 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible (r3) (return
+ // Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
- if (type() == EAGER || type() == SOFT) {
- __ mov(r3, Operand::Zero());
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(r3, lr);
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(r3, lr);
- // Correct two words for bailout id and return address.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ __ mov(r3, lr);
+ // Correct one word for bailout id.
+ __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
@@ -521,13 +511,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ vstr(d0, r1, dst_offset);
}
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == SOFT || type() == OSR) {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ // Remove the bailout id and the saved registers from the stack.
+ __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
@@ -636,18 +621,12 @@ void Deoptimizer::EntryGenerator::Generate() {
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- if (type() == EAGER || type() == SOFT) {
- __ nop();
- } else {
- // Emulate ia32 like call by pushing return address to stack.
- __ push(lr);
- }
__ mov(ip, Operand(i));
__ push(ip);
__ b(&done);
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 6101bec947..fd986fd656 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -113,6 +113,8 @@ class Decoder {
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
+ void FormatNeonList(int Vd, int type);
+ void FormatNeonMemory(int Rn, int align, int Rm);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
@@ -133,6 +135,8 @@ class Decoder {
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
+ void DecodeSpecialCondition(Instruction* instr);
+
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
@@ -187,11 +191,13 @@ void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
+
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
Print(VFPRegisters::Name(reg, false));
}
+
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(VFPRegisters::Name(reg, true));
@@ -417,6 +423,41 @@ int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
}
+void Decoder::FormatNeonList(int Vd, int type) {
+ if (type == nlt_1) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d}", Vd);
+ } else if (type == nlt_2) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d}", Vd, Vd + 1);
+ } else if (type == nlt_3) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
+ } else if (type == nlt_4) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
+ }
+}
+
+
+void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "[r%d", Rn);
+ if (align != 0) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ":%d", (1 << align) << 6);
+ }
+ if (Rm == 15) {
+ Print("]");
+ } else if (Rm == 13) {
+ Print("]!");
+ } else {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "], r%d", Rm);
+ }
+}
+
+
// Print the movw or movt instruction.
void Decoder::PrintMovwMovt(Instruction* instr) {
int imm = instr->ImmedMovwMovtValue();
@@ -980,15 +1021,107 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
}
case ia_x: {
- if (instr->HasW()) {
- VERIFY(instr->Bits(5, 4) == 0x1);
- if (instr->Bit(22) == 0x1) {
- Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
+ if (instr->Bit(4) == 0) {
+ Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ } else {
+ if (instr->Bit(5) == 0) {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(6) == 0) {
+ Format(instr, "pkhbt'cond 'rd, 'rn, 'rm, lsl #'imm05@07");
+ } else {
+ if (instr->Bits(11, 7) == 0) {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #32");
+ } else {
+ Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #'imm05@07");
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case 1:
+ UNREACHABLE();
+ break;
+ case 2:
+ UNREACHABLE();
+ break;
+ case 3:
+ Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
+ break;
+ }
} else {
- UNREACHABLE(); // SSAT.
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ UNREACHABLE();
+ break;
+ case 1:
+ UNREACHABLE();
+ break;
+ case 2:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
+ break;
+ case 1:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb16'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case 3:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
+ break;
+ case 1:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+ break;
+ }
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 0:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
+ break;
+ case 1:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+ break;
+ case 2:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+ break;
+ case 3:
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+ break;
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
}
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
}
break;
}
@@ -1421,6 +1554,91 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
+
+void Decoder::DecodeSpecialCondition(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 5:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl signed
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 7:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl unsigned
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 8:
+ if (instr->Bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vst1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else if (instr->Bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ int Rm = instr->VmValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vld1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 0xA:
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ int Rn = instr->Bits(19, 16);
+ int offset = instr->Bits(11, 0);
+ if (offset == 0) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d]", Rn);
+ } else if (instr->Bit(23) == 0) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #-%d]", Rn, offset);
+ } else {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [r%d, #+%d]", Rn, offset);
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
+ default:
+ Unknown(instr);
+ break;
+ }
+}
+
#undef VERIFY
bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
@@ -1447,7 +1665,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
- Unknown(instr);
+ DecodeSpecialCondition(instr);
return Instruction::kInstrSize;
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 41f02be259..6a5845de43 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -169,9 +169,7 @@ void FullCodeGenerator::Generate() {
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ nop(ip.code());
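+ // The nop keeps the code-age sequence three instructions long; undefined
+ // is now loaded below only when the function has locals.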
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
@@ -181,8 +179,11 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
+ if (locals_count > 0) {
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(ip);
+ }
}
}
@@ -3718,7 +3719,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -4366,10 +4367,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
const char* comment) {
// TODO(svenpanne): Allowing format strings in Comment would be nice here...
Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
+ UnaryOpStub stub(expr->op());
// UnaryOpStub expects the argument to be in the
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
@@ -4438,7 +4436,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ JumpIfSmi(r0, &no_conversion);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ JumpIfSmi(r0, &no_conversion);
+ }
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 89ebfde668..ee28d28198 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -325,9 +325,9 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
// bit test is enough.
// map: key map
__ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ tst(hash, Operand(kIsInternalizedMask));
- __ b(eq, not_unique);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ tst(hash, Operand(kIsNotInternalizedMask));
+ __ b(ne, not_unique);
__ bind(&unique);
}
@@ -1230,8 +1230,8 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ mov(r0, r2);
__ Ret();
@@ -1253,8 +1253,8 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ mov(r0, r2);
__ Ret();
@@ -1384,8 +1384,8 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -1398,7 +1398,7 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -1414,7 +1414,7 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1531,8 +1531,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::STUB, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index b08353e069..b68d22f336 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -182,7 +182,7 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -272,6 +272,24 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
+ExternalReference LLinkObjectInList::GetReference(Isolate* isolate) {
+ switch (hydrogen()->known_list()) {
+ case HLinkObjectInList::ALLOCATION_SITE_LIST:
+ return ExternalReference::allocation_sites_list_address(isolate);
+ }
+
+ UNREACHABLE();
+ // Return a dummy value
+ return ExternalReference::isolate_address(isolate);
+}
+
+
+void LLinkObjectInList::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" offset %d", hydrogen()->store_field().offset());
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -325,7 +343,6 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -1685,8 +1702,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
@@ -1694,14 +1711,14 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
@@ -1998,6 +2015,18 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
+ return new(zone())
+ LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
+}
+
+
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -2102,6 +2131,13 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLinkObjectInList(HLinkObjectInList* instr) {
+ LOperand* object = UseRegister(instr->value());
+ LLinkObjectInList* result = new(zone()) LLinkObjectInList(object);
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2389,14 +2425,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2419,14 +2447,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* key = UseFixed(instr->key(), r1);
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2599,14 +2619,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* object = UseFixed(instr->enumerable(), r0);
LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 39cab8fdb3..5165f1bbb6 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -49,7 +49,6 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -81,7 +80,7 @@ class LCodeGen;
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -92,7 +91,6 @@ class LCodeGen;
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
@@ -106,7 +104,6 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
@@ -118,10 +115,12 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
+ V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
@@ -719,9 +718,9 @@ class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -729,8 +728,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -925,6 +925,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
+class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LIsNumberAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+};
+
+
class LIsStringAndBranch: public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1671,6 +1684,23 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
+class LLinkObjectInList: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LLinkObjectInList(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ ExternalReference GetReference(Isolate* isolate);
+
+ DECLARE_CONCRETE_INSTRUCTION(LinkObjectInList, "link-object-in-list")
+ DECLARE_HYDROGEN_ACCESSOR(LinkObjectInList)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2450,21 +2480,6 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
- public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
@@ -2551,20 +2566,6 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2586,20 +2587,6 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 272db157a8..9e0d59f8ec 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -161,9 +161,7 @@ bool LCodeGen::GeneratePrologue() {
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ nop(ip.code());
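+ // The nop keeps the code-age sequence three instructions long (matching
+ // the full-codegen prologue).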
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
@@ -343,8 +341,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
Label table_start;
__ bind(&table_start);
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
Address entry = deopt_jump_table_[i].address;
@@ -357,45 +354,24 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
if (deopt_jump_table_[i].needs_frame) {
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ b(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, ip);
- }
+ if (needs_frame.is_bound()) {
+ __ b(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ b(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- __ mov(pc, ip);
- }
- }
- } else {
- if (type == Deoptimizer::LAZY) {
+ __ bind(&needs_frame);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
__ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
- } else {
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+ __ mov(pc, ip);
}
+ } else {
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
}
masm()->CheckConstPool(false, false);
}
@@ -794,7 +770,8 @@ void LCodeGen::DeoptimizeIf(Condition cc,
if (FLAG_deopt_every_n_times == 1 &&
!info()->IsStub() &&
info()->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ ASSERT(frame_is_built_);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
@@ -803,13 +780,8 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
if (cc == al && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
    // We often have several deopts to the same entry; reuse the last
// jump entry if this is the case.
@@ -1069,11 +1041,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -2130,12 +2097,12 @@ int LCodeGen::GetNextEmittedBlock() const {
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
- int right_block = instr->FalseDestination(chunk_);
int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
+ if (right_block == left_block || cc == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
@@ -2153,6 +2120,25 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
+void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsSmiOrInteger32() || r.IsDouble()) {
+ EmitBranch(instr, al);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsTaggedNumber()) {
+ EmitBranch(instr, al);
+ }
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ __ ldr(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ CompareRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+ EmitBranch(instr, eq);
+ }
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -2329,7 +2315,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Condition cond = TokenToCondition(instr->op(), false);
@@ -2937,6 +2923,19 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
}
+void LCodeGen::DoLinkObjectInList(LLinkObjectInList* instr) {
+ Register object = ToRegister(instr->object());
+ ExternalReference sites_list_address = instr->GetReference(isolate());
+
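+ // Prepend object to the list: store the current head into the object's
+ // field, then make object the new head.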
+ __ mov(ip, Operand(sites_list_address));
+ __ ldr(ip, MemOperand(ip));
+ __ str(ip, FieldMemOperand(object,
+ instr->hydrogen()->store_field().offset()));
+ __ mov(ip, Operand(sites_list_address));
+ __ str(object, MemOperand(ip));
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -4123,7 +4122,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ mov(r2, Operand(instr->hydrogen()->property_cell()));
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
@@ -4527,7 +4526,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
+ __ TestJSArrayForAllocationMemento(object, temp);
DeoptimizeIf(eq, instr->environment());
}
@@ -4535,7 +4534,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub stub(instr->hydrogen()->flags());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -5321,80 +5320,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ str(scratch, FieldMemOperand(result, property_offset));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ mov(r0, Operand(Smi::FromInt(instance_size)));
- __ push(r0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5712,33 +5637,6 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- Register obj = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- __ Push(key, obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 075fb416c7..b0390ee445 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -150,7 +150,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index 352fbb90ca..902817e140 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -219,7 +219,6 @@ void LGapResolver::EmitMove(int index) {
ASSERT(destination->IsStackSlot());
__ str(source_register, cgen_->ToMemOperand(destination));
}
-
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
@@ -255,6 +254,10 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
+ } else if (source->IsDoubleRegister()) {
+ DwVfpRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ Vmov(result, v, ip);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index cce20ffd6a..8416926b46 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1033,6 +1033,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
}
+
void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
if (use_eabi_hardfloat()) {
Move(dst, d0);
@@ -3092,11 +3093,14 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
void MacroAssembler::JumpIfNotUniqueName(Register reg,
Label* not_unique_name) {
- STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
- cmp(reg, Operand(kInternalizedTag));
- b(lt, not_unique_name);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ b(eq, &succeed);
cmp(reg, Operand(SYMBOL_TYPE));
- b(gt, not_unique_name);
+ b(ne, not_unique_name);
+
+ bind(&succeed);
}
@@ -3746,26 +3750,26 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
}
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg) {
- Label no_info_available;
+ Label no_memento_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
add(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Operand(new_space_start));
- b(lt, &no_info_available);
+ b(lt, &no_memento_available);
mov(ip, Operand(new_space_allocation_top));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
- b(gt, &no_info_available);
- ldr(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
+ b(gt, &no_memento_available);
+ ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
cmp(scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
+ Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+ bind(&no_memento_available);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index b76ebd590e..747dd3b882 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -1334,14 +1334,14 @@ class MacroAssembler: public Assembler {
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
  // If an allocation memento is present, condition flags are set to eq.
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg);
private:
void CallCFunctionHelper(Register function,
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 238632aea0..c47f2ab80c 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -919,6 +919,54 @@ void Simulator::set_dw_register(int dreg, const int* dbl) {
}
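+// The simulator stores the VFP/NEON register file as an array of 32-bit
+// words: a D register occupies two consecutive words, a Q register four.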
+void Simulator::get_d_register(int dreg, uint64_t* value) {
+ ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
+}
+
+
+void Simulator::set_d_register(int dreg, const uint64_t* value) {
+ ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
+}
+
+
+void Simulator::get_d_register(int dreg, uint32_t* value) {
+ ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
+}
+
+
+void Simulator::set_d_register(int dreg, const uint32_t* value) {
+ ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+ memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
+}
+
+
+void Simulator::get_q_register(int qreg, uint64_t* value) {
+ ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
+}
+
+
+void Simulator::set_q_register(int qreg, const uint64_t* value) {
+ ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
+}
+
+
+void Simulator::get_q_register(int qreg, uint32_t* value) {
+ ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
+}
+
+
+void Simulator::set_q_register(int qreg, const uint32_t* value) {
+ ASSERT((qreg >= 0) && (qreg < num_q_registers));
+ memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
+}
+
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
@@ -1026,6 +1074,7 @@ void Simulator::TrashCallerSaveRegisters() {
registers_[12] = 0x50Bad4U;
}
+
// Some Operating Systems allow unaligned access on ARMv7 targets. We
// assume that unaligned accesses are not allowed unless the v8 build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
@@ -1485,11 +1534,11 @@ static int count_bits(int bit_vector) {
}
-void Simulator::ProcessPUW(Instruction* instr,
- int num_regs,
- int reg_size,
- intptr_t* start_address,
- intptr_t* end_address) {
+int32_t Simulator::ProcessPU(Instruction* instr,
+ int num_regs,
+ int reg_size,
+ intptr_t* start_address,
+ intptr_t* end_address) {
int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
switch (instr->PUField()) {
@@ -1520,11 +1569,10 @@ void Simulator::ProcessPUW(Instruction* instr,
break;
}
}
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
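+ // Base-register writeback now happens in the caller, after the memory
+ // accesses complete.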
+ return rn_val;
}
+
// Addressing Mode 4 - Load and Store Multiple
void Simulator::HandleRList(Instruction* instr, bool load) {
int rlist = instr->RlistValue();
@@ -1532,7 +1580,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
intptr_t start_address = 0;
intptr_t end_address = 0;
- ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
+ int32_t rn_val =
+ ProcessPU(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
@@ -1551,6 +1600,9 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
rlist >>= 1;
}
ASSERT(end_address == ((intptr_t)address) - 4);
+ if (instr->HasW()) {
+ set_register(instr->RnValue(), rn_val);
+ }
}
@@ -1573,7 +1625,8 @@ void Simulator::HandleVList(Instruction* instr) {
intptr_t start_address = 0;
intptr_t end_address = 0;
- ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
+ int32_t rn_val =
+ ProcessPU(instr, num_regs, operand_size, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
for (int reg = vd; reg < vd + num_regs; reg++) {
@@ -1606,6 +1659,9 @@ void Simulator::HandleVList(Instruction* instr) {
}
}
ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+ if (instr->HasW()) {
+ set_register(instr->RnValue(), rn_val);
+ }
}
@@ -1954,6 +2010,7 @@ double Simulator::canonicalizeNaN(double value) {
FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
}
+
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
@@ -2596,36 +2653,148 @@ void Simulator::DecodeType3(Instruction* instr) {
break;
}
case ia_x: {
- if (instr->HasW()) {
- ASSERT(instr->Bits(5, 4) == 0x1);
-
- if (instr->Bit(22) == 0x1) { // USAT.
- int32_t sat_pos = instr->Bits(20, 16);
- int32_t sat_val = (1 << sat_pos) - 1;
- int32_t shift = instr->Bits(11, 7);
- int32_t shift_type = instr->Bit(6);
- int32_t rm_val = get_register(instr->RmValue());
- if (shift_type == 0) { // LSL
- rm_val <<= shift;
- } else { // ASR
- rm_val >>= shift;
+ if (instr->Bit(4) == 0) {
+ // Memop.
+ } else {
+ if (instr->Bit(5) == 0) {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(6) == 0) {
+ // Pkhbt.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t shift = instr->Bits(11, 7);
+ rm_val <<= shift;
+ set_register(rd, (rn_val & 0xFFFF) | (rm_val & 0xFFFF0000U));
+ } else {
+ // Pkhtb.
+ uint32_t rn_val = get_register(rn);
+ int32_t rm_val = get_register(instr->RmValue());
+ int32_t shift = instr->Bits(11, 7);
+ if (shift == 0) {
+ shift = 32;
+ }
+ rm_val >>= shift;
+ set_register(rd, (rn_val & 0xFFFF0000U) | (rm_val & 0xFFFF));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 1:
+ UNIMPLEMENTED();
+ break;
+ case 2:
+ UNIMPLEMENTED();
+ break;
+ case 3: {
+ // Usat.
+ int32_t sat_pos = instr->Bits(20, 16);
+ int32_t sat_val = (1 << sat_pos) - 1;
+ int32_t shift = instr->Bits(11, 7);
+ int32_t shift_type = instr->Bit(6);
+ int32_t rm_val = get_register(instr->RmValue());
+ if (shift_type == 0) { // LSL
+ rm_val <<= shift;
+ } else { // ASR
+ rm_val >>= shift;
+ }
+ // If saturation occurs, the Q flag should be set in the CPSR.
+ // There is no Q flag yet, and no instruction (MRS) to read the
+ // CPSR directly.
+ if (rm_val > sat_val) {
+ rm_val = sat_val;
+ } else if (rm_val < 0) {
+ rm_val = 0;
+ }
+ set_register(rd, rm_val);
+ break;
+ }
}
- // If saturation occurs, the Q flag should be set in the CPSR.
- // There is no Q flag yet, and no instruction (MRS) to read the
- // CPSR directly.
- if (rm_val > sat_val) {
- rm_val = sat_val;
- } else if (rm_val < 0) {
- rm_val = 0;
+ } else {
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ UNIMPLEMENTED();
+ break;
+ case 1:
+ UNIMPLEMENTED();
+ break;
+ case 2:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Uxtb16.
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd,
+ (rm_val & 0xFF) | (rm_val & 0xFF0000));
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 3:
+ if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
+ if (instr->Bits(19, 16) == 0xF) {
+ // Uxtb.
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, (rm_val & 0xFF));
+ } else {
+ // Uxtab.
+ uint32_t rn_val = get_register(rn);
+ uint32_t rm_val = get_register(instr->RmValue());
+ int32_t rotate = instr->Bits(11, 10);
+ switch (rotate) {
+ case 0:
+ break;
+ case 1:
+ rm_val = (rm_val >> 8) | (rm_val << 24);
+ break;
+ case 2:
+ rm_val = (rm_val >> 16) | (rm_val << 16);
+ break;
+ case 3:
+ rm_val = (rm_val >> 24) | (rm_val << 8);
+ break;
+ }
+ set_register(rd, rn_val + (rm_val & 0xFF));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
}
- set_register(rd, rm_val);
- } else { // SSAT.
- UNIMPLEMENTED();
}
return;
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- UNIMPLEMENTED();
}
break;
}
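The three zero-extend cases above (Pkhbt/Pkhtb aside) each repeat the same four-way switch on Bits(11, 10); that switch is simply a right-rotation by 8 * rotate bits before masking. An equivalent compact sketch:

#include <cstdint>

// Rotate-then-extract pattern behind Uxtb, Uxtb16 and Uxtab.
static uint32_t RotateRight(uint32_t v, int amount) {
  return amount == 0 ? v : (v >> amount) | (v << (32 - amount));
}

uint32_t Uxtb(uint32_t rm_val, int rotate) {  // rotate in [0, 3]
  return RotateRight(rm_val, 8 * rotate) & 0xFF;
}

uint32_t Uxtb16(uint32_t rm_val, int rotate) {
  uint32_t rotated = RotateRight(rm_val, 8 * rotate);
  return (rotated & 0xFF) | (rotated & 0xFF0000);
}

uint32_t Uxtab(uint32_t rn_val, uint32_t rm_val, int rotate) {
  return rn_val + Uxtb(rm_val, rotate);
}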
@@ -3349,6 +3518,156 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
+void Simulator::DecodeSpecialCondition(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 5:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl signed
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ int8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ int16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 7:
+ if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
+ (instr->Bit(4) == 1)) {
+ // vmovl unsigned
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Vm = (instr->Bit(5) << 4) | instr->VmValue();
+ int imm3 = instr->Bits(21, 19);
+ if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
+ int esize = 8 * imm3;
+ int elements = 64 / esize;
+ uint8_t from[8];
+ get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
+ uint16_t to[8];
+ int e = 0;
+ while (e < elements) {
+ to[e] = from[e];
+ e++;
+ }
+ set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 8:
+ if (instr->Bits(21, 20) == 0) {
+ // vst1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int Rm = instr->VmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ get_d_register(Vd + r, data);
+ WriteW(address, data[0], instr);
+ WriteW(address + 4, data[1], instr);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else if (instr->Bits(21, 20) == 2) {
+ // vld1
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int type = instr->Bits(11, 8);
+ int Rm = instr->VmValue();
+ int32_t address = get_register(Rn);
+ int regs = 0;
+ switch (type) {
+ case nlt_1:
+ regs = 1;
+ break;
+ case nlt_2:
+ regs = 2;
+ break;
+ case nlt_3:
+ regs = 3;
+ break;
+ case nlt_4:
+ regs = 4;
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ int r = 0;
+ while (r < regs) {
+ uint32_t data[2];
+ data[0] = ReadW(address, instr);
+ data[1] = ReadW(address + 4, instr);
+ set_d_register(Vd + r, data);
+ address += 8;
+ r++;
+ }
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 0xA:
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ // pld: ignore instruction.
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+}
+
+
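The two vmovl cases in DecodeSpecialCondition decode imm3 into an element size but widen through fixed int8_t/int16_t (or uint8_t/uint16_t) buffers, i.e. the 8-bit-lane case that imm3 == 1 selects. A generic sketch of the widening, with template parameters standing in for the lane types each esize would need (illustrative, not simulator code):

#include <cstdint>
#include <cstring>

// Each narrow lane of a 64-bit D register is extended into the
// corresponding wide lane of a 128-bit Q register; signed lane types
// model case 5, unsigned lane types model case 7.
template <typename From, typename To>
void VmovlSketch(uint64_t d_reg, uint64_t q_reg[2]) {
  const std::size_t kLanes = sizeof(uint64_t) / sizeof(From);
  From from[kLanes];
  std::memcpy(from, &d_reg, sizeof(from));
  To to[kLanes];
  for (std::size_t i = 0; i < kLanes; i++) {
    to[i] = from[i];  // the implicit conversion performs the extension
  }
  std::memcpy(q_reg, to, sizeof(to));
}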
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -3365,7 +3684,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
}
if (instr->ConditionField() == kSpecialCondition) {
- UNIMPLEMENTED();
+ DecodeSpecialCondition(instr);
} else if (ConditionallyExecute(instr)) {
switch (instr->TypeValue()) {
case 0:
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 45ae999b57..7fca7432bf 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -144,7 +144,10 @@ class Simulator {
d8, d9, d10, d11, d12, d13, d14, d15,
d16, d17, d18, d19, d20, d21, d22, d23,
d24, d25, d26, d27, d28, d29, d30, d31,
- num_d_registers = 32
+ num_d_registers = 32,
+ q0 = 0, q1, q2, q3, q4, q5, q6, q7,
+ q8, q9, q10, q11, q12, q13, q14, q15,
+ num_q_registers = 16
};
explicit Simulator(Isolate* isolate);
@@ -163,6 +166,15 @@ class Simulator {
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
+ void get_d_register(int dreg, uint64_t* value);
+ void set_d_register(int dreg, const uint64_t* value);
+ void get_d_register(int dreg, uint32_t* value);
+ void set_d_register(int dreg, const uint32_t* value);
+ void get_q_register(int qreg, uint64_t* value);
+ void set_q_register(int qreg, const uint64_t* value);
+ void get_q_register(int qreg, uint32_t* value);
+ void set_q_register(int qreg, const uint32_t* value);
+
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
@@ -279,11 +291,11 @@ class Simulator {
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
- void ProcessPUW(Instruction* instr,
- int num_regs,
- int operand_size,
- intptr_t* start_address,
- intptr_t* end_address);
+ int32_t ProcessPU(Instruction* instr,
+ int num_regs,
+ int operand_size,
+ intptr_t* start_address,
+ intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
@@ -328,6 +340,7 @@ class Simulator {
// Support for VFP.
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
+ void DecodeSpecialCondition(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
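The new D/Q accessors come in uint64_t and uint32_t flavors; architecturally a Q register is an aliased pair of D registers. One plausible layout behind them (the backing array and its name are assumptions, not taken from this header):

#include <cstdint>
#include <cstring>

struct VfpRegistersSketch {
  uint32_t words[64];  // 32 D registers * 2 words each (assumed layout)

  void get_d_register(int dreg, uint64_t* value) const {
    std::memcpy(value, &words[dreg * 2], sizeof(uint64_t));
  }
  void set_d_register(int dreg, const uint64_t* value) {
    std::memcpy(&words[dreg * 2], value, sizeof(uint64_t));
  }
  void get_q_register(int qreg, uint64_t* value) const {
    std::memcpy(value, &words[qreg * 4], 2 * sizeof(uint64_t));
  }
  void set_q_register(int qreg, const uint64_t* value) {
    std::memcpy(&words[qreg * 4], value, 2 * sizeof(uint64_t));
  }
};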
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index c154f9add4..d7b1b55c20 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -437,91 +437,58 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
+void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
// Generate StoreTransition code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* miss_restore_name,
- Label* slow) {
+void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
// r0 : value
Label exit;
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- // Ensure no transitions to deprecated maps are followed.
- __ CheckMapDeprecated(transition, scratch1, miss_label);
-
- // Check that we are allowed to write this.
- if (object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- // holder == object indicates that no property was found.
- if (lookup->holder() != *object) {
- holder = lookup->holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- Register holder_reg = CheckPrototypes(
- object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
- // If no property was found, and the holder (the last object in the
- // prototype chain) is in slow mode, we need to do a negative lookup on the
- // holder.
- if (lookup->holder() == *object) {
- if (holder->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(
- masm,
- Handle<GlobalObject>(GlobalObject::cast(holder)),
- name,
- scratch1,
- miss_restore_name);
- } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
- }
- }
- }
-
- Register storage_reg = name_reg;
-
if (details.type() == CONSTANT_FUNCTION) {
Handle<HeapObject> constant(
HeapObject::cast(descriptors->GetValue(descriptor)));
__ LoadHeapObject(scratch1, constant);
__ cmp(value_reg, scratch1);
- __ b(ne, miss_restore_name);
+ __ b(ne, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_restore_name);
+ __ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_restore_name);
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
@@ -535,7 +502,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_restore_name, DONT_DO_SMI_CHECK);
+ miss_label, DONT_DO_SMI_CHECK);
__ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
@@ -566,8 +533,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ mov(scratch1, Operand(transition));
__ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
+ // Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
@@ -604,19 +570,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
if (!FLAG_track_fields || !representation.IsSmi()) {
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
// Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ mov(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
offset,
- name_reg,
+ storage_reg,
scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@@ -636,19 +596,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
if (!FLAG_track_fields || !representation.IsSmi()) {
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
// Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ mov(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
offset,
- name_reg,
+ storage_reg,
receiver_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@@ -668,27 +622,18 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// r0 : value
Label exit;
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1240,6 +1185,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ mov(scratch1, Operand(Handle<Map>(object->map())));
+
Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1342,7 +1291,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ b(success);
@@ -1352,6 +1302,17 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
}
+void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ b(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
+
+
Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
@@ -1394,7 +1355,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ b(ne, &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
return reg;
}
@@ -1415,7 +1376,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
}
@@ -1728,11 +1689,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
- Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
- Handle<Cell> kind_feedback_cell =
- isolate()->factory()->NewCell(kind);
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
__ mov(r0, Operand(argc));
- __ mov(r2, Operand(kind_feedback_cell));
+ __ mov(r2, Operand(site_feedback_cell));
__ mov(r1, Operand(function));
ArrayConstructorStub stub(isolate());
@@ -2824,34 +2785,30 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(receiver(), &miss);
- CheckPrototypes(object, receiver(), holder,
- scratch1(), scratch2(), scratch3(), name, &miss);
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(receiver()); // receiver
__ mov(ip, Operand(callback)); // callback info
- __ Push(ip, this->name(), value());
+ __ push(ip);
+ __ mov(ip, Operand(name));
+ __ Push(ip, value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetICCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -3105,7 +3062,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ b(eq, &miss);
}
- HandlerFrontendFooter(&success, &miss);
+ HandlerFrontendFooter(name, &success, &miss);
__ bind(&success);
Counters* counters = isolate()->counters();
@@ -3118,7 +3075,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
}
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
CodeHandleList* handlers,
Handle<Name> name,
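The BaseStoreStubCompiler refactor above centralizes the negative holder lookup that GenerateStoreTransition used to open-code. Its dispatch, restated in plain C++ with hypothetical enum names:

// A global holder proves the property's absence through its property
// cell, a dictionary-mode holder through a negative dictionary lookup;
// the remaining cases need no extra code because the emitted map checks
// already pin the holder's shape (a plausible reading of the stub).
enum HolderKind { kGlobalObject, kDictionaryMode, kFastProperties, kGlobalProxy };
enum NegativeLookup { kCheckPropertyCell, kDictionaryNegativeLookup, kNoExtraCheck };

NegativeLookup ChooseNegativeLookup(HolderKind holder) {
  if (holder == kGlobalObject) return kCheckPropertyCell;
  if (holder == kDictionaryMode) return kDictionaryNegativeLookup;
  return kNoExtraCheck;  // fast properties or global proxy
}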
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
new file mode 100644
index 0000000000..8f1ab47b8a
--- /dev/null
+++ b/deps/v8/src/array-iterator.js
@@ -0,0 +1,127 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var ARRAY_ITERATOR_KIND_KEYS = 1;
+var ARRAY_ITERATOR_KIND_VALUES = 2;
+var ARRAY_ITERATOR_KIND_ENTRIES = 3;
+// The spec draft also has "sparse" but it is never used.
+
+var iteratorObjectSymbol = %CreateSymbol(void 0);
+var arrayIteratorNextIndexSymbol = %CreateSymbol(void 0);
+var arrayIterationKindSymbol = %CreateSymbol(void 0);
+
+function ArrayIterator() {}
+
+// 15.4.5.1 CreateArrayIterator Abstract Operation
+function CreateArrayIterator(array, kind) {
+ var object = ToObject(array);
+ var iterator = new ArrayIterator;
+ iterator[iteratorObjectSymbol] = object;
+ iterator[arrayIteratorNextIndexSymbol] = 0;
+ iterator[arrayIterationKindSymbol] = kind;
+ return iterator;
+}
+
+// 15.19.4.3.4 CreateItrResultObject
+function CreateIteratorResultObject(value, done) {
+ return {value: value, done: done};
+}
+
+// 15.4.5.2.2 ArrayIterator.prototype.next( )
+function ArrayIteratorNext() {
+ var iterator = ToObject(this);
+ var array = iterator[iteratorObjectSymbol];
+ if (!array) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Array Iterator.prototype.next']);
+ }
+
+ var index = iterator[arrayIteratorNextIndexSymbol];
+ var itemKind = iterator[arrayIterationKindSymbol];
+ var length = TO_UINT32(array.length);
+
+ // "sparse" is never used.
+
+ if (index >= length) {
+ iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
+ return CreateIteratorResultObject(void 0, true);
+ }
+
+ var elementKey = ToString(index);
+ iterator[arrayIteratorNextIndexSymbol] = index + 1;
+
+ if (itemKind == ARRAY_ITERATOR_KIND_VALUES)
+ return CreateIteratorResultObject(array[elementKey], false);
+
+ if (itemKind == ARRAY_ITERATOR_KIND_ENTRIES)
+ return CreateIteratorResultObject([elementKey, array[elementKey]], false);
+
+ return CreateIteratorResultObject(elementKey, false);
+}
+
+function ArrayEntries() {
+ return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_ENTRIES);
+}
+
+function ArrayValues() {
+ return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_VALUES);
+}
+
+function ArrayKeys() {
+ return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_KEYS);
+}
+
+function SetUpArrayIterator() {
+ %CheckIsBootstrapping();
+
+ %FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
+ %FunctionSetReadOnlyPrototype(ArrayIterator);
+
+ InstallFunctions(ArrayIterator.prototype, DONT_ENUM, $Array(
+ 'next', ArrayIteratorNext
+ ));
+}
+
+SetUpArrayIterator();
+
+function ExtendArrayPrototype() {
+ %CheckIsBootstrapping();
+
+ InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+ 'entries', ArrayEntries,
+ 'values', ArrayValues,
+ 'keys', ArrayKeys
+ ));
+}
+
+ExtendArrayPrototype();
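For reference, ArrayIteratorNext above is a small state machine: bump the index, emit a key/value/entry result, and park the index at Infinity once exhausted. A C++ restatement with stand-in types (illustrative only; the keys and values kinds just drop one of the two fields):

#include <cstdint>
#include <limits>

struct IterResult { uint32_t key; double value; bool done; };

struct ArrayIteratorSketch {
  const double* elements;
  uint32_t length;
  double next_index;  // double so it can hold the Infinity sentinel

  IterResult Next() {
    if (next_index >= length) {
      // Park at Infinity so the iterator stays exhausted even if the
      // array grows afterwards.
      next_index = std::numeric_limits<double>::infinity();
      return IterResult{0, 0.0, true};
    }
    uint32_t i = static_cast<uint32_t>(next_index);
    next_index += 1;
    return IterResult{i, elements[i], false};  // entries-style result
  }
};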
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index b669e0911f..ae8a0b58ba 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -381,6 +381,7 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
}
+
void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
for (int i = 0; i < kIntSize; i++) {
@@ -390,6 +391,7 @@ void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
}
}
+
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
@@ -847,7 +849,7 @@ void RelocInfo::Verify() {
CHECK(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = HEAP->FindCodeObject(addr);
+ Object* found = code->GetIsolate()->FindCodeObject(addr);
CHECK(found->IsCode());
CHECK(code->address() == HeapObject::cast(found)->address());
break;
@@ -1071,6 +1073,11 @@ ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
}
+ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
+ return ExternalReference(isolate->stress_deopt_count_address());
+}
+
+
ExternalReference ExternalReference::transcendental_cache_array_address(
Isolate* isolate) {
return ExternalReference(
@@ -1123,6 +1130,12 @@ ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
}
+ExternalReference ExternalReference::allocation_sites_list_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->allocation_sites_list_address());
+}
+
+
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}
@@ -1322,6 +1335,7 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
return ExternalReference(Redirect(isolate, function));
}
+
ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
@@ -1334,6 +1348,7 @@ ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
+
ExternalReference ExternalReference::re_word_character_map() {
return ExternalReference(
NativeRegExpMacroAssembler::word_character_map_address());
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 95853e8e3a..481add56b8 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -747,6 +747,9 @@ class ExternalReference BASE_EMBEDDED {
// Static variable Heap::roots_array_start()
static ExternalReference roots_array_start(Isolate* isolate);
+ // Static variable Heap::allocation_sites_list_address()
+ static ExternalReference allocation_sites_list_address(Isolate* isolate);
+
// Static variable StackGuard::address_of_jslimit()
static ExternalReference address_of_stack_limit(Isolate* isolate);
@@ -863,6 +866,8 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
+ static ExternalReference stress_deopt_count(Isolate* isolate);
+
private:
explicit ExternalReference(void* address)
: address_(address) {}
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 589bd5a48f..f34c7bb24a 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -71,8 +71,14 @@ bool Expression::IsNullLiteral() {
}
-bool Expression::IsUndefinedLiteral() {
- return AsLiteral() != NULL && AsLiteral()->value()->IsUndefined();
+bool Expression::IsUndefinedLiteral(Isolate* isolate) {
+ VariableProxy* var_proxy = AsVariableProxy();
+ if (var_proxy == NULL) return false;
+ Variable* var = var_proxy->var();
+ // The global identifier "undefined" is immutable. Everything
+ // else could be reassigned.
+ return var != NULL && var->location() == Variable::UNALLOCATED &&
+ var_proxy->name()->Equals(isolate->heap()->undefined_string());
}
@@ -385,12 +391,13 @@ static bool IsVoidOfLiteral(Expression* expr) {
static bool MatchLiteralCompareUndefined(Expression* left,
Token::Value op,
Expression* right,
- Expression** expr) {
+ Expression** expr,
+ Isolate* isolate) {
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
- if (left->IsUndefinedLiteral() && Token::IsEqualityOp(op)) {
+ if (left->IsUndefinedLiteral(isolate) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
@@ -398,9 +405,10 @@ static bool MatchLiteralCompareUndefined(Expression* left,
}
-bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
- return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
- MatchLiteralCompareUndefined(right_, op_, left_, expr);
+bool CompareOperation::IsLiteralCompareUndefined(
+ Expression** expr, Isolate* isolate) {
+ return MatchLiteralCompareUndefined(left_, op_, right_, expr, isolate) ||
+ MatchLiteralCompareUndefined(right_, op_, left_, expr, isolate);
}
@@ -503,7 +511,7 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
store_mode_ = oracle->GetStoreMode(id);
- } else if (oracle->StoreIsPolymorphic(id)) {
+ } else if (oracle->StoreIsKeyedPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
store_mode_ = oracle->GetStoreMode(id);
@@ -520,9 +528,11 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(
oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsPolymorphic(id)) {
+ } else if (oracle->StoreIsKeyedPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
+ } else {
+ oracle->CollectPolymorphicStoreReceiverTypes(id, &receiver_types_);
}
store_mode_ = oracle->GetStoreMode(id);
type_ = oracle->IncrementType(this);
@@ -675,8 +685,10 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
Object* value = allocation_info_cell_->value();
- if (value->IsSmi()) {
- elements_kind_ = static_cast<ElementsKind>(Smi::cast(value)->value());
+ ASSERT(!value->IsTheHole());
+ if (value->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(value);
+ elements_kind_ = site->GetElementsKind();
}
}
}
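The IsUndefinedLiteral change above is semantic, not cosmetic: matching the literal value would also match bindings that merely hold undefined, so the predicate now only accepts a variable reference resolving to the unallocated global named "undefined". A self-contained restatement with hypothetical stand-in types:

#include <string>

// Minimal stand-ins for the AST types involved (illustration only): any
// other binding with this name could have been shadowed or reassigned.
struct Variable { bool unallocated; };
struct VariableProxy { const Variable* var; std::string name; };

bool IsProvablyUndefined(const VariableProxy* proxy) {
  return proxy != nullptr && proxy->var != nullptr &&
         proxy->var->unallocated && proxy->name == "undefined";
}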
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index b9a98e0b7d..f14156f93c 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -353,14 +353,12 @@ class Expression: public AstNode {
// True iff the expression is the null literal.
bool IsNullLiteral();
- // True iff the expression is the undefined literal.
- bool IsUndefinedLiteral();
+ // True if we can prove that the expression is the undefined literal.
+ bool IsUndefinedLiteral(Isolate* isolate);
// Expression type bounds
- Handle<Type> upper_type() { return upper_type_; }
- Handle<Type> lower_type() { return lower_type_; }
- void set_upper_type(Handle<Type> type) { upper_type_ = type; }
- void set_lower_type(Handle<Type> type) { lower_type_ = type; }
+ Bounds bounds() { return bounds_; }
+ void set_bounds(Bounds bounds) { bounds_ = bounds; }
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
@@ -391,15 +389,13 @@ class Expression: public AstNode {
protected:
explicit Expression(Isolate* isolate)
- : upper_type_(Type::Any(), isolate),
- lower_type_(Type::None(), isolate),
+ : bounds_(Type::None(), Type::Any(), isolate),
id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
private:
- Handle<Type> upper_type_;
- Handle<Type> lower_type_;
+ Bounds bounds_;
byte to_boolean_types_;
const BailoutId id_;
@@ -1884,9 +1880,6 @@ class BinaryOperation: public Expression {
BailoutId RightId() const { return right_id_; }
TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
- // TODO(rossberg): result_type should be subsumed by lower_type.
- Handle<Type> result_type() const { return result_type_; }
- void set_result_type(Handle<Type> type) { result_type_ = type; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
@@ -1913,7 +1906,6 @@ class BinaryOperation: public Expression {
Expression* right_;
int pos_;
- Handle<Type> result_type_;
// TODO(rossberg): the fixed arg should probably be represented as a Constant
// type for the RHS.
Maybe<int> fixed_right_arg_;
@@ -2002,7 +1994,7 @@ class CompareOperation: public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
- bool IsLiteralCompareUndefined(Expression** expr);
+ bool IsLiteralCompareUndefined(Expression** expr, Isolate* isolate);
bool IsLiteralCompareNull(Expression** expr);
protected:
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index b18b54d77b..789721edfc 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -153,14 +153,11 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
#include "atomicops_internals_tsan.h"
-#elif defined(_MSC_VER) && \
- (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__) && \
- (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_macosx.h"
-#elif defined(__GNUC__) && \
- (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
diff --git a/deps/v8/src/atomicops_internals_x86_gcc.cc b/deps/v8/src/atomicops_internals_x86_gcc.cc
index b5078cf4a5..950b423f41 100644
--- a/deps/v8/src/atomicops_internals_x86_gcc.cc
+++ b/deps/v8/src/atomicops_internals_x86_gcc.cc
@@ -124,6 +124,7 @@ class AtomicOpsx86Initializer {
}
};
+
// A global to get us initialized on startup via static initialization :/
AtomicOpsx86Initializer g_initer;
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index c8b61eef50..af0edde6d5 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -45,6 +45,7 @@ static int BitSize(S value) {
return 8 * sizeof(value);
}
+
// Guaranteed to lie in one Bigit.
void Bignum::AssignUInt16(uint16_t value) {
ASSERT(kBigitSize >= BitSize(value));
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 49333eb21c..281f8b918e 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -200,7 +200,7 @@ class Genesis BASE_EMBEDDED {
// detached from the other objects in the snapshot.
void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
// New context initialization. Used for creating a context from scratch.
- bool InitializeGlobal(Handle<GlobalObject> inner_global,
+ void InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function);
void InitializeExperimentalGlobal();
// Installs the contents of the native .js files on the global objects.
@@ -829,7 +829,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpInnerGlobal.
-bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
// --- G l o b a l C o n t e x t ---
// Use the empty function as closure (no scope info).
@@ -1053,10 +1053,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
- { MaybeObject* result = cons->SetInstancePrototype(
- native_context()->initial_object_prototype());
- if (result->IsFailure()) return false;
- }
+ JSFunction::SetInstancePrototype(cons,
+ Handle<Object>(native_context()->initial_object_prototype(), isolate));
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
@@ -1277,7 +1275,6 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_random_seed(*zeroed_byte_array);
memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
}
- return true;
}
@@ -1289,7 +1286,7 @@ Handle<JSFunction> Genesis::InstallTypedArray(
Builtins::kIllegal, false, true);
Handle<Map> initial_map = isolate()->factory()->NewMap(
- JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, elementsKind);
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithInternalFields, elementsKind);
result->set_initial_map(*initial_map);
initial_map->set_constructor(*result);
return result;
@@ -1327,6 +1324,11 @@ void Genesis::InitializeExperimentalGlobal() {
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
}
+ { // -- W e a k S e t
+ InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ }
}
if (FLAG_harmony_array_buffer) {
@@ -1373,7 +1375,7 @@ void Genesis::InitializeExperimentalGlobal() {
Handle<JSFunction> data_view_fun =
InstallFunction(
global, "DataView", JS_DATA_VIEW_TYPE,
- JSDataView::kSize,
+ JSDataView::kSizeWithInternalFields,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
native_context()->set_data_view_fun(*data_view_fun);
@@ -1586,6 +1588,7 @@ void Genesis::InstallNativeFunctions() {
to_complete_property_descriptor);
}
+
void Genesis::InstallExperimentalNativeFunctions() {
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
@@ -2071,6 +2074,11 @@ bool Genesis::InstallExperimentalNatives() {
"native generator.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
+ if (FLAG_harmony_iteration &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native array-iterator.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
}
InstallExperimentalNativeFunctions();
@@ -2239,10 +2247,12 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
#endif
}
+
static uint32_t Hash(RegisteredExtension* extension) {
return v8::internal::ComputePointerHash(extension);
}
+
static bool MatchRegisteredExtensions(void* key1, void* key2) {
return key1 == key2;
}
@@ -2624,7 +2634,7 @@ Genesis::Genesis(Isolate* isolate,
Handle<JSGlobalProxy> global_proxy =
CreateNewGlobals(global_template, global_object, &inner_global);
HookUpGlobalProxy(inner_global, global_proxy);
- if (!InitializeGlobal(inner_global, empty_function)) return;
+ InitializeGlobal(inner_global, empty_function);
InstallJSFunctionResultCaches();
InitializeNormalizedMapCaches();
if (!InstallNatives()) return;
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index be04ddf53d..eaba839aa4 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -182,6 +182,7 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
return result;
}
+
// ----------------------------------------------------------------------------
BUILTIN(Illegal) {
@@ -210,14 +211,15 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
MaybeObject* maybe_array = array->Initialize(0);
if (maybe_array->IsFailure()) return maybe_array;
- AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
- ElementsKind to_kind = array->GetElementsKind();
- if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
+ AllocationMemento* memento = AllocationMemento::FindForJSObject(array);
+ if (memento != NULL && memento->IsValid()) {
+ AllocationSite* site = memento->GetAllocationSite();
+ ElementsKind to_kind = site->GetElementsKind();
if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
to_kind)) {
// We have advice that we should change the elements kind
if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
+ PrintF("AllocationSite: pre-transitioning array %p(%s->%s)\n",
reinterpret_cast<void*>(array),
ElementsKindToString(array->GetElementsKind()),
ElementsKindToString(to_kind));
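The builtins change walks array → AllocationMemento → AllocationSite before taking elements-kind advice. The decision it guards, sketched with hypothetical enum values (ordered so that larger means more general):

// The array is only pre-transitioned when a valid memento's site records
// a strictly more general elements kind, so repeated allocations
// converge instead of ping-ponging.
enum ElementsKindSketch { kFastSmi = 0, kFastDouble = 1, kFastObject = 2 };

bool IsMoreGeneralTransition(ElementsKindSketch from, ElementsKindSketch to) {
  return to > from;
}

ElementsKindSketch AdvisedKind(ElementsKindSketch current,
                               bool has_valid_memento,
                               ElementsKindSketch site_kind) {
  if (has_valid_memento && IsMoreGeneralTransition(current, site_kind)) {
    return site_kind;  // pre-transition the freshly initialized array
  }
  return current;
}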
@@ -1153,6 +1155,7 @@ BUILTIN(StrictModePoisonPill) {
"strict_poison_pill", HandleVector<Object>(NULL, 0)));
}
+
// -----------------------------------------------------------------------------
//
@@ -1432,14 +1435,17 @@ static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
+
static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
KeyedLoadIC::GenerateIndexedInterceptor(masm);
}
+
static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
KeyedLoadIC::GenerateNonStrictArguments(masm);
}
+
static void Generate_StoreIC_Slow(MacroAssembler* masm) {
StoreIC::GenerateSlow(masm);
}
@@ -1539,14 +1545,17 @@ static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
+
static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
KeyedStoreIC::GenerateNonStrictArguments(masm);
}
+
static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
}
+
static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
}
@@ -1716,6 +1725,7 @@ void Builtins::InitBuiltinFunctionTable() {
#undef DEF_FUNCTION_PTR_A
}
+
void Builtins::SetUp(bool create_heap_objects) {
ASSERT(!initialized_);
Isolate* isolate = Isolate::Current();
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index 1a89ef3b11..dee9ccd381 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -71,6 +71,18 @@ inline bool IsHexDigit(uc32 c) {
}
+inline bool IsOctalDigit(uc32 c) {
+ // ECMA-262, 6th, 7.8.3
+ return IsInRange(c, '0', '7');
+}
+
+
+inline bool IsBinaryDigit(uc32 c) {
+ // ECMA-262, 6th, 7.8.3
+ return c == '0' || c == '1';
+}
+
+
inline bool IsRegExpWord(uc16 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
|| IsDecimalDigit(c)
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index b97191f5cc..767ad6513a 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -40,6 +40,8 @@ inline bool IsCarriageReturn(uc32 c);
inline bool IsLineFeed(uc32 c);
inline bool IsDecimalDigit(uc32 c);
inline bool IsHexDigit(uc32 c);
+inline bool IsOctalDigit(uc32 c);
+inline bool IsBinaryDigit(uc32 c);
inline bool IsRegExpWord(uc32 c);
inline bool IsRegExpNewline(uc32 c);
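The two new predicates cite the ECMA-262 6th-edition draft's numeric literals, presumably backing the scanner's 0o/0b support. A standalone restatement with a quick self-check:

#include <cassert>

// uc32 replaced by a plain int for the sketch.
inline bool IsOctalDigitSketch(int c) { return c >= '0' && c <= '7'; }
inline bool IsBinaryDigitSketch(int c) { return c == '0' || c == '1'; }

int main() {
  assert(IsOctalDigitSketch('7') && !IsOctalDigitSketch('8'));
  assert(IsBinaryDigitSketch('1') && !IsBinaryDigitSketch('2'));
  return 0;
}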
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index d0a0c2b5ac..b309e2c42c 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -230,6 +230,11 @@ inline void CheckNonEqualsHelper(const char* file,
#define CHECK_LE(a, b) CHECK((a) <= (b))
+// Use C++11 static_assert if possible, which gives error
+// messages that are easier to understand at first sight.
+#if __cplusplus >= 201103L
+#define STATIC_CHECK(test) static_assert(test, #test)
+#else
// This is inspired by the static assertion facility in boost. This
// is pretty magical. If it causes you trouble on a platform you may
// find a fix in the boost code.
@@ -249,6 +254,7 @@ template <int> class StaticAssertionHelper { };
typedef \
StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
+#endif
extern bool FLAG_enable_slow_asserts;
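With a C++11 compiler, STATIC_CHECK now expands to static_assert, so the failing condition is echoed verbatim in the diagnostic instead of the boost-style template error. A minimal demonstration of the new branch:

#if __cplusplus >= 201103L
#define STATIC_CHECK(test) static_assert(test, #test)

STATIC_CHECK(sizeof(int) >= 2);    // compiles silently
// STATIC_CHECK(sizeof(int) == 1); // error quotes "sizeof(int) == 1"
#endif

int main() { return 0; }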
diff --git a/deps/v8/src/circular-queue-inl.h b/deps/v8/src/circular-queue-inl.h
index 373bf6092a..b48070ab5d 100644
--- a/deps/v8/src/circular-queue-inl.h
+++ b/deps/v8/src/circular-queue-inl.h
@@ -35,7 +35,16 @@ namespace internal {
void* SamplingCircularQueue::Enqueue() {
- WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
+ if (producer_pos_->enqueue_pos == producer_pos_->next_chunk_pos) {
+ if (producer_pos_->enqueue_pos == buffer_ + buffer_size_) {
+ producer_pos_->next_chunk_pos = buffer_;
+ producer_pos_->enqueue_pos = buffer_;
+ }
+ Acquire_Store(producer_pos_->next_chunk_pos, kEnqueueStarted);
+ // Skip marker.
+ producer_pos_->enqueue_pos += 1;
+ producer_pos_->next_chunk_pos += chunk_size_;
+ }
void* result = producer_pos_->enqueue_pos;
producer_pos_->enqueue_pos += record_size_;
return result;
@@ -44,7 +53,7 @@ void* SamplingCircularQueue::Enqueue() {
void SamplingCircularQueue::WrapPositionIfNeeded(
SamplingCircularQueue::Cell** pos) {
- if (**pos == kEnd) *pos = buffer_;
+ if (*pos == buffer_ + buffer_size_) *pos = buffer_;
}
diff --git a/deps/v8/src/circular-queue.cc b/deps/v8/src/circular-queue.cc
index 928c3f0c05..0aea343592 100644
--- a/deps/v8/src/circular-queue.cc
+++ b/deps/v8/src/circular-queue.cc
@@ -33,26 +33,22 @@ namespace v8 {
namespace internal {
-SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks)
+SamplingCircularQueue::SamplingCircularQueue(size_t record_size_in_bytes,
+ size_t desired_chunk_size_in_bytes,
+ unsigned buffer_size_in_chunks)
: record_size_(record_size_in_bytes / sizeof(Cell)),
chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
- record_size_in_bytes),
+ record_size_in_bytes + sizeof(Cell)),
chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
buffer_size_(chunk_size_ * buffer_size_in_chunks),
- // The distance ensures that producer and consumer never step on
- // each other's chunks and helps eviction of produced data from
- // the CPU cache (having that chunk size is bigger than the cache.)
- producer_consumer_distance_(2 * chunk_size_),
- buffer_(NewArray<Cell>(buffer_size_ + 1)) {
+ buffer_(NewArray<Cell>(buffer_size_)) {
+ ASSERT(record_size_ * sizeof(Cell) == record_size_in_bytes);
+ ASSERT(chunk_size_ * sizeof(Cell) == chunk_size_in_bytes_);
ASSERT(buffer_size_in_chunks > 2);
- // Clean up the whole buffer to avoid encountering a random kEnd
- // while enqueuing.
- for (int i = 0; i < buffer_size_; ++i) {
+ // Mark all chunks as clear.
+ for (size_t i = 0; i < buffer_size_; i += chunk_size_) {
buffer_[i] = kClear;
}
- buffer_[buffer_size_] = kEnd;
// Layout producer and consumer position pointers each on their own
// cache lines to avoid cache lines thrashing due to simultaneous
@@ -67,6 +63,7 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
producer_pos_ = reinterpret_cast<ProducerPosition*>(
RoundUp(positions_, kProcessorCacheLineSize));
+ producer_pos_->next_chunk_pos = buffer_;
producer_pos_->enqueue_pos = buffer_;
consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
@@ -74,7 +71,11 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
positions_ + positions_size);
consumer_pos_->dequeue_chunk_pos = buffer_;
- consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
+ // The distance ensures that producer and consumer never step on
+ // each other's chunks and helps eviction of produced data from
+ // the CPU cache (given that the chunk size is bigger than the cache.)
+ const size_t producer_consumer_distance = (2 * chunk_size_);
+ consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance;
consumer_pos_->dequeue_pos = NULL;
}
@@ -89,9 +90,11 @@ void* SamplingCircularQueue::StartDequeue() {
if (consumer_pos_->dequeue_pos != NULL) {
return consumer_pos_->dequeue_pos;
} else {
- if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) {
- consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos;
- consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_;
+ if (Acquire_Load(consumer_pos_->dequeue_chunk_poll_pos) != kClear) {
+ // Skip marker.
+ consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos + 1;
+ consumer_pos_->dequeue_end_pos =
+ consumer_pos_->dequeue_chunk_pos + chunk_size_;
return consumer_pos_->dequeue_pos;
} else {
return NULL;
diff --git a/deps/v8/src/circular-queue.h b/deps/v8/src/circular-queue.h
index 73afc68316..4ad4f4b550 100644
--- a/deps/v8/src/circular-queue.h
+++ b/deps/v8/src/circular-queue.h
@@ -45,9 +45,9 @@ namespace internal {
class SamplingCircularQueue {
public:
// Executed on the application thread.
- SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks);
+ SamplingCircularQueue(size_t record_size_in_bytes,
+ size_t desired_chunk_size_in_bytes,
+ unsigned buffer_size_in_chunks);
~SamplingCircularQueue();
// Enqueue returns a pointer to a memory location for storing the next
@@ -67,12 +67,16 @@ class SamplingCircularQueue {
void FlushResidualRecords();
typedef AtomicWord Cell;
- // Reserved values for the first cell of a record.
- static const Cell kClear = 0; // Marks clean (processed) chunks.
- static const Cell kEnd = -1; // Marks the end of the buffer.
private:
+ // Reserved values for the chunk marker (first Cell in each chunk).
+ enum {
+ kClear, // Marks clean (processed) chunks.
+ kEnqueueStarted // Marks chunks where enqueue started.
+ };
+
struct ProducerPosition {
+ Cell* next_chunk_pos;
Cell* enqueue_pos;
};
struct ConsumerPosition {
@@ -84,11 +88,10 @@ class SamplingCircularQueue {
INLINE(void WrapPositionIfNeeded(Cell** pos));
- const int record_size_;
- const int chunk_size_in_bytes_;
- const int chunk_size_;
- const int buffer_size_;
- const int producer_consumer_distance_;
+ const size_t record_size_;
+ const size_t chunk_size_in_bytes_;
+ const size_t chunk_size_;
+ const size_t buffer_size_;
Cell* buffer_;
byte* positions_;
ProducerPosition* producer_pos_;
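The queue rework replaces the kEnd sentinel cell with an explicit end-of-buffer comparison and gives every chunk a leading marker cell (kClear/kEnqueueStarted) that the producer publishes with Acquire_Store and the consumer polls with Acquire_Load. The producer side of that protocol, sketched with plain pointers in place of the atomic Cell operations:

#include <cstddef>

typedef long Cell;
const Cell kClear = 0;
const Cell kEnqueueStarted = 1;

struct ProducerSketch {
  Cell* buffer;
  size_t buffer_size;   // in cells
  size_t chunk_size;    // in cells, leading marker included
  size_t record_size;   // in cells
  Cell* next_chunk_pos;
  Cell* enqueue_pos;

  void* Enqueue() {
    if (enqueue_pos == next_chunk_pos) {          // crossed into a new chunk
      if (enqueue_pos == buffer + buffer_size) {  // wrap, no kEnd sentinel
        next_chunk_pos = buffer;
        enqueue_pos = buffer;
      }
      *next_chunk_pos = kEnqueueStarted;  // Acquire_Store in the real code
      enqueue_pos += 1;                   // skip the marker cell
      next_chunk_pos += chunk_size;
    }
    void* result = enqueue_pos;
    enqueue_pos += record_size;
    return result;
  }
};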
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 96266af119..324dfa9f76 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -315,40 +315,44 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
FastCloneShallowArrayStub::Mode mode = casted_stub()->mode();
int length = casted_stub()->length();
- HInstruction* boilerplate =
+ HInstruction* allocation_site =
AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
GetParameter(1),
NULL,
FAST_ELEMENTS));
-
IfBuilder checker(this);
- checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate, undefined);
+ checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site, undefined);
checker.Then();
+ HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
+ HInstruction* boilerplate = AddLoad(allocation_site, access);
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
- if_fixed_cow.IfCompareMap(elements, factory->fixed_cow_array_map());
+ if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
if_fixed_cow.Then();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
+ allocation_site,
alloc_site_mode,
FAST_ELEMENTS,
0/*copy-on-write*/));
if_fixed_cow.Else();
IfBuilder if_fixed(this);
- if_fixed.IfCompareMap(elements, factory->fixed_array_map());
+ if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
if_fixed.Then();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
+ allocation_site,
alloc_site_mode,
FAST_ELEMENTS,
length));
if_fixed.Else();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
+ allocation_site,
alloc_site_mode,
FAST_DOUBLE_ELEMENTS,
length));
@@ -356,6 +360,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
+ allocation_site,
alloc_site_mode,
elements_kind,
length));
@@ -392,7 +397,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
AddInstruction(new(zone) HInstanceSize(boilerplate));
HValue* size_in_words =
AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2));
- checker.IfCompare(boilerplate_size, size_in_words, Token::EQ);
+ checker.If<HCompareNumericAndBranch>(boilerplate_size,
+ size_in_words, Token::EQ);
checker.Then();
HValue* size_in_bytes = AddInstruction(new(zone) HConstant(size));
@@ -421,6 +427,49 @@ Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
template <>
+HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
+ Zone* zone = this->zone();
+
+ HValue* size = AddInstruction(new(zone) HConstant(AllocationSite::kSize));
+ HAllocate::Flags flags = HAllocate::DefaultFlags();
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ HInstruction* object = AddInstruction(new(zone)
+ HAllocate(context(), size, HType::JSObject(), flags));
+
+ // Store the map
+ Handle<Map> allocation_site_map(isolate()->heap()->allocation_site_map(),
+ isolate());
+ AddStoreMapConstant(object, allocation_site_map);
+
+ // Store the payload (smi elements kind)
+ HValue* initial_elements_kind = AddInstruction(new(zone) HConstant(
+ GetInitialFastElementsKind()));
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteTransitionInfo(),
+ initial_elements_kind);
+
+ Add<HLinkObjectInList>(object, HObjectAccess::ForAllocationSiteWeakNext(),
+ HLinkObjectInList::ALLOCATION_SITE_LIST);
+
+ // We use a hammer (SkipWriteBarrier()) to indicate that we know the input
+ // cell is really a Cell, and so no write barrier is needed.
+ // TODO(mvstanton): Add a debug_code check to verify the input cell is really
+ // a cell. (perhaps with a new instruction, HAssert).
+ HInstruction* cell = GetParameter(0);
+ HObjectAccess access = HObjectAccess::ForCellValue();
+ HStoreNamedField* store = AddStore(cell, access, object);
+ store->SkipWriteBarrier();
+ return cell;
+}
+
+
+Handle<Code> CreateAllocationSiteStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
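CreateAllocationSiteStub threads each new site onto the heap's allocation-sites list via HLinkObjectInList; the underlying list operation amounts to a head insertion (field names assumed, matching the ExternalReference::allocation_sites_list_address added in assembler.cc above):

struct AllocationSiteSketch {
  AllocationSiteSketch* weak_next;
  int transition_info;
};

void LinkAllocationSite(AllocationSiteSketch** list_head,
                        AllocationSiteSketch* site) {
  site->weak_next = *list_head;  // old head becomes the tail
  *list_head = site;             // new site becomes the head
}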
+template <>
HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, NULL,
@@ -483,49 +532,15 @@ Handle<Code> KeyedStoreFastElementStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
- Zone* zone = this->zone();
-
- HValue* js_array = GetParameter(0);
- HValue* map = GetParameter(1);
-
info()->MarkAsSavesCallerDoubles();
- AddInstruction(new(zone) HTrapAllocationMemento(js_array));
-
- HInstruction* array_length =
- AddLoad(js_array, HObjectAccess::ForArrayLength());
- array_length->set_type(HType::Smi());
-
- ElementsKind to_kind = casted_stub()->to_kind();
- BuildNewSpaceArrayCheck(array_length, to_kind);
-
- IfBuilder if_builder(this);
-
- if_builder.IfCompare(array_length, graph()->GetConstant0(), Token::EQ);
- if_builder.Then();
-
- // Nothing to do, just change the map.
-
- if_builder.Else();
-
- HInstruction* elements = AddLoadElements(js_array);
-
- HInstruction* elements_length = AddLoadFixedArrayLength(elements);
-
- HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
- context(), to_kind, elements_length);
-
- BuildCopyElements(context(), elements,
- casted_stub()->from_kind(), new_elements,
- to_kind, array_length, elements_length);
-
- AddStore(js_array, HObjectAccess::ForElementsPointer(), new_elements);
-
- if_builder.End();
-
- AddStore(js_array, HObjectAccess::ForMap(), map);
+ BuildTransitionElementsKind(GetParameter(0),
+ GetParameter(1),
+ casted_stub()->from_kind(),
+ casted_stub()->to_kind(),
+ true);
- return js_array;
+ return GetParameter(0);
}
@@ -545,7 +560,10 @@ HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
}
HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
- JSArrayBuilder array_builder(this, kind, property_cell, constructor,
+ // Walk through the property cell to the AllocationSite
+ HValue* alloc_site = AddInstruction(new(zone()) HLoadNamedField(property_cell,
+ HObjectAccess::ForCellValue()));
+ JSArrayBuilder array_builder(this, kind, alloc_site, constructor,
override_mode);
HValue* result = NULL;
switch (argument_class) {
@@ -606,7 +624,8 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
HBoundsCheck* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
IfBuilder if_builder(this);
- if_builder.IfCompare(checked_arg, constant_zero, Token::EQ);
+ if_builder.If<HCompareNumericAndBranch>(checked_arg, constant_zero,
+ Token::EQ);
if_builder.Then();
Push(initial_capacity_node); // capacity
Push(constant_zero); // length
@@ -742,8 +761,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
CompareNilICStub* stub = casted_stub();
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
- Handle<Type> type =
- CompareNilICStub::StateToType(isolate, stub->GetState(), sentinel_map);
+ Handle<Type> type = stub->GetType(isolate, sentinel_map);
BuildCompareNil(GetParameter(0), type, RelocInfo::kNoPosition, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
@@ -764,13 +782,52 @@ Handle<Code> CompareNilICStub::GenerateCode() {
template <>
+HValue* CodeStubGraphBuilder<UnaryOpStub>::BuildCodeInitializedStub() {
+ UnaryOpStub* stub = casted_stub();
+ Handle<Type> type = stub->GetType(graph()->isolate());
+ HValue* input = GetParameter(0);
+
+ // Prevent an unwanted HChange from being inserted, to ensure that the stub
+ // deopts on newly encountered types.
+ if (!type->Maybe(Type::Double())) {
+ input = AddInstruction(new(zone())
+ HForceRepresentation(input, Representation::Smi()));
+ }
+
+ if (!type->Is(Type::Number())) {
+ // If we expect to see things other than Numbers, we will create a generic
+ // stub, which handles all numbers and calls into the runtime for the rest.
+ IfBuilder if_number(this);
+ if_number.If<HIsNumberAndBranch>(input);
+ if_number.Then();
+ HInstruction* res = BuildUnaryMathOp(input, type, stub->operation());
+ if_number.Return(AddInstruction(res));
+ if_number.Else();
+ HValue* function = AddLoadJSBuiltin(stub->ToJSBuiltin(), context());
+ Add<HPushArgument>(GetParameter(0));
+ HValue* result = Add<HInvokeFunction>(context(), function, 1);
+ if_number.Return(result);
+ if_number.End();
+ return graph()->GetConstantUndefined();
+ }
+
+ return AddInstruction(BuildUnaryMathOp(input, type, stub->operation()));
+}
+
+
+Handle<Code> UnaryOpStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
IfBuilder if_true(this);
if_true.If<HBranch>(GetParameter(0), stub->GetTypes());
if_true.Then();
- if_true.Return(graph()->GetConstant1());
+ if_true.Return(graph()->GetConstant1());
if_true.Else();
if_true.End();
return graph()->GetConstant0();
@@ -782,4 +839,85 @@ Handle<Code> ToBooleanStub::GenerateCode() {
}
+template <>
+HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
+ StoreGlobalStub* stub = casted_stub();
+ Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
+ Handle<Object> placeholder_value(Smi::FromInt(0), isolate());
+ Handle<PropertyCell> placeholder_cell =
+ isolate()->factory()->NewPropertyCell(placeholder_value);
+
+ HParameter* receiver = GetParameter(0);
+ HParameter* value = GetParameter(2);
+
+ if (stub->is_constant()) {
+ // Assume every store to a constant value changes it.
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+ set_current_block(NULL);
+ } else {
+ HValue* cell = Add<HConstant>(placeholder_cell, Representation::Tagged());
+
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+ AddInstruction(HCheckMaps::New(receiver, placeholder_map, zone()));
+
+ // Load the payload of the global parameter cell. A hole indicates that the
+ // property has been deleted and that the store must be handled by the
+ // runtime.
+ HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
+ HValue* cell_contents = Add<HLoadNamedField>(cell, access);
+ IfBuilder builder(this);
+ HValue* hole_value = Add<HConstant>(hole, Representation::Tagged());
+ builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
+ builder.Then();
+ builder.Deopt();
+ builder.Else();
+ Add<HStoreNamedField>(cell, access, value);
+ builder.End();
+ }
+ return value;
+}
+
+
+Handle<Code> StoreGlobalStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
+ HValue* value = GetParameter(0);
+ HValue* map = GetParameter(1);
+ HValue* key = GetParameter(2);
+ HValue* object = GetParameter(3);
+
+ if (FLAG_trace_elements_transitions) {
+ // Tracing elements transitions is the job of the runtime.
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+ set_current_block(NULL);
+ } else {
+ info()->MarkAsSavesCallerDoubles();
+
+ BuildTransitionElementsKind(object, map,
+ casted_stub()->from_kind(),
+ casted_stub()->to_kind(),
+ casted_stub()->is_jsarray());
+
+ BuildUncheckedMonomorphicElementAccess(object, key, value, NULL,
+ casted_stub()->is_jsarray(),
+ casted_stub()->to_kind(),
+ true, ALLOW_RETURN_HOLE,
+ casted_stub()->store_mode());
+ }
+
+ return value;
+}
+
+
+Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 2ed2ba3c66..9d40ad04d1 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -85,6 +85,14 @@ Code::Kind CodeStub::GetCodeKind() const {
}
+Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
+ Handle<Code> ic = GetCode(isolate);
+ ic = isolate->factory()->CopyCode(ic);
+ RecordCodeGeneration(*ic, isolate);
+ return ic;
+}
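GetCodeCopyFromTemplate exists so that a stub compiled once can be duplicated and then specialized per call site; the StoreGlobalStub graph earlier in this diff, for example, is built against a placeholder map and cell precisely so a copy can have the real ones substituted. A minimal sketch of the copy-and-patch idea, with a vector of words standing in for a Code object (the patching loop is an assumption for illustration, not V8's mechanism):

    #include <cassert>
    #include <vector>

    typedef std::vector<int> CodeModel;
    const int kPlaceholderCell = 0xCAFE;  // stands in for placeholder_cell

    // Generate once against the placeholder constant...
    CodeModel GenerateTemplate() {
      CodeModel code;
      code.push_back(kPlaceholderCell);
      return code;
    }

    // ...then copy per call site and substitute the real constant.
    CodeModel CopyAndPatch(const CodeModel& tmpl, int real_cell) {
      CodeModel copy = tmpl;  // the GetCodeCopyFromTemplate step
      for (size_t i = 0; i < copy.size(); ++i) {
        if (copy[i] == kPlaceholderCell) copy[i] = real_cell;
      }
      return copy;
    }

    int main() {
      CodeModel tmpl = GenerateTemplate();
      assert(CopyAndPatch(tmpl, 0x1234)[0] == 0x1234);
      assert(tmpl[0] == kPlaceholderCell);  // template stays reusable
      return 0;
    }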
+
+
Handle<Code> PlatformCodeStub::GenerateCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
@@ -185,11 +193,82 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
}
-void CodeStub::PrintName(StringStream* stream) {
+void CodeStub::PrintBaseName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
+void CodeStub::PrintName(StringStream* stream) {
+ PrintBaseName(stream);
+ PrintState(stream);
+}
+
+
+Builtins::JavaScript UnaryOpStub::ToJSBuiltin() {
+ switch (operation_) {
+ default:
+ UNREACHABLE();
+ case Token::SUB:
+ return Builtins::UNARY_MINUS;
+ case Token::BIT_NOT:
+ return Builtins::BIT_NOT;
+ }
+}
+
+
+Handle<JSFunction> UnaryOpStub::ToJSFunction(Isolate* isolate) {
+ Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
+ Object* builtin = builtins->javascript_builtin(ToJSBuiltin());
+ return Handle<JSFunction>(JSFunction::cast(builtin), isolate);
+}
+
+
+MaybeObject* UnaryOpStub::Result(Handle<Object> object, Isolate* isolate) {
+ Handle<JSFunction> builtin_function = ToJSFunction(isolate);
+ bool caught_exception;
+ Handle<Object> result = Execution::Call(builtin_function, object,
+ 0, NULL, &caught_exception);
+ if (caught_exception) {
+ return Failure::Exception();
+ }
+ return *result;
+}
+
+
+void UnaryOpStub::UpdateStatus(Handle<Object> object) {
+ State old_state(state_);
+ if (object->IsSmi()) {
+ state_.Add(SMI);
+ if (operation_ == Token::SUB && *object == 0) {
+ // The result (-0) has to be represented as double.
+ state_.Add(HEAP_NUMBER);
+ }
+ } else if (object->IsHeapNumber()) {
+ state_.Add(HEAP_NUMBER);
+ } else {
+ state_.Add(GENERIC);
+ }
+ TraceTransition(old_state, state_);
+}
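The subtle transition here is the -0 case: negating the Smi 0 yields a value with no Smi encoding, so Smi feedback immediately widens to include HEAP_NUMBER instead of deopting on every -0 result. A standalone sketch of the widening lattice, with the EnumSet modelled as a plain byte (helper names are illustrative):

    #include <cassert>
    #include <cstdint>

    enum UnaryOpTypeModel { kSmi = 1, kHeapNumber = 2, kGeneric = 4 };

    // Mirrors UpdateStatus above: feedback only ever widens the state.
    uint8_t Update(uint8_t state, bool is_smi, bool is_heap_number,
                   bool negates_zero) {
      if (is_smi) {
        state |= kSmi;
        if (negates_zero) state |= kHeapNumber;  // -0 only exists as a double
      } else if (is_heap_number) {
        state |= kHeapNumber;
      } else {
        state |= kGeneric;
      }
      return state;
    }

    int main() {
      uint8_t state = 0;                          // empty: PREMONOMORPHIC
      state = Update(state, true, false, false);  // saw an ordinary Smi
      state = Update(state, true, false, true);   // saw -(0)
      assert(state == (kSmi | kHeapNumber));
      return 0;
    }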
+
+
+Handle<Type> UnaryOpStub::GetType(Isolate* isolate) {
+ if (state_.Contains(GENERIC)) {
+ return handle(Type::Any(), isolate);
+ }
+ Handle<Type> type = handle(Type::None(), isolate);
+ if (state_.Contains(SMI)) {
+ type = handle(
+ Type::Union(type, handle(Type::Smi(), isolate)), isolate);
+ }
+ if (state_.Contains(HEAP_NUMBER)) {
+ type = handle(
+ Type::Union(type, handle(Type::Double(), isolate)), isolate);
+ }
+ return type;
+}
+
+
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
@@ -275,6 +354,29 @@ void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
#undef __
+void UnaryOpStub::PrintBaseName(StringStream* stream) {
+ CodeStub::PrintBaseName(stream);
+ if (operation_ == Token::SUB) stream->Add("Minus");
+ if (operation_ == Token::BIT_NOT) stream->Add("Not");
+}
+
+
+void UnaryOpStub::PrintState(StringStream* stream) {
+ state_.Print(stream);
+}
+
+
+void UnaryOpStub::State::Print(StringStream* stream) const {
+ stream->Add("(");
+ SimpleListPrinter printer(stream);
+ if (IsEmpty()) printer.Add("None");
+ if (Contains(GENERIC)) printer.Add("Generic");
+ if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber");
+ if (Contains(SMI)) printer.Add("Smi");
+ stream->Add(")");
+}
+
+
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@@ -431,8 +533,9 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
}
-void CompareNilICStub::Record(Handle<Object> object) {
- ASSERT(state_ != State::Generic());
+void CompareNilICStub::UpdateStatus(Handle<Object> object) {
+ ASSERT(!state_.Contains(GENERIC));
+ State old_state(state_);
if (object->IsNull()) {
state_.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
@@ -440,24 +543,30 @@ void CompareNilICStub::Record(Handle<Object> object) {
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
- state_ = State::Generic();
+ state_.RemoveAll();
+ state_.Add(GENERIC);
} else if (IsMonomorphic()) {
- state_ = State::Generic();
+ state_.RemoveAll();
+ state_.Add(GENERIC);
} else {
state_.Add(MONOMORPHIC_MAP);
}
+ TraceTransition(old_state, state_);
}
-void CompareNilICStub::State::TraceTransition(State to) const {
+template<class StateType>
+void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
#ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
static_cast<unsigned>(sizeof(buffer)));
StringStream stream(&allocator);
- stream.Add("[CompareNilIC : ");
- Print(&stream);
+ stream.Add("[");
+ PrintBaseName(&stream);
+ stream.Add(": ");
+ from.Print(&stream);
stream.Add("=>");
to.Print(&stream);
stream.Add("]\n");
@@ -466,11 +575,15 @@ void CompareNilICStub::State::TraceTransition(State to) const {
}
-void CompareNilICStub::PrintName(StringStream* stream) {
- stream->Add("CompareNilICStub_");
+void CompareNilICStub::PrintBaseName(StringStream* stream) {
+ CodeStub::PrintBaseName(stream);
+ stream->Add((nil_value_ == kNullValue) ? "(NullValue)":
+ "(UndefinedValue)");
+}
+
+
+void CompareNilICStub::PrintState(StringStream* stream) {
state_.Print(stream);
- stream->Add((nil_value_ == kNullValue) ? "(NullValue|":
- "(UndefinedValue|");
}
@@ -481,33 +594,28 @@ void CompareNilICStub::State::Print(StringStream* stream) const {
if (Contains(UNDEFINED)) printer.Add("Undefined");
if (Contains(NULL_TYPE)) printer.Add("Null");
if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap");
- if (Contains(UNDETECTABLE)) printer.Add("Undetectable");
if (Contains(GENERIC)) printer.Add("Generic");
stream->Add(")");
}
-Handle<Type> CompareNilICStub::StateToType(
+Handle<Type> CompareNilICStub::GetType(
Isolate* isolate,
- State state,
Handle<Map> map) {
- if (state.Contains(CompareNilICStub::GENERIC)) {
+ if (state_.Contains(CompareNilICStub::GENERIC)) {
return handle(Type::Any(), isolate);
}
Handle<Type> result(Type::None(), isolate);
- if (state.Contains(CompareNilICStub::UNDEFINED)) {
+ if (state_.Contains(CompareNilICStub::UNDEFINED)) {
result = handle(Type::Union(result, handle(Type::Undefined(), isolate)),
isolate);
}
- if (state.Contains(CompareNilICStub::NULL_TYPE)) {
+ if (state_.Contains(CompareNilICStub::NULL_TYPE)) {
result = handle(Type::Union(result, handle(Type::Null(), isolate)),
isolate);
}
- if (state.Contains(CompareNilICStub::UNDETECTABLE)) {
- result = handle(Type::Union(result, handle(Type::Undetectable(), isolate)),
- isolate);
- } else if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
+ if (state_.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
Type* type = map.is_null() ? Type::Detectable() : Type::Class(map);
result = handle(Type::Union(result, handle(type, isolate)), isolate);
}
@@ -516,6 +624,16 @@ Handle<Type> CompareNilICStub::StateToType(
}
+Handle<Type> CompareNilICStub::GetInputType(
+ Isolate* isolate,
+ Handle<Map> map) {
+ Handle<Type> output_type = GetType(isolate, map);
+ Handle<Type> nil_type = handle(nil_value_ == kNullValue
+ ? Type::Null() : Type::Undefined(), isolate);
+ return handle(Type::Union(output_type, nil_type), isolate);
+}
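GetInputType is the new half of the split: the type flowing into the comparison is the union of everything the IC has recorded on the output side with the nil value being compared against. Modelled with a set of type names (V8's real Type is an interned lattice, so this captures shape only):

    #include <cassert>
    #include <set>
    #include <string>

    typedef std::set<std::string> TypeModel;

    TypeModel Union(TypeModel a, const TypeModel& b) {
      a.insert(b.begin(), b.end());
      return a;
    }

    int main() {
      TypeModel output;            // Type::None()
      output.insert("Undefined");  // state_ contains UNDEFINED
      TypeModel nil;
      nil.insert("Null");          // nil_value_ == kNullValue
      TypeModel input = Union(output, nil);
      assert(input.count("Null") == 1 && input.count("Undefined") == 1);
      return 0;
    }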
+
+
void InstanceofStub::PrintName(StringStream* stream) {
const char* args = "";
if (HasArgsInRegisters()) {
@@ -552,6 +670,12 @@ void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
}
+void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
+ CreateAllocationSiteStub stub;
+ stub.GetCode(isolate)->set_is_pregenerated(true);
+}
+
+
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
@@ -615,16 +739,15 @@ void CallConstructStub::PrintName(StringStream* stream) {
}
-bool ToBooleanStub::Record(Handle<Object> object) {
+bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
Types old_types(types_);
- bool to_boolean_value = types_.Record(object);
- old_types.TraceTransition(types_);
+ bool to_boolean_value = types_.UpdateStatus(object);
+ TraceTransition(old_types, types_);
return to_boolean_value;
}
-void ToBooleanStub::PrintName(StringStream* stream) {
- stream->Add("ToBooleanStub_");
+void ToBooleanStub::PrintState(StringStream* stream) {
types_.Print(stream);
}
@@ -645,24 +768,7 @@ void ToBooleanStub::Types::Print(StringStream* stream) const {
}
-void ToBooleanStub::Types::TraceTransition(Types to) const {
- #ifdef DEBUG
- if (!FLAG_trace_ic) return;
- char buffer[100];
- NoAllocationStringAllocator allocator(buffer,
- static_cast<unsigned>(sizeof(buffer)));
- StringStream stream(&allocator);
- stream.Add("[ToBooleanIC : ");
- Print(&stream);
- stream.Add("=>");
- to.Print(&stream);
- stream.Add("]\n");
- stream.OutputToStdOut();
- #endif
-}
-
-
-bool ToBooleanStub::Types::Record(Handle<Object> object) {
+bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
if (object->IsUndefined()) {
Add(UNDEFINED);
return false;
@@ -712,9 +818,9 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
}
-void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
+void ElementsTransitionAndStorePlatformStub::Generate(MacroAssembler* masm) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(from_, to_);
+ AllocationSiteMode mode = AllocationSite::GetMode(from_, to_);
ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
if (IsFastSmiOrObjectElementsKind(to_)) {
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index d197c841b1..33593544d6 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -66,11 +66,13 @@ namespace internal {
V(FastNewBlockContext) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
+ V(CreateAllocationSite) \
V(ToBoolean) \
V(ToNumber) \
V(ArgumentsAccess) \
V(RegExpConstructResult) \
V(NumberToString) \
+ V(DoubleToI) \
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
@@ -90,6 +92,7 @@ namespace internal {
V(ArrayConstructor) \
V(InternalArrayConstructor) \
V(ProfileEntryHook) \
+ V(StoreGlobal) \
/* IC Handler stubs */ \
V(LoadField) \
V(KeyedLoadField)
@@ -123,8 +126,6 @@ namespace internal {
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
-
// Stub is base classes of all stubs.
class CodeStub BASE_EMBEDDED {
@@ -140,6 +141,8 @@ class CodeStub BASE_EMBEDDED {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GetCode(Isolate* isolate);
+ // Retrieve the code for the stub, make and return a copy of the code.
+ Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate);
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@@ -197,6 +200,8 @@ class CodeStub BASE_EMBEDDED {
return -1;
}
+ virtual void PrintName(StringStream* stream);
+
protected:
static bool CanUseFPRegisters();
@@ -208,6 +213,11 @@ class CodeStub BASE_EMBEDDED {
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
+ // Returns a name for logging/debugging purposes.
+ SmartArrayPointer<const char> GetName();
+ virtual void PrintBaseName(StringStream* stream);
+ virtual void PrintState(StringStream* stream) { }
+
private:
// Perform bookkeeping required after code generation when stub code is
// initially generated.
@@ -236,10 +246,6 @@ class CodeStub BASE_EMBEDDED {
// If a stub uses a special cache override this.
virtual bool UseSpecialCache() { return false; }
- // Returns a name for logging/debugging purposes.
- SmartArrayPointer<const char> GetName();
- virtual void PrintName(StringStream* stream);
-
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -354,6 +360,9 @@ class HydrogenCodeStub : public CodeStub {
Handle<Code> GenerateLightweightMissCode(Isolate* isolate);
+ template<class StateType>
+ void TraceTransition(StateType from, StateType to);
+
private:
class MinorKeyBits: public BitField<int, 0, kStubMinorKeyBits - 1> {};
class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {};
@@ -384,6 +393,22 @@ class RuntimeCallHelper {
DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
};
+
+// TODO(bmeurer): Move to the StringAddStub declaration once we're
+// done with the translation to a hydrogen code stub.
+enum StringAddFlags {
+ // Omit both parameter checks.
+ STRING_ADD_CHECK_NONE = 0,
+ // Check left parameter.
+ STRING_ADD_CHECK_LEFT = 1 << 0,
+ // Check right parameter.
+ STRING_ADD_CHECK_RIGHT = 1 << 1,
+ // Check both parameters.
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
+ // Stub needs a frame before calling the runtime
+ STRING_ADD_ERECT_FRAME = 1 << 2
+};
+
} } // namespace v8::internal
#if V8_TARGET_ARCH_IA32
@@ -519,6 +544,117 @@ class FastNewBlockContextStub : public PlatformCodeStub {
int MinorKey() { return slots_; }
};
+class StoreGlobalStub : public HydrogenCodeStub {
+ public:
+ StoreGlobalStub(StrictModeFlag strict_mode, bool is_constant) {
+ bit_field_ = StrictModeBits::encode(strict_mode) |
+ IsConstantBits::encode(is_constant);
+ }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual Code::Kind GetCodeKind() const { return Code::STORE_IC; }
+ virtual InlineCacheState GetICState() { return MONOMORPHIC; }
+ virtual Code::ExtraICState GetExtraICState() { return bit_field_; }
+
+ bool is_constant() {
+ return IsConstantBits::decode(bit_field_);
+ }
+ void set_is_constant(bool value) {
+ bit_field_ = IsConstantBits::update(bit_field_, value);
+ }
+
+ Representation representation() {
+ return Representation::FromKind(RepresentationBits::decode(bit_field_));
+ }
+ void set_representation(Representation r) {
+ bit_field_ = RepresentationBits::update(bit_field_, r.kind());
+ }
+
+ private:
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
+ Major MajorKey() { return StoreGlobal; }
+
+ class StrictModeBits: public BitField<StrictModeFlag, 0, 1> {};
+ class IsConstantBits: public BitField<bool, 1, 1> {};
+ class RepresentationBits: public BitField<Representation::Kind, 2, 8> {};
+
+ int bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
+};
+
+
+class UnaryOpStub : public HydrogenCodeStub {
+ public:
+ // Stub without type info available -> construct uninitialized
+ explicit UnaryOpStub(Token::Value operation)
+ : HydrogenCodeStub(UNINITIALIZED), operation_(operation) { }
+ explicit UnaryOpStub(Code::ExtraICState ic_state) :
+ state_(StateBits::decode(ic_state)),
+ operation_(OperatorBits::decode(ic_state)) { }
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
+ virtual InlineCacheState GetICState() {
+ if (state_.Contains(GENERIC)) {
+ return MEGAMORPHIC;
+ } else if (state_.IsEmpty()) {
+ return PREMONOMORPHIC;
+ } else {
+ return MONOMORPHIC;
+ }
+ }
+ virtual Code::ExtraICState GetExtraICState() {
+ return OperatorBits::encode(operation_) |
+ StateBits::encode(state_.ToIntegral());
+ }
+
+ Token::Value operation() { return operation_; }
+ Handle<JSFunction> ToJSFunction(Isolate* isolate);
+ Builtins::JavaScript ToJSBuiltin();
+
+ void UpdateStatus(Handle<Object> object);
+ MaybeObject* Result(Handle<Object> object, Isolate* isolate);
+ Handle<Code> GenerateCode();
+ Handle<Type> GetType(Isolate* isolate);
+
+ protected:
+ void PrintState(StringStream* stream);
+ void PrintBaseName(StringStream* stream);
+
+ private:
+ enum UnaryOpType {
+ SMI,
+ HEAP_NUMBER,
+ GENERIC,
+ NUMBER_OF_TYPES
+ };
+
+ class State : public EnumSet<UnaryOpType, byte> {
+ public:
+ State() : EnumSet<UnaryOpType, byte>() { }
+ explicit State(byte bits) : EnumSet<UnaryOpType, byte>(bits) { }
+ void Print(StringStream* stream) const;
+ };
+
+ class StateBits : public BitField<int, 0, NUMBER_OF_TYPES> { };
+ class OperatorBits : public BitField<Token::Value, NUMBER_OF_TYPES, 8> { };
+
+ State state_;
+ Token::Value operation_;
+
+ virtual CodeStub::Major MajorKey() { return UnaryOp; }
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
+};
+
class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
@@ -620,6 +756,28 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
};
+class CreateAllocationSiteStub : public HydrogenCodeStub {
+ public:
+ explicit CreateAllocationSiteStub() { }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual bool IsPregenerated() { return true; }
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ Major MajorKey() { return CreateAllocationSite; }
+ int NotMissMinorKey() { return 0; }
+
+ DISALLOW_COPY_AND_ASSIGN(CreateAllocationSiteStub);
+};
+
+
class InstanceofStub: public PlatformCodeStub {
public:
enum Flags {
@@ -1119,51 +1277,17 @@ class ICCompareStub: public PlatformCodeStub {
class CompareNilICStub : public HydrogenCodeStub {
public:
- enum CompareNilType {
- UNDEFINED,
- NULL_TYPE,
- MONOMORPHIC_MAP,
- UNDETECTABLE,
- GENERIC,
- NUMBER_OF_TYPES
- };
+ Handle<Type> GetType(Isolate* isolate, Handle<Map> map = Handle<Map>());
+ Handle<Type> GetInputType(Isolate* isolate, Handle<Map> map);
- class State : public EnumSet<CompareNilType, byte> {
- public:
- State() : EnumSet<CompareNilType, byte>(0) { }
- explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
-
- static State Generic() {
- State set;
- set.Add(UNDEFINED);
- set.Add(NULL_TYPE);
- set.Add(UNDETECTABLE);
- set.Add(GENERIC);
- return set;
- }
-
- void Print(StringStream* stream) const;
- void TraceTransition(State to) const;
- };
-
- static Handle<Type> StateToType(
- Isolate* isolate, State state, Handle<Map> map = Handle<Map>());
-
- // At most 6 different types can be distinguished, because the Code object
- // only has room for a single byte to hold a set and there are two more
- // boolean flags we need to store. :-P
- STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
-
- CompareNilICStub(NilValue nil, State state = State())
- : nil_value_(nil), state_(state) {
- }
+ explicit CompareNilICStub(NilValue nil) : nil_value_(nil) { }
CompareNilICStub(Code::ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
- : HydrogenCodeStub(init_state) {
- nil_value_ = NilValueField::decode(ic_state);
- state_ = State(ExtractTypesFromExtraICState(ic_state));
- }
+ : HydrogenCodeStub(init_state),
+ nil_value_(NilValueField::decode(ic_state)),
+ state_(State(TypesField::decode(ic_state))) {
+ }
static Handle<Code> GetUninitialized(Isolate* isolate,
NilValue nil) {
@@ -1182,7 +1306,7 @@ class CompareNilICStub : public HydrogenCodeStub {
}
virtual InlineCacheState GetICState() {
- if (state_ == State::Generic()) {
+ if (state_.Contains(GENERIC)) {
return MEGAMORPHIC;
} else if (state_.Contains(MONOMORPHIC_MAP)) {
return MONOMORPHIC;
@@ -1195,35 +1319,49 @@ class CompareNilICStub : public HydrogenCodeStub {
Handle<Code> GenerateCode();
- // extra ic state = nil_value | type_n-1 | ... | type_0
virtual Code::ExtraICState GetExtraICState() {
- return NilValueField::encode(nil_value_) | state_.ToIntegral();
- }
- static byte ExtractTypesFromExtraICState(Code::ExtraICState state) {
- return state & ((1 << NUMBER_OF_TYPES) - 1);
- }
- static NilValue ExtractNilValueFromExtraICState(Code::ExtraICState state) {
- return NilValueField::decode(state);
+ return NilValueField::encode(nil_value_) |
+ TypesField::encode(state_.ToIntegral());
}
- void Record(Handle<Object> object);
+ void UpdateStatus(Handle<Object> object);
bool IsMonomorphic() const { return state_.Contains(MONOMORPHIC_MAP); }
NilValue GetNilValue() const { return nil_value_; }
- State GetState() const { return state_; }
void ClearState() { state_.RemoveAll(); }
- virtual void PrintName(StringStream* stream);
+ virtual void PrintState(StringStream* stream);
+ virtual void PrintBaseName(StringStream* stream);
private:
friend class CompareNilIC;
+ enum CompareNilType {
+ UNDEFINED,
+ NULL_TYPE,
+ MONOMORPHIC_MAP,
+ GENERIC,
+ NUMBER_OF_TYPES
+ };
+
+ // At most 6 different types can be distinguished, because the Code object
+ // only has room for a single byte to hold a set and there are two more
+ // boolean flags we need to store. :-P
+ STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
+
+ class State : public EnumSet<CompareNilType, byte> {
+ public:
+ State() : EnumSet<CompareNilType, byte>(0) { }
+ explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
+
+ void Print(StringStream* stream) const;
+ };
+
CompareNilICStub(NilValue nil, InitializationState init_state)
- : HydrogenCodeStub(init_state) {
- nil_value_ = nil;
- }
+ : HydrogenCodeStub(init_state), nil_value_(nil) { }
- class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES, 1> {};
+ class NilValueField : public BitField<NilValue, 0, 1> {};
+ class TypesField : public BitField<byte, 1, NUMBER_OF_TYPES> {};
virtual CodeStub::Major MajorKey() { return CompareNilIC; }
virtual int NotMissMinorKey() { return GetExtraICState(); }
@@ -1625,6 +1763,60 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
};
+class DoubleToIStub : public PlatformCodeStub {
+ public:
+ DoubleToIStub(Register source,
+ Register destination,
+ int offset,
+ bool is_truncating) : bit_field_(0) {
+ bit_field_ = SourceRegisterBits::encode(source.code_) |
+ DestinationRegisterBits::encode(destination.code_) |
+ OffsetBits::encode(offset) |
+ IsTruncatingBits::encode(is_truncating);
+ }
+
+ Register source() {
+ Register result = { SourceRegisterBits::decode(bit_field_) };
+ return result;
+ }
+
+ Register destination() {
+ Register result = { DestinationRegisterBits::decode(bit_field_) };
+ return result;
+ }
+
+ bool is_truncating() {
+ return IsTruncatingBits::decode(bit_field_);
+ }
+
+ int offset() {
+ return OffsetBits::decode(bit_field_);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ static const int kBitsPerRegisterNumber = 6;
+ STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
+ class SourceRegisterBits:
+ public BitField<int, 0, kBitsPerRegisterNumber> {}; // NOLINT
+ class DestinationRegisterBits:
+ public BitField<int, kBitsPerRegisterNumber,
+ kBitsPerRegisterNumber> {}; // NOLINT
+ class IsTruncatingBits:
+ public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {}; // NOLINT
+ class OffsetBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
+
+ Major MajorKey() { return DoubleToI; }
+ int MinorKey() { return bit_field_; }
+
+ int bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(DoubleToIStub);
+};
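DoubleToIStub packs two 6-bit register numbers, a 3-bit offset, and the truncation flag into one integer, the same BitField pattern every stub in this header uses for its minor key. A minimal reimplementation of that encode/decode/update contract (V8's real template lives in src/utils.h; this copy is for illustration only):

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitFieldModel {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
      static uint32_t update(uint32_t field, T value) {
        return (field & ~kMask) | encode(value);
      }
    };

    int main() {
      // Same layout as SourceRegisterBits / DestinationRegisterBits above.
      typedef BitFieldModel<int, 0, 6> Source;
      typedef BitFieldModel<int, 6, 6> Destination;
      uint32_t key = Source::encode(3) | Destination::encode(7);
      assert(Source::decode(key) == 3 && Destination::decode(key) == 7);
      key = Destination::update(key, 9);
      assert(Source::decode(key) == 3 && Destination::decode(key) == 9);
      return 0;
    }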
+
+
class KeyedLoadFastElementStub : public HydrogenCodeStub {
public:
KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
@@ -1755,7 +1947,7 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
- AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+ AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
AllocationSiteOverrideModeBits::encode(override_mode) |
ContextCheckModeBits::encode(context_mode);
@@ -2009,8 +2201,7 @@ class ToBooleanStub: public HydrogenCodeStub {
byte ToByte() const { return ToIntegral(); }
void Print(StringStream* stream) const;
- void TraceTransition(Types to) const;
- bool Record(Handle<Object> object);
+ bool UpdateStatus(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const;
bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
@@ -2023,7 +2214,7 @@ class ToBooleanStub: public HydrogenCodeStub {
explicit ToBooleanStub(Code::ExtraICState state)
: types_(static_cast<byte>(state)) { }
- bool Record(Handle<Object> object);
+ bool UpdateStatus(Handle<Object> object);
Types GetTypes() { return types_; }
virtual Handle<Code> GenerateCode();
@@ -2032,7 +2223,7 @@ class ToBooleanStub: public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
- virtual void PrintName(StringStream* stream);
+ virtual void PrintState(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -2070,13 +2261,59 @@ class ToBooleanStub: public HydrogenCodeStub {
};
-class ElementsTransitionAndStoreStub : public PlatformCodeStub {
+class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
public:
- ElementsTransitionAndStoreStub(ElementsKind from,
- ElementsKind to,
+ ElementsTransitionAndStoreStub(ElementsKind from_kind,
+ ElementsKind to_kind,
bool is_jsarray,
- StrictModeFlag strict_mode,
KeyedAccessStoreMode store_mode)
+ : from_kind_(from_kind),
+ to_kind_(to_kind),
+ is_jsarray_(is_jsarray),
+ store_mode_(store_mode) {}
+
+ ElementsKind from_kind() const { return from_kind_; }
+ ElementsKind to_kind() const { return to_kind_; }
+ bool is_jsarray() const { return is_jsarray_; }
+ KeyedAccessStoreMode store_mode() const { return store_mode_; }
+
+ Handle<Code> GenerateCode();
+
+ void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ class FromBits: public BitField<ElementsKind, 0, 8> {};
+ class ToBits: public BitField<ElementsKind, 8, 8> {};
+ class IsJSArrayBits: public BitField<bool, 16, 1> {};
+ class StoreModeBits: public BitField<KeyedAccessStoreMode, 17, 4> {};
+
+ Major MajorKey() { return ElementsTransitionAndStore; }
+ int NotMissMinorKey() {
+ return FromBits::encode(from_kind_) |
+ ToBits::encode(to_kind_) |
+ IsJSArrayBits::encode(is_jsarray_) |
+ StoreModeBits::encode(store_mode_);
+ }
+
+ ElementsKind from_kind_;
+ ElementsKind to_kind_;
+ bool is_jsarray_;
+ KeyedAccessStoreMode store_mode_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
+};
+
+
+// TODO(bmeurer): Remove this when compiled transitions are enabled.
+class ElementsTransitionAndStorePlatformStub : public PlatformCodeStub {
+ public:
+ ElementsTransitionAndStorePlatformStub(ElementsKind from,
+ ElementsKind to,
+ bool is_jsarray,
+ StrictModeFlag strict_mode,
+ KeyedAccessStoreMode store_mode)
: from_(from),
to_(to),
is_jsarray_(is_jsarray),
@@ -2107,7 +2344,7 @@ class ElementsTransitionAndStoreStub : public PlatformCodeStub {
StrictModeFlag strict_mode_;
KeyedAccessStoreMode store_mode_;
- DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
+ DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStorePlatformStub);
};
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 53ff2e1a1a..ea20296916 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -97,10 +97,10 @@ UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
// If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
- // |allocation_site_info_found| may be NULL.
+ // |allocation_memento_found| may be NULL.
static void GenerateMapChangeElementsTransition(MacroAssembler* masm,
AllocationSiteMode mode,
- Label* allocation_site_info_found);
+ Label* allocation_memento_found);
static void GenerateSmiToDouble(MacroAssembler* masm,
AllocationSiteMode mode,
Label* fail);
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index c5604ab30f..63ddbbb966 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -34,6 +34,7 @@
var $Set = global.Set;
var $Map = global.Map;
var $WeakMap = global.WeakMap;
+var $WeakSet = global.WeakSet;
// Global sentinel to be used instead of undefined keys, which are not
// supported internally but required for Harmony sets and maps.
@@ -240,7 +241,7 @@ SetUpMap();
function WeakMapConstructor() {
if (%_IsConstructCall()) {
- %WeakMapInitialize(this);
+ %WeakCollectionInitialize(this);
} else {
return new $WeakMap();
}
@@ -255,7 +256,7 @@ function WeakMapGet(key) {
if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return %WeakMapGet(this, key);
+ return %WeakCollectionGet(this, key);
}
@@ -267,7 +268,7 @@ function WeakMapSet(key, value) {
if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return %WeakMapSet(this, key, value);
+ return %WeakCollectionSet(this, key, value);
}
@@ -279,7 +280,7 @@ function WeakMapHas(key) {
if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return %WeakMapHas(this, key);
+ return %WeakCollectionHas(this, key);
}
@@ -291,7 +292,7 @@ function WeakMapDelete(key) {
if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return %WeakMapDelete(this, key);
+ return %WeakCollectionDelete(this, key);
}
@@ -301,7 +302,7 @@ function WeakMapClear() {
['WeakMap.prototype.clear', this]);
}
// Replace the internal table with a new empty table.
- %WeakMapInitialize(this);
+ %WeakCollectionInitialize(this);
}
@@ -325,3 +326,82 @@ function SetUpWeakMap() {
}
SetUpWeakMap();
+
+
+// -------------------------------------------------------------------
+// Harmony WeakSet
+
+function WeakSetConstructor() {
+ if (%_IsConstructCall()) {
+ %WeakCollectionInitialize(this);
+ } else {
+ return new $WeakSet();
+ }
+}
+
+
+function WeakSetAdd(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.add', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionSet(this, value, true);
+}
+
+
+function WeakSetHas(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.has', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionHas(this, value);
+}
+
+
+function WeakSetDelete(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.delete', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionDelete(this, value);
+}
+
+
+function WeakSetClear() {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakSet() {
+ %CheckIsBootstrapping();
+
+ %SetCode($WeakSet, WeakSetConstructor);
+ %FunctionSetPrototype($WeakSet, new $Object());
+ %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the WeakSet prototype object.
+ InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
+ "add", WeakSetAdd,
+ "has", WeakSetHas,
+ "delete", WeakSetDelete,
+ "clear", WeakSetClear
+ ));
+}
+
+SetUpWeakSet();
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 7ace2f7db0..18c82e95fd 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -86,6 +86,7 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
return result;
}
+
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 8edb41d724..c2995773e3 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -449,6 +449,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
return SetLastStatus(SUCCEEDED);
}
+
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
@@ -564,8 +565,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
if (info->is_eval()) {
StackTraceFrameIterator it(isolate);
if (!it.done()) {
- script->set_eval_from_shared(
- JSFunction::cast(it.frame()->function())->shared());
+ script->set_eval_from_shared(it.frame()->function()->shared());
Code* code = it.frame()->LookupCode();
int offset = static_cast<int>(
it.frame()->pc() - code->instruction_start());
@@ -1199,9 +1199,9 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
Handle<Code> code = info->code();
if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
return;
+ int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
+ USE(line_num);
if (script->name()->IsString()) {
- int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
- USE(line_num);
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
@@ -1215,7 +1215,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*code,
*shared,
info,
- shared->DebugName()));
+ info->isolate()->heap()->empty_string(),
+ line_num));
}
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 161f40458c..332d575dc2 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -560,8 +560,6 @@ class OptimizingCompiler: public ZoneObject {
class Compiler : public AllStatic {
public:
- static const int kMaxInliningLevels = 3;
-
// Call count before primitive functions trigger their own optimization.
static const int kCallsUntilPrimitiveOpt = 200;
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 5edbc5ac2d..0fddfdf505 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -88,6 +88,7 @@ JSObject* Context::global_proxy() {
return native_context()->global_proxy_object();
}
+
void Context::set_global_proxy(JSObject* object) {
native_context()->set_global_proxy_object(object);
}
@@ -123,7 +124,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
if (context->IsNativeContext() ||
context->IsWithContext() ||
(context->IsFunctionContext() && context->has_extension())) {
- Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
+ Handle<JSReceiver> object(
+ JSReceiver::cast(context->extension()), isolate);
// Context extension objects need to behave as if they have no
// prototype. So even if we want to follow prototype chains, we need
// to only do a local lookup for context extension objects.
@@ -133,6 +135,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
} else {
*attributes = object->GetPropertyAttribute(*name);
}
+ if (isolate->has_pending_exception()) return Handle<Object>();
+
if (*attributes != ABSENT) {
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 595ae9ed5b..2f0a399d1a 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -515,6 +515,32 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
end,
false,
allow_trailing_junk);
+
+ // It could be an explicit octal value.
+ } else if ((flags & ALLOW_OCTAL) && (*current == 'o' || *current == 'O')) {
+ ++current;
+ if (current == end || !isDigit(*current, 8) || sign != NONE) {
+ return JunkStringValue(); // "0o".
+ }
+
+ return InternalStringToIntDouble<3>(unicode_cache,
+ current,
+ end,
+ false,
+ allow_trailing_junk);
+
+ // It could be a binary value.
+ } else if ((flags & ALLOW_BINARY) && (*current == 'b' || *current == 'B')) {
+ ++current;
+ if (current == end || !isBinaryDigit(*current) || sign != NONE) {
+ return JunkStringValue(); // "0b".
+ }
+
+ return InternalStringToIntDouble<1>(unicode_cache,
+ current,
+ end,
+ false,
+ allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
@@ -524,7 +550,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
}
}
- bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
+ bool octal = leading_zero && (flags & ALLOW_IMPLICIT_OCTAL) != 0;
// Copy significant digits of the integer part (if any) to the buffer.
while (*current >= '0' && *current <= '9') {
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 1fbb5f1182..7aa2d3fb3a 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -52,6 +52,11 @@ inline bool isDigit(int x, int radix) {
}
+inline bool isBinaryDigit(int x) {
+ return x == '0' || x == '1';
+}
+
+
// The fast double-to-(unsigned-)int conversion routine does not guarantee
// rounding towards zero.
// For NaN and values outside the int range, return INT_MIN or INT_MAX.
@@ -108,8 +113,10 @@ inline uint32_t DoubleToUint32(double x) {
enum ConversionFlags {
NO_FLAGS = 0,
ALLOW_HEX = 1,
- ALLOW_OCTALS = 2,
- ALLOW_TRAILING_JUNK = 4
+ ALLOW_OCTAL = 2,
+ ALLOW_IMPLICIT_OCTAL = 4,
+ ALLOW_BINARY = 8,
+ ALLOW_TRAILING_JUNK = 16
};
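A note on the conversions-inl.h hunk above: the template argument to InternalStringToIntDouble is the number of bits per digit — log2 of the radix — which is why the new binary branch instantiates <1> and the explicit-octal branch <3>. A standalone sketch of the prefix dispatch, simplified to NUL-terminated strings (the real code is templated over string iterators and handles sign and trailing junk):

    #include <cassert>

    bool IsBinaryDigit(int c) { return c == '0' || c == '1'; }

    // Accumulate digits of a power-of-two radix; radix == 1 << bits_per_digit.
    double StringToIntDouble(const char* p, int bits_per_digit) {
      double value = 0;
      for (; *p != '\0'; ++p) {
        value = value * (1 << bits_per_digit) + (*p - '0');
      }
      return value;
    }

    double ParsePrefixed(const char* s) {
      if (s[0] == '0' && (s[1] == 'b' || s[1] == 'B') && IsBinaryDigit(s[2])) {
        return StringToIntDouble(s + 2, 1);  // ALLOW_BINARY path
      }
      if (s[0] == '0' && (s[1] == 'o' || s[1] == 'O')) {
        return StringToIntDouble(s + 2, 3);  // explicit ALLOW_OCTAL path
      }
      return -1;  // hex, decimal, and junk handling elided
    }

    int main() {
      assert(ParsePrefixed("0b101") == 5);
      assert(ParsePrefixed("0o17") == 15);
      return 0;
    }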
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index fa192baed2..183941206e 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -56,6 +56,7 @@ void* Histogram::CreateHistogram() const {
CreateHistogram(name_, min_, max_, num_buckets_);
}
+
// Start the timer.
void HistogramTimer::Start() {
if (Enabled()) {
@@ -67,6 +68,7 @@ void HistogramTimer::Start() {
}
}
+
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (Enabled()) {
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index c3cc27c770..bafea0679f 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -70,7 +70,7 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
TickSampleEventRecord* evt =
- new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
+ new(ticks_buffer_.Enqueue()) TickSampleEventRecord(last_code_event_id_);
return &evt->sample;
}
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index b3800f5877..d3fadb52d4 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -52,18 +52,18 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
- enqueue_order_(0) {
+ last_code_event_id_(0), last_processed_code_event_id_(0) {
}
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
- event.generic.order = ++enqueue_order_;
+ event.generic.order = ++last_code_event_id_;
events_buffer_.Enqueue(event);
}
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
- TickSampleEventRecord record(enqueue_order_);
+ TickSampleEventRecord record(last_code_event_id_);
TickSample* sample = &record.sample;
sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
@@ -76,7 +76,14 @@ void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
}
-bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
+void ProfilerEventsProcessor::StopSynchronously() {
+ if (!running_) return;
+ running_ = false;
+ Join();
+}
+
+
+bool ProfilerEventsProcessor::ProcessCodeEvent() {
CodeEventsContainer record;
if (events_buffer_.Dequeue(&record)) {
switch (record.generic.type) {
@@ -90,17 +97,18 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
#undef PROFILER_TYPE_CASE
default: return true; // Skip record.
}
- *dequeue_order = record.generic.order;
+ last_processed_code_event_id_ = record.generic.order;
return true;
}
return false;
}
-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
+bool ProfilerEventsProcessor::ProcessTicks() {
while (true) {
if (!ticks_from_vm_buffer_.IsEmpty()
- && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
+ && ticks_from_vm_buffer_.Peek()->order ==
+ last_processed_code_event_id_) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
@@ -115,56 +123,46 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
// will get far behind, a record may be modified right under its
// feet.
TickSampleEventRecord record = *rec;
- if (record.order == dequeue_order) {
- // A paranoid check to make sure that we don't get a memory overrun
- // in case of frames_count having a wild value.
- if (record.sample.frames_count < 0
- || record.sample.frames_count > TickSample::kMaxFramesCount)
- record.sample.frames_count = 0;
- generator_->RecordTickSample(record.sample);
- ticks_buffer_.FinishDequeue();
- } else {
- return true;
- }
+ if (record.order != last_processed_code_event_id_) return true;
+
+ // A paranoid check to make sure that we don't get a memory overrun
+ // in case of frames_count having a wild value.
+ if (record.sample.frames_count < 0
+ || record.sample.frames_count > TickSample::kMaxFramesCount)
+ record.sample.frames_count = 0;
+ generator_->RecordTickSample(record.sample);
+ ticks_buffer_.FinishDequeue();
}
}
void ProfilerEventsProcessor::Run() {
- unsigned dequeue_order = 0;
-
while (running_) {
// Drain all ticks available for the current code event.
- if (ProcessTicks(dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
+ if (ProcessTicks()) {
+ // All ticks matching last_processed_code_event_id_ have been handled;
// proceed to the next code event.
- ProcessCodeEvent(&dequeue_order);
+ ProcessCodeEvent();
}
YieldCPU();
}
// Process remaining tick events.
ticks_buffer_.FlushResidualRecords();
- // Perform processing until we have tick events, skip remaining code events.
- while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
+ do {
+ ProcessTicks();
+ } while (ProcessCodeEvent());
}
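The profiler rewrite replaces the dequeue_order out-parameter with two monotonically growing ids, which makes the synchronisation rule explicit: a tick sample stamped with id N may only be consumed once every code event up to N has been applied to the code map. A toy model of that rule, with std::queue standing in for the lock-free buffers:

    #include <cassert>
    #include <queue>

    struct CodeEventModel { unsigned id; };
    struct TickModel { unsigned id; };  // last code event id at sample time

    int main() {
      std::queue<CodeEventModel> code_events;
      std::queue<TickModel> ticks;
      code_events.push(CodeEventModel{1});
      code_events.push(CodeEventModel{2});
      ticks.push(TickModel{1});  // sampled between events 1 and 2
      ticks.push(TickModel{2});

      unsigned last_processed_code_event_id = 0;
      int consumed = 0;
      while (!code_events.empty() || !ticks.empty()) {
        // ProcessTicks(): only consume ticks that do not outrun the code map.
        while (!ticks.empty() &&
               ticks.front().id <= last_processed_code_event_id) {
          ticks.pop();
          ++consumed;
        }
        // ProcessCodeEvent(): apply one more event to the (modelled) map.
        if (code_events.empty()) break;  // nothing left to catch up on
        last_processed_code_event_id = code_events.front().id;
        code_events.pop();
      }
      assert(consumed == 2 && ticks.empty());
      return 0;
    }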
int CpuProfiler::GetProfilesCount() {
// The count of profiles doesn't depend on a security token.
- return profiles_->Profiles(TokenEnumerator::kNoSecurityToken)->length();
-}
-
-
-CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
- const int token = token_enumerator_->GetTokenId(security_token);
- return profiles_->Profiles(token)->at(index);
+ return profiles_->profiles()->length();
}
-CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
- const int token = token_enumerator_->GetTokenId(security_token);
- return profiles_->GetProfile(token, uid);
+CpuProfile* CpuProfiler::GetProfile(int index) {
+ return profiles_->profiles()->at(index);
}
@@ -186,11 +184,6 @@ void CpuProfiler::DeleteProfile(CpuProfile* profile) {
}
-bool CpuProfiler::HasDetachedProfiles() {
- return profiles_->HasDetachedProfiles();
-}
-
-
static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag) {
return FLAG_prof_browser_mode
&& (tag != Logger::CALLBACK_TAG
@@ -208,8 +201,7 @@ void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
rec->start = entry_point;
rec->entry = profiles_->NewCodeEntry(
Logger::CALLBACK_TAG,
- profiles_->GetName(name),
- TokenEnumerator::kInheritsSecurityToken);
+ profiles_->GetName(name));
rec->size = 1;
rec->shared = NULL;
processor_->Enqueue(evt_rec);
@@ -280,7 +272,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->entry = profiles_->NewCodeEntry(
tag,
profiles_->GetFunctionName(shared->DebugName()),
- TokenEnumerator::kNoSecurityToken,
CodeEntry::kEmptyNamePrefix,
profiles_->GetName(source),
line);
@@ -306,7 +297,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->entry = profiles_->NewCodeEntry(
tag,
profiles_->GetName(args_count),
- TokenEnumerator::kInheritsSecurityToken,
"args_count: ");
rec->size = code->ExecutableSize();
rec->shared = NULL;
@@ -345,7 +335,6 @@ void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
rec->entry = profiles_->NewCodeEntry(
Logger::CALLBACK_TAG,
profiles_->GetName(name),
- TokenEnumerator::kInheritsSecurityToken,
"get ");
rec->size = 1;
rec->shared = NULL;
@@ -361,7 +350,6 @@ void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
rec->entry = profiles_->NewCodeEntry(
Logger::REG_EXP_TAG,
profiles_->GetName(source),
- TokenEnumerator::kInheritsSecurityToken,
"RegExp: ");
rec->size = code->ExecutableSize();
processor_->Enqueue(evt_rec);
@@ -376,7 +364,6 @@ void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
rec->entry = profiles_->NewCodeEntry(
Logger::CALLBACK_TAG,
profiles_->GetName(name),
- TokenEnumerator::kInheritsSecurityToken,
"set ");
rec->size = 1;
rec->shared = NULL;
@@ -388,7 +375,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
profiles_(new CpuProfilesCollection()),
next_profile_uid_(1),
- token_enumerator_(new TokenEnumerator()),
generator_(NULL),
processor_(NULL),
need_to_stop_sampler_(false),
@@ -403,7 +389,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
: isolate_(isolate),
profiles_(test_profiles),
next_profile_uid_(1),
- token_enumerator_(new TokenEnumerator()),
generator_(test_generator),
processor_(test_processor),
need_to_stop_sampler_(false),
@@ -413,7 +398,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
CpuProfiler::~CpuProfiler() {
ASSERT(!is_profiling_);
- delete token_enumerator_;
delete profiles_;
}
@@ -423,6 +407,7 @@ void CpuProfiler::ResetProfiles() {
profiles_ = new CpuProfilesCollection();
}
+
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
if (profiles_->StartProfiling(title, next_profile_uid_++, record_samples)) {
StartProcessorIfNotStarted();
@@ -469,10 +454,7 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
if (!is_profiling_) return NULL;
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile(title);
- CpuProfile* result =
- profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
- title,
- actual_sampling_rate);
+ CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate);
if (result != NULL) {
result->Print();
}
@@ -480,13 +462,12 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
}
-CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
+CpuProfile* CpuProfiler::StopProfiling(String* title) {
if (!is_profiling_) return NULL;
const double actual_sampling_rate = generator_->actual_sampling_rate();
const char* profile_title = profiles_->GetName(title);
StopProcessorIfLastProfile(profile_title);
- int token = token_enumerator_->GetTokenId(security_token);
- return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
+ return profiles_->StopProfiling(profile_title, actual_sampling_rate);
}
@@ -504,8 +485,7 @@ void CpuProfiler::StopProcessor() {
need_to_stop_sampler_ = false;
}
is_profiling_ = false;
- processor_->Stop();
- processor_->Join();
+ processor_->StopSynchronously();
delete processor_;
delete generator_;
processor_ = NULL;
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 77fdb0681b..44e63fed49 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -44,7 +44,6 @@ class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
-class TokenEnumerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
@@ -111,18 +110,8 @@ class TickSampleEventRecord {
// The parameterless constructor is used when we dequeue data from
// the ticks buffer.
TickSampleEventRecord() { }
- explicit TickSampleEventRecord(unsigned order)
- : filler(1),
- order(order) {
- ASSERT(filler != SamplingCircularQueue::kClear);
- }
+ explicit TickSampleEventRecord(unsigned order) : order(order) { }
- // The first machine word of a TickSampleEventRecord must not ever
- // become equal to SamplingCircularQueue::kClear. As both order and
- // TickSample's first field are not reliable in this sense (order
- // can overflow, TickSample can have all fields reset), we are
- // forced to use an artificial filler field.
- int filler;
unsigned order;
TickSample sample;
@@ -156,7 +145,7 @@ class ProfilerEventsProcessor : public Thread {
// Thread control.
virtual void Run();
- inline void Stop() { running_ = false; }
+ void StopSynchronously();
INLINE(bool running()) { return running_; }
void Enqueue(const CodeEventsContainer& event);
@@ -171,15 +160,16 @@ class ProfilerEventsProcessor : public Thread {
private:
// Called from events processing thread (Run() method.)
- bool ProcessCodeEvent(unsigned* dequeue_order);
- bool ProcessTicks(unsigned dequeue_order);
+ bool ProcessCodeEvent();
+ bool ProcessTicks();
ProfileGenerator* generator_;
bool running_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- unsigned enqueue_order_;
+ unsigned last_code_event_id_;
+ unsigned last_processed_code_event_id_;
};
@@ -208,13 +198,11 @@ class CpuProfiler {
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
- CpuProfile* StopProfiling(Object* security_token, String* title);
+ CpuProfile* StopProfiling(String* title);
int GetProfilesCount();
- CpuProfile* GetProfile(Object* security_token, int index);
- CpuProfile* FindProfile(Object* security_token, unsigned uid);
+ CpuProfile* GetProfile(int index);
void DeleteAllProfiles();
void DeleteProfile(CpuProfile* profile);
- bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler.)
TickSample* TickSampleEvent();
@@ -251,6 +239,9 @@ class CpuProfiler {
return &is_profiling_;
}
+ ProfileGenerator* generator() const { return generator_; }
+ ProfilerEventsProcessor* processor() const { return processor_; }
+
private:
void StartProcessorIfNotStarted();
void StopProcessorIfLastProfile(const char* title);
@@ -261,7 +252,6 @@ class CpuProfiler {
Isolate* isolate_;
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
- TokenEnumerator* token_enumerator_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index aac7aab156..9a72518f4c 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -50,14 +50,12 @@ void PrintPrompt() {
}
-void HandleDebugEvent(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data) {
+void HandleDebugEvent(const Debug::EventDetails& event_details) {
// TODO(svenpanne) There should be a way to retrieve this in the callback.
Isolate* isolate = Isolate::GetCurrent();
HandleScope scope(isolate);
+ DebugEvent event = event_details.GetEvent();
// Check for handled event.
if (event != Break && event != Exception && event != AfterCompile) {
return;
@@ -67,6 +65,7 @@ void HandleDebugEvent(DebugEvent event,
// Get the toJSONProtocol function on the event and get the JSON format.
Local<String> to_json_fun_name = String::New("toJSONProtocol");
+ Handle<Object> event_data = event_details.GetEventData();
Local<Function> to_json_fun =
Local<Function>::Cast(event_data->Get(to_json_fun_name));
Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
@@ -91,6 +90,7 @@ void HandleDebugEvent(DebugEvent event,
// Get the debug command processor.
Local<String> fun_name = String::New("debugCommandProcessor");
+ Handle<Object> exec_state = event_details.GetExecutionState();
Local<Function> fun = Local<Function>::Cast(exec_state->Get(fun_name));
Local<Object> cmd_processor =
Local<Object>::Cast(fun->Call(exec_state, 0, NULL));
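For embedders migrating to the consolidated callback, the single EventDetails
argument replaces the old four parameters; the accessors used above (GetEvent,
GetEventData, GetExecutionState) recover each of them on demand. A minimal
sketch of a new-style listener (Break handling only; assumes v8.h and
v8-debug.h are included):

void MyDebugListener(const v8::Debug::EventDetails& details) {
  v8::DebugEvent event = details.GetEvent();
  if (event != v8::Break) return;
  v8::Handle<v8::Object> event_data = details.GetEventData();
  v8::Handle<v8::Object> exec_state = details.GetExecutionState();
  // ... inspect event_data / exec_state exactly as the old callback did ...
}

// Registered through the "2" variant of the setter, as d8 does below:
//   v8::Debug::SetDebugEventListener2(MyDebugListener);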
diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h
index a6cea2a81b..2386b6bd6c 100644
--- a/deps/v8/src/d8-debug.h
+++ b/deps/v8/src/d8-debug.h
@@ -36,10 +36,7 @@
namespace v8 {
-void HandleDebugEvent(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data);
+void HandleDebugEvent(const Debug::EventDetails& event_details);
// Start the remote debugger connecting to a V8 debugger agent on the specified
// port.
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 65af987b42..e576e9cb37 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -810,7 +810,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the in-process debugger if requested.
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
- v8::Debug::SetDebugEventListener(HandleDebugEvent);
+ v8::Debug::SetDebugEventListener2(HandleDebugEvent);
}
#endif // ENABLE_DEBUGGER_SUPPORT
}
@@ -1087,6 +1087,7 @@ static void ReadBufferWeakCallback(v8::Isolate* isolate,
array_buffer->Dispose();
}
+
void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
String::Utf8Value filename(args[0]);
@@ -1581,6 +1582,7 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
+ v8::V8::InitializeICU();
#ifndef V8_SHARED
i::FLAG_harmony_array_buffer = true;
i::FLAG_harmony_typed_arrays = true;
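InitializeICU() is now the first V8 call in the shell's Main(). Embedders
building with i18n support presumably need the same one-time call before any
other V8 API; a minimal sketch:

#include "v8.h"

int main(int argc, char* argv[]) {
  // Must run once, early: ICU data has to be ready before locale-aware
  // code (and, per the change above, before option-dependent setup) runs.
  v8::V8::InitializeICU();
  // ... create isolates and contexts, run scripts ...
  return 0;
}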
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index 47a7cc0118..15d342dece 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -31,8 +31,9 @@
'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
+ 'v8_enable_i18n_support%': 0,
},
- 'includes': ['../build/common.gypi'],
+ 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
{
'target_name': 'd8',
@@ -78,6 +79,17 @@
'../src/third_party/vtune/v8vtune.gyp:v8_vtune',
],
}],
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
+ '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ ],
+ }],
+ ['OS=="win" and v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ ],
+ }],
],
},
{
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index 4a0721fe83..3964e81178 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -112,6 +112,7 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
return true;
}
+
bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
if (sign_ != kNone) {
if (hour_ == kNone) hour_ = 0;
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 07c1a0cce8..04f8a7a027 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -965,7 +965,7 @@ Object* Debug::Break(Arguments args) {
// Get the debug info (create it if it does not exist).
Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+ Handle<SharedFunctionInfo>(frame->function()->shared());
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
// Find the break point where execution has stopped.
@@ -1348,8 +1348,7 @@ void Debug::FloodHandlerWithOneShot() {
JavaScriptFrame* frame = it.frame();
if (frame->HasHandler()) {
      // Flood the function containing the catch block with break points.
- JSFunction* function = JSFunction::cast(frame->function());
- FloodWithOneShot(Handle<JSFunction>(function));
+ FloodWithOneShot(Handle<JSFunction>(frame->function()));
return;
}
}
@@ -1415,13 +1414,13 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// breakpoints.
frames_it.Advance();
// Fill the function to return to with one-shot break points.
- JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+ JSFunction* function = frames_it.frame()->function();
FloodWithOneShot(Handle<JSFunction>(function));
return;
}
// Get the debug info (create it if it does not exist).
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<JSFunction> function(frame->function());
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if ensuring debug info failed.
@@ -1486,15 +1485,14 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
frames_it.Advance();
}
// Skip builtin functions on the stack.
- while (!frames_it.done() &&
- JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) {
+ while (!frames_it.done() && frames_it.frame()->function()->IsBuiltin()) {
frames_it.Advance();
}
// Step out: If there is a JavaScript caller frame, we need to
// flood it with breakpoints.
if (!frames_it.done()) {
// Fill the function to return to with one-shot break points.
- JSFunction* function = JSFunction::cast(frames_it.frame()->function());
+ JSFunction* function = frames_it.frame()->function();
FloodWithOneShot(Handle<JSFunction>(function));
// Set target frame pointer.
ActivateStepOut(frames_it.frame());
@@ -1811,6 +1809,7 @@ void Debug::ClearStepping() {
thread_local_.step_count_ = 0;
}
+
// Clears all the one-shot break points that are currently set. Normally this
// function is called each time a break point is hit as one shot break points
// are used to support stepping.
@@ -1907,7 +1906,7 @@ static void CollectActiveFunctionsFromThread(
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized()) {
- List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
+ List<JSFunction*> functions(FLAG_max_inlining_levels + 1);
frame->GetFunctions(&functions);
for (int i = 0; i < functions.length(); i++) {
JSFunction* function = functions[i];
@@ -1915,7 +1914,7 @@ static void CollectActiveFunctionsFromThread(
function->shared()->code()->set_gc_metadata(active_code_marker);
}
} else if (frame->function()->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
active_functions->Add(Handle<JSFunction>(function));
function->shared()->code()->set_gc_metadata(active_code_marker);
@@ -1932,7 +1931,7 @@ static void RedirectActivationsToRecompiledCodeOnThread(
if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index f322e85b21..fd7c2829ad 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -43,7 +43,13 @@ namespace internal {
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
OS::CommitPageSize(),
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ NOT_EXECUTABLE,
+#else
EXECUTABLE,
+#endif
NULL);
}
@@ -186,7 +192,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
// Get the function and code from the frame.
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
Code* code = frame->LookupCode();
// Locate the deoptimization point in the code. As we are at a call the
@@ -542,6 +548,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
if (function->IsSmi()) {
function = NULL;
}
+ ASSERT(from != NULL);
if (function != NULL && function->IsOptimized()) {
function->shared()->increment_deopt_count();
if (bailout_type_ == Deoptimizer::SOFT) {
@@ -573,13 +580,11 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
switch (bailout_type_) {
case Deoptimizer::SOFT:
case Deoptimizer::EAGER:
- ASSERT(from_ == NULL);
- return function->code();
case Deoptimizer::LAZY: {
Code* compiled_code =
isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
return (compiled_code == NULL)
- ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_))
+ ? static_cast<Code*>(isolate_->FindCodeObject(from_))
: compiled_code;
}
case Deoptimizer::OSR: {
@@ -1609,7 +1614,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
JavaScriptFrame* frame = it->frame();
- Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate_);
+ Handle<JSFunction> function(frame->function(), isolate_);
Handle<JSObject> arguments;
for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
@@ -1619,11 +1624,9 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
if (arguments.is_null()) {
if (frame->has_adapted_arguments()) {
// Use the arguments adapter frame we just built to materialize the
- // arguments object. FunctionGetArguments can't throw an exception,
- // so cast away the doubt with an assert.
- arguments = Handle<JSObject>(JSObject::cast(
- Accessors::FunctionGetArguments(*function,
- NULL)->ToObjectUnchecked()));
+ // arguments object. FunctionGetArguments can't throw an exception.
+ arguments = Handle<JSObject>::cast(
+ Accessors::FunctionGetArguments(function));
values.RewindBy(length);
} else {
// Construct an arguments object and copy the parameters to a newly
@@ -2368,8 +2371,8 @@ void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
- uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
- if (loop_depth == loop_nesting_level) {
+ uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
+ if (static_cast<int>(loop_depth) == loop_nesting_level) {
// Loop back edge has the loop depth that we want to patch.
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
@@ -2400,8 +2403,8 @@ void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
- uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
- if (loop_depth <= loop_nesting_level) {
+ uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
+ if (static_cast<int>(loop_depth) <= loop_nesting_level) {
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
RevertInterruptCodeAt(unoptimized_code,
@@ -2432,13 +2435,13 @@ void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
- uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
- CHECK_LE(loop_depth, Code::kMaxLoopNestingMarker);
+ uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
+ CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- CHECK_EQ((loop_depth <= loop_nesting_level),
+ CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
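With loop_depth widened from uint8_t to uint32_t, each back-edge table entry
is now three 32-bit words: {ast id, pc offset, loop depth} (this matches
kBackEdgeEntrySize = 3 * kIntSize in the full-codegen.h hunk further down).
A hedged sketch of the resulting layout; struct and function names are
illustrative:

#include <stdint.h>

// Assumed layout from the hunks above: a 32-bit entry count, then packed
// entries of three 32-bit fields each.
struct BackEdgeEntrySketch {
  uint32_t ast_id;
  uint32_t pc_offset;
  uint32_t loop_depth;  // was a single byte before this change
};

inline const BackEdgeEntrySketch* BackEdgeAt(const uint8_t* table,
                                             uint32_t index) {
  const uint8_t* entries = table + sizeof(uint32_t);  // skip the count
  return reinterpret_cast<const BackEdgeEntrySketch*>(
      entries + index * sizeof(BackEdgeEntrySketch));
}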
@@ -3065,7 +3068,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
expression_stack_ = new Object*[expression_count_];
// Get the source position using the unoptimized code.
Address pc = reinterpret_cast<Address>(output_frame->GetPc());
- Code* code = Code::cast(deoptimizer->isolate()->heap()->FindCodeObject(pc));
+ Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
source_position_ = code->SourcePosition(pc);
for (int i = 0; i < expression_count_; i++) {
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 5ec1dcb77c..fa8ae1ffc8 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -360,6 +360,8 @@ void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
return 0;
}
+
+
void Disassembler::Decode(FILE* f, Code* code) {}
#endif // ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 7b1651a953..213aa35c85 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -83,6 +83,7 @@ ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
return fast_elements_kind_sequence.Get()[sequence_number];
}
+
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 38e7a3bec9..d7b9cf5d59 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -865,6 +865,7 @@ Object* Execution::DebugBreakHelper() {
return isolate->heap()->undefined_value();
}
+
void Execution::ProcessDebugMessages(bool debug_command_only) {
Isolate* isolate = Isolate::Current();
// Clear the debug command request flag.
diff --git a/deps/v8/src/extensions/i18n/break-iterator.cc b/deps/v8/src/extensions/i18n/break-iterator.cc
index 1225360fb7..0681e264ab 100644
--- a/deps/v8/src/extensions/i18n/break-iterator.cc
+++ b/deps/v8/src/extensions/i18n/break-iterator.cc
@@ -82,6 +82,7 @@ void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
object->Dispose(isolate);
}
+
// Throws a JavaScript exception.
static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
// Returns undefined, and schedules an exception to be thrown.
@@ -90,6 +91,7 @@ static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
"that is not a BreakIterator.")));
}
+
// Deletes the old value and sets the adopted text in the corresponding
// JavaScript object.
icu::UnicodeString* ResetAdoptedText(
diff --git a/deps/v8/src/extensions/i18n/collator.cc b/deps/v8/src/extensions/i18n/collator.cc
index 4ffa4145fb..61b1d63e5c 100644
--- a/deps/v8/src/extensions/i18n/collator.cc
+++ b/deps/v8/src/extensions/i18n/collator.cc
@@ -76,6 +76,7 @@ void Collator::DeleteCollator(v8::Isolate* isolate,
object->Dispose(isolate);
}
+
// Throws a JavaScript exception.
static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
// Returns undefined, and schedules an exception to be thrown.
@@ -84,11 +85,13 @@ static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
"that is not a Collator.")));
}
+
// When there's an ICU error, throw a JavaScript error with |message|.
static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
}
+
// static
void Collator::JSInternalCompare(
const v8::FunctionCallbackInfo<v8::Value>& args) {
diff --git a/deps/v8/src/extensions/i18n/i18n-extension.cc b/deps/v8/src/extensions/i18n/i18n-extension.cc
index eb7652eae8..1c77b8899f 100644
--- a/deps/v8/src/extensions/i18n/i18n-extension.cc
+++ b/deps/v8/src/extensions/i18n/i18n-extension.cc
@@ -108,6 +108,7 @@ v8::Handle<v8::FunctionTemplate> Extension::GetNativeFunction(
return v8::Handle<v8::FunctionTemplate>();
}
+
void Extension::Register() {
static Extension i18n_extension;
static v8::DeclareExtension extension_declaration(&i18n_extension);
diff --git a/deps/v8/src/extensions/i18n/i18n-utils.cc b/deps/v8/src/extensions/i18n/i18n-utils.cc
index d8d3c12aff..b720329f8b 100644
--- a/deps/v8/src/extensions/i18n/i18n-utils.cc
+++ b/deps/v8/src/extensions/i18n/i18n-utils.cc
@@ -42,6 +42,7 @@ void Utils::StrNCopy(char* dest, int length, const char* src) {
dest[length - 1] = '\0';
}
+
// static
bool Utils::V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
icu::UnicodeString* output) {
@@ -54,6 +55,7 @@ bool Utils::V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
return true;
}
+
// static
bool Utils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
@@ -74,6 +76,7 @@ bool Utils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
return false;
}
+
// static
bool Utils::ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
@@ -95,6 +98,7 @@ bool Utils::ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
return false;
}
+
// static
bool Utils::ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
@@ -116,6 +120,7 @@ bool Utils::ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
return false;
}
+
// static
void Utils::AsciiToUChar(const char* source,
int32_t source_length,
@@ -135,6 +140,7 @@ void Utils::AsciiToUChar(const char* source,
target[length - 1] = 0x0u;
}
+
// static
// Chrome Linux doesn't like static initializers in class, so we create
// template on demand.
@@ -153,6 +159,7 @@ v8::Local<v8::ObjectTemplate> Utils::GetTemplate(v8::Isolate* isolate) {
return v8::Local<v8::ObjectTemplate>::New(isolate, icu_template);
}
+
// static
// Chrome Linux doesn't like static initializers in class, so we create
// template on demand. This one has 2 internal fields.
diff --git a/deps/v8/src/extensions/i18n/locale.cc b/deps/v8/src/extensions/i18n/locale.cc
index b32cc30b16..6b6f9ac314 100644
--- a/deps/v8/src/extensions/i18n/locale.cc
+++ b/deps/v8/src/extensions/i18n/locale.cc
@@ -82,6 +82,7 @@ void JSCanonicalizeLanguageTag(
args.GetReturnValue().Set(v8::String::New(result));
}
+
void JSAvailableLocalesOf(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Expect service name which is a string.
if (args.Length() != 1 || !args[0]->IsString()) {
@@ -131,6 +132,7 @@ void JSAvailableLocalesOf(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(locales);
}
+
void JSGetDefaultICULocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
icu::Locale default_locale;
@@ -147,6 +149,7 @@ void JSGetDefaultICULocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(v8::String::New("und"));
}
+
void JSGetLanguageTagVariants(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
diff --git a/deps/v8/src/extensions/i18n/number-format.cc b/deps/v8/src/extensions/i18n/number-format.cc
index 2240b0846b..136471561c 100644
--- a/deps/v8/src/extensions/i18n/number-format.cc
+++ b/deps/v8/src/extensions/i18n/number-format.cc
@@ -148,10 +148,10 @@ void NumberFormat::JSInternalParse(
args.GetReturnValue().Set(result.getDouble());
return;
case icu::Formattable::kLong:
- args.GetReturnValue().Set(v8::Number::New(result.getLong()));
+ args.GetReturnValue().Set(result.getLong());
return;
case icu::Formattable::kInt64:
- args.GetReturnValue().Set(v8::Number::New(result.getInt64()));
+ args.GetReturnValue().Set(static_cast<double>(result.getInt64()));
return;
default:
return;
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 63b2379692..b135a9c670 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -178,6 +178,7 @@ Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
String);
}
+
// Internalized strings are created in the old generation (data space).
Handle<String> Factory::InternalizeString(Handle<String> string) {
CALL_HEAP_FUNCTION(isolate(),
@@ -185,6 +186,7 @@ Handle<String> Factory::InternalizeString(Handle<String> string) {
String);
}
+
Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->InternalizeOneByteString(string),
@@ -517,6 +519,14 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
}
+Handle<AllocationSite> Factory::NewAllocationSite() {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateAllocationSite(),
+ AllocationSite);
+}
+
+
Handle<Map> Factory::NewMap(InstanceType type,
int instance_size,
ElementsKind elements_kind) {
@@ -925,7 +935,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
initial_map->set_constructor(*function);
}
- SetPrototypeProperty(function, prototype);
+ JSFunction::SetPrototype(function, prototype);
return function;
}
@@ -1235,6 +1245,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
JSMessageObject);
}
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateSharedFunctionInfo(*name),
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 0cb7157729..dc7933aa20 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -243,6 +243,8 @@ class Factory {
Handle<PropertyCell> NewPropertyCell(Handle<Object> value);
+ Handle<AllocationSite> NewAllocationSite();
+
Handle<Map> NewMap(
InstanceType type,
int instance_size,
@@ -564,6 +566,82 @@ Handle<Object> Factory::NewNumberFromSize(size_t value,
}
+// Used to "safely" transition from pointer-based runtime code to Handle-based
+// runtime code. When a GC happens during the called Handle-based code, a
+// failure object is returned to the pointer-based code to cause it to abort
+// and re-trigger a GC of its own. Since this double GC will cause the
+// Handle-based code to be called twice, it must be idempotent.
+class IdempotentPointerToHandleCodeTrampoline {
+ public:
+ explicit IdempotentPointerToHandleCodeTrampoline(Isolate* isolate)
+ : isolate_(isolate) {}
+
+ template<typename R>
+ MUST_USE_RESULT MaybeObject* Call(R (*function)()) {
+ int collections = isolate_->heap()->gc_count();
+ (*function)();
+ return (collections == isolate_->heap()->gc_count())
+ ? isolate_->heap()->true_value()
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(R (*function)()) {
+ int collections = isolate_->heap()->gc_count();
+ Object* result = (*function)();
+ return (collections == isolate_->heap()->gc_count())
+ ? result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1>
+ MUST_USE_RESULT MaybeObject* Call(R (*function)(P1), P1 p1) {
+ int collections = isolate_->heap()->gc_count();
+ (*function)(p1);
+ return (collections == isolate_->heap()->gc_count())
+ ? isolate_->heap()->true_value()
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(
+ R (*function)(P1),
+ P1 p1) {
+ int collections = isolate_->heap()->gc_count();
+ Object* result = (*function)(p1);
+ return (collections == isolate_->heap()->gc_count())
+ ? result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1, typename P2>
+ MUST_USE_RESULT MaybeObject* Call(
+ R (*function)(P1, P2),
+ P1 p1,
+ P2 p2) {
+ int collections = isolate_->heap()->gc_count();
+ (*function)(p1, p2);
+ return (collections == isolate_->heap()->gc_count())
+ ? isolate_->heap()->true_value()
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ template<typename R, typename P1, typename P2>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(
+ R (*function)(P1, P2),
+ P1 p1,
+ P2 p2) {
+ int collections = isolate_->heap()->gc_count();
+ Object* result = (*function)(p1, p2);
+ return (collections == isolate_->heap()->gc_count())
+ ? result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
+ private:
+ Isolate* isolate_;
+};
+
} } // namespace v8::internal
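Usage of the trampoline is mechanical: wrap the Handle-based helper, and let
the RetryAfterGC failure propagate when a collection happened during the
call. A hedged sketch with a hypothetical helper DoHandleWork (not a real v8
function); it sits inside v8::internal, so no extra includes are shown:

// DoHandleWork is illustrative only. It must be idempotent, because the
// pointer-based caller will re-trigger a GC and invoke it a second time.
static void DoHandleWork(Handle<JSObject> object) {
  // ... allocate or mutate through handles; safe to run twice ...
}

MUST_USE_RESULT static MaybeObject* RunViaTrampoline(Isolate* isolate,
                                                     Handle<JSObject> object) {
  IdempotentPointerToHandleCodeTrampoline trampoline(isolate);
  // Returns heap->true_value() on success, or a Failure::RetryAfterGC()
  // that tells the caller to collect and call again.
  return trampoline.Call(&DoHandleWork, object);
}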
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index a0f907db34..63cf66313c 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -171,6 +171,8 @@ DEFINE_bool(harmony_array_buffer, false,
DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
+DEFINE_bool(harmony_numeric_literals, false,
+ "enable harmony numeric literals (0o77, 0b11)")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
@@ -180,6 +182,7 @@ DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony, harmony_iteration)
+DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
// TODO[dslomov] add harmony => harmony_typed_arrays
@@ -187,7 +190,7 @@ DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
+DEFINE_bool(compiled_transitions, true, "use optimizing compiler to "
"generate array elements transition stubs")
DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
"generate keyed store stubs")
@@ -195,6 +198,9 @@ DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenuring, true, "allocate objects in old space")
+// TODO(hpayer): We will remove this flag as soon as we have pretenuring
+// support for specific allocation sites.
+DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
@@ -209,17 +215,19 @@ DEFINE_bool(string_slices, true, "use string slices")
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
-DEFINE_string(hydrogen_filter, "", "optimization filter")
+DEFINE_string(hydrogen_filter, "*", "optimization filter")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis")
+DEFINE_bool(use_allocation_folding, true, "use allocation folding")
+DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
DEFINE_int(max_inlined_nodes, 196,
"maximum number of AST nodes considered for a single inlining")
-DEFINE_int(max_inlined_nodes_cumulative, 196,
+DEFINE_int(max_inlined_nodes_cumulative, 400,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
@@ -236,6 +244,7 @@ DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
DEFINE_bool(trace_escape_analysis, false, "trace hydrogen escape analysis")
+DEFINE_bool(trace_allocation_folding, false, "trace allocation folding")
DEFINE_bool(trace_track_allocation_sites, false,
"trace the tracking of allocation sites")
DEFINE_bool(trace_migration, false, "trace object migration")
@@ -248,6 +257,7 @@ DEFINE_int(deopt_every_n_times,
DEFINE_int(deopt_every_n_garbage_collections,
0,
"deoptimize every n garbage collections")
+DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
@@ -348,6 +358,8 @@ DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
"enable use of VFP3 instructions if available")
DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT,
"enable use of ARMv7 instructions if available (ARM only)")
+DEFINE_bool(enable_neon, true,
+ "enable use of NEON instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
DEFINE_bool(enable_movw_movt, false,
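The new flags above are plain command-line switches (e.g. d8
--harmony_numeric_literals). Assuming the long-standing SetFlagsFromString
entry point, an embedder can also flip them programmatically before creating
any contexts; a hedged sketch:

#include <cstring>
#include "v8.h"

// Enables one of the flags defined above. The flag name must match the
// DEFINE_bool identifier with a leading "--".
void EnableHarmonyNumericLiterals() {
  static const char kFlags[] = "--harmony_numeric_literals";
  v8::V8::SetFlagsFromString(kFlags, static_cast<int>(strlen(kFlags)));
}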
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 8d10645d1d..d097ed1dbb 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -274,10 +274,8 @@ inline bool JavaScriptFrame::has_adapted_arguments() const {
}
-inline Object* JavaScriptFrame::function() const {
- Object* result = function_slot_object();
- ASSERT(result->IsJSFunction());
- return result;
+inline JSFunction* JavaScriptFrame::function() const {
+ return JSFunction::cast(function_slot_object());
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 0408aa9074..890e77ad63 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -202,9 +202,10 @@ void StackTraceFrameIterator::Advance() {
}
}
+
bool StackTraceFrameIterator::IsValidFrame() {
if (!frame()->function()->IsJSFunction()) return false;
- Object* script = JSFunction::cast(frame()->function())->shared()->script();
+ Object* script = frame()->function()->shared()->script();
// Don't show functions from native scripts to user.
return (script->IsScript() &&
Script::TYPE_NATIVE != Script::cast(script)->type()->value());
@@ -672,7 +673,7 @@ void StubFrame::Iterate(ObjectVisitor* v) const {
Code* StubFrame::unchecked_code() const {
- return static_cast<Code*>(isolate()->heap()->FindCodeObject(pc()));
+ return static_cast<Code*>(isolate()->FindCodeObject(pc()));
}
@@ -723,8 +724,7 @@ int JavaScriptFrame::GetArgumentsLength() const {
Code* JavaScriptFrame::unchecked_code() const {
- JSFunction* function = JSFunction::cast(this->function());
- return function->code();
+ return function()->code();
}
@@ -732,8 +732,7 @@ int JavaScriptFrame::GetNumberOfIncomingArguments() const {
ASSERT(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
- JSFunction* function = JSFunction::cast(this->function());
- return function->shared()->formal_parameter_count();
+ return function()->shared()->formal_parameter_count();
}
@@ -744,7 +743,7 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
- functions->Add(JSFunction::cast(function()));
+ functions->Add(function());
}
@@ -753,7 +752,7 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
Code* code_pointer = LookupCode();
int offset = static_cast<int>(pc() - code_pointer->address());
FrameSummary summary(receiver(),
- JSFunction::cast(function()),
+ function(),
code_pointer,
offset,
IsConstructor());
@@ -774,40 +773,35 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
// function name
- Object* maybe_fun = frame->function();
- if (maybe_fun->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(maybe_fun);
- fun->PrintName();
- Code* js_code = frame->unchecked_code();
- Address pc = frame->pc();
- int code_offset =
- static_cast<int>(pc - js_code->instruction_start());
- PrintF("+%d", code_offset);
- SharedFunctionInfo* shared = fun->shared();
- if (print_line_number) {
- Code* code = Code::cast(
- v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
- int source_pos = code->SourcePosition(pc);
- Object* maybe_script = shared->script();
- if (maybe_script->IsScript()) {
- Handle<Script> script(Script::cast(maybe_script));
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
- Object* script_name_raw = script->name();
- if (script_name_raw->IsString()) {
- String* script_name = String::cast(script->name());
- SmartArrayPointer<char> c_script_name =
- script_name->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL);
- PrintF(file, " at %s:%d", *c_script_name, line);
- } else {
- PrintF(file, " at <unknown>:%d", line);
- }
+ JSFunction* fun = frame->function();
+ fun->PrintName();
+ Code* js_code = frame->unchecked_code();
+ Address pc = frame->pc();
+ int code_offset =
+ static_cast<int>(pc - js_code->instruction_start());
+ PrintF("+%d", code_offset);
+ SharedFunctionInfo* shared = fun->shared();
+ if (print_line_number) {
+ Code* code = Code::cast(
+ v8::internal::Isolate::Current()->FindCodeObject(pc));
+ int source_pos = code->SourcePosition(pc);
+ Object* maybe_script = shared->script();
+ if (maybe_script->IsScript()) {
+ Handle<Script> script(Script::cast(maybe_script));
+ int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+ Object* script_name_raw = script->name();
+ if (script_name_raw->IsString()) {
+ String* script_name = String::cast(script->name());
+ SmartArrayPointer<char> c_script_name =
+ script_name->ToCString(DISALLOW_NULLS,
+ ROBUST_STRING_TRAVERSAL);
+ PrintF(file, " at %s:%d", *c_script_name, line);
} else {
- PrintF(file, " at <unknown>:<unknown>");
+ PrintF(file, " at <unknown>:%d", line);
}
+ } else {
+ PrintF(file, " at <unknown>:<unknown>");
}
- } else {
- PrintF("<unknown>");
}
if (print_args) {
@@ -912,7 +906,7 @@ void FrameSummary::Print() {
JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
int literal_id) {
if (literal_id == Translation::kSelfLiteralId) {
- return JSFunction::cast(function());
+ return function();
}
return JSFunction::cast(literal_array->get(literal_id));
@@ -1017,7 +1011,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
int* deopt_index) {
ASSERT(is_optimized());
- JSFunction* opt_function = JSFunction::cast(function());
+ JSFunction* opt_function = function();
Code* code = opt_function->code();
// The code object may have been replaced by lazy deoptimization. Fall
@@ -1131,7 +1125,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
int index) const {
HandleScope scope(isolate());
Object* receiver = this->receiver();
- Object* function = this->function();
+ JSFunction* function = this->function();
accumulator->PrintSecurityTokenIfChanged(function);
PrintIndex(accumulator, mode, index);
@@ -1145,29 +1139,27 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// or context slots.
Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
- if (function->IsJSFunction()) {
- Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
- scope_info = Handle<ScopeInfo>(shared->scope_info());
- Object* script_obj = shared->script();
- if (script_obj->IsScript()) {
- Handle<Script> script(Script::cast(script_obj));
- accumulator->Add(" [");
- accumulator->PrintName(script->name());
-
- Address pc = this->pc();
- if (code != NULL && code->kind() == Code::FUNCTION &&
- pc >= code->instruction_start() && pc < code->instruction_end()) {
- int source_pos = code->SourcePosition(pc);
- int line = GetScriptLineNumberSafe(script, source_pos) + 1;
- accumulator->Add(":%d", line);
- } else {
- int function_start_pos = shared->start_position();
- int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
- accumulator->Add(":~%d", line);
- }
-
- accumulator->Add("] ");
+ Handle<SharedFunctionInfo> shared(function->shared());
+ scope_info = Handle<ScopeInfo>(shared->scope_info());
+ Object* script_obj = shared->script();
+ if (script_obj->IsScript()) {
+ Handle<Script> script(Script::cast(script_obj));
+ accumulator->Add(" [");
+ accumulator->PrintName(script->name());
+
+ Address pc = this->pc();
+ if (code != NULL && code->kind() == Code::FUNCTION &&
+ pc >= code->instruction_start() && pc < code->instruction_end()) {
+ int source_pos = code->SourcePosition(pc);
+ int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+ accumulator->Add(":%d", line);
+ } else {
+ int function_start_pos = shared->start_position();
+ int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
+ accumulator->Add(":~%d", line);
}
+
+ accumulator->Add("] ");
}
accumulator->Add("(this=%o", receiver);
@@ -1257,7 +1249,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// Print details about the function.
if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
- SharedFunctionInfo* shared = JSFunction::cast(function)->shared();
+ SharedFunctionInfo* shared = function->shared();
accumulator->Add("--------- s o u r c e c o d e ---------\n");
shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
accumulator->Add("\n-----------------------------------------\n");
@@ -1272,10 +1264,8 @@ void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
int index) const {
int actual = ComputeParametersCount();
int expected = -1;
- Object* function = this->function();
- if (function->IsJSFunction()) {
- expected = JSFunction::cast(function)->shared()->formal_parameter_count();
- }
+ JSFunction* function = this->function();
+ expected = function->shared()->formal_parameter_count();
PrintIndex(accumulator, mode, index);
accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
@@ -1568,6 +1558,7 @@ void SetUpJSCallerSavedCodeData() {
ASSERT(i == kNumJSCallerSaved);
}
+
int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
return caller_saved_code_data.reg_code[n];
@@ -1600,6 +1591,7 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
return NULL;
}
+
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) {
ZoneList<StackFrame*> list(10, zone);
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 0a5b609442..7e667a6acd 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -543,7 +543,7 @@ class JavaScriptFrame: public StandardFrame {
virtual Type type() const { return JAVA_SCRIPT; }
// Accessors.
- inline Object* function() const;
+ inline JSFunction* function() const;
inline Object* receiver() const;
inline void set_receiver(Object* value);
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index c1350a14d9..6d802e965d 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -76,12 +76,15 @@ void BreakableStatementChecker::VisitExportDeclaration(
void BreakableStatementChecker::VisitModuleLiteral(ModuleLiteral* module) {
}
+
void BreakableStatementChecker::VisitModuleVariable(ModuleVariable* module) {
}
+
void BreakableStatementChecker::VisitModulePath(ModulePath* module) {
}
+
void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
}
@@ -376,7 +379,7 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
for (unsigned i = 0; i < length; ++i) {
__ dd(back_edges_[i].id.ToInt());
__ dd(back_edges_[i].pc);
- __ db(back_edges_[i].loop_depth);
+ __ dd(back_edges_[i].loop_depth);
}
return offset;
}
@@ -1602,7 +1605,7 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
return true;
}
- if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+ if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 7e6450655f..a9db54e32c 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -136,7 +136,7 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
- static const int kBackEdgeEntrySize = 2 * kIntSize + kOneByteSize;
+ static const int kBackEdgeEntrySize = 3 * kIntSize;
private:
class Breakable;
@@ -648,7 +648,7 @@ class FullCodeGenerator: public AstVisitor {
struct BackEdgeEntry {
BailoutId id;
unsigned pc;
- uint8_t loop_depth;
+ uint32_t loop_depth;
};
struct TypeFeedbackCellEntry {
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 825d1e7c1c..74db807fb3 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -2015,6 +2015,7 @@ void GDBJITInterface::AddCode(Handle<Name> name,
}
}
+
static void AddUnwindInfo(CodeDescription* desc) {
#if V8_TARGET_ARCH_X64
if (desc->tag() == GDBJITInterface::FUNCTION) {
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index b601e99900..5c65635d0f 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -634,6 +634,11 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
+ if (!node->IsRetainer()) {
+ // Free nodes do not have weak callbacks. Do not use them to compute
+ // the next_gc_likely_to_collect_more.
+ continue;
+ }
// Skip dependent handles. Their weak callbacks might expect to be
// called between two global garbage collection callbacks which
// are not called for minor collections.
@@ -656,6 +661,11 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (!it.node()->IsRetainer()) {
+ // Free nodes do not have weak callbacks. Do not use them to compute
+ // the next_gc_likely_to_collect_more.
+ continue;
+ }
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
@@ -799,6 +809,7 @@ void GlobalHandles::PrintStats() {
PrintF(" # total = %d\n", total);
}
+
void GlobalHandles::Print() {
PrintF("Global handles:\n");
for (NodeIterator it(this); !it.done(); it.Advance()) {
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index baacf5226e..e695e94d4e 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -89,12 +89,6 @@ namespace internal {
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_HOST_CAN_READ_UNALIGNED 1
-#endif
#elif defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
@@ -102,6 +96,16 @@ namespace internal {
#error Host architecture was not detected as supported by v8
#endif
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+# ifndef CAN_USE_VFP3_INSTRUCTIONS
+# define CAN_USE_VFP3_INSTRUCTIONS
+# endif
+#endif
+
+
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
@@ -323,11 +327,18 @@ F FUNCTION_CAST(Address addr) {
}
+#if __cplusplus >= 201103L
+#define DISALLOW_BY_DELETE = delete
+#else
+#define DISALLOW_BY_DELETE
+#endif
+
+
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) DISALLOW_BY_DELETE; \
+ void operator=(const TypeName&) DISALLOW_BY_DELETE
// A macro to disallow all the implicit constructors, namely the
@@ -337,7 +348,7 @@ F FUNCTION_CAST(Address addr) {
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName(); \
+ TypeName() DISALLOW_BY_DELETE; \
DISALLOW_COPY_AND_ASSIGN(TypeName)
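Under a C++11 compiler the two macros now delete the members outright, so
misuse fails at the call site at compile time instead of at link time;
pre-C++11 builds keep the old private-declaration behavior. For example,
with the patched globals.h:

class Resource {
 public:
  Resource() {}
 private:
  DISALLOW_COPY_AND_ASSIGN(Resource);
  // With __cplusplus >= 201103L this expands to:
  //   Resource(const Resource&) = delete;
  //   void operator=(const Resource&) = delete;
};

// Resource a; Resource b(a);   // C++11: error, copy constructor is deleted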
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 7d4b25f10f..48114d91a7 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -169,12 +169,6 @@ void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
}
-void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
- CALL_HEAP_FUNCTION_VOID(func->GetIsolate(),
- func->SetPrototype(*value));
-}
-
-
static int ExpectedNofPropertiesFromEstimate(int estimate) {
// If no properties are added in the constructor, they are more likely
// to be added later.
@@ -499,6 +493,7 @@ int GetScriptLineNumber(Handle<Script> script, int code_pos) {
return right + script->line_offset()->value();
}
+
// Convert code position into column number.
int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
int line_number = GetScriptLineNumber(script, code_pos);
@@ -513,6 +508,7 @@ int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
return code_pos - (prev_line_end_pos + 1);
}
+
int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
DisallowHeapAllocation no_allocation;
if (!script->line_ends()->IsUndefined()) {
@@ -648,6 +644,10 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+ if (isolate->has_scheduled_exception()) {
+ isolate->PromoteScheduledException();
+ *threw = true;
+ }
break;
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 5976b758e1..90db7d1212 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -106,6 +106,13 @@ inline Handle<T> handle(T* t, Isolate* isolate) {
}
+// Convenience wrapper.
+template<class T>
+inline Handle<T> handle(T* t) {
+ return Handle<T>(t, t->GetIsolate());
+}
+
+
class DeferredHandles;
class HandleScopeImplementer;
@@ -306,9 +313,6 @@ Handle<String> SubString(Handle<String> str,
// Sets the expected number of properties for the function's instances.
void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
-// Sets the prototype property for a function instance.
-void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value);
-
// Sets the expected number of properties based on estimate from compiler.
void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
int estimate);
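The new one-argument handle() overload saves threading an Isolate* through
call sites where the object can supply its own; both spellings below are
equivalent given the patched handles.h (frame is any JavaScriptFrame*, as in
the deoptimizer hunk above):

// Explicit-isolate form, as deoptimizer.cc uses:
Handle<JSFunction> f1(frame->function(), isolate);
// Convenience form; the isolate is recovered via t->GetIsolate():
Handle<JSFunction> f2 = handle(frame->function());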
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 92ae8e54b5..97c56df0c0 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -495,10 +495,9 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite(
ElementsKind elements_kind,
- Handle<Object> allocation_site_payload) {
+ Handle<AllocationSite> allocation_site) {
return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0,
- allocation_site_payload,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
+ allocation_site, DONT_INITIALIZE_ARRAY_ELEMENTS);
}
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index e517df4a1a..e66af3364d 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -94,6 +94,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
return TakeSnapshot(snapshots_->names()->GetName(name), control, resolver);
}
+
void HeapProfiler::StartHeapObjectsTracking() {
snapshots_->StartHeapObjectsTracking();
}
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index f959aee00e..3b1f235e75 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -459,6 +459,7 @@ void HeapObjectsMap::StopHeapObjectsTracking() {
time_intervals_.Clear();
}
+
void HeapObjectsMap::UpdateHeapObjectsMap() {
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::UpdateHeapObjectsMap");
@@ -572,7 +573,6 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
: is_tracking_objects_(false),
- token_enumerator_(new TokenEnumerator()),
ids_(heap) {
}
@@ -583,7 +583,6 @@ static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
HeapSnapshotsCollection::~HeapSnapshotsCollection() {
- delete token_enumerator_;
snapshots_.Iterate(DeleteHeapSnapshot);
}
@@ -893,7 +892,12 @@ class IndexedReferencesExtractor : public ObjectVisitor {
: generator_(generator),
parent_obj_(parent_obj),
parent_(parent),
- next_index_(1) {
+ next_index_(0) {
+ }
+ void VisitCodeEntry(Address entry_address) {
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ generator_->SetInternalReference(parent_obj_, parent_, "code", code);
+ generator_->TagObject(code, "(code)");
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
@@ -931,7 +935,6 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
if (heap_entry == NULL) return; // No interest in this object.
int entry = heap_entry->index();
- bool extract_indexed_refs = true;
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
} else if (obj->IsJSObject()) {
@@ -954,17 +957,17 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
ExtractCodeReferences(entry, Code::cast(obj));
} else if (obj->IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
- extract_indexed_refs = false;
} else if (obj->IsPropertyCell()) {
- ExtractPropertyCellReferences(
- entry, PropertyCell::cast(obj));
- extract_indexed_refs = false;
- }
- if (extract_indexed_refs) {
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
+ ExtractPropertyCellReferences(entry, PropertyCell::cast(obj));
+ } else if (obj->IsAllocationSite()) {
+ ExtractAllocationSiteReferences(entry, AllocationSite::cast(obj));
}
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+
+ // Extract unvisited fields as hidden references and restore tags
+ // of visited fields.
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
+ obj->Iterate(&refs_extractor);
}
@@ -999,6 +1002,9 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetPropertyReference(
obj, entry,
heap_->prototype_string(), js_fun->prototype());
+ SetInternalReference(
+ obj, entry, "initial_map", proto_or_map,
+ JSFunction::kPrototypeOrInitialMapOffset);
}
}
SharedFunctionInfo* shared_info = js_fun->shared();
@@ -1109,22 +1115,13 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
- SetInternalReference(map, entry,
- "prototype", map->prototype(), Map::kPrototypeOffset);
- SetInternalReference(map, entry,
- "constructor", map->constructor(),
- Map::kConstructorOffset);
if (map->HasTransitionArray()) {
TransitionArray* transitions = map->transitions();
-
+ int transitions_entry = GetEntry(transitions)->index();
Object* back_pointer = transitions->back_pointer_storage();
- TagObject(transitions->back_pointer_storage(), "(back pointer)");
- SetInternalReference(transitions, entry,
- "backpointer", back_pointer,
- TransitionArray::kBackPointerStorageOffset);
- IndexedReferencesExtractor transitions_refs(this, transitions, entry);
- transitions->Iterate(&transitions_refs);
-
+ TagObject(back_pointer, "(back pointer)");
+ SetInternalReference(transitions, transitions_entry,
+ "back_pointer", back_pointer);
TagObject(transitions, "(transition array)");
SetInternalReference(map, entry,
"transitions", transitions,
@@ -1133,7 +1130,7 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
Object* back_pointer = map->GetBackPointer();
TagObject(back_pointer, "(back pointer)");
SetInternalReference(map, entry,
- "backpointer", back_pointer,
+ "back_pointer", back_pointer,
Map::kTransitionsOrBackPointerOffset);
}
DescriptorArray* descriptors = map->instance_descriptors();
@@ -1145,6 +1142,15 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
SetInternalReference(map, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
+ SetInternalReference(map, entry,
+ "prototype", map->prototype(), Map::kPrototypeOffset);
+ SetInternalReference(map, entry,
+ "constructor", map->constructor(),
+ Map::kConstructorOffset);
+ TagObject(map->dependent_code(), "(dependent code)");
+ SetInternalReference(map, entry,
+ "dependent_code", map->dependent_code(),
+ Map::kDependentCodeOffset);
}
@@ -1254,14 +1260,24 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
- SetInternalReference(cell, entry, "value", cell->value());
+ SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
}
void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
PropertyCell* cell) {
- SetInternalReference(cell, entry, "value", cell->value());
- SetInternalReference(cell, entry, "type", cell->type());
+ ExtractCellReferences(entry, cell);
+ SetInternalReference(cell, entry, "type", cell->type(),
+ PropertyCell::kTypeOffset);
+ SetInternalReference(cell, entry, "dependent_code", cell->dependent_code(),
+ PropertyCell::kDependentCodeOffset);
+}
+
+
+void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
+ AllocationSite* site) {
+ SetInternalReference(site, entry, "transition_info", site->transition_info(),
+ AllocationSite::kTransitionInfoOffset);
}
@@ -1569,6 +1585,7 @@ void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
String* reference_name,
Object* child_obj,
int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
@@ -1584,6 +1601,7 @@ void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
int parent_entry,
const char* reference_name,
Object* child_obj) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kShortcut,
@@ -1598,6 +1616,7 @@ void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
int parent_entry,
int index,
Object* child_obj) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetIndexedReference(HeapGraphEdge::kElement,
@@ -1613,6 +1632,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
const char* reference_name,
Object* child_obj,
int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == NULL) return;
if (IsEssentialObject(child_obj)) {
@@ -1630,6 +1650,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
int index,
Object* child_obj,
int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == NULL) return;
if (IsEssentialObject(child_obj)) {
@@ -1646,6 +1667,7 @@ void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
int parent_entry,
int index,
Object* child_obj) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL && IsEssentialObject(child_obj)) {
filler_->SetIndexedReference(HeapGraphEdge::kHidden,
@@ -1661,14 +1683,16 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
int index,
Object* child_obj,
int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
+ if (child_entry == NULL) return;
+ if (IsEssentialObject(child_obj)) {
filler_->SetIndexedReference(HeapGraphEdge::kWeak,
parent_entry,
index,
child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
@@ -1678,6 +1702,7 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
Object* child_obj,
const char* name_format_string,
int field_offset) {
+ ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
HeapGraphEdge::Type type =
@@ -1963,6 +1988,7 @@ void NativeObjectsExplorer::FillRetainedObjects() {
embedder_queried_ = true;
}
+
void NativeObjectsExplorer::FillImplicitReferences() {
Isolate* isolate = Isolate::Current();
List<ImplicitRefGroup*>* groups =
@@ -2588,6 +2614,7 @@ static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
w->AddCharacter(hex_chars[u & 0xf]);
}
+
void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
writer_->AddCharacter('\n');
writer_->AddCharacter('\"');
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index cd1ec29242..31d808856d 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -306,7 +306,6 @@ class HeapSnapshotsCollection {
void RemoveSnapshot(HeapSnapshot* snapshot);
StringsStorage* names() { return &names_; }
- TokenEnumerator* token_enumerator() { return token_enumerator_; }
SnapshotObjectId FindObjectId(Address object_addr) {
return ids_.FindEntry(object_addr);
@@ -325,7 +324,6 @@ class HeapSnapshotsCollection {
bool is_tracking_objects_; // Whether tracking object moves is needed.
List<HeapSnapshot*> snapshots_;
StringsStorage names_;
- TokenEnumerator* token_enumerator_;
// Mapping from HeapObject addresses to objects' uids.
HeapObjectsMap ids_;
@@ -462,6 +460,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractCodeReferences(int entry, Code* code);
void ExtractCellReferences(int entry, Cell* cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
+ void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 9d1ac8c1ed..dff217a991 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -74,7 +74,7 @@ Heap::Heap()
#define LUMP_OF_MEMORY MB
code_range_size_(0),
#endif
-#if defined(ANDROID)
+#if defined(ANDROID) || V8_TARGET_ARCH_MIPS
reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
initial_semispace_size_(Page::kPageSize),
@@ -182,6 +182,7 @@ Heap::Heap()
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
native_contexts_list_ = NULL;
array_buffers_list_ = Smi::FromInt(0);
+ allocation_sites_list_ = Smi::FromInt(0);
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list the
@@ -613,8 +614,10 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
+ const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
+ if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
+ attempt + 1 >= kMinNumberOfAttempts) {
break;
}
}
@@ -1108,12 +1111,6 @@ void Heap::MarkCompactPrologue() {
}
-Object* Heap::FindCodeObject(Address a) {
- return isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(a);
-}
-
-
// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
public:
@@ -1662,6 +1659,7 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
mark_compact_collector()->is_compacting();
ProcessArrayBuffers(retainer, record_slots);
ProcessNativeContexts(retainer, record_slots);
+ ProcessAllocationSites(retainer, record_slots);
}
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
@@ -1755,6 +1753,39 @@ void Heap::TearDownArrayBuffers() {
}
+template<>
+struct WeakListVisitor<AllocationSite> {
+ static void SetWeakNext(AllocationSite* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* WeakNext(AllocationSite* obj) {
+ return obj->weak_next();
+ }
+
+ static void VisitLiveObject(Heap* heap,
+ AllocationSite* site,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {}
+
+ static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
+
+ static int WeakNextOffset() {
+ return AllocationSite::kWeakNextOffset;
+ }
+};
+
+
+void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* allocation_site_obj =
+ VisitWeakList<AllocationSite>(this,
+ allocation_sites_list(),
+ retainer, record_slots);
+ set_allocation_sites_list(allocation_site_obj);
+}
+
+
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
DisallowHeapAllocation no_allocation;
@@ -1930,6 +1961,10 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
+ table_.Register(kVisitJSWeakSet,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
table_.Register(kVisitJSArrayBuffer,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
@@ -2855,9 +2890,9 @@ MaybeObject* Heap::AllocateCell(Object* value) {
MaybeObject* Heap::AllocatePropertyCell(Object* value) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawPropertyCell();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRawPropertyCell();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+
HeapObject::cast(result)->set_map_no_write_barrier(
global_property_cell_map());
PropertyCell* cell = PropertyCell::cast(result);
@@ -2865,6 +2900,8 @@ MaybeObject* Heap::AllocatePropertyCell(Object* value) {
SKIP_WRITE_BARRIER);
cell->set_value(value);
cell->set_type(Type::None());
+ maybe_result = cell->SetValueInferType(value);
+ if (maybe_result->IsFailure()) return maybe_result;
return result;
}
@@ -2878,6 +2915,21 @@ MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
}
+MaybeObject* Heap::AllocateAllocationSite() {
+ Object* result;
+ MaybeObject* maybe_result = Allocate(allocation_site_map(),
+ OLD_POINTER_SPACE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ AllocationSite* site = AllocationSite::cast(result);
+ site->Initialize();
+
+ // Link the new site into the heap's weak allocation-site list.
+ site->set_weak_next(allocation_sites_list());
+ set_allocation_sites_list(site);
+ return result;
+}
+
+
MaybeObject* Heap::CreateOddball(const char* to_string,
Object* to_number,
byte kind) {
@@ -3158,6 +3210,11 @@ bool Heap::CreateInitialObjects() {
SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_observed_symbol(Symbol::cast(obj));
+
// Handling of script id generation is in Factory::NewScript.
set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
@@ -3501,6 +3558,7 @@ Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
}
}
+
ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
return ExternalArray::cast(
roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
@@ -3563,7 +3621,6 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
- share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
share->set_counters(0);
// Set integer fields (smi or int, depending on the architecture).
@@ -4183,23 +4240,23 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
- Handle<Object> allocation_site_info_payload) {
+ Handle<AllocationSite> allocation_site) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
// If allocation failures are disallowed, we may allocate in a different
// space when new space is full and the object is not a large object.
AllocationSpace retry_space =
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
- int size = map->instance_size() + AllocationSiteInfo::kSize;
+ int size = map->instance_size() + AllocationMemento::kSize;
Object* result;
MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
if (!maybe_result->ToObject(&result)) return maybe_result;
// No need for write barrier since object is white and map is in old space.
HeapObject::cast(result)->set_map_no_write_barrier(map);
- AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
- alloc_info->set_map_no_write_barrier(allocation_site_info_map());
- alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
+ alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
return result;
}
@@ -4429,10 +4486,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- int prop_size =
- map->pre_allocated_property_fields() +
- map->unused_property_fields() -
- map->inobject_properties();
+ int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
@@ -4458,7 +4512,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
- Handle<Object> allocation_site_info_payload) {
+ Handle<AllocationSite> allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4469,10 +4523,7 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- int prop_size =
- map->pre_allocated_property_fields() +
- map->unused_property_fields() -
- map->inobject_properties();
+ int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
@@ -4483,8 +4534,8 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
AllocationSpace space = NEW_SPACE;
if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
Object* obj;
- MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
- allocation_site_info_payload);
+ MaybeObject* maybe_obj =
+ AllocateWithAllocationSite(map, space, allocation_site);
if (!maybe_obj->To(&obj)) return maybe_obj;
// Initialize the JSObject.
@@ -4520,7 +4571,7 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
- Handle<Object> allocation_site_info_payload) {
+ Handle<AllocationSite> allocation_site) {
// Allocate the initial map if absent.
if (!constructor->has_initial_map()) {
Object* initial_map;
@@ -4534,8 +4585,7 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
// advice
Map* initial_map = constructor->initial_map();
- Cell* cell = Cell::cast(*allocation_site_info_payload);
- Smi* smi = Smi::cast(cell->value());
+ Smi* smi = Smi::cast(allocation_site->transition_info());
ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
if (to_kind != initial_map->elements_kind()) {
@@ -4543,13 +4593,13 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
// Possibly alter the mode, since we found an updated elements kind
// in the type info cell.
- mode = AllocationSiteInfo::GetMode(to_kind);
+ mode = AllocationSite::GetMode(to_kind);
}
MaybeObject* result;
if (mode == TRACK_ALLOCATION_SITE) {
result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
- allocation_site_info_payload);
+ allocation_site);
} else {
result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
}
@@ -4644,10 +4694,10 @@ MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
ElementsKind elements_kind,
int length,
int capacity,
- Handle<Object> allocation_site_payload,
+ Handle<AllocationSite> allocation_site,
ArrayStorageAllocationMode mode) {
MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
- allocation_site_payload);
+ allocation_site);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
return AllocateJSArrayStorage(array, length, capacity, mode);
@@ -4896,7 +4946,9 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
}
-MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
+MaybeObject* Heap::CopyJSObjectWithAllocationSite(
+ JSObject* source,
+ AllocationSite* site) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
@@ -4916,8 +4968,8 @@ MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
if (always_allocate()) {
// We'll only track origin if we are certain to allocate in new space
const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
- if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
- adjusted_object_size += AllocationSiteInfo::kSize;
+ if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
+ adjusted_object_size += AllocationMemento::kSize;
}
{ MaybeObject* maybe_clone =
@@ -4930,7 +4982,7 @@ MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
object_size);
// Update write barrier for all fields that lie beyond the header.
int write_barrier_offset = adjusted_object_size > object_size
- ? JSArray::kSize + AllocationSiteInfo::kSize
+ ? JSArray::kSize + AllocationMemento::kSize
: JSObject::kHeaderSize;
if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
RecordWrites(clone_address,
@@ -4941,17 +4993,17 @@ MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
// Track allocation site information, if we failed to allocate it inline.
if (InNewSpace(clone) &&
adjusted_object_size == object_size) {
- MaybeObject* maybe_alloc_info =
- AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
- AllocationSiteInfo* alloc_info;
- if (maybe_alloc_info->To(&alloc_info)) {
- alloc_info->set_map_no_write_barrier(allocation_site_info_map());
- alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
+ MaybeObject* maybe_alloc_memento =
+ AllocateStruct(ALLOCATION_MEMENTO_TYPE);
+ AllocationMemento* alloc_memento;
+ if (maybe_alloc_memento->To(&alloc_memento)) {
+ alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
}
}
} else {
wb_mode = SKIP_WRITE_BARRIER;
- adjusted_object_size += AllocationSiteInfo::kSize;
+ adjusted_object_size += AllocationMemento::kSize;
{ MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
@@ -4965,10 +5017,10 @@ MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
}
if (adjusted_object_size > object_size) {
- AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(clone) + object_size);
- alloc_info->set_map_no_write_barrier(allocation_site_info_map());
- alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
+ alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
}
SLOW_ASSERT(
@@ -5231,6 +5283,7 @@ static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
String::WriteToFlat(s, chars, 0, len);
}
+
static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
ASSERT(s->length() == len);
String::WriteToFlat(s, chars, 0, len);
@@ -5384,7 +5437,7 @@ MaybeObject* Heap::AllocateJSArray(
MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
ElementsKind elements_kind,
- Handle<Object> allocation_site_info_payload) {
+ Handle<AllocationSite> allocation_site) {
Context* native_context = isolate()->context()->native_context();
JSFunction* array_function = native_context->array_function();
Map* map = array_function->initial_map();
@@ -5396,8 +5449,7 @@ MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
map = Map::cast(maybe_transitioned_map);
}
}
- return AllocateJSObjectFromMapWithAllocationSite(map,
- allocation_site_info_payload);
+ return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
}
@@ -5415,6 +5467,7 @@ MaybeObject* Heap::AllocateEmptyFixedArray() {
return result;
}
+
MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
return AllocateExternalArray(0, array_type, NULL, TENURED);
}
@@ -5791,7 +5844,7 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
MaybeObject* Heap::AllocateWithContext(JSFunction* function,
Context* previous,
- JSObject* extension) {
+ JSReceiver* extension) {
Object* result;
{ MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -6751,6 +6804,7 @@ static void InitializeGCOnce() {
MarkCompactCollector::Initialize();
}
+
bool Heap::SetUp() {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
@@ -6861,6 +6915,7 @@ bool Heap::SetUp() {
return true;
}
+
bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
@@ -6871,6 +6926,7 @@ bool Heap::CreateHeapObjects() {
native_contexts_list_ = undefined_value();
array_buffers_list_ = undefined_value();
+ allocation_sites_list_ = undefined_value();
return true;
}
@@ -7026,6 +7082,7 @@ class PrintHandleVisitor: public ObjectVisitor {
}
};
+
void Heap::PrintHandles() {
PrintF("Handles:\n");
PrintHandleVisitor v;
@@ -7331,7 +7388,7 @@ void HeapIterator::reset() {
#ifdef DEBUG
-Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
+Object* const PathTracer::kAnyGlobalObject = NULL;
class PathTracer::MarkVisitor: public ObjectVisitor {
public:
@@ -7683,8 +7740,10 @@ GCTracer::~GCTracer() {
PrintF("intracompaction_ptrs=%.1f ",
scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
- PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]);
- PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]);
+ PrintF("weakcollection_process=%.1f ",
+ scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
+ PrintF("weakcollection_clear=%.1f ",
+ scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index d254b607b6..6b0236330f 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -187,7 +187,8 @@ namespace internal {
V(Map, external_map, ExternalMap) \
V(Symbol, frozen_symbol, FrozenSymbol) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary)
+ EmptySlowElementDictionary) \
+ V(Symbol, observed_symbol, ObservedSymbol)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -232,6 +233,7 @@ namespace internal {
V(last_index_string, "lastIndex") \
V(object_string, "object") \
V(payload_string, "payload") \
+ V(literals_string, "literals") \
V(prototype_string, "prototype") \
V(string_string, "string") \
V(String_string, "String") \
@@ -261,6 +263,7 @@ namespace internal {
V(map_field_string, "%map") \
V(elements_field_string, "%elements") \
V(length_field_string, "%length") \
+ V(cell_value_string, "%cell_value") \
V(function_class_string, "Function") \
V(properties_field_symbol, "%properties") \
V(payload_field_symbol, "%payload") \
@@ -657,7 +660,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
JSFunction* constructor,
- Handle<Object> allocation_site_info_payload);
+ Handle<AllocationSite> allocation_site);
MUST_USE_RESULT MaybeObject* AllocateJSGeneratorObject(
JSFunction* function);
@@ -676,7 +679,7 @@ class Heap {
inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite(
ElementsKind elements_kind,
- Handle<Object> allocation_site_payload);
+ Handle<AllocationSite> allocation_site);
// Allocate a JSArray with a specified length but elements that are left
// uninitialized.
@@ -691,7 +694,7 @@ class Heap {
ElementsKind elements_kind,
int length,
int capacity,
- Handle<Object> allocation_site_payload,
+ Handle<AllocationSite> allocation_site,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
@@ -718,7 +721,8 @@ class Heap {
// Returns failure if allocation failed.
MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
- MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(JSObject* source);
+ MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(
+ JSObject* source, AllocationSite* site);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -768,7 +772,7 @@ class Heap {
Map* map, PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
- Map* map, Handle<Object> allocation_site_info_payload);
+ Map* map, Handle<AllocationSite> allocation_site);
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -777,7 +781,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
- AllocationSpace space, Handle<Object> allocation_site_info_payload);
+ AllocationSpace space, Handle<AllocationSite> allocation_site);
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -955,6 +959,9 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
PretenureFlag pretenure);
+ // Allocate a tenured AllocationSite. Its payload is null.
+ MUST_USE_RESULT MaybeObject* AllocateAllocationSite();
+
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -1046,7 +1053,7 @@ class Heap {
// Allocate a 'with' context.
MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function,
Context* previous,
- JSObject* extension);
+ JSReceiver* extension);
// Allocate a block context.
MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
@@ -1369,6 +1376,11 @@ class Heap {
}
Object* array_buffers_list() { return array_buffers_list_; }
+ void set_allocation_sites_list(Object* object) {
+ allocation_sites_list_ = object;
+ }
+ Object* allocation_sites_list() { return allocation_sites_list_; }
+ Object** allocation_sites_list_address() { return &allocation_sites_list_; }
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -1508,9 +1520,6 @@ class Heap {
// Write barrier support for address[start : start + len[ = o.
INLINE(void RecordWrites(Address address, int start, int len));
- // Given an address occupied by a live code object, return that object.
- Object* FindCodeObject(Address a);
-
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
inline HeapState gc_state() { return gc_state_; }
@@ -1812,6 +1821,8 @@ class Heap {
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FreeQueuedChunks();
+ int gc_count() const { return gc_count_; }
+
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
@@ -2036,9 +2047,10 @@ class Heap {
// last GC.
bool old_gen_exhausted_;
+ // Weak list heads, threaded through the objects.
Object* native_contexts_list_;
-
Object* array_buffers_list_;
+ Object* allocation_sites_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
@@ -2156,7 +2168,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite(
ElementsKind elements_kind,
- Handle<Object> allocation_site_info_payload);
+ Handle<AllocationSite> allocation_site);
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
@@ -2188,6 +2200,7 @@ class Heap {
void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
+ void ProcessAllocationSites(WeakObjectRetainer* retainer, bool record_slots);
// Called on heap tear-down.
void TearDownArrayBuffers();
@@ -2745,8 +2758,8 @@ class GCTracer BASE_EMBEDDED {
MC_UPDATE_POINTERS_TO_EVACUATED,
MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
MC_UPDATE_MISC_POINTERS,
- MC_WEAKMAP_PROCESS,
- MC_WEAKMAP_CLEAR,
+ MC_WEAKCOLLECTION_PROCESS,
+ MC_WEAKCOLLECTION_CLEAR,
MC_FLUSH_CODE,
kNumberOfScopes
};
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
new file mode 100644
index 0000000000..e50cd7aaf0
--- /dev/null
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -0,0 +1,390 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-bce.h"
+
+namespace v8 {
+namespace internal {
+
+// We try to "factor up" HBoundsCheck instructions towards the root of the
+// dominator tree.
+// For now we handle checks where the index is like "exp + int32value".
+// If in the dominator tree we check "exp + v1" and later (dominated)
+// "exp + v2", then if v2 <= v1 we can safely remove the second check,
+// and if v2 > v1 we can use v2 in the first check and again remove the
+// second.
+// To do so we keep a dictionary of all checks where the key is the pair
+// "exp, length".
+// The class BoundsCheckKey represents this key.
+class BoundsCheckKey : public ZoneObject {
+ public:
+ HValue* IndexBase() const { return index_base_; }
+ HValue* Length() const { return length_; }
+
+ uint32_t Hash() {
+ return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
+ }
+
+ static BoundsCheckKey* Create(Zone* zone,
+ HBoundsCheck* check,
+ int32_t* offset) {
+ if (!check->index()->representation().IsSmiOrInteger32()) return NULL;
+
+ HValue* index_base = NULL;
+ HConstant* constant = NULL;
+ bool is_sub = false;
+
+ if (check->index()->IsAdd()) {
+ HAdd* index = HAdd::cast(check->index());
+ if (index->left()->IsConstant()) {
+ constant = HConstant::cast(index->left());
+ index_base = index->right();
+ } else if (index->right()->IsConstant()) {
+ constant = HConstant::cast(index->right());
+ index_base = index->left();
+ }
+ } else if (check->index()->IsSub()) {
+ HSub* index = HSub::cast(check->index());
+ is_sub = true;
+ if (index->left()->IsConstant()) {
+ constant = HConstant::cast(index->left());
+ index_base = index->right();
+ } else if (index->right()->IsConstant()) {
+ constant = HConstant::cast(index->right());
+ index_base = index->left();
+ }
+ }
+
+ if (constant != NULL && constant->HasInteger32Value()) {
+ *offset = is_sub ? - constant->Integer32Value()
+ : constant->Integer32Value();
+ } else {
+ *offset = 0;
+ index_base = check->index();
+ }
+
+ return new(zone) BoundsCheckKey(index_base, check->length());
+ }
+
+ private:
+ BoundsCheckKey(HValue* index_base, HValue* length)
+ : index_base_(index_base),
+ length_(length) { }
+
+ HValue* index_base_;
+ HValue* length_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckKey);
+};
+
+
+// Data about each HBoundsCheck that can be eliminated or moved.
+// It is the "value" in the dictionary indexed by "base-index, length"
+// (the key is BoundsCheckKey).
+// We scan the code with a dominator tree traversal.
+// Traversing the dominator tree we keep a stack (implemented as a singly
+// linked list) of "data" for each basic block that contains a relevant check
+// with the same key (the dictionary holds the head of the list).
+// We also keep all the "data" created for a given basic block in a list, and
+// use it to "clean up" the dictionary when backtracking in the dominator tree
+// traversal.
+// This way, each dictionary entry always points directly to the check that
+// dominates the code currently being examined.
+// We also track the current "offset" of the index expression and use it to
+// decide if any check is already "covered" (so it can be removed) or not.
+class BoundsCheckBbData: public ZoneObject {
+ public:
+ BoundsCheckKey* Key() const { return key_; }
+ int32_t LowerOffset() const { return lower_offset_; }
+ int32_t UpperOffset() const { return upper_offset_; }
+ HBasicBlock* BasicBlock() const { return basic_block_; }
+ HBoundsCheck* LowerCheck() const { return lower_check_; }
+ HBoundsCheck* UpperCheck() const { return upper_check_; }
+ BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
+ BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
+
+ bool OffsetIsCovered(int32_t offset) const {
+ return offset >= LowerOffset() && offset <= UpperOffset();
+ }
+
+ bool HasSingleCheck() { return lower_check_ == upper_check_; }
+
+ // The goal of this method is to modify either upper_offset_ or
+ // lower_offset_ so that new_offset is also covered (the covered
+ // range grows).
+ //
+ // The precondition is that new_check follows UpperCheck() and
+ // LowerCheck() in the same basic block, and that new_offset is not
+ // covered (otherwise we could simply remove new_check).
+ //
+ // If HasSingleCheck() is true then new_check is added as "second check"
+ // (either upper or lower; note that HasSingleCheck() becomes false).
+ // Otherwise one of the current checks is modified so that it also covers
+ // new_offset, and new_check is removed.
+ //
+ // If the check cannot be modified because the context is unknown it
+ // returns false, otherwise it returns true.
+ bool CoverCheck(HBoundsCheck* new_check,
+ int32_t new_offset) {
+ ASSERT(new_check->index()->representation().IsSmiOrInteger32());
+ bool keep_new_check = false;
+
+ if (new_offset > upper_offset_) {
+ upper_offset_ = new_offset;
+ if (HasSingleCheck()) {
+ keep_new_check = true;
+ upper_check_ = new_check;
+ } else {
+ bool result = BuildOffsetAdd(upper_check_,
+ &added_upper_index_,
+ &added_upper_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ if (!result) return false;
+ upper_check_->ReplaceAllUsesWith(upper_check_->index());
+ upper_check_->SetOperandAt(0, added_upper_index_);
+ }
+ } else if (new_offset < lower_offset_) {
+ lower_offset_ = new_offset;
+ if (HasSingleCheck()) {
+ keep_new_check = true;
+ lower_check_ = new_check;
+ } else {
+ bool result = BuildOffsetAdd(lower_check_,
+ &added_lower_index_,
+ &added_lower_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ if (!result) return false;
+ lower_check_->ReplaceAllUsesWith(lower_check_->index());
+ lower_check_->SetOperandAt(0, added_lower_index_);
+ }
+ } else {
+ ASSERT(false);
+ }
+
+ if (!keep_new_check) {
+ new_check->DeleteAndReplaceWith(new_check->ActualValue());
+ }
+
+ return true;
+ }
+
+ void RemoveZeroOperations() {
+ RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
+ RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
+ }
+
+ BoundsCheckBbData(BoundsCheckKey* key,
+ int32_t lower_offset,
+ int32_t upper_offset,
+ HBasicBlock* bb,
+ HBoundsCheck* lower_check,
+ HBoundsCheck* upper_check,
+ BoundsCheckBbData* next_in_bb,
+ BoundsCheckBbData* father_in_dt)
+ : key_(key),
+ lower_offset_(lower_offset),
+ upper_offset_(upper_offset),
+ basic_block_(bb),
+ lower_check_(lower_check),
+ upper_check_(upper_check),
+ added_lower_index_(NULL),
+ added_lower_offset_(NULL),
+ added_upper_index_(NULL),
+ added_upper_offset_(NULL),
+ next_in_bb_(next_in_bb),
+ father_in_dt_(father_in_dt) { }
+
+ private:
+ BoundsCheckKey* key_;
+ int32_t lower_offset_;
+ int32_t upper_offset_;
+ HBasicBlock* basic_block_;
+ HBoundsCheck* lower_check_;
+ HBoundsCheck* upper_check_;
+ HInstruction* added_lower_index_;
+ HConstant* added_lower_offset_;
+ HInstruction* added_upper_index_;
+ HConstant* added_upper_offset_;
+ BoundsCheckBbData* next_in_bb_;
+ BoundsCheckBbData* father_in_dt_;
+
+ // Given an existing add instruction and a bounds check, it tries to
+ // find the current context (either of the add or of the check index).
+ HValue* IndexContext(HInstruction* add, HBoundsCheck* check) {
+ if (add != NULL && add->IsAdd()) {
+ return HAdd::cast(add)->context();
+ }
+ if (check->index()->IsBinaryOperation()) {
+ return HBinaryOperation::cast(check->index())->context();
+ }
+ return NULL;
+ }
+
+ // This function returns false if it cannot build the add because the
+ // current context cannot be determined.
+ bool BuildOffsetAdd(HBoundsCheck* check,
+ HInstruction** add,
+ HConstant** constant,
+ HValue* original_value,
+ Representation representation,
+ int32_t new_offset) {
+ HValue* index_context = IndexContext(*add, check);
+ if (index_context == NULL) return false;
+
+ HConstant* new_constant = new(BasicBlock()->zone()) HConstant(
+ new_offset, representation);
+ if (*add == NULL) {
+ new_constant->InsertBefore(check);
+ (*add) = HAdd::New(
+ BasicBlock()->zone(), index_context, original_value, new_constant);
+ (*add)->AssumeRepresentation(representation);
+ (*add)->InsertBefore(check);
+ } else {
+ new_constant->InsertBefore(*add);
+ (*constant)->DeleteAndReplaceWith(new_constant);
+ }
+ *constant = new_constant;
+ return true;
+ }
+
+ void RemoveZeroAdd(HInstruction** add, HConstant** constant) {
+ if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) {
+ (*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left());
+ (*constant)->DeleteAndReplaceWith(NULL);
+ }
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
+};
+
+
+static bool BoundsCheckKeyMatch(void* key1, void* key2) {
+ BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
+ BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
+ return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
+}
+
+
+BoundsCheckTable::BoundsCheckTable(Zone* zone)
+ : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)) { }
+
+
+BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key,
+ Zone* zone) {
+ return reinterpret_cast<BoundsCheckBbData**>(
+ &(Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value));
+}
+
+
+void BoundsCheckTable::Insert(BoundsCheckKey* key,
+ BoundsCheckBbData* data,
+ Zone* zone) {
+ Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value = data;
+}
+
+
+void BoundsCheckTable::Delete(BoundsCheckKey* key) {
+ Remove(key, key->Hash());
+}
+
+
+// Eliminates checks in bb and recursively in the dominated blocks.
+// Also replaces the results of check instructions with the original value, if
+// the result is used. This is safe now, since we don't do code motion after
+// this point. It enables better register allocation since the value produced
+// by check instructions is really a copy of the original value.
+void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
+ HBasicBlock* bb) {
+ BoundsCheckBbData* bb_data_list = NULL;
+
+ for (HInstructionIterator it(bb); !it.Done(); it.Advance()) {
+ HInstruction* i = it.Current();
+ if (!i->IsBoundsCheck()) continue;
+
+ HBoundsCheck* check = HBoundsCheck::cast(i);
+ int32_t offset;
+ BoundsCheckKey* key =
+ BoundsCheckKey::Create(zone(), check, &offset);
+ if (key == NULL) continue;
+ BoundsCheckBbData** data_p = table_.LookupOrInsert(key, zone());
+ BoundsCheckBbData* data = *data_p;
+ if (data == NULL) {
+ bb_data_list = new(zone()) BoundsCheckBbData(key,
+ offset,
+ offset,
+ bb,
+ check,
+ check,
+ bb_data_list,
+ NULL);
+ *data_p = bb_data_list;
+ } else if (data->OffsetIsCovered(offset)) {
+ check->DeleteAndReplaceWith(check->ActualValue());
+ } else if (data->BasicBlock() != bb ||
+ !data->CoverCheck(check, offset)) {
+ // If the check is in the current BB we try to modify it by calling
+ // "CoverCheck", but if also that fails we record the current offsets
+ // in a new data instance because from now on they are covered.
+ int32_t new_lower_offset = offset < data->LowerOffset()
+ ? offset
+ : data->LowerOffset();
+ int32_t new_upper_offset = offset > data->UpperOffset()
+ ? offset
+ : data->UpperOffset();
+ bb_data_list = new(zone()) BoundsCheckBbData(key,
+ new_lower_offset,
+ new_upper_offset,
+ bb,
+ data->LowerCheck(),
+ data->UpperCheck(),
+ bb_data_list,
+ data);
+ table_.Insert(key, bb_data_list, zone());
+ }
+ }
+
+ for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
+ EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i));
+ }
+
+ for (BoundsCheckBbData* data = bb_data_list;
+ data != NULL;
+ data = data->NextInBasicBlock()) {
+ data->RemoveZeroOperations();
+ if (data->FatherInDominatorTree()) {
+ table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
+ } else {
+ table_.Delete(data->Key());
+ }
+ }
+}
+
+} } // namespace v8::internal
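
The pass above keys each HBoundsCheck on the pair (index base, length) and
tracks the [lower, upper] window of offsets already checked on the current
dominator path: a dominated check whose offset falls inside the window is
redundant, and one outside it grows the window instead. A compact sketch of
just that covered-range bookkeeping, with plain integers standing in for
Hydrogen instructions:

// Sketch only: CoveredRange is an illustrative stand-in for the
// lower_offset_/upper_offset_ bookkeeping in BoundsCheckBbData.
#include <cstdint>
#include <cstdio>

struct CoveredRange {
  int32_t lower;  // Smallest offset already checked for this (base, length).
  int32_t upper;  // Largest offset already checked.

  bool Covers(int32_t offset) const {
    return offset >= lower && offset <= upper;
  }
  void Grow(int32_t offset) {
    if (offset < lower) lower = offset;
    if (offset > upper) upper = offset;
  }
};

int main() {
  // First check seen: a[i + 4]; the window starts as the single offset {4, 4}.
  CoveredRange range = {4, 4};
  // A dominated check a[i + 2] is not covered (2 < 4), so the window grows...
  if (!range.Covers(2)) range.Grow(2);
  // ...after which a later a[i + 3] is redundant and would be deleted.
  std::printf("a[i + 3] covered: %s\n", range.Covers(3) ? "yes" : "no");
  return 0;
}
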
diff --git a/deps/v8/src/hydrogen-bce.h b/deps/v8/src/hydrogen-bce.h
new file mode 100644
index 0000000000..d91997bda0
--- /dev/null
+++ b/deps/v8/src/hydrogen-bce.h
@@ -0,0 +1,72 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_BCE_H_
+#define V8_HYDROGEN_BCE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class BoundsCheckBbData;
+class BoundsCheckKey;
+class BoundsCheckTable : private ZoneHashMap {
+ public:
+ explicit BoundsCheckTable(Zone* zone);
+
+ INLINE(BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone));
+ INLINE(void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone));
+ INLINE(void Delete(BoundsCheckKey* key));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckTable);
+};
+
+
+class HBoundsCheckEliminationPhase : public HPhase {
+ public:
+ explicit HBoundsCheckEliminationPhase(HGraph* graph)
+ : HPhase("H_Bounds checks elimination", graph), table_(zone()) { }
+
+ void Run() {
+ EliminateRedundantBoundsChecks(graph()->entry_block());
+ }
+
+ private:
+ void EliminateRedundantBoundsChecks(HBasicBlock* bb);
+
+ BoundsCheckTable table_;
+
+ DISALLOW_COPY_AND_ASSIGN(HBoundsCheckEliminationPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_BCE_H_
diff --git a/deps/v8/src/hydrogen-canonicalize.cc b/deps/v8/src/hydrogen-canonicalize.cc
new file mode 100644
index 0000000000..40cbe4c065
--- /dev/null
+++ b/deps/v8/src/hydrogen-canonicalize.cc
@@ -0,0 +1,59 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-canonicalize.h"
+
+namespace v8 {
+namespace internal {
+
+void HCanonicalizePhase::Run() {
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ // Before removing no-op instructions, save their semantic value.
+ // We must be careful not to set the flag unnecessarily, because GVN
+ // cannot identify two instructions as equal when their flag values differ.
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->IsArithmeticBinaryOperation() &&
+ instr->representation().IsInteger32() &&
+ instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToInt32)) {
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+ }
+ }
+ }
+ // Perform the actual canonicalization pass.
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ HValue* value = instr->Canonicalize();
+ if (value != instr) instr->DeleteAndReplaceWith(value);
+ }
+ }
+}
+
+} } // namespace v8::internal
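
HCanonicalizePhase::Run() relies on each instruction's Canonicalize()
returning either itself or a simpler equivalent value that then replaces all
of its uses. A toy illustration of that contract, assuming a made-up Node
hierarchy (not Hydrogen's HValue) in which x + 0 canonicalizes to x:

// Sketch only: Node, Const and Add are illustrative, not V8 types.
#include <cstdio>

struct Node {
  virtual ~Node() {}
  virtual Node* Canonicalize() { return this; }  // Default: already canonical.
  virtual int Eval() const = 0;
};

struct Const : Node {
  int value;
  explicit Const(int v) : value(v) {}
  int Eval() const override { return value; }
};

struct Add : Node {
  Node* left;
  Node* right;
  Add(Node* l, Node* r) : left(l), right(r) {}
  // x + 0 canonicalizes to x, mirroring the instr->Canonicalize() contract.
  Node* Canonicalize() override {
    Const* c = dynamic_cast<Const*>(right);
    return (c != nullptr && c->value == 0) ? left : this;
  }
  int Eval() const override { return left->Eval() + right->Eval(); }
};

int main() {
  Const x(7), zero(0);
  Add add(&x, &zero);
  Node* instr = &add;
  Node* value = instr->Canonicalize();
  if (value != instr) instr = value;  // The pass's DeleteAndReplaceWith step.
  std::printf("%d\n", instr->Eval());  // Prints 7; the add was elided.
  return 0;
}
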
diff --git a/deps/v8/src/hydrogen-canonicalize.h b/deps/v8/src/hydrogen-canonicalize.h
new file mode 100644
index 0000000000..d2b289bc21
--- /dev/null
+++ b/deps/v8/src/hydrogen-canonicalize.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_CANONICALIZE_H_
+#define V8_HYDROGEN_CANONICALIZE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HCanonicalizePhase : public HPhase {
+ public:
+ explicit HCanonicalizePhase(HGraph* graph)
+ : HPhase("H_Canonicalize", graph) { }
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HCanonicalizePhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_CANONICALIZE_H_
diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/hydrogen-dce.cc
new file mode 100644
index 0000000000..4ad32d2e4c
--- /dev/null
+++ b/deps/v8/src/hydrogen-dce.cc
@@ -0,0 +1,125 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-dce.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+bool HDeadCodeEliminationPhase::MarkLive(HValue* ref, HValue* instr) {
+ if (instr->CheckFlag(HValue::kIsLive)) return false;
+ instr->SetFlag(HValue::kIsLive);
+
+ if (FLAG_trace_dead_code_elimination) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ if (ref != NULL) {
+ ref->PrintTo(&stream);
+ } else {
+ stream.Add("root ");
+ }
+ stream.Add(" -> ");
+ instr->PrintTo(&stream);
+ PrintF("[MarkLive %s]\n", *stream.ToCString());
+ }
+
+ return true;
+}
+
+
+void HDeadCodeEliminationPhase::MarkLiveInstructions() {
+ ZoneList<HValue*> worklist(graph()->blocks()->length(), zone());
+
+ // Mark initial root instructions for dead code elimination.
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->CannotBeEliminated() && MarkLive(NULL, instr)) {
+ worklist.Add(instr, zone());
+ }
+ }
+ for (int j = 0; j < block->phis()->length(); j++) {
+ HPhi* phi = block->phis()->at(j);
+ if (phi->CannotBeEliminated() && MarkLive(NULL, phi)) {
+ worklist.Add(phi, zone());
+ }
+ }
+ }
+
+ // Transitively mark all inputs of live instructions live.
+ while (!worklist.is_empty()) {
+ HValue* instr = worklist.RemoveLast();
+ for (int i = 0; i < instr->OperandCount(); ++i) {
+ if (MarkLive(instr, instr->OperandAt(i))) {
+ worklist.Add(instr->OperandAt(i), zone());
+ }
+ }
+ }
+}
+
+
+void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
+ ZoneList<HPhi*> worklist(graph()->blocks()->length(), zone());
+
+ // Remove any instruction not marked kIsLive.
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (!instr->CheckFlag(HValue::kIsLive)) {
+ // Instruction has not been marked live; assume it is dead and remove.
+ // TODO(titzer): we don't remove constants because some special ones
+ // might be used by later phases and are assumed to be in the graph.
+ if (!instr->IsConstant()) instr->DeleteAndReplaceWith(NULL);
+ } else {
+ // Clear the liveness flag to leave the graph clean for the next DCE.
+ instr->ClearFlag(HValue::kIsLive);
+ }
+ }
+ // Collect phis that are dead and remove them in the next pass.
+ for (int j = 0; j < block->phis()->length(); j++) {
+ HPhi* phi = block->phis()->at(j);
+ if (!phi->CheckFlag(HValue::kIsLive)) {
+ worklist.Add(phi, zone());
+ } else {
+ phi->ClearFlag(HValue::kIsLive);
+ }
+ }
+ }
+
+ // Process phis separately to avoid simultaneously mutating the phi list.
+ while (!worklist.is_empty()) {
+ HPhi* phi = worklist.RemoveLast();
+ HBasicBlock* block = phi->block();
+ phi->DeleteAndReplaceWith(NULL);
+ block->RecordDeletedPhi(phi->merged_index());
+ }
+}
+
+} } // namespace v8::internal
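
The mark phase above is a classic worklist algorithm: instructions that
cannot be eliminated seed the worklist, and liveness then propagates
backwards through their operands. A self-contained sketch of the same
propagation, with a minimal Instr struct standing in for HValue:

// Sketch only: Instr and has_side_effects stand in for HValue and
// CannotBeEliminated(); this is not V8's data model.
#include <cstdio>
#include <vector>

struct Instr {
  const char* name;
  bool has_side_effects;         // Stand-in for CannotBeEliminated().
  bool live;                     // Stand-in for the HValue::kIsLive flag.
  std::vector<Instr*> operands;
};

static void MarkLiveInstructions(const std::vector<Instr*>& graph) {
  std::vector<Instr*> worklist;
  for (Instr* instr : graph) {   // Seed with the roots.
    if (instr->has_side_effects && !instr->live) {
      instr->live = true;
      worklist.push_back(instr);
    }
  }
  while (!worklist.empty()) {    // Transitively mark all inputs live.
    Instr* instr = worklist.back();
    worklist.pop_back();
    for (Instr* op : instr->operands) {
      if (!op->live) {
        op->live = true;
        worklist.push_back(op);
      }
    }
  }
}

int main() {
  Instr a{"a", false, false, {}};
  Instr b{"b", false, false, {}};          // No live user: stays dead.
  Instr store{"store", true, false, {&a}};
  std::vector<Instr*> graph = {&a, &b, &store};
  MarkLiveInstructions(graph);
  for (Instr* instr : graph)
    std::printf("%s: %s\n", instr->name, instr->live ? "live" : "dead");
  return 0;
}
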
diff --git a/deps/v8/src/hydrogen-dce.h b/deps/v8/src/hydrogen-dce.h
new file mode 100644
index 0000000000..19749f279a
--- /dev/null
+++ b/deps/v8/src/hydrogen-dce.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_DCE_H_
+#define V8_HYDROGEN_DCE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HDeadCodeEliminationPhase : public HPhase {
+ public:
+ explicit HDeadCodeEliminationPhase(HGraph* graph)
+ : HPhase("H_Dead code elimination", graph) { }
+
+ void Run() {
+ MarkLiveInstructions();
+ RemoveDeadInstructions();
+ }
+
+ private:
+ bool MarkLive(HValue* ref, HValue* instr);
+ void MarkLiveInstructions();
+ void RemoveDeadInstructions();
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_DCE_H_
diff --git a/deps/v8/src/hydrogen-dehoist.cc b/deps/v8/src/hydrogen-dehoist.cc
new file mode 100644
index 0000000000..696d22c608
--- /dev/null
+++ b/deps/v8/src/hydrogen-dehoist.cc
@@ -0,0 +1,80 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-dehoist.h"
+
+namespace v8 {
+namespace internal {
+
+static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
+ HValue* index = array_operation->GetKey()->ActualValue();
+ if (!index->representation().IsSmiOrInteger32()) return;
+ if (!index->IsAdd() && !index->IsSub()) return;
+
+ HConstant* constant;
+ HValue* subexpression;
+ HBinaryOperation* binary_operation = HBinaryOperation::cast(index);
+ if (binary_operation->left()->IsConstant()) {
+ subexpression = binary_operation->right();
+ constant = HConstant::cast(binary_operation->left());
+ } else if (binary_operation->right()->IsConstant()) {
+ subexpression = binary_operation->left();
+ constant = HConstant::cast(binary_operation->right());
+ } else {
+ return;
+ }
+
+ if (!constant->HasInteger32Value()) return;
+ int32_t sign = binary_operation->IsSub() ? -1 : 1;
+ int32_t value = constant->Integer32Value() * sign;
+ // We limit offset values to 30 bits because we want to avoid the risk of
+ // overflows when the offset is added to the object header size.
+ if (value >= 1 << 30 || value < 0) return;
+ array_operation->SetKey(subexpression);
+ if (binary_operation->HasNoUses()) {
+ binary_operation->DeleteAndReplaceWith(NULL);
+ }
+ array_operation->SetIndexOffset(static_cast<uint32_t>(value));
+ array_operation->SetDehoisted(true);
+}
+
+
+void HDehoistIndexComputationsPhase::Run() {
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->IsLoadKeyed()) {
+ DehoistArrayIndex(HLoadKeyed::cast(instr));
+ } else if (instr->IsStoreKeyed()) {
+ DehoistArrayIndex(HStoreKeyed::cast(instr));
+ }
+ }
+ }
+}
+
+} } // namespace v8::internal
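
DehoistArrayIndex() above strips a `base + constant` (or `base - constant`)
addition off a keyed access and folds the constant into the access as a static
index offset, letting the addition itself become dead. A toy version of the
same decision logic, with Access and Index as invented stand-ins for the
hydrogen classes:

// Toy model of the dehoisting decision in DehoistArrayIndex above.
#include <cstdint>
#include <cstdio>

struct Index { bool is_add; int32_t constant; int32_t base_id; };
struct Access {
  int32_t key_id;
  uint32_t index_offset = 0;
  bool dehoisted = false;
};

static void Dehoist(Access* access, const Index& idx) {
  int32_t value = idx.is_add ? idx.constant : -idx.constant;
  // Same 30-bit guard as above: offset + header size must not overflow.
  if (value < 0 || value >= (1 << 30)) return;
  access->key_id = idx.base_id;  // the key becomes the subexpression
  access->index_offset = static_cast<uint32_t>(value);
  access->dehoisted = true;
}

int main() {
  Access load{/*key_id=*/7};
  Index idx{/*is_add=*/true, /*constant=*/4, /*base_id=*/3};  // i + 4
  Dehoist(&load, idx);
  std::printf("key=%d offset=%u dehoisted=%d\n",
              load.key_id, load.index_offset, load.dehoisted);
}
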
diff --git a/deps/v8/src/hydrogen-dehoist.h b/deps/v8/src/hydrogen-dehoist.h
new file mode 100644
index 0000000000..140dc6e0e2
--- /dev/null
+++ b/deps/v8/src/hydrogen-dehoist.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_DEHOIST_H_
+#define V8_HYDROGEN_DEHOIST_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HDehoistIndexComputationsPhase : public HPhase {
+ public:
+ explicit HDehoistIndexComputationsPhase(HGraph* graph)
+ : HPhase("H_Dehoist index computations", graph) { }
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HDehoistIndexComputationsPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_DEHOIST_H_
diff --git a/deps/v8/src/hydrogen-deoptimizing-mark.cc b/deps/v8/src/hydrogen-deoptimizing-mark.cc
new file mode 100644
index 0000000000..804d94753a
--- /dev/null
+++ b/deps/v8/src/hydrogen-deoptimizing-mark.cc
@@ -0,0 +1,126 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-deoptimizing-mark.h"
+
+namespace v8 {
+namespace internal {
+
+void HPropagateDeoptimizingMarkPhase::MarkAsDeoptimizing() {
+ HBasicBlock* block = graph()->entry_block();
+ ZoneList<HBasicBlock*> stack(graph()->blocks()->length(), zone());
+ while (block != NULL) {
+ const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks());
+ if (!dominated_blocks->is_empty()) {
+ if (block->IsDeoptimizing()) {
+ for (int i = 0; i < dominated_blocks->length(); ++i) {
+ dominated_blocks->at(i)->MarkAsDeoptimizing();
+ }
+ }
+ for (int i = 1; i < dominated_blocks->length(); ++i) {
+ stack.Add(dominated_blocks->at(i), zone());
+ }
+ block = dominated_blocks->at(0);
+ } else if (!stack.is_empty()) {
+ // Pop next block from stack.
+ block = stack.RemoveLast();
+ } else {
+ // All blocks processed.
+ block = NULL;
+ }
+ }
+}
+
+
+void HPropagateDeoptimizingMarkPhase::NullifyUnreachableInstructions() {
+ if (!FLAG_unreachable_code_elimination) return;
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ bool nullify = false;
+ const ZoneList<HBasicBlock*>* predecessors = block->predecessors();
+ int predecessors_length = predecessors->length();
+ bool all_predecessors_deoptimizing = (predecessors_length > 0);
+ for (int j = 0; j < predecessors_length; ++j) {
+ if (!predecessors->at(j)->IsDeoptimizing()) {
+ all_predecessors_deoptimizing = false;
+ break;
+ }
+ }
+ if (all_predecessors_deoptimizing) nullify = true;
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ // Leave the basic structure of the graph intact.
+ if (instr->IsBlockEntry()) continue;
+ if (instr->IsControlInstruction()) continue;
+ if (instr->IsSimulate()) continue;
+ if (instr->IsEnterInlined()) continue;
+ if (instr->IsLeaveInlined()) continue;
+ if (nullify) {
+ HInstruction* last_dummy = NULL;
+ for (int j = 0; j < instr->OperandCount(); ++j) {
+ HValue* operand = instr->OperandAt(j);
+ // Insert an HDummyUse for each operand, unless the operand
+ // is an HDummyUse itself. If that dummy use comes from the same
+ // block, remember it as a potential replacement for the instruction.
+ if (operand->IsDummyUse()) {
+ if (operand->block() == instr->block() &&
+ last_dummy == NULL) {
+ last_dummy = HInstruction::cast(operand);
+ }
+ continue;
+ }
+ if (operand->IsControlInstruction()) {
+ // A control instruction defines no value, so inserting a dummy
+ // use of it would fail. Some instructions take such operands only
+ // as fake inputs that express control flow dependencies.
+ continue;
+ }
+ HDummyUse* dummy = new(graph()->zone()) HDummyUse(operand);
+ dummy->InsertBefore(instr);
+ last_dummy = dummy;
+ }
+ if (last_dummy == NULL) last_dummy = graph()->GetConstant1();
+ instr->DeleteAndReplaceWith(last_dummy);
+ continue;
+ }
+ if (instr->IsSoftDeoptimize()) {
+ ASSERT(block->IsDeoptimizing());
+ nullify = true;
+ }
+ }
+ }
+}
+
+
+void HPropagateDeoptimizingMarkPhase::Run() {
+ // Skip this phase if there is nothing to be done anyway.
+ if (!graph()->has_soft_deoptimize()) return;
+ MarkAsDeoptimizing();
+ NullifyUnreachableInstructions();
+}
+
+} } // namespace v8::internal
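
MarkAsDeoptimizing() above is a pre-order walk of the dominator tree written
iteratively: it descends into the first dominated block and parks the
remaining siblings on an explicit stack, so deep trees cannot overflow the
native stack. The same traversal pattern in a standalone sketch (Block is a
toy stand-in):

// Explicit-stack pre-order walk of a dominator tree, mirroring the
// loop structure of MarkAsDeoptimizing above.
#include <cstdio>
#include <vector>

struct Block {
  int id;
  bool deoptimizing = false;
  std::vector<Block*> dominated;
};

static void Propagate(Block* entry) {
  std::vector<Block*> stack;
  Block* block = entry;
  while (block != nullptr) {
    if (block->deoptimizing)                   // mark dominated blocks
      for (Block* d : block->dominated) d->deoptimizing = true;
    if (!block->dominated.empty()) {
      for (size_t i = 1; i < block->dominated.size(); ++i)
        stack.push_back(block->dominated[i]);  // visit siblings later
      block = block->dominated[0];             // descend into first child
    } else if (!stack.empty()) {
      block = stack.back();                    // pop next block from stack
      stack.pop_back();
    } else {
      block = nullptr;                         // all blocks processed
    }
  }
}

int main() {
  Block b3{3}, b2{2}, b1{1};
  b1.deoptimizing = true;
  b1.dominated = {&b2, &b3};
  Propagate(&b1);
  std::printf("b2=%d b3=%d\n", b2.deoptimizing, b3.deoptimizing);  // 1 1
}
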
diff --git a/deps/v8/src/hydrogen-deoptimizing-mark.h b/deps/v8/src/hydrogen-deoptimizing-mark.h
new file mode 100644
index 0000000000..7d6e6e4bda
--- /dev/null
+++ b/deps/v8/src/hydrogen-deoptimizing-mark.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_DEOPTIMIZING_MARK_H_
+#define V8_HYDROGEN_DEOPTIMIZING_MARK_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Mark all blocks that are dominated by an unconditional soft deoptimize to
+// prevent code motion across those blocks.
+class HPropagateDeoptimizingMarkPhase : public HPhase {
+ public:
+ explicit HPropagateDeoptimizingMarkPhase(HGraph* graph)
+ : HPhase("H_Propagate deoptimizing mark", graph) { }
+
+ void Run();
+
+ private:
+ void MarkAsDeoptimizing();
+ void NullifyUnreachableInstructions();
+
+ DISALLOW_COPY_AND_ASSIGN(HPropagateDeoptimizingMarkPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_DEOPTIMIZING_MARK_H_
diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/hydrogen-escape-analysis.cc
index e852fb8d6d..961bb94e9c 100644
--- a/deps/v8/src/hydrogen-escape-analysis.cc
+++ b/deps/v8/src/hydrogen-escape-analysis.cc
@@ -55,7 +55,7 @@ void HEscapeAnalysisPhase::CollectCapturedValues() {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- if (instr->IsAllocate() || instr->IsAllocateObject()) {
+ if (instr->IsAllocate()) {
CollectIfNoEscapingUses(instr);
}
}
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index 7ea2f162bf..09bea5bb18 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -123,6 +123,7 @@ void TraceGVN(const char* msg, ...) {
va_end(arguments);
}
+
// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
// --trace-gvn is off.
#define TRACE_GVN_1(msg, a1) \
@@ -339,6 +340,7 @@ HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
return *this;
}
+
void HSideEffectMap::Kill(GVNFlagSet flags) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
@@ -366,7 +368,7 @@ HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
removed_side_effects_(false),
block_side_effects_(graph->blocks()->length(), zone()),
loop_side_effects_(graph->blocks()->length(), zone()),
- visited_on_paths_(zone(), graph->blocks()->length()) {
+ visited_on_paths_(graph->blocks()->length(), zone()) {
ASSERT(!AllowHandleAllocation::IsAllowed());
block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
zone());
@@ -619,7 +621,8 @@ HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
block->block_id() < dominated->block_id() &&
- visited_on_paths_.Add(block->block_id())) {
+ !visited_on_paths_.Contains(block->block_id())) {
+ visited_on_paths_.Add(block->block_id());
side_effects.Add(block_side_effects_[block->block_id()]);
if (block->IsLoopHeader()) {
side_effects.Add(loop_side_effects_[block->block_id()]);
@@ -709,22 +712,18 @@ class GvnBasicBlockState: public ZoneObject {
zone);
return this;
} else if (dominated_index_ < length_) {
- return push(zone,
- block_->dominated_blocks()->at(dominated_index_),
- dominators());
+ return push(zone, block_->dominated_blocks()->at(dominated_index_));
} else {
return NULL;
}
}
- GvnBasicBlockState* push(Zone* zone,
- HBasicBlock* block,
- HSideEffectMap* dominators) {
+ GvnBasicBlockState* push(Zone* zone, HBasicBlock* block) {
if (next_ == NULL) {
next_ =
- new(zone) GvnBasicBlockState(this, block, map(), dominators, zone);
+ new(zone) GvnBasicBlockState(this, block, map(), dominators(), zone);
} else {
- next_->Initialize(block, map(), dominators, true, zone);
+ next_->Initialize(block, map(), dominators(), true, zone);
}
return next_;
}
@@ -748,6 +747,7 @@ class GvnBasicBlockState: public ZoneObject {
int length_;
};
+
// This is a recursive traversal of the dominator tree but it has been turned
// into a loop to avoid stack overflows.
// The logical "stack frames" of the recursion are kept in a list of
@@ -770,12 +770,32 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
// If this is a loop header kill everything killed by the loop.
if (block->IsLoopHeader()) {
map->Kill(loop_side_effects_[block->block_id()]);
+ dominators->Kill(loop_side_effects_[block->block_id()]);
}
// Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+ for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+ HValue* other = dominators->at(i);
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(depends_on_flag) &&
+ (other != NULL)) {
+ TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
+ i,
+ instr->id(),
+ instr->Mnemonic(),
+ other->id(),
+ other->Mnemonic());
+ instr->HandleSideEffectDominator(changes_flag, other);
+ }
+ }
+ }
+ // Skip instructions unlinked by the dominator handling above.
+ if (!instr->IsLinked()) continue;
+
GVNFlagSet flags = instr->ChangesFlags();
if (!flags.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
@@ -801,25 +821,6 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
map->Add(instr, zone());
}
}
- if (instr->IsLinked() &&
- instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
- TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
- i,
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- instr->SetSideEffectDominator(changes_flag, other);
- }
- }
- }
- instr = next;
}
HBasicBlock* dominator_block;
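
The reworked loop in AnalyzeGraph() above now consults the per-side-effect
dominator table before an instruction's own side effects are recorded, and
skips instructions that the dominator handling unlinked. A sketch of the
underlying bookkeeping for a single tracked side effect, with an invented
Instr type:

// Sketch of "side effect dominator" tracking for one tracked effect,
// in the spirit of AnalyzeGraph above.
#include <cstdio>
#include <vector>

struct Instr {
  int id;
  bool changes_effect;     // instruction produces the tracked side effect
  bool depends_on_effect;  // instruction wants its last dominating producer
};

int main() {
  std::vector<Instr> block = {
    {1, true,  false},  // e.g. an allocation
    {2, false, true},   // depends on the effect: dominator is #1
    {3, true,  false},  // new dominating producer
    {4, false, true},   // dominator is now #3
  };
  Instr* dominator = nullptr;
  for (Instr& instr : block) {
    // Consult the table first, then record this instruction's own
    // effects, matching the order the rewritten loop establishes.
    if (instr.depends_on_effect && dominator != nullptr)
      std::printf("#%d is dominated by #%d\n", instr.id, dominator->id);
    if (instr.changes_effect) dominator = &instr;
  }
}
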
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index 66224e4338..64a0fec76b 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -36,46 +36,6 @@
namespace v8 {
namespace internal {
-// Simple sparse set with O(1) add, contains, and clear.
-class SparseSet {
- public:
- SparseSet(Zone* zone, int capacity)
- : capacity_(capacity),
- length_(0),
- dense_(zone->NewArray<int>(capacity)),
- sparse_(zone->NewArray<int>(capacity)) {
-#ifndef NVALGRIND
- // Initialize the sparse array to make valgrind happy.
- memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
-#endif
- }
-
- bool Contains(int n) const {
- ASSERT(0 <= n && n < capacity_);
- int d = sparse_[n];
- return 0 <= d && d < length_ && dense_[d] == n;
- }
-
- bool Add(int n) {
- if (Contains(n)) return false;
- dense_[length_] = n;
- sparse_[n] = length_;
- ++length_;
- return true;
- }
-
- void Clear() { length_ = 0; }
-
- private:
- int capacity_;
- int length_;
- int* dense_;
- int* sparse_;
-
- DISALLOW_COPY_AND_ASSIGN(SparseSet);
-};
-
-
// Perform common subexpression elimination and loop-invariant code motion.
class HGlobalValueNumberingPhase : public HPhase {
public:
@@ -118,7 +78,7 @@ class HGlobalValueNumberingPhase : public HPhase {
// Used when collecting side effects on paths from dominator to
// dominated.
- SparseSet visited_on_paths_;
+ BitVector visited_on_paths_;
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
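
The deleted SparseSet is the classic dense/sparse two-array set
(Briggs/Torczon), giving O(1) add, contains, and clear without initializing
memory; BitVector's Add() evidently does not report first insertion, which is
why the call site in hydrogen-gvn.cc above now tests Contains() explicitly
before adding. A standalone version of the removed structure, with std::vector
in place of zone allocation:

// Standalone Briggs/Torczon sparse set, matching the removed class.
#include <cassert>
#include <vector>

class SparseSet {
 public:
  explicit SparseSet(int capacity)
      : length_(0), dense_(capacity), sparse_(capacity) {}

  bool Contains(int n) const {
    int d = sparse_[n];
    return 0 <= d && d < length_ && dense_[d] == n;
  }

  bool Add(int n) {  // returns true only on first insertion
    if (Contains(n)) return false;
    dense_[length_] = n;
    sparse_[n] = length_;
    ++length_;
    return true;
  }

  void Clear() { length_ = 0; }  // O(1): stale entries become invisible

 private:
  int length_;
  std::vector<int> dense_, sparse_;
};

int main() {
  SparseSet set(16);
  assert(set.Add(3) && !set.Add(3) && set.Contains(3));
  set.Clear();
  assert(!set.Contains(3));
}

The BitVector replacement presumably trades the O(1) Clear() for a much
smaller footprint (one bit per block instead of two ints), a reasonable fit
for a visited set that only grows during the phase.
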
diff --git a/deps/v8/src/hydrogen-infer-types.cc b/deps/v8/src/hydrogen-infer-types.cc
new file mode 100644
index 0000000000..01c6084736
--- /dev/null
+++ b/deps/v8/src/hydrogen-infer-types.cc
@@ -0,0 +1,77 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-infer-types.h"
+
+namespace v8 {
+namespace internal {
+
+void HInferTypesPhase::InferTypes(int from_inclusive, int to_inclusive) {
+ for (int i = from_inclusive; i <= to_inclusive; ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ phis->at(j)->UpdateInferredType();
+ }
+
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ it.Current()->UpdateInferredType();
+ }
+
+ if (block->IsLoopHeader()) {
+ HBasicBlock* last_back_edge =
+ block->loop_information()->GetLastBackEdge();
+ InferTypes(i + 1, last_back_edge->block_id());
+ // Skip all blocks already processed by the recursive call.
+ i = last_back_edge->block_id();
+ // Update the loop header's phis now that the whole loop body is
+ // guaranteed to have been processed.
+ for (int j = 0; j < block->phis()->length(); ++j) {
+ HPhi* phi = block->phis()->at(j);
+ worklist_.Add(phi, zone());
+ in_worklist_.Add(phi->id());
+ }
+ while (!worklist_.is_empty()) {
+ HValue* current = worklist_.RemoveLast();
+ in_worklist_.Remove(current->id());
+ if (current->UpdateInferredType()) {
+ for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (!in_worklist_.Contains(use->id())) {
+ in_worklist_.Add(use->id());
+ worklist_.Add(use, zone());
+ }
+ }
+ }
+ }
+ ASSERT(in_worklist_.IsEmpty());
+ }
+ }
+}
+
+} } // namespace v8::internal
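
The loop-header handling above is a standard worklist fixpoint: seed the
loop's phis, pop a value, recompute its type, and re-enqueue its uses only
when the type changed, with in_worklist_ deduplicating entries. A minimal
driver in that style, assuming integer "types" joined by max:

// Generic worklist fixpoint in the style of HInferTypesPhase; nodes
// hold an int "type" that only grows (join = max).
#include <algorithm>
#include <cstdio>
#include <vector>

struct Node {
  int id;
  int type = 0;
  std::vector<Node*> uses;    // nodes whose type depends on this one
  std::vector<Node*> inputs;
};

static bool UpdateType(Node* n) {
  int t = n->type;
  for (Node* in : n->inputs) t = std::max(t, in->type);
  if (t == n->type) return false;
  n->type = t;
  return true;  // changed: users must be revisited
}

int main() {
  Node a{0}, b{1}, c{2};
  a.type = 3;
  b.inputs = {&a}; a.uses = {&b};
  c.inputs = {&b}; b.uses = {&c};

  std::vector<Node*> worklist = {&b};
  std::vector<bool> in_worklist(3, false);
  in_worklist[b.id] = true;
  while (!worklist.empty()) {
    Node* cur = worklist.back();
    worklist.pop_back();
    in_worklist[cur->id] = false;
    if (!UpdateType(cur)) continue;
    for (Node* use : cur->uses)
      if (!in_worklist[use->id]) {  // de-dup, as in_worklist_ does above
        in_worklist[use->id] = true;
        worklist.push_back(use);
      }
  }
  std::printf("c.type = %d\n", c.type);  // 3: propagated through b
}
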
diff --git a/deps/v8/src/hydrogen-infer-types.h b/deps/v8/src/hydrogen-infer-types.h
new file mode 100644
index 0000000000..cfcbf3549b
--- /dev/null
+++ b/deps/v8/src/hydrogen-infer-types.h
@@ -0,0 +1,59 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_INFER_TYPES_H_
+#define V8_HYDROGEN_INFER_TYPES_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HInferTypesPhase : public HPhase {
+ public:
+ explicit HInferTypesPhase(HGraph* graph)
+ : HPhase("H_Inferring types", graph), worklist_(8, zone()),
+ in_worklist_(graph->GetMaximumValueID(), zone()) { }
+
+ void Run() {
+ InferTypes(0, graph()->blocks()->length() - 1);
+ }
+
+ private:
+ void InferTypes(int from_inclusive, int to_inclusive);
+
+ ZoneList<HValue*> worklist_;
+ BitVector in_worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInferTypesPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_INFER_TYPES_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 932fd47af1..880de29aca 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -1645,7 +1645,7 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
*tag = kStringTag;
return;
case IS_INTERNALIZED_STRING:
- *mask = kIsInternalizedMask;
+ *mask = kIsNotInternalizedMask;
*tag = kInternalizedTag;
return;
default:
@@ -1654,8 +1654,8 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
}
-void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
+void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
ASSERT(side_effect == kChangesMaps);
// TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
// type information is rich enough we should generalize this to any HType
@@ -1690,6 +1690,14 @@ void HCheckFunction::PrintDataTo(StringStream* stream) {
}
+HValue* HCheckFunction::Canonicalize() {
+ return (value()->IsConstant() &&
+ HConstant::cast(value())->UniqueValueIdsMatch(target_unique_id_))
+ ? NULL
+ : this;
+}
+
+
const char* HCheckInstanceType::GetCheckName() {
switch (check_) {
case IS_SPEC_OBJECT: return "object";
@@ -1701,6 +1709,7 @@ const char* HCheckInstanceType::GetCheckName() {
return "";
}
+
void HCheckInstanceType::PrintDataTo(StringStream* stream) {
stream->Add("%s ", GetCheckName());
HUnaryOperation::PrintDataTo(stream);
@@ -1736,9 +1745,10 @@ Range* HValue::InferRange(Zone* zone) {
result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue);
result->set_can_be_minus_zero(false);
} else {
- // Untagged integer32 cannot be -0, all other representations can.
result = new(zone) Range();
- result->set_can_be_minus_zero(!representation().IsInteger32());
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32));
+ // TODO(jkummerow): The range cannot be minus zero when the upper type
+ // bound is Integer32.
}
return result;
}
@@ -1756,7 +1766,8 @@ Range* HChange::InferRange(Zone* zone) {
Range* result = (input_range != NULL)
? input_range->Copy(zone)
: HValue::InferRange(zone);
- if (to().IsInteger32()) result->set_can_be_minus_zero(false);
+ result->set_can_be_minus_zero(!to().IsSmiOrInteger32() ||
+ !CheckFlag(kAllUsesTruncatingToInt32));
return result;
}
@@ -1801,9 +1812,8 @@ Range* HAdd::InferRange(Zone* zone) {
CheckFlag(kAllUsesTruncatingToInt32)) {
ClearFlag(kCanOverflow);
}
- if (!CheckFlag(kAllUsesTruncatingToInt32)) {
- res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeMinusZero());
- }
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ a->CanBeMinusZero() && b->CanBeMinusZero());
return res;
} else {
return HValue::InferRange(zone);
@@ -1820,9 +1830,8 @@ Range* HSub::InferRange(Zone* zone) {
CheckFlag(kAllUsesTruncatingToInt32)) {
ClearFlag(kCanOverflow);
}
- if (!CheckFlag(kAllUsesTruncatingToInt32)) {
- res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
- }
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ a->CanBeMinusZero() && b->CanBeZero());
return res;
} else {
return HValue::InferRange(zone);
@@ -1841,11 +1850,9 @@ Range* HMul::InferRange(Zone* zone) {
// precise and therefore not the same as converting to Double and back.
ClearFlag(kCanOverflow);
}
- if (!CheckFlag(kAllUsesTruncatingToInt32)) {
- bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
- (a->CanBeNegative() && b->CanBeZero());
- res->set_can_be_minus_zero(m0);
- }
+ res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ ((a->CanBeZero() && b->CanBeNegative()) ||
+ (a->CanBeNegative() && b->CanBeZero())));
return res;
} else {
return HValue::InferRange(zone);
@@ -1858,16 +1865,9 @@ Range* HDiv::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* result = new(zone) Range();
- if (!CheckFlag(kAllUsesTruncatingToInt32)) {
- if (a->CanBeMinusZero()) {
- result->set_can_be_minus_zero(true);
- }
-
- if (a->CanBeZero() && b->CanBeNegative()) {
- result->set_can_be_minus_zero(true);
- }
- }
-
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ (a->CanBeMinusZero() ||
+ (a->CanBeZero() && b->CanBeNegative())));
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
ClearFlag(HValue::kCanOverflow);
}
@@ -1897,9 +1897,8 @@ Range* HMod::InferRange(Zone* zone) {
Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0,
a->CanBePositive() ? positive_bound : 0);
- if (left_can_be_negative && !CheckFlag(kAllUsesTruncatingToInt32)) {
- result->set_can_be_minus_zero(true);
- }
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ left_can_be_negative);
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
ClearFlag(HValue::kCanOverflow);
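
The range-inference rewrites above turn can_be_minus_zero into an
unconditional assignment: it is always false when every use truncates to
int32 (truncation erases the sign of zero), and otherwise follows the operand
ranges, e.g. a*b can be -0 only if one factor can be 0 while the other can be
negative. A quick check of that multiplication rule in plain doubles:

// The -0 rule for multiplication that HMul::InferRange encodes.
#include <cmath>
#include <cstdio>

static bool IsMinusZero(double x) { return x == 0.0 && std::signbit(x); }

int main() {
  std::printf("0 * -5 -> -0? %d\n", IsMinusZero(0.0 * -5.0));  // 1
  std::printf("-5 * 0 -> -0? %d\n", IsMinusZero(-5.0 * 0.0));  // 1
  std::printf("0 * 5  -> -0? %d\n", IsMinusZero(0.0 * 5.0));   // 0
  // After truncation to int32 the distinction disappears, which is
  // why kAllUsesTruncatingToInt32 clears the flag above.
  std::printf("(int)(0.0 * -5.0) == 0? %d\n",
              static_cast<int>(0.0 * -5.0) == 0);              // 1
}
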
@@ -2174,6 +2173,7 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
has_double_value_(false),
is_internalized_string_(false),
is_not_in_new_space_(true),
+ is_cell_(false),
boolean_value_(handle->BooleanValue()) {
if (handle_->IsHeapObject()) {
Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
@@ -2190,6 +2190,9 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
type_from_value_ = HType::TypeFromValue(handle_);
is_internalized_string_ = handle_->IsInternalizedString();
}
+
+ is_cell_ = !handle_.is_null() &&
+ (handle_->IsCell() || handle_->IsPropertyCell());
Initialize(r);
}
@@ -2200,6 +2203,7 @@ HConstant::HConstant(Handle<Object> handle,
HType type,
bool is_internalized_string,
bool is_not_in_new_space,
+ bool is_cell,
bool boolean_value)
: handle_(handle),
unique_id_(unique_id),
@@ -2208,6 +2212,7 @@ HConstant::HConstant(Handle<Object> handle,
has_double_value_(false),
is_internalized_string_(is_internalized_string),
is_not_in_new_space_(is_not_in_new_space),
+ is_cell_(is_cell),
boolean_value_(boolean_value),
type_from_value_(type) {
ASSERT(!handle.is_null());
@@ -2227,6 +2232,7 @@ HConstant::HConstant(int32_t integer_value,
has_double_value_(true),
is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
+ is_cell_(false),
boolean_value_(integer_value != 0),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
@@ -2245,6 +2251,7 @@ HConstant::HConstant(double double_value,
has_double_value_(true),
is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
+ is_cell_(false),
boolean_value_(double_value != 0 && !std::isnan(double_value)),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
@@ -2267,9 +2274,17 @@ void HConstant::Initialize(Representation r) {
}
set_representation(r);
SetFlag(kUseGVN);
- if (representation().IsInteger32()) {
- ClearGVNFlag(kDependsOnOsrEntries);
+}
+
+
+bool HConstant::EmitAtUses() {
+ ASSERT(IsLinked());
+ if (block()->graph()->has_osr()) {
+ return block()->graph()->IsStandardConstant(this);
}
+ if (IsCell()) return false;
+ if (representation().IsDouble()) return false;
+ return true;
}
@@ -2290,6 +2305,7 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
type_from_value_,
is_internalized_string_,
is_not_in_new_space_,
+ is_cell_,
boolean_value_);
}
@@ -2432,7 +2448,9 @@ Range* HBitwise::InferRange(Zone* zone) {
? static_cast<int32_t>(-limit) : 0;
return new(zone) Range(min, static_cast<int32_t>(limit - 1));
}
- return HValue::InferRange(zone);
+ Range* result = HValue::InferRange(zone);
+ result->set_can_be_minus_zero(false);
+ return result;
}
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
int32_t left_mask = (left()->range() != NULL)
@@ -2444,9 +2462,11 @@ Range* HBitwise::InferRange(Zone* zone) {
int32_t result_mask = (op() == Token::BIT_AND)
? left_mask & right_mask
: left_mask | right_mask;
- return (result_mask >= 0)
- ? new(zone) Range(0, result_mask)
- : HValue::InferRange(zone);
+ if (result_mask >= 0) return new(zone) Range(0, result_mask);
+
+ Range* result = HValue::InferRange(zone);
+ result->set_can_be_minus_zero(false);
+ return result;
}
@@ -2458,7 +2478,6 @@ Range* HSar::InferRange(Zone* zone) {
? left()->range()->Copy(zone)
: new(zone) Range();
result->Sar(c->Integer32Value());
- result->set_can_be_minus_zero(false);
return result;
}
}
@@ -2483,7 +2502,6 @@ Range* HShr::InferRange(Zone* zone) {
? left()->range()->Copy(zone)
: new(zone) Range();
result->Sar(c->Integer32Value());
- result->set_can_be_minus_zero(false);
return result;
}
}
@@ -2500,7 +2518,6 @@ Range* HShl::InferRange(Zone* zone) {
? left()->range()->Copy(zone)
: new(zone) Range();
result->Shl(c->Integer32Value());
- result->set_can_be_minus_zero(false);
return result;
}
}
@@ -2540,7 +2557,7 @@ void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
}
-void HCompareIDAndBranch::AddInformativeDefinitions() {
+void HCompareNumericAndBranch::AddInformativeDefinitions() {
NumericRelation r = NumericRelation::FromToken(token());
if (r.IsNone()) return;
@@ -2550,7 +2567,7 @@ void HCompareIDAndBranch::AddInformativeDefinitions() {
}
-void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
+void HCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
left()->PrintNameTo(stream);
@@ -2573,7 +2590,7 @@ void HGoto::PrintDataTo(StringStream* stream) {
}
-void HCompareIDAndBranch::InferRepresentation(
+void HCompareNumericAndBranch::InferRepresentation(
HInferRepresentationPhase* h_infer) {
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
@@ -2595,9 +2612,9 @@ void HCompareIDAndBranch::InferRepresentation(
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
// calling ToPrimitive() on their arguments. The standard Crankshaft
- // tagged-to-double conversion to ensure the HCompareIDAndBranch's inputs
- // are doubles caused 'undefined' to be converted to NaN. That's compatible
- // out-of-the box with ordered relational comparisons (<, >, <=,
+ // tagged-to-double conversion to ensure the HCompareNumericAndBranch's
+ // inputs are doubles caused 'undefined' to be converted to NaN. That's
+ // compatible out of the box with ordered relational comparisons (<, >, <=,
// >=). However, for equality comparisons (and for 'in' and 'instanceof'),
// it is not consistent with the spec. For example, it would cause undefined
// == undefined (should be true) to be evaluated as NaN == NaN
@@ -3042,6 +3059,12 @@ void HStoreGlobalGeneric::PrintDataTo(StringStream* stream) {
}
+void HLinkObjectInList::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+ stream->Add(" offset %d", store_field_.offset());
+}
+
+
void HLoadContextSlot::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add("[%d]", slot_index());
@@ -3078,6 +3101,11 @@ HType HCheckHeapObject::CalculateInferredType() {
}
+HType HCheckSmi::CalculateInferredType() {
+ return HType::Smi();
+}
+
+
HType HPhi::CalculateInferredType() {
HType result = HType::Uninitialized();
for (int i = 0; i < OperandCount(); ++i) {
@@ -3108,11 +3136,6 @@ HType HInstanceOf::CalculateInferredType() {
}
-HType HDeleteProperty::CalculateInferredType() {
- return HType::Boolean();
-}
-
-
HType HInstanceOfKnownGlobal::CalculateInferredType() {
return HType::Boolean();
}
@@ -3164,13 +3187,98 @@ HType HStringCharFromCode::CalculateInferredType() {
}
-HType HAllocateObject::CalculateInferredType() {
- return HType::JSObject();
+HType HAllocate::CalculateInferredType() {
+ return type_;
}
-HType HAllocate::CalculateInferredType() {
- return type_;
+void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
+ ASSERT(side_effect == kChangesNewSpacePromotion);
+ if (!FLAG_use_allocation_folding) return;
+
+ // Try to fold allocations together with their dominating allocations.
+ if (!dominator->IsAllocate()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s)\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return;
+ }
+
+ HAllocate* dominator_allocate_instr = HAllocate::cast(dominator);
+ HValue* dominator_size = dominator_allocate_instr->size();
+ HValue* current_size = size();
+ // We can only fold allocations that are guaranteed to be in new space.
+ // TODO(hpayer): Support double aligned allocations.
+ // TODO(hpayer): Add support for non-constant allocation in dominator.
+ if (!GuaranteedInNewSpace() || MustAllocateDoubleAligned() ||
+ !current_size->IsInteger32Constant() ||
+ !dominator_allocate_instr->GuaranteedInNewSpace() ||
+ dominator_allocate_instr->MustAllocateDoubleAligned() ||
+ !dominator_size->IsInteger32Constant()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s)\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return;
+ }
+
+ // First update the size of the dominator allocate instruction.
+ int32_t dominator_size_constant =
+ HConstant::cast(dominator_size)->GetInteger32Constant();
+ int32_t current_size_constant =
+ HConstant::cast(current_size)->GetInteger32Constant();
+ HBasicBlock* block = dominator->block();
+ Zone* zone = block->zone();
+ HInstruction* new_dominator_size = new(zone) HConstant(
+ dominator_size_constant + current_size_constant);
+ new_dominator_size->InsertBefore(dominator_allocate_instr);
+ dominator_allocate_instr->UpdateSize(new_dominator_size);
+
+#ifdef VERIFY_HEAP
+ HInstruction* free_space_instr =
+ new(zone) HInnerAllocatedObject(dominator_allocate_instr,
+ dominator_size_constant,
+ type());
+ free_space_instr->InsertAfter(dominator_allocate_instr);
+ HConstant* filler_map = new(zone) HConstant(
+ isolate()->factory()->free_space_map(),
+ UniqueValueId(isolate()->heap()->free_space_map()),
+ Representation::Tagged(),
+ HType::Tagged(),
+ false,
+ true,
+ false,
+ false);
+ filler_map->InsertAfter(free_space_instr);
+
+ HInstruction* store_map = new(zone) HStoreNamedField(
+ free_space_instr, HObjectAccess::ForMap(), filler_map);
+ store_map->SetFlag(HValue::kHasNoObservableSideEffects);
+ store_map->InsertAfter(filler_map);
+
+ HInstruction* free_space_size = new(zone) HConstant(current_size_constant);
+ free_space_size->InsertAfter(store_map);
+ HObjectAccess access =
+ HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
+ HInstruction* store_size = new(zone) HStoreNamedField(
+ free_space_instr, access, free_space_size);
+ store_size->SetFlag(HValue::kHasNoObservableSideEffects);
+ store_size->InsertAfter(free_space_size);
+#endif
+
+ // After that replace the dominated allocate instruction.
+ HInstruction* dominated_allocate_instr =
+ new(zone) HInnerAllocatedObject(dominator_allocate_instr,
+ dominator_size_constant,
+ type());
+ dominated_allocate_instr->InsertBefore(this);
+ DeleteAndReplaceWith(dominated_allocate_instr);
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) folded into #%d (%s)\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
}
@@ -3343,8 +3451,11 @@ DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
-HInstruction* HStringAdd::New(
- Zone* zone, HValue* context, HValue* left, HValue* right) {
+HInstruction* HStringAdd::New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ StringAddFlags flags) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_right = HConstant::cast(right);
HConstant* c_left = HConstant::cast(left);
@@ -3354,7 +3465,7 @@ HInstruction* HStringAdd::New(
return new(zone) HConstant(concat, Representation::Tagged());
}
}
- return new(zone) HStringAdd(context, left, right);
+ return new(zone) HStringAdd(context, left, right, flags);
}
@@ -3629,13 +3740,6 @@ HInstruction* HShr::New(
#undef H_CONSTANT_DOUBLE
-void HIn::PrintDataTo(StringStream* stream) {
- key()->PrintNameTo(stream);
- stream->Add(" ");
- object()->PrintNameTo(stream);
-}
-
-
void HBitwise::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(op_));
stream->Add(" ");
@@ -3824,6 +3928,13 @@ HObjectAccess HObjectAccess::ForField(Handle<Map> map,
}
+HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
+ return HObjectAccess(
+ kInobject, Cell::kValueOffset,
+ Handle<String>(isolate->heap()->cell_value_string()));
+}
+
+
void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
// set the appropriate GVN flags for a given load or store instruction
if (is_store) {
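
HStringAdd::New() earlier in this file's diff shows the usual constant-folding
shape: when both operands are known constants (and FLAG_fold_constants is on),
compute the concatenation at graph-build time and emit a constant instead of
the runtime operation. The same shape in a standalone sketch, with std::string
standing in for V8 handles:

// Shape of the constant folding done in HStringAdd::New above.
#include <cstdio>
#include <optional>
#include <string>

static std::optional<std::string> TryFoldAdd(
    const std::optional<std::string>& left,
    const std::optional<std::string>& right) {
  if (left && right) return *left + *right;  // both constant: fold now
  return std::nullopt;                       // emit the runtime op instead
}

int main() {
  auto folded = TryFoldAdd(std::string("foo"), std::string("bar"));
  std::printf("%s\n", folded ? folded->c_str() : "<runtime HStringAdd>");
}
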
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 26bda87caa..5fba5f2c63 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -66,7 +66,6 @@ class LChunkBuilder;
V(AccessArgumentsAt) \
V(Add) \
V(Allocate) \
- V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -92,10 +91,11 @@ class LChunkBuilder;
V(CheckHeapObject) \
V(CheckInstanceType) \
V(CheckMaps) \
+ V(CheckSmi) \
V(CheckPrototypeMaps) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
- V(CompareIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CompareGeneric) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
@@ -104,7 +104,6 @@ class LChunkBuilder;
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(Div) \
V(DummyUse) \
@@ -120,7 +119,6 @@ class LChunkBuilder;
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InductionVariableAnnotation) \
- V(In) \
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
@@ -128,10 +126,12 @@ class LChunkBuilder;
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
+ V(IsNumberAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(LeaveInlined) \
+ V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
@@ -790,7 +790,7 @@ class HValue: public ZoneObject {
// occurrences of the instruction are indeed the same.
kUseGVN,
// Track instructions that are dominating side effects. If an instruction
- // sets this flag, it must implement SetSideEffectDominator() and should
+ // sets this flag, it must implement HandleSideEffectDominator() and should
// indicate which side effects to track by setting GVN flags.
kTrackSideEffectDominators,
kCanOverflow,
@@ -1109,7 +1109,8 @@ class HValue: public ZoneObject {
// This function must be overridden for instructions which have the
// kTrackSideEffectDominators flag set, to track instructions that are
// dominating side effects.
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
UNREACHABLE();
}
@@ -1604,21 +1605,11 @@ class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
class HBranch: public HUnaryControlInstruction {
public:
HBranch(HValue* value,
- HBasicBlock* true_target,
- HBasicBlock* false_target,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::Types())
+ ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
expected_input_types_(expected_input_types) {
- ASSERT(true_target != NULL && false_target != NULL);
- SetFlag(kAllowUndefinedAsNaN);
- }
- explicit HBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) {
- SetFlag(kAllowUndefinedAsNaN);
- }
- HBranch(HValue* value, ToBooleanStub::Types expected_input_types)
- : HUnaryControlInstruction(value, NULL, NULL),
- expected_input_types_(expected_input_types) {
SetFlag(kAllowUndefinedAsNaN);
}
@@ -1642,12 +1633,10 @@ class HCompareMap: public HUnaryControlInstruction {
public:
HCompareMap(HValue* value,
Handle<Map> map,
- HBasicBlock* true_target,
- HBasicBlock* false_target)
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
- map_(map) {
- ASSERT(true_target != NULL);
- ASSERT(false_target != NULL);
+ map_(map) {
ASSERT(!map.is_null());
}
@@ -2786,7 +2775,8 @@ class HCheckMaps: public HTemplateInstruction<2> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator);
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator);
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
@@ -2848,6 +2838,8 @@ class HCheckFunction: public HUnaryOperation {
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
+ virtual HValue* Canonicalize();
+
#ifdef DEBUG
virtual void Verify();
#endif
@@ -2934,6 +2926,49 @@ class HCheckInstanceType: public HUnaryOperation {
};
+class HCheckSmi: public HUnaryOperation {
+ public:
+ explicit HCheckSmi(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Smi());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ virtual HType CalculateInferredType();
+
+ virtual HValue* Canonicalize() {
+ HType value_type = value()->type();
+ if (value_type.IsSmi()) {
+ return NULL;
+ }
+ return this;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HIsNumberAndBranch: public HUnaryControlInstruction {
+ public:
+ explicit HIsNumberAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) {
+ SetFlag(kFlexibleRepresentation);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch)
+};
+
+
class HCheckHeapObject: public HUnaryOperation {
public:
explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
@@ -3246,6 +3281,7 @@ class HConstant: public HTemplateInstruction<0> {
HType type,
bool is_internalized_string,
bool is_not_in_new_space,
+ bool is_cell,
bool boolean_value);
Handle<Object> handle() {
@@ -3294,6 +3330,10 @@ class HConstant: public HTemplateInstruction<0> {
unique_id_ == UniqueValueId(heap->empty_string());
}
+ bool IsCell() const {
+ return is_cell_;
+ }
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -3305,7 +3345,7 @@ class HConstant: public HTemplateInstruction<0> {
return Representation::Tagged();
}
- virtual bool EmitAtUses() { return !representation().IsDouble(); }
+ virtual bool EmitAtUses();
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() { return handle()->IsSmi(); }
@@ -3373,6 +3413,10 @@ class HConstant: public HTemplateInstruction<0> {
}
}
+ bool UniqueValueIdsMatch(UniqueValueId other) {
+ return !has_double_value_ && unique_id_ == other;
+ }
+
#ifdef DEBUG
virtual void Verify() { }
#endif
@@ -3420,6 +3464,7 @@ class HConstant: public HTemplateInstruction<0> {
bool has_double_value_ : 1;
bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
bool is_not_in_new_space_ : 1;
+ bool is_cell_ : 1;
bool boolean_value_ : 1;
int32_t int32_value_;
double double_value_;
@@ -3909,9 +3954,9 @@ class HCompareGeneric: public HBinaryOperation {
};
-class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
+class HCompareNumericAndBranch: public HTemplateControlInstruction<2, 2> {
public:
- HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
+ HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token)
: token_(token) {
SetFlag(kFlexibleRepresentation);
ASSERT(Token::IsCompareOp(token));
@@ -3941,7 +3986,7 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
virtual void AddInformativeDefinitions();
- DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
private:
Representation observed_input_representation_[2];
@@ -4718,6 +4763,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
public:
explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
SetGVNFlag(kChangesOsrEntries);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
BailoutId ast_id() const { return ast_id_; }
@@ -4912,48 +4958,6 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
};
-class HAllocateObject: public HTemplateInstruction<1> {
- public:
- HAllocateObject(HValue* context, Handle<JSFunction> constructor)
- : constructor_(constructor) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- constructor_initial_map_ = constructor->has_initial_map()
- ? Handle<Map>(constructor->initial_map())
- : Handle<Map>::null();
- // If slack tracking finished, the instance size and property counts
- // remain unchanged so that we can allocate memory for the object.
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- }
-
- // Maximum instance size for which allocations will be inlined.
- static const int kMaxSize = 64 * kPointerSize;
-
- HValue* context() { return OperandAt(0); }
- Handle<JSFunction> constructor() { return constructor_; }
- Handle<Map> constructor_initial_map() { return constructor_initial_map_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual Handle<Map> GetMonomorphicJSObjectMap() {
- ASSERT(!constructor_initial_map_.is_null());
- return constructor_initial_map_;
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
-
- private:
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // virtual bool IsDeletable() const { return true; }
-
- Handle<JSFunction> constructor_;
- Handle<Map> constructor_initial_map_;
-};
-
-
class HAllocate: public HTemplateInstruction<2> {
public:
enum Flags {
@@ -4969,9 +4973,14 @@ class HAllocate: public HTemplateInstruction<2> {
SetOperandAt(0, context);
SetOperandAt(1, size);
set_representation(Representation::Tagged());
+ SetFlag(kTrackSideEffectDominators);
SetGVNFlag(kChangesNewSpacePromotion);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
}
+ // Maximum instance size for which allocations will be inlined.
+ static const int kMaxInlineSize = 64 * kPointerSize;
+
static Flags DefaultFlags() {
return CAN_ALLOCATE_IN_NEW_SPACE;
}
@@ -4987,6 +4996,7 @@ class HAllocate: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* size() { return OperandAt(1); }
+ HType type() { return type_; }
virtual Representation RequiredInputRepresentation(int index) {
if (index == 0) {
@@ -4996,6 +5006,14 @@ class HAllocate: public HTemplateInstruction<2> {
}
}
+ virtual Handle<Map> GetMonomorphicJSObjectMap() {
+ return known_initial_map_;
+ }
+
+ void set_known_initial_map(Handle<Map> known_initial_map) {
+ known_initial_map_ = known_initial_map;
+ }
+
virtual HType CalculateInferredType();
bool CanAllocateInNewSpace() const {
@@ -5023,6 +5041,13 @@ class HAllocate: public HTemplateInstruction<2> {
return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
}
+ void UpdateSize(HValue* size) {
+ SetOperandAt(1, size);
+ }
+
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator);
+
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(Allocate)
@@ -5030,13 +5055,15 @@ class HAllocate: public HTemplateInstruction<2> {
private:
HType type_;
Flags flags_;
+ Handle<Map> known_initial_map_;
};
class HInnerAllocatedObject: public HTemplateInstruction<1> {
public:
- HInnerAllocatedObject(HValue* value, int offset)
- : offset_(offset) {
+ HInnerAllocatedObject(HValue* value, int offset, HType type = HType::Tagged())
+ : offset_(offset),
+ type_(type) {
ASSERT(value->IsAllocate());
SetOperandAt(0, value);
set_representation(Representation::Tagged());
@@ -5049,12 +5076,15 @@ class HInnerAllocatedObject: public HTemplateInstruction<1> {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType() { return type_; }
+
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
private:
int offset_;
+ HType type_;
};
@@ -5072,8 +5102,10 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HInnerAllocatedObject::cast(object)->base_object(),
new_space_dominator);
}
+ if (object->IsConstant() && HConstant::cast(object)->IsCell()) {
+ return false;
+ }
if (object != new_space_dominator) return true;
- if (object->IsAllocateObject()) return false;
if (object->IsAllocate()) {
return !HAllocate::cast(object)->GuaranteedInNewSpace();
}
@@ -5297,6 +5329,14 @@ class HObjectAccess {
return HObjectAccess(kArrayLengths, JSArray::kLengthOffset);
}
+ static HObjectAccess ForAllocationSiteTransitionInfo() {
+ return HObjectAccess(kInobject, AllocationSite::kTransitionInfoOffset);
+ }
+
+ static HObjectAccess ForAllocationSiteWeakNext() {
+ return HObjectAccess(kInobject, AllocationSite::kWeakNextOffset);
+ }
+
static HObjectAccess ForFixedArrayLength() {
return HObjectAccess(kArrayLengths, FixedArray::kLengthOffset);
}
@@ -5313,8 +5353,16 @@ class HObjectAccess {
return HObjectAccess(kMaps, JSObject::kMapOffset);
}
- static HObjectAccess ForAllocationSitePayload() {
- return HObjectAccess(kInobject, AllocationSiteInfo::kPayloadOffset);
+ static HObjectAccess ForPropertyCellValue() {
+ return HObjectAccess(kInobject, PropertyCell::kValueOffset);
+ }
+
+ static HObjectAccess ForCellValue() {
+ return HObjectAccess(kInobject, Cell::kValueOffset);
+ }
+
+ static HObjectAccess ForAllocationMementoSite() {
+ return HObjectAccess(kInobject, AllocationMemento::kAllocationSiteOffset);
}
// Create an access to an offset in a fixed array header.
@@ -5333,6 +5381,9 @@ class HObjectAccess {
static HObjectAccess ForField(Handle<Map> map,
LookupResult *lookup, Handle<String> name = Handle<String>::null());
+ // Create an access for the payload of a Cell or JSGlobalPropertyCell.
+ static HObjectAccess ForCellPayload(Isolate* isolate);
+
void PrintTo(StringStream* stream);
inline bool Equals(HObjectAccess that) const {
@@ -5376,6 +5427,38 @@ class HObjectAccess {
};
+class HLinkObjectInList: public HUnaryOperation {
+ public:
+ // There needs to be a mapping from every KnownList to an external reference
+ enum KnownList {
+ ALLOCATION_SITE_LIST
+ };
+
+ HLinkObjectInList(HValue* object, HObjectAccess store_field,
+ KnownList known_list)
+ : HUnaryOperation(object),
+ store_field_(store_field),
+ known_list_(known_list) {
+ set_representation(Representation::Tagged());
+ }
+
+ HObjectAccess store_field() const { return store_field_; }
+ KnownList known_list() const { return known_list_; }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(LinkObjectInList)
+
+ private:
+ HObjectAccess store_field_;
+ KnownList known_list_;
+};
+
+
class HLoadNamedField: public HTemplateInstruction<2> {
public:
HLoadNamedField(HValue* object,
@@ -5739,7 +5822,8 @@ class HStoreNamedField: public HTemplateInstruction<2> {
field_representation_(field_representation),
transition_(),
transition_unique_id_(),
- new_space_dominator_(NULL) {
+ new_space_dominator_(NULL),
+ write_barrier_mode_(UPDATE_WRITE_BARRIER) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
access.SetGVNFlags(this, true);
@@ -5758,12 +5842,18 @@ class HStoreNamedField: public HTemplateInstruction<2> {
}
return Representation::Tagged();
}
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
virtual void PrintDataTo(StringStream* stream);
+ void SkipWriteBarrier() { write_barrier_mode_ = SKIP_WRITE_BARRIER; }
+ bool IsSkipWriteBarrier() const {
+ return write_barrier_mode_ == SKIP_WRITE_BARRIER;
+ }
+
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
@@ -5782,6 +5872,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
bool NeedsWriteBarrier() {
ASSERT(!(FLAG_track_double_fields && field_representation_.IsDouble()) ||
transition_.is_null());
+ if (IsSkipWriteBarrier()) return false;
return (!FLAG_track_fields || !field_representation_.IsSmi()) &&
// If there is a transition, a new storage object needs to be allocated.
!(FLAG_track_double_fields && field_representation_.IsDouble()) &&
@@ -5790,6 +5881,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
}
bool NeedsWriteBarrierForMap() {
+ if (IsSkipWriteBarrier()) return false;
return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
@@ -5807,6 +5899,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
Handle<Map> transition_;
UniqueValueId transition_unique_id_;
HValue* new_space_dominator_;
+ WriteBarrierMode write_barrier_mode_;
};
@@ -5952,7 +6045,8 @@ class HStoreKeyed
return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
}
- virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+ virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
@@ -6087,7 +6181,10 @@ class HStringAdd: public HBinaryOperation {
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
- HValue* right);
+ HValue* right,
+ StringAddFlags flags = STRING_ADD_CHECK_NONE);
+
+ StringAddFlags flags() const { return flags_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
@@ -6102,10 +6199,9 @@ class HStringAdd: public HBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
-
private:
- HStringAdd(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
+ HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
+ : HBinaryOperation(context, left, right), flags_(flags) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
@@ -6114,6 +6210,8 @@ class HStringAdd: public HBinaryOperation {
// TODO(svenpanne) Might be safe, but leave it out until we know for sure.
// virtual bool IsDeletable() const { return true; }
+
+ const StringAddFlags flags_;
};
@@ -6458,55 +6556,6 @@ class HSeqStringSetChar: public HTemplateInstruction<3> {
};
-class HDeleteProperty: public HBinaryOperation {
- public:
- HDeleteProperty(HValue* context, HValue* obj, HValue* key)
- : HBinaryOperation(context, obj, key) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty)
-
- HValue* object() { return left(); }
- HValue* key() { return right(); }
-};
-
-
-class HIn: public HTemplateInstruction<3> {
- public:
- HIn(HValue* context, HValue* key, HValue* object) {
- SetOperandAt(0, context);
- SetOperandAt(1, key);
- SetOperandAt(2, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* object() { return OperandAt(2); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType() {
- return HType::Boolean();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(In)
-};
-
-
class HCheckMapValue: public HTemplateInstruction<2> {
public:
HCheckMapValue(HValue* value,
diff --git a/deps/v8/src/hydrogen-minus-zero.cc b/deps/v8/src/hydrogen-minus-zero.cc
new file mode 100644
index 0000000000..e9628959de
--- /dev/null
+++ b/deps/v8/src/hydrogen-minus-zero.cc
@@ -0,0 +1,83 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-minus-zero.h"
+
+namespace v8 {
+namespace internal {
+
+void HComputeMinusZeroChecksPhase::Run() {
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->IsChange()) {
+ HChange* change = HChange::cast(current);
+        // Propagate flags for negative zero checks upwards from conversions
+        // int32-to-tagged, int32-to-double and int32-to-smi.
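+        // An int32 value cannot represent -0, so instructions that may
+        // produce -0 (e.g. the multiplication 0 * -5) and whose result
+        // escapes through such a conversion must keep their minus-zero
+        // bailout.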
+ Representation from = change->value()->representation();
+ ASSERT(from.Equals(change->from()));
+ if (from.IsInteger32()) {
+ ASSERT(change->to().IsTagged() ||
+ change->to().IsDouble() ||
+ change->to().IsSmi());
+ ASSERT(visited_.IsEmpty());
+ PropagateMinusZeroChecks(change->value());
+ visited_.Clear();
+ }
+ }
+ }
+ }
+}
+
+
+void HComputeMinusZeroChecksPhase::PropagateMinusZeroChecks(HValue* value) {
+ for (HValue* current = value;
+ current != NULL && !visited_.Contains(current->id());
+ current = current->EnsureAndPropagateNotMinusZero(&visited_)) {
+ // For phis, we must propagate the check to all of its inputs.
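+    // (A phi merges one value per predecessor edge, so the check only
+    // holds for the phi if it holds for every input.)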
+ if (current->IsPhi()) {
+ visited_.Add(current->id());
+ HPhi* phi = HPhi::cast(current);
+ for (int i = 0; i < phi->OperandCount(); ++i) {
+ PropagateMinusZeroChecks(phi->OperandAt(i));
+ }
+ break;
+ }
+
+ // For multiplication, division, and Math.min/max(), we must propagate
+ // to the left and the right side.
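+    // Both operands matter: 0 * -5 and 0 / -5 evaluate to -0, and
+    // Math.min(0, -0) is -0.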
+ if (current->IsMul() || current->IsDiv() || current->IsMathMinMax()) {
+ HBinaryOperation* operation = HBinaryOperation::cast(current);
+ operation->EnsureAndPropagateNotMinusZero(&visited_);
+ PropagateMinusZeroChecks(operation->left());
+ PropagateMinusZeroChecks(operation->right());
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-minus-zero.h b/deps/v8/src/hydrogen-minus-zero.h
new file mode 100644
index 0000000000..d23ec1196b
--- /dev/null
+++ b/deps/v8/src/hydrogen-minus-zero.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_MINUS_ZERO_H_
+#define V8_HYDROGEN_MINUS_ZERO_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HComputeMinusZeroChecksPhase : public HPhase {
+ public:
+ explicit HComputeMinusZeroChecksPhase(HGraph* graph)
+ : HPhase("H_Compute minus zero checks", graph),
+ visited_(graph->GetMaximumValueID(), zone()) { }
+
+ void Run();
+
+ private:
+ void PropagateMinusZeroChecks(HValue* value);
+
+ BitVector visited_;
+
+ DISALLOW_COPY_AND_ASSIGN(HComputeMinusZeroChecksPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_MINUS_ZERO_H_
diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/hydrogen-osr.cc
index 19a1c77442..a2fa0bfb20 100644
--- a/deps/v8/src/hydrogen-osr.cc
+++ b/deps/v8/src/hydrogen-osr.cc
@@ -63,7 +63,8 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
osr_entry_ = graph->CreateBasicBlock();
HValue* true_value = graph->GetConstantTrue();
- HBranch* test = new(zone) HBranch(true_value, non_osr_entry, osr_entry_);
+ HBranch* test = new(zone) HBranch(true_value, ToBooleanStub::Types(),
+ non_osr_entry, osr_entry_);
builder_->current_block()->Finish(test);
HBasicBlock* loop_predecessor = graph->CreateBasicBlock();
diff --git a/deps/v8/src/hydrogen-range-analysis.cc b/deps/v8/src/hydrogen-range-analysis.cc
index 0d4d9700de..76fd5f35f2 100644
--- a/deps/v8/src/hydrogen-range-analysis.cc
+++ b/deps/v8/src/hydrogen-range-analysis.cc
@@ -31,6 +31,20 @@ namespace v8 {
namespace internal {
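+// A Pending entry pairs a dominated block that still has to be visited
+// with the length of changed_ranges_ at the time it was pushed, so that
+// range updates made by sibling subtrees in the meantime can be rolled
+// back (see RollBackTo) before the block is analyzed.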
+class Pending {
+ public:
+ Pending(HBasicBlock* block, int last_changed_range)
+ : block_(block), last_changed_range_(last_changed_range) {}
+
+ HBasicBlock* block() const { return block_; }
+ int last_changed_range() const { return last_changed_range_; }
+
+ private:
+ HBasicBlock* block_;
+ int last_changed_range_;
+};
+
+
void HRangeAnalysisPhase::TraceRange(const char* msg, ...) {
if (FLAG_trace_range) {
va_list arguments;
@@ -41,40 +55,56 @@ void HRangeAnalysisPhase::TraceRange(const char* msg, ...) {
}
-void HRangeAnalysisPhase::Analyze(HBasicBlock* block) {
- TraceRange("Analyzing block B%d\n", block->block_id());
-
- int last_changed_range = changed_ranges_.length() - 1;
+void HRangeAnalysisPhase::Run() {
+ HBasicBlock* block(graph()->entry_block());
+ ZoneList<Pending> stack(graph()->blocks()->length(), zone());
+ while (block != NULL) {
+ TraceRange("Analyzing block B%d\n", block->block_id());
- // Infer range based on control flow.
- if (block->predecessors()->length() == 1) {
- HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsCompareIDAndBranch()) {
- InferControlFlowRange(HCompareIDAndBranch::cast(pred->end()), block);
+ // Infer range based on control flow.
+ if (block->predecessors()->length() == 1) {
+ HBasicBlock* pred = block->predecessors()->first();
+ if (pred->end()->IsCompareNumericAndBranch()) {
+ InferControlFlowRange(HCompareNumericAndBranch::cast(pred->end()),
+ block);
+ }
}
- }
- // Process phi instructions.
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- InferRange(phi);
- }
+ // Process phi instructions.
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ InferRange(phi);
+ }
- // Go through all instructions of the current block.
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- InferRange(it.Current());
- }
+ // Go through all instructions of the current block.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ InferRange(it.Current());
+ }
- // Continue analysis in all dominated blocks.
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- Analyze(block->dominated_blocks()->at(i));
+ // Continue analysis in all dominated blocks.
+ const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks());
+ if (!dominated_blocks->is_empty()) {
+ // Continue with first dominated block, and push the
+ // remaining blocks on the stack (in reverse order).
+ int last_changed_range = changed_ranges_.length();
+ for (int i = dominated_blocks->length() - 1; i > 0; --i) {
+ stack.Add(Pending(dominated_blocks->at(i), last_changed_range), zone());
+ }
+ block = dominated_blocks->at(0);
+ } else if (!stack.is_empty()) {
+ // Pop next pending block from stack.
+ Pending pending = stack.RemoveLast();
+ RollBackTo(pending.last_changed_range());
+ block = pending.block();
+ } else {
+ // All blocks done.
+ block = NULL;
+ }
}
-
- RollBackTo(last_changed_range);
}
-void HRangeAnalysisPhase::InferControlFlowRange(HCompareIDAndBranch* test,
+void HRangeAnalysisPhase::InferControlFlowRange(HCompareNumericAndBranch* test,
HBasicBlock* dest) {
ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
if (test->representation().IsSmiOrInteger32()) {
@@ -139,10 +169,11 @@ void HRangeAnalysisPhase::InferRange(HValue* value) {
void HRangeAnalysisPhase::RollBackTo(int index) {
- for (int i = index + 1; i < changed_ranges_.length(); ++i) {
+ ASSERT(index <= changed_ranges_.length());
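+  // 'index' is the length changed_ranges_ had at the point of interest;
+  // every range added since then is undone.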
+ for (int i = index; i < changed_ranges_.length(); ++i) {
changed_ranges_[i]->RemoveLastAddedRange();
}
- changed_ranges_.Rewind(index + 1);
+ changed_ranges_.Rewind(index);
}
diff --git a/deps/v8/src/hydrogen-range-analysis.h b/deps/v8/src/hydrogen-range-analysis.h
index 52ce109c87..a1e9737c5e 100644
--- a/deps/v8/src/hydrogen-range-analysis.h
+++ b/deps/v8/src/hydrogen-range-analysis.h
@@ -39,14 +39,12 @@ class HRangeAnalysisPhase : public HPhase {
explicit HRangeAnalysisPhase(HGraph* graph)
: HPhase("H_Range analysis", graph), changed_ranges_(16, zone()) { }
- void Run() {
- Analyze(graph()->entry_block());
- }
+ void Run();
private:
void TraceRange(const char* msg, ...);
- void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest);
+ void InferControlFlowRange(HCompareNumericAndBranch* test,
+ HBasicBlock* dest);
void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
void InferRange(HValue* value);
void RollBackTo(int index);
diff --git a/deps/v8/src/hydrogen-redundant-phi.cc b/deps/v8/src/hydrogen-redundant-phi.cc
new file mode 100644
index 0000000000..9c38200577
--- /dev/null
+++ b/deps/v8/src/hydrogen-redundant-phi.cc
@@ -0,0 +1,76 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-redundant-phi.h"
+
+namespace v8 {
+namespace internal {
+
+void HRedundantPhiEliminationPhase::Run() {
+  // We do a simple fixed point iteration without any work list, because
+  // machine-generated JavaScript can lead to a very dense Hydrogen graph,
+  // where an enormous work list would consequently result in OOM.
+  // Experiments showed that this simple algorithm is good enough, and even
+  // e.g. tracking the set or range of blocks to consider is not a real
+  // improvement.
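+  // A phi consisting of a single non-loop operand plus any number of loop
+  // operands is redundant; e.g. the loop phi i = phi(x, i) reduces to x.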
+ bool need_another_iteration;
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ ZoneList<HPhi*> redundant_phis(blocks->length(), zone());
+ do {
+ need_another_iteration = false;
+ for (int i = 0; i < blocks->length(); ++i) {
+ HBasicBlock* block = blocks->at(i);
+ for (int j = 0; j < block->phis()->length(); j++) {
+ HPhi* phi = block->phis()->at(j);
+ HValue* replacement = phi->GetRedundantReplacement();
+ if (replacement != NULL) {
+ // Remember phi to avoid concurrent modification of the block's phis.
+ redundant_phis.Add(phi, zone());
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* value = it.value();
+ value->SetOperandAt(it.index(), replacement);
+ need_another_iteration |= value->IsPhi();
+ }
+ }
+ }
+ for (int i = 0; i < redundant_phis.length(); i++) {
+ block->RemovePhi(redundant_phis[i]);
+ }
+ redundant_phis.Clear();
+ }
+ } while (need_another_iteration);
+
+#if DEBUG
+ // Make sure that we *really* removed all redundant phis.
+ for (int i = 0; i < blocks->length(); ++i) {
+ for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
+ ASSERT(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL);
+ }
+ }
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-redundant-phi.h b/deps/v8/src/hydrogen-redundant-phi.h
new file mode 100644
index 0000000000..6291fa5b78
--- /dev/null
+++ b/deps/v8/src/hydrogen-redundant-phi.h
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_REDUNDANT_PHI_H_
+#define V8_HYDROGEN_REDUNDANT_PHI_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Replace all phis consisting of a single non-loop operand plus any number of
+// loop operands by that single non-loop operand.
+class HRedundantPhiEliminationPhase : public HPhase {
+ public:
+ explicit HRedundantPhiEliminationPhase(HGraph* graph)
+ : HPhase("H_Redundant phi elimination", graph) { }
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_REDUNDANT_PHI_H_
diff --git a/deps/v8/src/hydrogen-removable-simulates.cc b/deps/v8/src/hydrogen-removable-simulates.cc
new file mode 100644
index 0000000000..f952832431
--- /dev/null
+++ b/deps/v8/src/hydrogen-removable-simulates.cc
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-removable-simulates.h"
+
+namespace v8 {
+namespace internal {
+
+void HMergeRemovableSimulatesPhase::Run() {
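+  // An HSimulate captures the environment needed to materialize an
+  // unoptimized frame on deoptimization; adjacent simulates with no
+  // observable side effect in between can be folded into a single
+  // deopt point.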
+ ZoneList<HSimulate*> mergelist(2, zone());
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ // Make sure the merge list is empty at the start of a block.
+ ASSERT(mergelist.is_empty());
+ // Nasty heuristic: Never remove the first simulate in a block. This
+ // just so happens to have a beneficial effect on register allocation.
+ bool first = true;
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->IsLeaveInlined()) {
+ // Never fold simulates from inlined environments into simulates
+ // in the outer environment.
+ // (Before each HEnterInlined, there is a non-foldable HSimulate
+ // anyway, so we get the barrier in the other direction for free.)
+ // Simply remove all accumulated simulates without merging. This
+ // is safe because simulates after instructions with side effects
+ // are never added to the merge list.
+ while (!mergelist.is_empty()) {
+ mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
+ }
+ continue;
+ }
+ if (current->IsReturn()) {
+ // Drop mergeable simulates in the list. This is safe because
+ // simulates after instructions with side effects are never added
+ // to the merge list.
+ while (!mergelist.is_empty()) {
+ mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
+ }
+ continue;
+ }
+ // Skip the non-simulates and the first simulate.
+ if (!current->IsSimulate()) continue;
+ if (first) {
+ first = false;
+ continue;
+ }
+ HSimulate* current_simulate = HSimulate::cast(current);
+ if ((current_simulate->previous()->HasObservableSideEffects() &&
+ !current_simulate->next()->IsSimulate()) ||
+ !current_simulate->is_candidate_for_removal()) {
+        // This simulate is not suitable for folding away.
+        // Fold the simulates accumulated so far into it instead.
+ current_simulate->MergeWith(&mergelist);
+ continue;
+ } else {
+ // Accumulate this simulate for folding later on.
+ mergelist.Add(current_simulate, zone());
+ }
+ }
+
+ if (!mergelist.is_empty()) {
+ // Merge the accumulated simulates at the end of the block.
+ HSimulate* last = mergelist.RemoveLast();
+ last->MergeWith(&mergelist);
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-removable-simulates.h b/deps/v8/src/hydrogen-removable-simulates.h
new file mode 100644
index 0000000000..f5bcd6ddfa
--- /dev/null
+++ b/deps/v8/src/hydrogen-removable-simulates.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+#define V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HMergeRemovableSimulatesPhase : public HPhase {
+ public:
+ explicit HMergeRemovableSimulatesPhase(HGraph* graph)
+ : HPhase("H_Merge removable simulates", graph) { }
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HMergeRemovableSimulatesPhase);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_REMOVABLE_SIMULATES_H_
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
new file mode 100644
index 0000000000..e8f0140f66
--- /dev/null
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -0,0 +1,167 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-representation-changes.h"
+
+namespace v8 {
+namespace internal {
+
+void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
+ HValue* value, HValue* use_value, int use_index, Representation to) {
+ // Insert the representation change right before its use. For phi-uses we
+ // insert at the end of the corresponding predecessor.
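+  // (A phi conceptually reads its operands on the incoming edges, so the
+  // conversion must execute in the predecessor block rather than in the
+  // phi's own block.)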
+ HInstruction* next = NULL;
+ if (use_value->IsPhi()) {
+ next = use_value->block()->predecessors()->at(use_index)->end();
+ } else {
+ next = HInstruction::cast(use_value);
+ }
+ // For constants we try to make the representation change at compile
+ // time. When a representation change is not possible without loss of
+ // information we treat constants like normal instructions and insert the
+ // change instructions for them.
+ HInstruction* new_value = NULL;
+ bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
+ bool allow_undefined_as_nan =
+ use_value->CheckFlag(HValue::kAllowUndefinedAsNaN);
+ if (value->IsConstant()) {
+ HConstant* constant = HConstant::cast(value);
+ // Try to create a new copy of the constant with the new representation.
+ new_value = (is_truncating && to.IsInteger32())
+ ? constant->CopyToTruncatedInt32(graph()->zone())
+ : constant->CopyToRepresentation(to, graph()->zone());
+ }
+
+ if (new_value == NULL) {
+ new_value = new(graph()->zone()) HChange(value, to,
+ is_truncating,
+ allow_undefined_as_nan);
+ }
+
+ new_value->InsertBefore(next);
+ use_value->SetOperandAt(use_index, new_value);
+}
+
+
+void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
+ HValue* value) {
+ Representation r = value->representation();
+ if (r.IsNone()) return;
+ if (value->HasNoUses()) return;
+
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+ HValue* use_value = it.value();
+ int use_index = it.index();
+ Representation req = use_value->RequiredInputRepresentation(use_index);
+ if (req.IsNone() || req.Equals(r)) continue;
+ InsertRepresentationChangeForUse(value, use_value, use_index, req);
+ }
+ if (value->HasNoUses()) {
+ ASSERT(value->IsConstant());
+ value->DeleteAndReplaceWith(NULL);
+ }
+
+  // The only purpose of an HForceRepresentation is to represent the value
+  // after the (possible) HChange instruction. We make it disappear.
+ if (value->IsForceRepresentation()) {
+ value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
+ }
+}
+
+
+void HRepresentationChangesPhase::Run() {
+ // Compute truncation flag for phis: Initially assume that all
+ // int32-phis allow truncation and iteratively remove the ones that
+ // are used in an operation that does not allow a truncating
+ // conversion.
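+  // E.g. a phi feeding only bitwise operations such as "x | 0" may stay
+  // truncating, while a phi with a double-typed use must keep all bits:
+  // its flag is cleared and, transitively, so is that of its int32 phi
+  // inputs.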
+ ZoneList<HPhi*> worklist(8, zone());
+
+ const ZoneList<HPhi*>* phi_list(graph()->phi_list());
+ for (int i = 0; i < phi_list->length(); i++) {
+ HPhi* phi = phi_list->at(i);
+ if (phi->representation().IsInteger32()) {
+ phi->SetFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ for (int i = 0; i < phi_list->length(); i++) {
+ HPhi* phi = phi_list->at(i);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ // If a Phi is used as a non-truncating int32 or as a double,
+ // clear its "truncating" flag.
+ HValue* use = it.value();
+ Representation input_representation =
+ use->RequiredInputRepresentation(it.index());
+ if (!input_representation.IsInteger32() ||
+ !use->CheckFlag(HValue::kTruncatingToInt32)) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ phi->id(), it.value()->id(), it.value()->Mnemonic());
+ }
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ worklist.Add(phi, zone());
+ break;
+ }
+ }
+ }
+
+ while (!worklist.is_empty()) {
+ HPhi* current = worklist.RemoveLast();
+ for (int i = 0; i < current->OperandCount(); ++i) {
+ HValue* input = current->OperandAt(i);
+ if (input->IsPhi() &&
+ input->representation().IsInteger32() &&
+ input->CheckFlag(HValue::kTruncatingToInt32)) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ input->id(), current->id(), current->Mnemonic());
+ }
+ input->ClearFlag(HValue::kTruncatingToInt32);
+ worklist.Add(HPhi::cast(input), zone());
+ }
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ for (int i = 0; i < blocks->length(); ++i) {
+ // Process phi instructions first.
+ const HBasicBlock* block(blocks->at(i));
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ InsertRepresentationChangesForValue(phis->at(j));
+ }
+
+ // Process normal instructions.
+ for (HInstruction* current = block->first(); current != NULL; ) {
+ HInstruction* next = current->next();
+ InsertRepresentationChangesForValue(current);
+ current = next;
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-representation-changes.h b/deps/v8/src/hydrogen-representation-changes.h
new file mode 100644
index 0000000000..77e899b60b
--- /dev/null
+++ b/deps/v8/src/hydrogen-representation-changes.h
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_REPRESENTATION_CHANGES_H_
+#define V8_HYDROGEN_REPRESENTATION_CHANGES_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HRepresentationChangesPhase : public HPhase {
+ public:
+ explicit HRepresentationChangesPhase(HGraph* graph)
+ : HPhase("H_Representation changes", graph) { }
+
+ void Run();
+
+ private:
+ void InsertRepresentationChangeForUse(HValue* value,
+ HValue* use_value,
+ int use_index,
+ Representation to);
+ void InsertRepresentationChangesForValue(HValue* value);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_REPRESENTATION_CHANGES_H_
diff --git a/deps/v8/src/hydrogen-sce.cc b/deps/v8/src/hydrogen-sce.cc
new file mode 100644
index 0000000000..a6995f647a
--- /dev/null
+++ b/deps/v8/src/hydrogen-sce.cc
@@ -0,0 +1,62 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-sce.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+void HStackCheckEliminationPhase::Run() {
+  // For each loop block walk the dominator tree from the backwards branch to
+  // the loop header. If a call instruction is encountered, the backwards
+  // branch is dominated by a call and the stack check in the backwards branch
+  // can be removed.
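+  // Calls perform their own stack check on entry, so a back edge dominated
+  // by a call can never be the first point at which the stack overflows.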
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ if (block->IsLoopHeader()) {
+ HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+ HBasicBlock* dominator = back_edge;
+ while (true) {
+ for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
+ if (it.Current()->IsCall()) {
+ block->loop_information()->stack_check()->Eliminate();
+ break;
+ }
+ }
+
+ // Done when the loop header is processed.
+ if (dominator == block) break;
+
+ // Move up the dominator tree.
+ dominator = dominator->dominator();
+ }
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-sce.h b/deps/v8/src/hydrogen-sce.h
new file mode 100644
index 0000000000..55e153e0ed
--- /dev/null
+++ b/deps/v8/src/hydrogen-sce.h
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_SCE_H_
+#define V8_HYDROGEN_SCE_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HStackCheckEliminationPhase : public HPhase {
+ public:
+ explicit HStackCheckEliminationPhase(HGraph* graph)
+ : HPhase("H_Stack check elimination", graph) { }
+
+ void Run();
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_SCE_H_
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 7679f93257..57220e0de1 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,12 +33,23 @@
#include "codegen.h"
#include "full-codegen.h"
#include "hashmap.h"
+#include "hydrogen-bce.h"
+#include "hydrogen-canonicalize.h"
+#include "hydrogen-dce.h"
+#include "hydrogen-dehoist.h"
+#include "hydrogen-deoptimizing-mark.h"
#include "hydrogen-environment-liveness.h"
#include "hydrogen-escape-analysis.h"
#include "hydrogen-infer-representation.h"
+#include "hydrogen-infer-types.h"
#include "hydrogen-gvn.h"
+#include "hydrogen-minus-zero.h"
#include "hydrogen-osr.h"
#include "hydrogen-range-analysis.h"
+#include "hydrogen-redundant-phi.h"
+#include "hydrogen-removable-simulates.h"
+#include "hydrogen-representation-changes.h"
+#include "hydrogen-sce.h"
#include "hydrogen-uint32-analysis.h"
#include "lithium-allocator.h"
#include "parser.h"
@@ -649,6 +660,7 @@ HConstant* HGraph::GetConstant##Name() { \
htype, \
false, \
true, \
+ false, \
boolean_value); \
constant->InsertAfter(GetConstantUndefined()); \
constant_##name##_.set(constant); \
@@ -671,6 +683,19 @@ HConstant* HGraph::GetInvalidContext() {
}
+bool HGraph::IsStandardConstant(HConstant* constant) {
+ if (constant == GetConstantUndefined()) return true;
+ if (constant == GetConstant0()) return true;
+ if (constant == GetConstant1()) return true;
+ if (constant == GetConstantMinus1()) return true;
+ if (constant == GetConstantTrue()) return true;
+ if (constant == GetConstantFalse()) return true;
+ if (constant == GetConstantHole()) return true;
+ if (constant == GetConstantNull()) return true;
+ return false;
+}
+
+
HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, int position)
: builder_(builder),
position_(position),
@@ -711,26 +736,6 @@ HGraphBuilder::IfBuilder::IfBuilder(
}
-HInstruction* HGraphBuilder::IfBuilder::IfCompare(
- HValue* left,
- HValue* right,
- Token::Value token) {
- HCompareIDAndBranch* compare =
- new(zone()) HCompareIDAndBranch(left, right, token);
- AddCompare(compare);
- return compare;
-}
-
-
-HInstruction* HGraphBuilder::IfBuilder::IfCompareMap(HValue* left,
- Handle<Map> map) {
- HCompareMap* compare =
- new(zone()) HCompareMap(left, map, first_true_block_, first_false_block_);
- AddCompare(compare);
- return compare;
-}
-
-
void HGraphBuilder::IfBuilder::AddCompare(HControlInstruction* compare) {
if (split_edge_merge_block_ != NULL) {
HEnvironment* env = first_false_block_->last_environment();
@@ -811,8 +816,8 @@ void HGraphBuilder::IfBuilder::Then() {
ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
boolean_type.Add(ToBooleanStub::BOOLEAN);
HBranch* branch =
- new(zone()) HBranch(constant_false, first_true_block_,
- first_false_block_, boolean_type);
+ new(zone()) HBranch(constant_false, boolean_type, first_true_block_,
+ first_false_block_);
builder_->current_block()->Finish(branch);
}
builder_->set_current_block(first_true_block_);
@@ -916,8 +921,8 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
body_env->Pop();
builder_->set_current_block(header_block_);
- HCompareIDAndBranch* compare =
- new(zone()) HCompareIDAndBranch(phi_, terminating, token);
+ HCompareNumericAndBranch* compare =
+ new(zone()) HCompareNumericAndBranch(phi_, terminating, token);
compare->SetSuccessorAt(0, body_block_);
compare->SetSuccessorAt(1, exit_block_);
builder_->current_block()->Finish(compare);
@@ -1005,6 +1010,17 @@ HReturn* HGraphBuilder::AddReturn(HValue* value) {
}
+void HGraphBuilder::AddSoftDeoptimize(SoftDeoptimizeMode mode) {
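+  // A soft deopt returns to unoptimized code to gather more type feedback;
+  // it signals missing information rather than a correctness problem, so a
+  // single one per block suffices.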
+ isolate()->counters()->soft_deopts_requested()->Increment();
+ if (FLAG_always_opt && mode == CAN_OMIT_SOFT_DEOPT) return;
+ if (current_block()->IsDeoptimizing()) return;
+ Add<HSoftDeoptimize>();
+ isolate()->counters()->soft_deopts_inserted()->Increment();
+ current_block()->MarkAsDeoptimizing();
+ graph()->set_has_soft_deoptimize(true);
+}
+
+
HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
HBasicBlock* b = graph()->CreateBasicBlock();
b->SetInitialEnvironment(env);
@@ -1035,93 +1051,6 @@ HValue* HGraphBuilder::BuildCheckMap(HValue* obj,
}
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store) {
- Zone* zone = this->zone();
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- val = Add<HClampToUint8>(val);
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- return new(zone) HStoreKeyed(external_elements, checked_key,
- val, elements_kind);
- } else {
- ASSERT(val == NULL);
- HLoadKeyed* load =
- new(zone) HLoadKeyed(
- external_elements, checked_key, dependency, elements_kind);
- if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildFastElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* load_dependency,
- ElementsKind elements_kind,
- bool is_store,
- LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode) {
- Zone* zone = this->zone();
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- // It's an element load (!is_store).
- return new(zone) HLoadKeyed(elements,
- checked_key,
- load_dependency,
- elements_kind,
- load_mode);
-}
-
-
HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HValue* elements,
ElementsKind kind,
@@ -1131,23 +1060,25 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
Zone* zone = this->zone();
IfBuilder length_checker(this);
- length_checker.IfCompare(length, key, Token::EQ);
+ Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
+ length_checker.If<HCompareNumericAndBranch>(key, length, token);
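+  // Holey kinds may grow on a store at any index >= length, with holes
+  // filling the gap; packed kinds only grow by appending at key == length.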
+
length_checker.Then();
HValue* current_capacity = AddLoadFixedArrayLength(elements);
IfBuilder capacity_checker(this);
- capacity_checker.IfCompare(length, current_capacity, Token::EQ);
+ capacity_checker.If<HCompareNumericAndBranch>(key, current_capacity,
+ Token::GTE);
capacity_checker.Then();
HValue* context = environment()->LookupContext();
- HValue* new_capacity =
- BuildNewElementsCapacity(context, current_capacity);
+ HValue* new_capacity = BuildNewElementsCapacity(context, key);
HValue* new_elements = BuildGrowElementsCapacity(object, elements,
- kind, length,
+ kind, kind, length,
new_capacity);
environment()->Push(new_elements);
@@ -1158,7 +1089,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
if (is_js_array) {
HValue* new_length = AddInstruction(
- HAdd::New(zone, context, length, graph_->GetConstant1()));
+ HAdd::New(zone, context, key, graph_->GetConstant1()));
new_length->ClearFlag(HValue::kCanOverflow);
Representation representation = IsFastElementsKind(kind)
@@ -1168,10 +1099,9 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
}
length_checker.Else();
-
Add<HBoundsCheck>(key, length);
- environment()->Push(elements);
+ environment()->Push(elements);
length_checker.End();
return environment()->Pop();
@@ -1182,17 +1112,16 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
ElementsKind kind,
HValue* length) {
- Heap* heap = isolate()->heap();
+ Factory* factory = isolate()->factory();
IfBuilder cow_checker(this);
- cow_checker.IfCompareMap(elements,
- Handle<Map>(heap->fixed_cow_array_map()));
+ cow_checker.If<HCompareMap>(elements, factory->fixed_cow_array_map());
cow_checker.Then();
HValue* capacity = AddLoadFixedArrayLength(elements);
- HValue* new_elements = BuildGrowElementsCapacity(object, elements,
+ HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind,
kind, length, capacity);
environment()->Push(new_elements);
@@ -1207,6 +1136,48 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
}
+void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
+ HValue* map,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ bool is_jsarray) {
+ ASSERT(!IsFastHoleyElementsKind(from_kind) ||
+ IsFastHoleyElementsKind(to_kind));
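+  // Holey kinds never transition back to packed: existing holes cannot be
+  // proven absent, so holeyness may only be introduced, never removed.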
+
+ if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ Add<HTrapAllocationMemento>(object);
+ }
+
+ if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ HInstruction* elements = AddLoadElements(object);
+
+ HInstruction* empty_fixed_array = Add<HConstant>(
+ isolate()->factory()->empty_fixed_array(), Representation::Tagged());
+
+ IfBuilder if_builder(this);
+
+ if_builder.IfNot<HCompareObjectEqAndBranch>(elements, empty_fixed_array);
+
+ if_builder.Then();
+
+ HInstruction* elements_length = AddLoadFixedArrayLength(elements);
+
+ HInstruction* array_length = is_jsarray
+ ? AddLoad(object, HObjectAccess::ForArrayLength(),
+ NULL, Representation::Smi())
+ : elements_length;
+ array_length->set_type(HType::Smi());
+
+ BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
+ array_length, elements_length);
+
+ if_builder.End();
+ }
+
+ AddStore(object, HObjectAccess::ForMap(), map);
+}
+
+
HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* object,
HValue* key,
@@ -1256,16 +1227,14 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HLoadExternalArrayPointer* external_elements =
Add<HLoadExternalArrayPointer>(elements);
IfBuilder length_checker(this);
- length_checker.IfCompare(key, length, Token::LT);
+ length_checker.If<HCompareNumericAndBranch>(key, length, Token::LT);
length_checker.Then();
IfBuilder negative_checker(this);
- HValue* bounds_check = negative_checker.IfCompare(
+ HValue* bounds_check = negative_checker.If<HCompareNumericAndBranch>(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
- HInstruction* result = BuildExternalArrayElementAccess(
- external_elements, key, val, bounds_check,
- elements_kind, is_store);
- AddInstruction(result);
+ HInstruction* result = AddExternalArrayElementAccess(
+ external_elements, key, val, bounds_check, elements_kind, is_store);
negative_checker.ElseDeopt();
length_checker.End();
return result;
@@ -1274,9 +1243,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
checked_key = Add<HBoundsCheck>(key, length);
HLoadExternalArrayPointer* external_elements =
Add<HLoadExternalArrayPointer>(elements);
- return AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val, mapcheck,
- elements_kind, is_store));
+ return AddExternalArrayElementAccess(
+ external_elements, checked_key, val,
+ mapcheck, elements_kind, is_store);
}
}
ASSERT(fast_smi_only_elements ||
@@ -1313,9 +1282,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
}
}
- return AddInstruction(
- BuildFastElementAccess(elements, checked_key, val, mapcheck,
- elements_kind, is_store, load_mode, store_mode));
+ return AddFastElementAccess(elements, checked_key, val, mapcheck,
+ elements_kind, is_store, load_mode, store_mode);
}
@@ -1395,14 +1363,14 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
AddStore(array, HObjectAccess::ForArrayLength(), length_field);
if (mode == TRACK_ALLOCATION_SITE) {
- BuildCreateAllocationSiteInfo(array,
- JSArray::kSize,
- allocation_site_payload);
+ BuildCreateAllocationMemento(array,
+ JSArray::kSize,
+ allocation_site_payload);
}
int elements_location = JSArray::kSize;
if (mode == TRACK_ALLOCATION_SITE) {
- elements_location += AllocationSiteInfo::kSize;
+ elements_location += AllocationMemento::kSize;
}
HInnerAllocatedObject* elements =
@@ -1412,6 +1380,86 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
}
+HInstruction* HGraphBuilder::AddExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store) {
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ val = Add<HClampToUint8>(val);
+ break;
+ }
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+ break;
+ }
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ break;
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ return Add<HStoreKeyed>(external_elements, checked_key, val, elements_kind);
+ } else {
+ ASSERT(val == NULL);
+ HLoadKeyed* load = Add<HLoadKeyed>(external_elements, checked_key,
+ dependency, elements_kind);
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
+ }
+ return load;
+ }
+}
+
+
+HInstruction* HGraphBuilder::AddFastElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* load_dependency,
+ ElementsKind elements_kind,
+ bool is_store,
+ LoadKeyedHoleMode load_mode,
+ KeyedAccessStoreMode store_mode) {
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+ // It's an element load (!is_store).
+ return Add<HLoadKeyed>(
+ elements, checked_key, load_dependency, elements_kind, load_mode);
+}
+
+
HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
HValue* typecheck) {
return AddLoad(object, HObjectAccess::ForElementsPointer(), typecheck);
@@ -1464,17 +1512,18 @@ void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
HValue* elements,
ElementsKind kind,
+ ElementsKind new_kind,
HValue* length,
HValue* new_capacity) {
HValue* context = environment()->LookupContext();
- BuildNewSpaceArrayCheck(new_capacity, kind);
+ BuildNewSpaceArrayCheck(new_capacity, new_kind);
HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
- context, kind, new_capacity);
+ context, new_kind, new_capacity);
BuildCopyElements(context, elements, kind,
- new_elements, kind,
+ new_elements, new_kind,
length, new_capacity);
AddStore(object, HObjectAccess::ForElementsPointer(), new_elements);
@@ -1586,6 +1635,7 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
HValue* boilerplate,
+ HValue* allocation_site,
AllocationSiteMode mode,
ElementsKind kind,
int length) {
@@ -1594,7 +1644,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
// All sizes here are multiples of kPointerSize.
int size = JSArray::kSize;
if (mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
+ size += AllocationMemento::kSize;
}
int elems_offset = size;
if (length > 0) {
@@ -1622,7 +1672,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
// Create an allocation site info if requested.
if (mode == TRACK_ALLOCATION_SITE) {
- BuildCreateAllocationSiteInfo(object, JSArray::kSize, boilerplate);
+ BuildCreateAllocationMemento(object, JSArray::kSize, allocation_site);
}
if (length > 0) {
@@ -1654,6 +1704,39 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
}
+HInstruction* HGraphBuilder::BuildUnaryMathOp(
+ HValue* input, Handle<Type> type, Token::Value operation) {
+ // We only handle the numeric cases here
+ type = handle(
+ Type::Intersect(type, handle(Type::Number(), isolate())), isolate());
+
+ switch (operation) {
+ default:
+ UNREACHABLE();
+ case Token::SUB: {
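+      // Negation is lowered as multiplication by -1: the sign flip matches
+      // IEEE-754 for every input, including -0 and NaN, and reuses HMul's
+      // representation and minus-zero handling.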
+ HInstruction* instr =
+ HMul::New(zone(), environment()->LookupContext(),
+ input, graph()->GetConstantMinus1());
+ Representation rep = Representation::FromType(type);
+ if (type->Is(Type::None())) {
+ AddSoftDeoptimize();
+ }
+ if (instr->IsBinaryOperation()) {
+ HBinaryOperation* binop = HBinaryOperation::cast(instr);
+ binop->set_observed_input_representation(1, rep);
+ binop->set_observed_input_representation(2, rep);
+ }
+ return instr;
+ }
+ case Token::BIT_NOT:
+ if (type->Is(Type::None())) {
+ AddSoftDeoptimize();
+ }
+ return new(zone()) HBitNot(input);
+ }
+}
+
+
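BuildUnaryMathOp lowers Token::SUB to a multiplication by -1 and falls back to a soft deopt when no useful type feedback exists (the Type::None() case). The identity is exact for doubles, including -0, but can leave the int32 range; a small demonstration independent of V8:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // For doubles, -x and x * -1.0 agree bit-for-bit, -0.0 included.
      double x = 0.0;
      std::printf("%d\n", std::signbit(x * -1.0));  // 1: the product is -0.0
      // For int32 inputs the identity can overflow: negating INT32_MIN
      // yields 2^31, one of the cases the deoptimization has to guard.
      int64_t widened = static_cast<int64_t>(INT32_MIN) * -1;
      std::printf("%lld\n", static_cast<long long>(widened));  // 2147483648
      return 0;
    }
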
void HGraphBuilder::BuildCompareNil(
HValue* value,
Handle<Type> type,
@@ -1694,16 +1777,18 @@ void HGraphBuilder::BuildCompareNil(
}
-HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object,
- int previous_object_size,
- HValue* payload) {
- HInnerAllocatedObject* alloc_site = Add<HInnerAllocatedObject>(
+HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object,
+ int previous_object_size,
+ HValue* alloc_site) {
+ ASSERT(alloc_site != NULL);
+ HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>(
previous_object, previous_object_size);
- Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
- AddStoreMapConstant(alloc_site, alloc_site_map);
- HObjectAccess access = HObjectAccess::ForAllocationSitePayload();
- AddStore(alloc_site, access, payload);
- return alloc_site;
+ Handle<Map> alloc_memento_map(
+ isolate()->heap()->allocation_memento_map());
+ AddStoreMapConstant(alloc_memento, alloc_memento_map);
+ HObjectAccess access = HObjectAccess::ForAllocationMementoSite();
+ AddStore(alloc_memento, access, alloc_site);
+ return alloc_memento;
}
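The renamed helper now stores a back pointer to a shared AllocationSite instead of an arbitrary payload. A layout sketch of what BuildCreateAllocationMemento emits; the struct and field names below are illustrative only, not the real V8 declarations:

    // One contiguous allocation, as produced above:
    //
    //   [ JSArray, JSArray::kSize bytes ][ AllocationMemento ]
    //                                      ^ HInnerAllocatedObject points here
    //
    struct AllocationMementoSketch {
      void* map;              // set via AddStoreMapConstant(allocation_memento_map)
      void* allocation_site;  // set via AddStore(..., alloc_site): back pointer
    };                        // to the AllocationSite that tracks transitions
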
@@ -1736,7 +1821,7 @@ HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
constructor_function_(constructor_function) {
mode_ = override_mode == DISABLE_ALLOCATION_SITES
? DONT_TRACK_ALLOCATION_SITE
- : AllocationSiteInfo::GetMode(kind);
+ : AllocationSite::GetMode(kind);
}
@@ -1794,7 +1879,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
int base_size = JSArray::kSize;
if (mode_ == TRACK_ALLOCATION_SITE) {
- base_size += AllocationSiteInfo::kSize;
+ base_size += AllocationMemento::kSize;
}
if (IsFastDoubleElementsKind(kind_)) {
@@ -1821,7 +1906,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
HValue* HGraphBuilder::JSArrayBuilder::EstablishEmptyArrayAllocationSize() {
int base_size = JSArray::kSize;
if (mode_ == TRACK_ALLOCATION_SITE) {
- base_size += AllocationSiteInfo::kSize;
+ base_size += AllocationMemento::kSize;
}
base_size += IsFastDoubleElementsKind(kind_)
@@ -1909,6 +1994,18 @@ HStoreNamedField* HGraphBuilder::AddStoreMapConstant(HValue *object,
}
+HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin,
+ HValue* context) {
+ HGlobalObject* global_object = Add<HGlobalObject>(context);
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(
+ GlobalObject::kBuiltinsOffset);
+ HValue* builtins = AddLoad(global_object, access);
+ HObjectAccess function_access = HObjectAccess::ForJSObjectOffset(
+ JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
+ return AddLoad(builtins, function_access);
+}
+
+
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info),
function_state_(NULL),
@@ -2029,32 +2126,6 @@ void HGraph::FinalizeUniqueValueIds() {
}
-void HGraph::Canonicalize() {
- HPhase phase("H_Canonicalize", this);
- // Before removing no-op instructions, save their semantic value.
- // We must be careful not to set the flag unnecessarily, because GVN
- // cannot identify two instructions when their flag value differs.
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->IsArithmeticBinaryOperation() &&
- instr->representation().IsInteger32() &&
- instr->HasAtLeastOneUseWithFlagAndNoneWithout(
- HInstruction::kTruncatingToInt32)) {
- instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
- }
- }
- }
- // Perform actual Canonicalization pass.
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- HValue* value = instr->Canonicalize();
- if (value != instr) instr->DeleteAndReplaceWith(value);
- }
- }
-}
-
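The deleted Canonicalize pass documents an invariant worth keeping in mind when reading its replacement phase: the truncation flag participates in value identity, so setting it gratuitously prevents GVN from merging otherwise identical instructions. A toy sketch of flag-sensitive value numbering (simplified types, not the HValue interface):

    #include <cstdio>

    struct ToyInstr {
      int opcode, left_id, right_id;
      unsigned flags;  // includes a bit for "all uses truncating to int32"
    };

    // GVN may merge two instructions only if the flags match as well.
    bool Equals(const ToyInstr& a, const ToyInstr& b) {
      return a.opcode == b.opcode && a.left_id == b.left_id &&
             a.right_id == b.right_id && a.flags == b.flags;
    }

    int main() {
      ToyInstr add1 = {1, 10, 11, 0x0};
      ToyInstr add2 = {1, 10, 11, 0x1};  // same add, truncation flag set
      std::printf("%d\n", Equals(add1, add2));  // 0: not mergeable
      return 0;
    }
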
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
@@ -2389,134 +2460,6 @@ void HGraph::AssignDominators() {
}
-// Mark all blocks that are dominated by an unconditional soft deoptimize to
-// prevent code motion across those blocks.
-void HGraph::PropagateDeoptimizingMark() {
- HPhase phase("H_Propagate deoptimizing mark", this);
- // Skip this phase if there is nothing to be done anyway.
- if (!has_soft_deoptimize()) return;
- MarkAsDeoptimizingRecursively(entry_block());
- NullifyUnreachableInstructions();
-}
-
-
-void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- HBasicBlock* dominated = block->dominated_blocks()->at(i);
- if (block->IsDeoptimizing()) dominated->MarkAsDeoptimizing();
- MarkAsDeoptimizingRecursively(dominated);
- }
-}
-
-
-void HGraph::NullifyUnreachableInstructions() {
- if (!FLAG_unreachable_code_elimination) return;
- int block_count = blocks_.length();
- for (int i = 0; i < block_count; ++i) {
- HBasicBlock* block = blocks_.at(i);
- bool nullify = false;
- const ZoneList<HBasicBlock*>* predecessors = block->predecessors();
- int predecessors_length = predecessors->length();
- bool all_predecessors_deoptimizing = (predecessors_length > 0);
- for (int j = 0; j < predecessors_length; ++j) {
- if (!predecessors->at(j)->IsDeoptimizing()) {
- all_predecessors_deoptimizing = false;
- break;
- }
- }
- if (all_predecessors_deoptimizing) nullify = true;
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- // Leave the basic structure of the graph intact.
- if (instr->IsBlockEntry()) continue;
- if (instr->IsControlInstruction()) continue;
- if (instr->IsSimulate()) continue;
- if (instr->IsEnterInlined()) continue;
- if (instr->IsLeaveInlined()) continue;
- if (nullify) {
- HInstruction* last_dummy = NULL;
- for (int j = 0; j < instr->OperandCount(); ++j) {
- HValue* operand = instr->OperandAt(j);
- // Insert an HDummyUse for each operand, unless the operand
- // is already an HDummyUse. If such a dummy use comes from the
- // same block, remember it as a potential replacement for the
- // instruction.
- if (operand->IsDummyUse()) {
- if (operand->block() == instr->block() &&
- last_dummy == NULL) {
- last_dummy = HInstruction::cast(operand);
- }
- continue;
- }
- if (operand->IsControlInstruction()) {
- // Inserting a dummy use for a value that's not defined anywhere
- // will fail. Some instructions define fake inputs on such
- // values as control flow dependencies.
- continue;
- }
- HDummyUse* dummy = new(zone()) HDummyUse(operand);
- dummy->InsertBefore(instr);
- last_dummy = dummy;
- }
- if (last_dummy == NULL) last_dummy = GetConstant1();
- instr->DeleteAndReplaceWith(last_dummy);
- continue;
- }
- if (instr->IsSoftDeoptimize()) {
- ASSERT(block->IsDeoptimizing());
- nullify = true;
- }
- }
- }
-}
-
-
-// Replace all phis consisting of a single non-loop operand plus any number of
-// loop operands by that single non-loop operand.
-void HGraph::EliminateRedundantPhis() {
- HPhase phase("H_Redundant phi elimination", this);
-
- // We do a simple fixed point iteration without any work list, because
- // machine-generated JavaScript can lead to a very dense Hydrogen graph,
- // where an enormous work list would result in OOM. Experiments showed
- // that this simple algorithm is good enough, and that even e.g. tracking
- // the set or range of blocks to consider is not a real improvement.
- bool need_another_iteration;
- ZoneList<HPhi*> redundant_phis(blocks_.length(), zone());
- do {
- need_another_iteration = false;
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_[i];
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- HValue* replacement = phi->GetRedundantReplacement();
- if (replacement != NULL) {
- // Remember phi to avoid concurrent modification of the block's phis.
- redundant_phis.Add(phi, zone());
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- value->SetOperandAt(it.index(), replacement);
- need_another_iteration |= value->IsPhi();
- }
- }
- }
- for (int i = 0; i < redundant_phis.length(); i++) {
- block->RemovePhi(redundant_phis[i]);
- }
- redundant_phis.Clear();
- }
- } while (need_another_iteration);
-
-#if DEBUG
- // Make sure that we *really* removed all redundant phis.
- for (int i = 0; i < blocks_.length(); ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
- ASSERT(blocks_[i]->phis()->at(j)->GetRedundantReplacement() == NULL);
- }
- }
-#endif
-}
-
-
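The replacement test that the removed phase iterated to a fixed point can be stated compactly: a phi is redundant when, ignoring occurrences of itself among its operands, only one distinct input remains. A simplified standalone version (here a "loop operand" is modeled as the phi referring to itself; the real test is HPhi::GetRedundantReplacement):

    #include <cstdio>
    #include <vector>

    struct ToyValue {};
    struct ToyPhi {
      ToyValue self;                    // the phi, viewed as a value
      std::vector<ToyValue*> operands;
    };

    // Returns the single non-self operand, or nullptr if the phi is needed.
    ToyValue* RedundantReplacement(ToyPhi* phi) {
      ToyValue* unique = nullptr;
      for (ToyValue* op : phi->operands) {
        if (op == &phi->self) continue;           // a loop back edge
        if (unique != nullptr && op != unique) return nullptr;
        unique = op;
      }
      return unique;  // nullptr only if every operand was the phi itself
    }

    int main() {
      ToyValue x;
      ToyPhi loop_phi;
      loop_phi.operands = {&x, &loop_phi.self};   // phi(x, phi) == x
      std::printf("%d\n", RedundantReplacement(&loop_phi) == &x);  // 1
      return 0;
    }
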
bool HGraph::CheckArgumentsPhiUses() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
@@ -2557,344 +2500,6 @@ void HGraph::CollectPhis() {
}
-void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
- BitVector in_worklist(GetMaximumValueID(), zone());
- for (int i = 0; i < worklist->length(); ++i) {
- ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
- in_worklist.Add(worklist->at(i)->id());
- }
-
- while (!worklist->is_empty()) {
- HValue* current = worklist->RemoveLast();
- in_worklist.Remove(current->id());
- if (current->UpdateInferredType()) {
- for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!in_worklist.Contains(use->id())) {
- in_worklist.Add(use->id());
- worklist->Add(use, zone());
- }
- }
- }
- }
-}
-
-
-class HStackCheckEliminator BASE_EMBEDDED {
- public:
- explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
-
- void Process();
-
- private:
- HGraph* graph_;
-};
-
-
-void HStackCheckEliminator::Process() {
- HPhase phase("H_Stack check elimination", graph_);
- // For each loop block walk the dominator tree from the backwards branch to
- // the loop header. If a call instruction is encountered the backwards branch
- // is dominated by a call and the stack check in the backwards branch can be
- // removed.
- for (int i = 0; i < graph_->blocks()->length(); i++) {
- HBasicBlock* block = graph_->blocks()->at(i);
- if (block->IsLoopHeader()) {
- HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
- HBasicBlock* dominator = back_edge;
- while (true) {
- for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
- if (it.Current()->IsCall()) {
- block->loop_information()->stack_check()->Eliminate();
- break;
- }
- }
-
- // Done when the loop header is processed.
- if (dominator == block) break;
-
- // Move up the dominator tree.
- dominator = dominator->dominator();
- }
- }
- }
-}
-
-
-void HGraph::MergeRemovableSimulates() {
- HPhase phase("H_Merge removable simulates", this);
- ZoneList<HSimulate*> mergelist(2, zone());
- for (int i = 0; i < blocks()->length(); ++i) {
- HBasicBlock* block = blocks()->at(i);
- // Make sure the merge list is empty at the start of a block.
- ASSERT(mergelist.is_empty());
- // Nasty heuristic: Never remove the first simulate in a block. This
- // just so happens to have a beneficial effect on register allocation.
- bool first = true;
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- if (current->IsLeaveInlined()) {
- // Never fold simulates from inlined environments into simulates
- // in the outer environment.
- // (Before each HEnterInlined, there is a non-foldable HSimulate
- // anyway, so we get the barrier in the other direction for free.)
- // Simply remove all accumulated simulates without merging. This
- // is safe because simulates after instructions with side effects
- // are never added to the merge list.
- while (!mergelist.is_empty()) {
- mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
- }
- continue;
- }
- if (current->IsReturn()) {
- // Drop mergeable simulates in the list. This is safe because
- // simulates after instructions with side effects are never added
- // to the merge list.
- while (!mergelist.is_empty()) {
- mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
- }
- continue;
- }
- // Skip the non-simulates and the first simulate.
- if (!current->IsSimulate()) continue;
- if (first) {
- first = false;
- continue;
- }
- HSimulate* current_simulate = HSimulate::cast(current);
- if ((current_simulate->previous()->HasObservableSideEffects() &&
- !current_simulate->next()->IsSimulate()) ||
- !current_simulate->is_candidate_for_removal()) {
- // This simulate is not suitable for folding.
- // Fold the ones accumulated so far.
- current_simulate->MergeWith(&mergelist);
- continue;
- } else {
- // Accumulate this simulate for folding later on.
- mergelist.Add(current_simulate, zone());
- }
- }
-
- if (!mergelist.is_empty()) {
- // Merge the accumulated simulates at the end of the block.
- HSimulate* last = mergelist.RemoveLast();
- last->MergeWith(&mergelist);
- }
- }
-}
-
-
-void HGraph::InitializeInferredTypes() {
- HPhase phase("H_Inferring types", this);
- InitializeInferredTypes(0, this->blocks_.length() - 1);
-}
-
-
-void HGraph::InitializeInferredTypes(int from_inclusive, int to_inclusive) {
- for (int i = from_inclusive; i <= to_inclusive; ++i) {
- HBasicBlock* block = blocks_[i];
-
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); j++) {
- phis->at(j)->UpdateInferredType();
- }
-
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- it.Current()->UpdateInferredType();
- }
-
- if (block->IsLoopHeader()) {
- HBasicBlock* last_back_edge =
- block->loop_information()->GetLastBackEdge();
- InitializeInferredTypes(i + 1, last_back_edge->block_id());
- // Skip all blocks already processed by the recursive call.
- i = last_back_edge->block_id();
- // Update phis of the loop header now after the whole loop body is
- // guaranteed to be processed.
- ZoneList<HValue*> worklist(block->phis()->length(), zone());
- for (int j = 0; j < block->phis()->length(); ++j) {
- worklist.Add(block->phis()->at(j), zone());
- }
- InferTypes(&worklist);
- }
- }
-}
-
-
-void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
- HValue* current = value;
- while (current != NULL) {
- if (visited->Contains(current->id())) return;
-
- // For phis, we must propagate the check to all of its inputs.
- if (current->IsPhi()) {
- visited->Add(current->id());
- HPhi* phi = HPhi::cast(current);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- PropagateMinusZeroChecks(phi->OperandAt(i), visited);
- }
- break;
- }
-
- // For multiplication, division, and Math.min/max(), we must propagate
- // to the left and the right side.
- if (current->IsMul()) {
- HMul* mul = HMul::cast(current);
- mul->EnsureAndPropagateNotMinusZero(visited);
- PropagateMinusZeroChecks(mul->left(), visited);
- PropagateMinusZeroChecks(mul->right(), visited);
- } else if (current->IsDiv()) {
- HDiv* div = HDiv::cast(current);
- div->EnsureAndPropagateNotMinusZero(visited);
- PropagateMinusZeroChecks(div->left(), visited);
- PropagateMinusZeroChecks(div->right(), visited);
- } else if (current->IsMathMinMax()) {
- HMathMinMax* minmax = HMathMinMax::cast(current);
- visited->Add(minmax->id());
- PropagateMinusZeroChecks(minmax->left(), visited);
- PropagateMinusZeroChecks(minmax->right(), visited);
- }
-
- current = current->EnsureAndPropagateNotMinusZero(visited);
- }
-}
-
-
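The propagation above targets multiplication, division, and min/max because those are the operations whose int32-typed result can be a -0 that differs from +0 only in a handful of contexts. The observability gap, shown with plain C++ doubles:

    #include <cstdio>

    int main() {
      double minus_zero = -1.0 * 0.0;  // a Mul that produces -0
      // -0 compares equal to +0, so truncating it to int32 0 is usually fine...
      std::printf("%d\n", minus_zero == 0.0);   // 1
      // ...but it is observable through division (and Math.min, the sign bit,
      // etc.), which is what EnsureAndPropagateNotMinusZero accounts for.
      std::printf("%f\n", 1.0 / minus_zero);    // -inf
      return 0;
    }
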
-void HGraph::InsertRepresentationChangeForUse(HValue* value,
- HValue* use_value,
- int use_index,
- Representation to) {
- // Insert the representation change right before its use. For phi-uses we
- // insert at the end of the corresponding predecessor.
- HInstruction* next = NULL;
- if (use_value->IsPhi()) {
- next = use_value->block()->predecessors()->at(use_index)->end();
- } else {
- next = HInstruction::cast(use_value);
- }
- // For constants we try to make the representation change at compile
- // time. When a representation change is not possible without loss of
- // information we treat constants like normal instructions and insert the
- // change instructions for them.
- HInstruction* new_value = NULL;
- bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
- bool allow_undefined_as_nan =
- use_value->CheckFlag(HValue::kAllowUndefinedAsNaN);
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- // Try to create a new copy of the constant with the new representation.
- new_value = (is_truncating && to.IsInteger32())
- ? constant->CopyToTruncatedInt32(zone())
- : constant->CopyToRepresentation(to, zone());
- }
-
- if (new_value == NULL) {
- new_value = new(zone()) HChange(value, to,
- is_truncating, allow_undefined_as_nan);
- }
-
- new_value->InsertBefore(next);
- use_value->SetOperandAt(use_index, new_value);
-}
-
-
-void HGraph::InsertRepresentationChangesForValue(HValue* value) {
- Representation r = value->representation();
- if (r.IsNone()) return;
- if (value->HasNoUses()) return;
-
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use_value = it.value();
- int use_index = it.index();
- Representation req = use_value->RequiredInputRepresentation(use_index);
- if (req.IsNone() || req.Equals(r)) continue;
- InsertRepresentationChangeForUse(value, use_value, use_index, req);
- }
- if (value->HasNoUses()) {
- ASSERT(value->IsConstant());
- value->DeleteAndReplaceWith(NULL);
- }
-
- // The only purpose of an HForceRepresentation is to represent the value
- // after the (possible) HChange instruction. We make it disappear.
- if (value->IsForceRepresentation()) {
- value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
- }
-}
-
-
-void HGraph::InsertRepresentationChanges() {
- HPhase phase("H_Representation changes", this);
-
- // Compute truncation flag for phis: Initially assume that all
- // int32-phis allow truncation and iteratively remove the ones that
- // are used in an operation that does not allow a truncating
- // conversion.
- ZoneList<HPhi*> worklist(8, zone());
-
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (phi->representation().IsInteger32()) {
- phi->SetFlag(HValue::kTruncatingToInt32);
- }
- }
-
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- // If a Phi is used as a non-truncating int32 or as a double,
- // clear its "truncating" flag.
- HValue* use = it.value();
- Representation input_representation =
- use->RequiredInputRepresentation(it.index());
- if (!input_representation.IsInteger32() ||
- !use->CheckFlag(HValue::kTruncatingToInt32)) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
- phi->id(), it.value()->id(), it.value()->Mnemonic());
- }
- phi->ClearFlag(HValue::kTruncatingToInt32);
- worklist.Add(phi, zone());
- break;
- }
- }
- }
-
- while (!worklist.is_empty()) {
- HPhi* current = worklist.RemoveLast();
- for (int i = 0; i < current->OperandCount(); ++i) {
- HValue* input = current->OperandAt(i);
- if (input->IsPhi() &&
- input->representation().IsInteger32() &&
- input->CheckFlag(HValue::kTruncatingToInt32)) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
- input->id(), current->id(), current->Mnemonic());
- }
- input->ClearFlag(HValue::kTruncatingToInt32);
- worklist.Add(HPhi::cast(input), zone());
- }
- }
- }
-
- for (int i = 0; i < blocks_.length(); ++i) {
- // Process phi instructions first.
- const ZoneList<HPhi*>* phis = blocks_[i]->phis();
- for (int j = 0; j < phis->length(); j++) {
- InsertRepresentationChangesForValue(phis->at(j));
- }
-
- // Process normal instructions.
- HInstruction* current = blocks_[i]->first();
- while (current != NULL) {
- HInstruction* next = current->next();
- InsertRepresentationChangesForValue(current);
- current = next;
- }
- }
-}
-
-
void HGraph::RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi) {
if (!phi->CheckFlag(HValue::kAllowUndefinedAsNaN)) return;
phi->ClearFlag(HValue::kAllowUndefinedAsNaN);
@@ -2909,12 +2514,11 @@ void HGraph::RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi) {
void HGraph::MarkDeoptimizeOnUndefined() {
HPhase phase("H_MarkDeoptimizeOnUndefined", this);
- // Compute DeoptimizeOnUndefined flag for phis.
- // Any phi that can reach a use with DeoptimizeOnUndefined set must
- // have DeoptimizeOnUndefined set. Currently only HCompareIDAndBranch, with
- // double input representation, has this flag set.
- // The flag is used by HChange tagged->double, which must deoptimize
- // if one of its uses has this flag set.
+ // Compute DeoptimizeOnUndefined flag for phis. Any phi that can reach a use
+ // with DeoptimizeOnUndefined set must have DeoptimizeOnUndefined set.
+ // Currently only HCompareNumericAndBranch, with double input representation,
+ // has this flag set. The flag is used by HChange tagged->double, which must
+ // deoptimize if one of its uses has this flag set.
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
@@ -2928,32 +2532,6 @@ void HGraph::MarkDeoptimizeOnUndefined() {
}
-void HGraph::ComputeMinusZeroChecks() {
- HPhase phase("H_Compute minus zero checks", this);
- BitVector visited(GetMaximumValueID(), zone());
- for (int i = 0; i < blocks_.length(); ++i) {
- for (HInstructionIterator it(blocks_[i]); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- if (current->IsChange()) {
- HChange* change = HChange::cast(current);
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = change->value()->representation();
- ASSERT(from.Equals(change->from()));
- if (from.IsInteger32()) {
- ASSERT(change->to().IsTagged() ||
- change->to().IsDouble() ||
- change->to().IsSmi());
- ASSERT(visited.IsEmpty());
- PropagateMinusZeroChecks(change->value(), &visited);
- visited.Clear();
- }
- }
- }
- }
-}
-
-
// Implementation of utility class to encapsulate the translation state for
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
@@ -3215,7 +2793,7 @@ void TestContext::BuildBranch(HValue* value) {
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
ToBooleanStub::Types expected(condition()->to_boolean_types());
- HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
+ HBranch* test = new(zone()) HBranch(value, expected, empty_true, empty_false);
builder->current_block()->Finish(test);
empty_true->Goto(if_true(), builder->function_state());
@@ -3400,13 +2978,13 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
Run<HEnvironmentLivenessAnalysisPhase>();
}
- PropagateDeoptimizingMark();
+ Run<HPropagateDeoptimizingMarkPhase>();
if (!CheckConstPhiUses()) {
*bailout_reason = SmartArrayPointer<char>(StrDup(
"Unsupported phi use of const variable"));
return false;
}
- EliminateRedundantPhis();
+ Run<HRedundantPhiEliminationPhase>();
if (!CheckArgumentsPhiUses()) {
*bailout_reason = SmartArrayPointer<char>(StrDup(
"Unsupported phi use of arguments"));
@@ -3414,9 +2992,7 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
}
// Remove dead code and phis
- if (FLAG_dead_code_elimination) {
- DeadCodeElimination("H_Eliminate early dead code");
- }
+ if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
CollectPhis();
if (has_osr()) osr()->FinishOsrValues();
@@ -3426,19 +3002,19 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
// Remove HSimulate instructions that have turned out not to be needed
// after all by folding them into the following HSimulate.
// This must happen after inferring representations.
- MergeRemovableSimulates();
+ Run<HMergeRemovableSimulatesPhase>();
MarkDeoptimizeOnUndefined();
- InsertRepresentationChanges();
+ Run<HRepresentationChangesPhase>();
- InitializeInferredTypes();
+ Run<HInferTypesPhase>();
// Must be performed before canonicalization to ensure that Canonicalize
// will not remove semantically meaningful ToInt32 operations, e.g. BIT_OR
// with zero.
if (FLAG_opt_safe_uint32_operations) Run<HUint32AnalysisPhase>();
- if (FLAG_use_canonicalizing) Canonicalize();
+ if (FLAG_use_canonicalizing) Run<HCanonicalizePhase>();
if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
@@ -3446,20 +3022,17 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
if (FLAG_use_range) Run<HRangeAnalysisPhase>();
- ComputeMinusZeroChecks();
+ Run<HComputeMinusZeroChecksPhase>();
// Eliminate redundant stack checks on backwards branches.
- HStackCheckEliminator sce(this);
- sce.Process();
+ Run<HStackCheckEliminationPhase>();
if (FLAG_idefs) SetupInformativeDefinitions();
if (FLAG_array_bounds_checks_elimination && !FLAG_idefs) {
- EliminateRedundantBoundsChecks();
- }
- if (FLAG_array_index_dehoisting) DehoistSimpleArrayIndexComputations();
- if (FLAG_dead_code_elimination) {
- DeadCodeElimination("H_Eliminate late dead code");
+ Run<HBoundsCheckEliminationPhase>();
}
+ if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
+ if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
RestoreActualValues();
@@ -3511,528 +3084,6 @@ void HGraph::SetupInformativeDefinitions() {
}
-// We try to "factor up" HBoundsCheck instructions towards the root of the
-// dominator tree.
-// For now we handle checks where the index is of the form "exp + int32value".
-// If in the dominator tree we check "exp + v1" and later (dominated)
-// "exp + v2", then if v2 <= v1 we can safely remove the second check, and if
-// v2 > v1 we can use v2 in the first check and again remove the second.
-// To do so we keep a dictionary of all checks where the key is the pair
-// "exp, length".
-// The class BoundsCheckKey represents this key.
-class BoundsCheckKey : public ZoneObject {
- public:
- HValue* IndexBase() const { return index_base_; }
- HValue* Length() const { return length_; }
-
- uint32_t Hash() {
- return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
- }
-
- static BoundsCheckKey* Create(Zone* zone,
- HBoundsCheck* check,
- int32_t* offset) {
- if (!check->index()->representation().IsSmiOrInteger32()) return NULL;
-
- HValue* index_base = NULL;
- HConstant* constant = NULL;
- bool is_sub = false;
-
- if (check->index()->IsAdd()) {
- HAdd* index = HAdd::cast(check->index());
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
- constant = HConstant::cast(index->right());
- index_base = index->left();
- }
- } else if (check->index()->IsSub()) {
- HSub* index = HSub::cast(check->index());
- is_sub = true;
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
- constant = HConstant::cast(index->right());
- index_base = index->left();
- }
- }
-
- if (constant != NULL && constant->HasInteger32Value()) {
- *offset = is_sub ? - constant->Integer32Value()
- : constant->Integer32Value();
- } else {
- *offset = 0;
- index_base = check->index();
- }
-
- return new(zone) BoundsCheckKey(index_base, check->length());
- }
-
- private:
- BoundsCheckKey(HValue* index_base, HValue* length)
- : index_base_(index_base),
- length_(length) { }
-
- HValue* index_base_;
- HValue* length_;
-};
-
-
-// Data about each HBoundsCheck that can be eliminated or moved.
-// It is the "value" in the dictionary indexed by "base-index, length"
-// (the key is BoundsCheckKey).
-// We scan the code with a dominator tree traversal.
-// Traversing the dominator tree we keep a stack (implemented as a singly
-// linked list) of "data" for each basic block that contains a relevant check
-// with the same key (the dictionary holds the head of the list).
-// We also keep all the "data" created for a given basic block in a list, and
-// use it to "clean up" the dictionary when backtracking in the dominator tree
-// traversal.
-// This way, each dictionary entry always points directly to the check that
-// dominates the code currently being examined.
-// We also track the current "offset" of the index expression and use it to
-// decide if any check is already "covered" (so it can be removed) or not.
-class BoundsCheckBbData: public ZoneObject {
- public:
- BoundsCheckKey* Key() const { return key_; }
- int32_t LowerOffset() const { return lower_offset_; }
- int32_t UpperOffset() const { return upper_offset_; }
- HBasicBlock* BasicBlock() const { return basic_block_; }
- HBoundsCheck* LowerCheck() const { return lower_check_; }
- HBoundsCheck* UpperCheck() const { return upper_check_; }
- BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
- BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
-
- bool OffsetIsCovered(int32_t offset) const {
- return offset >= LowerOffset() && offset <= UpperOffset();
- }
-
- bool HasSingleCheck() { return lower_check_ == upper_check_; }
-
- // The goal of this method is to modify either upper_offset_ or
- // lower_offset_ so that new_offset is also covered (the covered
- // range grows).
- //
- // The precondition is that new_check follows UpperCheck() and
- // LowerCheck() in the same basic block, and that new_offset is not
- // covered (otherwise we could simply remove new_check).
- //
- // If HasSingleCheck() is true then new_check is added as "second check"
- // (either upper or lower; note that HasSingleCheck() becomes false).
- // Otherwise one of the current checks is modified so that it also covers
- // new_offset, and new_check is removed.
- //
- // If the check cannot be modified because the context is unknown, it
- // returns false; otherwise it returns true.
- bool CoverCheck(HBoundsCheck* new_check,
- int32_t new_offset) {
- ASSERT(new_check->index()->representation().IsSmiOrInteger32());
- bool keep_new_check = false;
-
- if (new_offset > upper_offset_) {
- upper_offset_ = new_offset;
- if (HasSingleCheck()) {
- keep_new_check = true;
- upper_check_ = new_check;
- } else {
- bool result = BuildOffsetAdd(upper_check_,
- &added_upper_index_,
- &added_upper_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- if (!result) return false;
- upper_check_->ReplaceAllUsesWith(upper_check_->index());
- upper_check_->SetOperandAt(0, added_upper_index_);
- }
- } else if (new_offset < lower_offset_) {
- lower_offset_ = new_offset;
- if (HasSingleCheck()) {
- keep_new_check = true;
- lower_check_ = new_check;
- } else {
- bool result = BuildOffsetAdd(lower_check_,
- &added_lower_index_,
- &added_lower_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- if (!result) return false;
- lower_check_->ReplaceAllUsesWith(lower_check_->index());
- lower_check_->SetOperandAt(0, added_lower_index_);
- }
- } else {
- ASSERT(false);
- }
-
- if (!keep_new_check) {
- new_check->DeleteAndReplaceWith(new_check->ActualValue());
- }
-
- return true;
- }
-
- void RemoveZeroOperations() {
- RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
- RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
- }
-
- BoundsCheckBbData(BoundsCheckKey* key,
- int32_t lower_offset,
- int32_t upper_offset,
- HBasicBlock* bb,
- HBoundsCheck* lower_check,
- HBoundsCheck* upper_check,
- BoundsCheckBbData* next_in_bb,
- BoundsCheckBbData* father_in_dt)
- : key_(key),
- lower_offset_(lower_offset),
- upper_offset_(upper_offset),
- basic_block_(bb),
- lower_check_(lower_check),
- upper_check_(upper_check),
- added_lower_index_(NULL),
- added_lower_offset_(NULL),
- added_upper_index_(NULL),
- added_upper_offset_(NULL),
- next_in_bb_(next_in_bb),
- father_in_dt_(father_in_dt) { }
-
- private:
- BoundsCheckKey* key_;
- int32_t lower_offset_;
- int32_t upper_offset_;
- HBasicBlock* basic_block_;
- HBoundsCheck* lower_check_;
- HBoundsCheck* upper_check_;
- HInstruction* added_lower_index_;
- HConstant* added_lower_offset_;
- HInstruction* added_upper_index_;
- HConstant* added_upper_offset_;
- BoundsCheckBbData* next_in_bb_;
- BoundsCheckBbData* father_in_dt_;
-
- // Given an existing add instruction and a bounds check, tries to find
- // the current context (either of the add or of the check index).
- HValue* IndexContext(HInstruction* add, HBoundsCheck* check) {
- if (add != NULL && add->IsAdd()) {
- return HAdd::cast(add)->context();
- }
- if (check->index()->IsBinaryOperation()) {
- return HBinaryOperation::cast(check->index())->context();
- }
- return NULL;
- }
-
- // This function returns false if it cannot build the add because the
- // current context cannot be determined.
- bool BuildOffsetAdd(HBoundsCheck* check,
- HInstruction** add,
- HConstant** constant,
- HValue* original_value,
- Representation representation,
- int32_t new_offset) {
- HValue* index_context = IndexContext(*add, check);
- if (index_context == NULL) return false;
-
- HConstant* new_constant = new(BasicBlock()->zone()) HConstant(
- new_offset, representation);
- if (*add == NULL) {
- new_constant->InsertBefore(check);
- (*add) = HAdd::New(
- BasicBlock()->zone(), index_context, original_value, new_constant);
- (*add)->AssumeRepresentation(representation);
- (*add)->InsertBefore(check);
- } else {
- new_constant->InsertBefore(*add);
- (*constant)->DeleteAndReplaceWith(new_constant);
- }
- *constant = new_constant;
- return true;
- }
-
- void RemoveZeroAdd(HInstruction** add, HConstant** constant) {
- if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) {
- (*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left());
- (*constant)->DeleteAndReplaceWith(NULL);
- }
- }
-};
-
-
-static bool BoundsCheckKeyMatch(void* key1, void* key2) {
- BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
- BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
- return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
-}
-
-
-class BoundsCheckTable : private ZoneHashMap {
- public:
- BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone) {
- return reinterpret_cast<BoundsCheckBbData**>(
- &(Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value));
- }
-
- void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone) {
- Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value = data;
- }
-
- void Delete(BoundsCheckKey* key) {
- Remove(key, key->Hash());
- }
-
- explicit BoundsCheckTable(Zone* zone)
- : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)) { }
-};
-
-
-// Eliminates checks in bb and recursively in the dominated blocks.
-// Also replace the results of check instructions with the original value, if
-// the result is used. This is safe now, since we don't do code motion after
-// this point. It enables better register allocation since the value produced
-// by check instructions is really a copy of the original value.
-void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
- BoundsCheckTable* table) {
- BoundsCheckBbData* bb_data_list = NULL;
-
- for (HInstructionIterator it(bb); !it.Done(); it.Advance()) {
- HInstruction* i = it.Current();
- if (!i->IsBoundsCheck()) continue;
-
- HBoundsCheck* check = HBoundsCheck::cast(i);
- int32_t offset;
- BoundsCheckKey* key =
- BoundsCheckKey::Create(zone(), check, &offset);
- if (key == NULL) continue;
- BoundsCheckBbData** data_p = table->LookupOrInsert(key, zone());
- BoundsCheckBbData* data = *data_p;
- if (data == NULL) {
- bb_data_list = new(zone()) BoundsCheckBbData(key,
- offset,
- offset,
- bb,
- check,
- check,
- bb_data_list,
- NULL);
- *data_p = bb_data_list;
- } else if (data->OffsetIsCovered(offset)) {
- check->DeleteAndReplaceWith(check->ActualValue());
- } else if (data->BasicBlock() != bb ||
- !data->CoverCheck(check, offset)) {
- // If the check is in the current BB we try to modify it by calling
- // "CoverCheck", but if that also fails we record the current offsets
- // in a new data instance because from now on they are covered.
- int32_t new_lower_offset = offset < data->LowerOffset()
- ? offset
- : data->LowerOffset();
- int32_t new_upper_offset = offset > data->UpperOffset()
- ? offset
- : data->UpperOffset();
- bb_data_list = new(zone()) BoundsCheckBbData(key,
- new_lower_offset,
- new_upper_offset,
- bb,
- data->LowerCheck(),
- data->UpperCheck(),
- bb_data_list,
- data);
- table->Insert(key, bb_data_list, zone());
- }
- }
-
- for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
- EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i), table);
- }
-
- for (BoundsCheckBbData* data = bb_data_list;
- data != NULL;
- data = data->NextInBasicBlock()) {
- data->RemoveZeroOperations();
- if (data->FatherInDominatorTree()) {
- table->Insert(data->Key(), data->FatherInDominatorTree(), zone());
- } else {
- table->Delete(data->Key());
- }
- }
-}
-
-
-void HGraph::EliminateRedundantBoundsChecks() {
- HPhase phase("H_Eliminate bounds checks", this);
- BoundsCheckTable checks_table(zone());
- EliminateRedundantBoundsChecks(entry_block(), &checks_table);
-}
-
-
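The net effect of this traversal is interval widening per (base, length) key: the first check seeds a one-point range, covered offsets delete their checks, and uncovered offsets either widen the range or open a new BoundsCheckBbData. The arithmetic core, reduced to a standalone sketch with invented names:

    #include <algorithm>
    #include <cstdio>

    struct CoveredRange { int lower, upper; };

    bool Covered(const CoveredRange& r, int offset) {
      return offset >= r.lower && offset <= r.upper;
    }

    int main() {
      // Checks on exp+3, exp+1, exp+2, exp+7 against the same length:
      CoveredRange r = {3, 3};              // first check seeds [3,3]
      r.lower = std::min(r.lower, 1);       // exp+1 becomes the lower check
      std::printf("%d\n", Covered(r, 2));   // 1: exp+2 is deleted outright
      r.upper = std::max(r.upper, 7);       // exp+7 rewrites the upper check
      std::printf("[%d,%d]\n", r.lower, r.upper);  // [1,7]: two checks remain
      return 0;
    }
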
-static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
- HValue* index = array_operation->GetKey()->ActualValue();
- if (!index->representation().IsSmiOrInteger32()) return;
-
- HConstant* constant;
- HValue* subexpression;
- int32_t sign;
- if (index->IsAdd()) {
- sign = 1;
- HAdd* add = HAdd::cast(index);
- if (add->left()->IsConstant()) {
- subexpression = add->right();
- constant = HConstant::cast(add->left());
- } else if (add->right()->IsConstant()) {
- subexpression = add->left();
- constant = HConstant::cast(add->right());
- } else {
- return;
- }
- } else if (index->IsSub()) {
- sign = -1;
- HSub* sub = HSub::cast(index);
- if (sub->left()->IsConstant()) {
- subexpression = sub->right();
- constant = HConstant::cast(sub->left());
- } else if (sub->right()->IsConstant()) {
- subexpression = sub->left();
- constant = HConstant::cast(sub->right());
- } else {
- return;
- }
- } else {
- return;
- }
-
- if (!constant->HasInteger32Value()) return;
- int32_t value = constant->Integer32Value() * sign;
- // We limit offset values to 30 bits because we want to avoid the risk of
- // overflows when the offset is added to the object header size.
- if (value >= 1 << 30 || value < 0) return;
- array_operation->SetKey(subexpression);
- if (index->HasNoUses()) {
- index->DeleteAndReplaceWith(NULL);
- }
- ASSERT(value >= 0);
- array_operation->SetIndexOffset(static_cast<uint32_t>(value));
- array_operation->SetDehoisted(true);
-}
-
-
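The guard at the end of DehoistArrayIndex is easy to miss: after the sign is folded in, only constants in [0, 2^30) are accepted, so a[i - 3] and very large offsets keep their explicit index arithmetic. The filter in isolation (CanDehoist is an invented name):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the acceptance test above: value = constant * sign must be a
    // small non-negative offset before it is folded into the element access.
    bool CanDehoist(int32_t constant, int32_t sign) {
      int64_t value = static_cast<int64_t>(constant) * sign;  // no overflow
      return value >= 0 && value < (1 << 30);
    }

    int main() {
      std::printf("%d %d %d\n",
                  CanDehoist(3, 1),          // 1: a[i + 3] dehoists
                  CanDehoist(3, -1),         // 0: a[i - 3] keeps the Sub
                  CanDehoist(1 << 30, 1));   // 0: too large, could overflow
      return 0;                              //    past the header offset
    }
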
-void HGraph::DehoistSimpleArrayIndexComputations() {
- HPhase phase("H_Dehoist index computations", this);
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- ArrayInstructionInterface* array_instruction = NULL;
- if (instr->IsLoadKeyed()) {
- HLoadKeyed* op = HLoadKeyed::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyed()) {
- HStoreKeyed* op = HStoreKeyed::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else {
- continue;
- }
- DehoistArrayIndex(array_instruction);
- }
- }
-}
-
-
-void HGraph::DeadCodeElimination(const char* phase_name) {
- HPhase phase(phase_name, this);
- MarkLiveInstructions();
- RemoveDeadInstructions();
-}
-
-
-void HGraph::MarkLiveInstructions() {
- ZoneList<HValue*> worklist(blocks_.length(), zone());
-
- // Mark initial root instructions for dead code elimination.
- for (int i = 0; i < blocks()->length(); ++i) {
- HBasicBlock* block = blocks()->at(i);
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->CannotBeEliminated()) MarkLive(NULL, instr, &worklist);
- }
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- if (phi->CannotBeEliminated()) MarkLive(NULL, phi, &worklist);
- }
- }
-
- // Transitively mark all inputs of live instructions live.
- while (!worklist.is_empty()) {
- HValue* instr = worklist.RemoveLast();
- for (int i = 0; i < instr->OperandCount(); ++i) {
- MarkLive(instr, instr->OperandAt(i), &worklist);
- }
- }
-}
-
-
-void HGraph::MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist) {
- if (!instr->CheckFlag(HValue::kIsLive)) {
- instr->SetFlag(HValue::kIsLive);
- worklist->Add(instr, zone());
-
- if (FLAG_trace_dead_code_elimination) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- if (ref != NULL) {
- ref->PrintTo(&stream);
- } else {
- stream.Add("root ");
- }
- stream.Add(" -> ");
- instr->PrintTo(&stream);
- PrintF("[MarkLive %s]\n", *stream.ToCString());
- }
- }
-}
-
-
-void HGraph::RemoveDeadInstructions() {
- ZoneList<HPhi*> dead_phis(blocks_.length(), zone());
-
- // Remove any instruction not marked kIsLive.
- for (int i = 0; i < blocks()->length(); ++i) {
- HBasicBlock* block = blocks()->at(i);
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (!instr->CheckFlag(HValue::kIsLive)) {
- // Instruction has not been marked live; assume it is dead and remove.
- // TODO(titzer): we don't remove constants because some special ones
- // might be used by later phases and are assumed to be in the graph.
- if (!instr->IsConstant()) instr->DeleteAndReplaceWith(NULL);
- } else {
- // Clear the liveness flag to leave the graph clean for the next DCE.
- instr->ClearFlag(HValue::kIsLive);
- }
- }
- // Collect phis that are dead and remove them in the next pass.
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- if (!phi->CheckFlag(HValue::kIsLive)) {
- dead_phis.Add(phi, zone());
- } else {
- phi->ClearFlag(HValue::kIsLive);
- }
- }
- }
-
- // Process phis separately to avoid simultaneously mutating the phi list.
- while (!dead_phis.is_empty()) {
- HPhi* phi = dead_phis.RemoveLast();
- HBasicBlock* block = phi->block();
- phi->DeleteAndReplaceWith(NULL);
- block->RecordDeletedPhi(phi->merged_index());
- }
-}
-
-
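MarkLiveInstructions and RemoveDeadInstructions form a classic mark-and-sweep over the graph: the roots are the instructions that CannotBeEliminated, and liveness flows backwards through operands. The skeleton of the mark phase, detached from the Hydrogen types (all names below are invented):

    #include <cstdio>
    #include <vector>

    struct ToyNode {
      std::vector<ToyNode*> operands;
      bool is_root = false;  // stands in for CannotBeEliminated()
      bool live = false;     // stands in for the kIsLive flag
    };

    void MarkLive(std::vector<ToyNode*>& graph) {
      std::vector<ToyNode*> worklist;
      for (ToyNode* n : graph) {                 // seed with the roots
        if (n->is_root && !n->live) { n->live = true; worklist.push_back(n); }
      }
      while (!worklist.empty()) {                // pull liveness through inputs
        ToyNode* n = worklist.back(); worklist.pop_back();
        for (ToyNode* op : n->operands) {
          if (!op->live) { op->live = true; worklist.push_back(op); }
        }
      }
    }

    int main() {
      ToyNode a, b, dead;                        // b depends on a; dead on none
      b.operands = {&a}; b.is_root = true;
      std::vector<ToyNode*> graph = {&a, &b, &dead};
      MarkLive(graph);
      std::printf("%d %d %d\n", a.live, b.live, dead.live);  // 1 1 0
      return 0;
    }
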
void HGraph::RestoreActualValues() {
HPhase phase("H_Restore actual values", this);
@@ -4067,17 +3118,6 @@ void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
}
-void HOptimizedGraphBuilder::AddSoftDeoptimize() {
- isolate()->counters()->soft_deopts_requested()->Increment();
- if (FLAG_always_opt) return;
- if (current_block()->IsDeoptimizing()) return;
- Add<HSoftDeoptimize>();
- isolate()->counters()->soft_deopts_inserted()->Increment();
- current_block()->MarkAsDeoptimizing();
- graph()->set_has_soft_deoptimize(true);
-}
-
-
template <class Instruction>
HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
@@ -4428,10 +3468,10 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
AddSoftDeoptimize();
}
- HCompareIDAndBranch* compare_ =
- new(zone()) HCompareIDAndBranch(tag_value,
- label_value,
- Token::EQ_STRICT);
+ HCompareNumericAndBranch* compare_ =
+ new(zone()) HCompareNumericAndBranch(tag_value,
+ label_value,
+ Token::EQ_STRICT);
compare_->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
compare = compare_;
@@ -4715,8 +3755,8 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HValue* limit = environment()->ExpressionStackAt(1);
// Check that we still have more keys.
- HCompareIDAndBranch* compare_index =
- new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
+ HCompareNumericAndBranch* compare_index =
+ new(zone()) HCompareNumericAndBranch(index, limit, Token::LT);
compare_index->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
@@ -4952,9 +3992,20 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
- HLoadGlobalCell* instr =
- new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ if (cell->type()->IsConstant()) {
+ cell->AddDependentCompilationInfo(top_info());
+ Handle<Object> constant_object = cell->type()->AsConstant();
+ if (constant_object->IsConsString()) {
+ constant_object =
+ FlattenGetString(Handle<String>::cast(constant_object));
+ }
+ HConstant* constant = new(zone()) HConstant(constant_object);
+ return ast_context()->ReturnInstruction(constant, expr->id());
+ } else {
+ HLoadGlobalCell* instr =
+ new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
+ return ast_context()->ReturnInstruction(instr, expr->id());
+ }
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -5207,6 +4258,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
literal = BuildFastLiteral(context,
boilerplate_object,
original_boilerplate_object,
+ Handle<Object>::null(),
data_size,
pointer_size,
DONT_TRACK_ALLOCATION_SITE);
@@ -5314,25 +4366,37 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* context = environment()->LookupContext();
HInstruction* literal;
+ Handle<AllocationSite> site;
Handle<FixedArray> literals(environment()->closure()->literals(), isolate());
- Handle<Object> raw_boilerplate(literals->get(expr->literal_index()),
- isolate());
-
bool uninitialized = false;
- if (raw_boilerplate->IsUndefined()) {
+ Handle<Object> literals_cell(literals->get(expr->literal_index()),
+ isolate());
+ Handle<Object> raw_boilerplate;
+ if (literals_cell->IsUndefined()) {
uninitialized = true;
raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
isolate(), literals, expr->constant_elements());
if (raw_boilerplate.is_null()) {
return Bailout("array boilerplate creation failed");
}
- literals->set(expr->literal_index(), *raw_boilerplate);
+
+ site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(*raw_boilerplate);
+ literals->set(expr->literal_index(), *site);
+
if (JSObject::cast(*raw_boilerplate)->elements()->map() ==
isolate()->heap()->fixed_cow_array_map()) {
isolate()->counters()->cow_arrays_created_runtime()->Increment();
}
+ } else {
+ ASSERT(literals_cell->IsAllocationSite());
+ site = Handle<AllocationSite>::cast(literals_cell);
+ raw_boilerplate = Handle<Object>(site->transition_info(), isolate());
}
+ ASSERT(!raw_boilerplate.is_null());
+ ASSERT(site->IsLiteralSite());
+
Handle<JSObject> original_boilerplate_object =
Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
@@ -5341,7 +4405,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// TODO(mvstanton): This heuristic is only a temporary solution. In the
// end, we want to quit creating allocation site info after a certain number
// of GCs for a call site.
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(
+ AllocationSiteMode mode = AllocationSite::GetMode(
boilerplate_elements_kind);
// Check whether to use fast or slow deep-copying for boilerplate.
@@ -5354,13 +4418,14 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
&data_size,
&pointer_size)) {
if (mode == TRACK_ALLOCATION_SITE) {
- pointer_size += AllocationSiteInfo::kSize;
+ pointer_size += AllocationMemento::kSize;
}
Handle<JSObject> boilerplate_object = DeepCopy(original_boilerplate_object);
literal = BuildFastLiteral(context,
boilerplate_object,
original_boilerplate_object,
+ site,
data_size,
pointer_size,
mode);
@@ -5440,6 +4505,7 @@ static bool ComputeLoadStoreField(Handle<Map> type,
Handle<String> name,
LookupResult* lookup,
bool is_store) {
+ ASSERT(!is_store || !type->is_observed());
if (type->has_named_interceptor()) {
lookup->InterceptorResult(NULL);
return false;
@@ -5577,19 +4643,6 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
}
-HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
- HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map);
- Add<HPushArgument>(object);
- Add<HPushArgument>(value);
- return new(zone()) HCallConstantFunction(setter, 2);
-}
-
-
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
HValue* object,
Handle<String> name,
@@ -5607,6 +4660,17 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
}
+static bool CanLoadPropertyFromPrototype(Handle<Map> map,
+ Handle<Name> name,
+ LookupResult* lookup) {
+ if (map->has_named_interceptor()) return false;
+ if (map->is_dictionary_map()) return false;
+ map->LookupDescriptor(NULL, *name, lookup);
+ if (lookup->IsFound()) return false;
+ return true;
+}
+
+
HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
Property* expr,
HValue* object,
@@ -5644,12 +4708,40 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
representation = representation.generalize(new_representation);
}
- if (count != types->length()) return NULL;
+ if (count == types->length()) {
+ // Everything matched; can use monomorphic load.
+ BuildCheckHeapObject(object);
+ AddInstruction(HCheckMaps::New(object, types, zone()));
+ return BuildLoadNamedField(object, access, representation);
+ }
+
+ if (count != 0) return NULL;
+
+ // Second chance: the property is on the prototype and all maps have the
+ // same prototype.
+ Handle<Map> map(types->at(0));
+ if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return NULL;
+
+ Handle<Object> prototype(map->prototype(), isolate());
+ for (count = 1; count < types->length(); ++count) {
+ Handle<Map> test_map(types->at(count));
+ if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return NULL;
+ if (test_map->prototype() != *prototype) return NULL;
+ }
+
+ LookupInPrototypes(map, name, &lookup);
+ if (!lookup.IsField()) return NULL;
- // Everything matched; can use monomorphic load.
BuildCheckHeapObject(object);
AddInstruction(HCheckMaps::New(object, types, zone()));
- return BuildLoadNamedField(object, access, representation);
+ Handle<JSObject> holder(lookup.holder());
+ Handle<Map> holder_map(holder->map());
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ Handle<JSObject>::cast(prototype), holder, zone(), top_info()));
+ HValue* holder_value = AddInstruction(new(zone()) HConstant(holder));
+ return BuildLoadNamedField(holder_value,
+ HObjectAccess::ForField(holder_map, &lookup, name),
+ ComputeLoadStoreRepresentation(map, &lookup));
}
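The new "second chance" path accepts a polymorphic load when none of the receiver maps owns the property but all of them share one prototype that does; the generated code then checks the receiver maps once and loads the field from the constant holder. The admissibility test as a toy predicate (names invented):

    #include <cstdio>
    #include <vector>

    struct ToyMap {
      const void* prototype;
      bool dictionary_map, named_interceptor, owns_property;
    };

    // Every map must be able to defer to the prototype, and it must be the
    // same prototype everywhere; otherwise the load stays polymorphic.
    bool LoadableFromSharedPrototype(const std::vector<ToyMap>& maps) {
      for (const ToyMap& m : maps) {
        if (m.dictionary_map || m.named_interceptor || m.owns_property)
          return false;
        if (m.prototype != maps.front().prototype) return false;
      }
      return true;
    }

    int main() {
      int proto;  // stands in for the shared prototype object
      std::vector<ToyMap> maps = {{&proto, false, false, false},
                                  {&proto, false, false, false}};
      std::printf("%d\n", LoadableFromSharedPrototype(maps));  // 1
      return 0;
    }
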
@@ -5677,7 +4769,8 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
int position,
BailoutId assignment_id,
HValue* object,
- HValue* value,
+ HValue* store_value,
+ HValue* result_value,
SmallMapList* types,
Handle<String> name) {
// Use monomorphic store if property lookup results in the same field index
@@ -5694,6 +4787,7 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
Handle<Map> map = types->at(count);
// Pass false to ignore transitions.
if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
+ ASSERT(!map->is_observed());
HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
Representation new_representation =
@@ -5723,27 +4817,29 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
HInstruction* store;
CHECK_ALIVE_OR_RETURN(
store = BuildStoreNamedField(
- object, name, value, types->at(count - 1), &lookup),
+ object, name, store_value, types->at(count - 1), &lookup),
true);
- Push(value);
+ if (!ast_context()->IsEffect()) Push(result_value);
store->set_position(position);
AddInstruction(store);
AddSimulate(assignment_id);
- ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) Drop(1);
+ ast_context()->ReturnValue(result_value);
return true;
}
void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
- BailoutId id,
int position,
BailoutId assignment_id,
HValue* object,
- HValue* value,
+ HValue* store_value,
+ HValue* result_value,
SmallMapList* types,
Handle<String> name) {
if (TryStorePolymorphicAsMonomorphic(
- position, assignment_id, object, value, types, name)) {
+ position, assignment_id, object,
+ store_value, result_value, types, name)) {
return;
}
@@ -5764,17 +4860,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
+ new(zone()) HCompareMap(object, map, if_true, if_false);
current_block()->Finish(compare);
set_current_block(if_true);
HInstruction* instr;
- CHECK_ALIVE(
- instr = BuildStoreNamedField(object, name, value, map, &lookup));
+ CHECK_ALIVE(instr = BuildStoreNamedField(
+ object, name, store_value, map, &lookup));
instr->set_position(position);
// Goto will add the HSimulate for the store.
AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(value);
+ if (!ast_context()->IsEffect()) Push(result_value);
current_block()->Goto(join);
set_current_block(if_false);
@@ -5787,12 +4883,14 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
} else {
- HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
+ HInstruction* instr = BuildStoreNamedGeneric(object, name, store_value);
instr->set_position(position);
AddInstruction(instr);
if (join != NULL) {
- if (!ast_context()->IsEffect()) Push(value);
+ if (!ast_context()->IsEffect()) {
+ Push(result_value);
+ }
current_block()->Goto(join);
} else {
// The HSimulate for the store should not see the stored value in
@@ -5800,21 +4898,23 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
// unoptimized code).
if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
- AddSimulate(id, REMOVABLE_SIMULATE);
+ AddSimulate(assignment_id, REMOVABLE_SIMULATE);
} else {
- Push(value);
- AddSimulate(id, REMOVABLE_SIMULATE);
+ Push(result_value);
+ AddSimulate(assignment_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
- return ast_context()->ReturnValue(value);
+ return ast_context()->ReturnValue(result_value);
}
}
ASSERT(join != NULL);
- join->SetJoinId(id);
+ join->SetJoinId(assignment_id);
set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) {
+ ast_context()->ReturnValue(Pop());
+ }
}
@@ -5831,7 +4931,7 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
if (expr->IsUninitialized()) AddSoftDeoptimize();
return BuildStoreNamed(expr, expr->id(), expr->position(),
- expr->AssignmentId(), prop, object, value);
+ expr->AssignmentId(), prop, object, value, value);
} else {
// Keyed store.
CHECK_ALIVE(VisitForValue(prop->key()));
@@ -5865,8 +4965,21 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
- HInstruction* instr = Add<HStoreGlobalCell>(value, cell,
- lookup.GetPropertyDetails());
+ if (cell->type()->IsConstant()) {
+ IfBuilder builder(this);
+ HValue* constant = Add<HConstant>(cell->type()->AsConstant());
+ if (cell->type()->AsConstant()->IsNumber()) {
+ builder.If<HCompareNumericAndBranch>(value, constant, Token::EQ);
+ } else {
+ builder.If<HCompareObjectEqAndBranch>(value, constant);
+ }
+ builder.Then();
+ builder.Else();
+ AddSoftDeoptimize(MUST_EMIT_SOFT_DEOPT);
+ builder.End();
+ }
+ HInstruction* instr =
+ Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
if (instr->HasObservableSideEffects()) {
AddSimulate(ast_id, REMOVABLE_SIMULATE);
@@ -5890,7 +5003,8 @@ void HOptimizedGraphBuilder::BuildStoreNamed(Expression* expr,
BailoutId assignment_id,
Property* prop,
HValue* object,
- HValue* value) {
+ HValue* store_value,
+ HValue* result_value) {
Literal* key = prop->key()->AsLiteral();
Handle<String> name = Handle<String>::cast(key->value());
ASSERT(!name.is_null());
@@ -5908,38 +5022,42 @@ void HOptimizedGraphBuilder::BuildStoreNamed(Expression* expr,
Handle<JSObject> holder;
if (LookupSetter(map, name, &setter, &holder)) {
AddCheckConstantFunction(holder, object, map);
- if (FLAG_inline_accessors &&
- TryInlineSetter(setter, id, assignment_id, value)) {
+      // Don't try to inline if the result_value differs from the
+      // store_value; that case is not yet handled by the inlining.
+ if (result_value == store_value &&
+ FLAG_inline_accessors &&
+ TryInlineSetter(setter, id, assignment_id, store_value)) {
return;
}
Drop(2);
Add<HPushArgument>(object);
- Add<HPushArgument>(value);
+ Add<HPushArgument>(store_value);
instr = new(zone()) HCallConstantFunction(setter, 2);
} else {
Drop(2);
CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
name,
- value,
+ store_value,
map));
}
-
} else if (types != NULL && types->length() > 1) {
Drop(2);
return HandlePolymorphicStoreNamedField(
- id, position, assignment_id, object, value, types, name);
+ position, id, object,
+ store_value, result_value, types, name);
} else {
Drop(2);
- instr = BuildStoreNamedGeneric(object, name, value);
+ instr = BuildStoreNamedGeneric(object, name, store_value);
}
- Push(value);
+ if (!ast_context()->IsEffect()) Push(result_value);
instr->set_position(position);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(assignment_id, REMOVABLE_SIMULATE);
+ AddSimulate(id, REMOVABLE_SIMULATE);
}
- return ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) Drop(1);
+ return ast_context()->ReturnValue(result_value);
}
@@ -6069,7 +5187,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
}
return BuildStoreNamed(prop, expr->id(), expr->position(),
- expr->AssignmentId(), prop, object, instr);
+ expr->AssignmentId(), prop, object, instr, instr);
} else {
// Keyed property.
CHECK_ALIVE(VisitForValue(prop->obj()));
@@ -6481,7 +5599,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
*has_side_effects = false;
BuildCheckHeapObject(object);
SmallMapList* maps = prop->GetReceiverTypes();
- bool todo_external_array = false;
if (!is_store) {
HInstruction* consolidated_load =
@@ -6495,12 +5612,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
}
}
- static const int kNumElementTypes = kElementsKindCount;
- bool type_todo[kNumElementTypes];
- for (int i = 0; i < kNumElementTypes; ++i) {
- type_todo[i] = false;
- }
-
// Elements_kind transition support.
MapHandleList transition_target(maps->length());
// Collect possible transition targets.
@@ -6521,8 +5632,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
transition_target.Add(transitioned_map);
}
- int num_untransitionable_maps = 0;
- Handle<Map> untransitionable_map;
+ MapHandleList untransitionable_maps(maps->length());
HTransitionElementsKind* transition = NULL;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
@@ -6535,19 +5645,15 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
transition = Add<HTransitionElementsKind>(context, object, map,
transition_target.at(i));
} else {
- type_todo[map->elements_kind()] = true;
- if (IsExternalArrayElementsKind(map->elements_kind())) {
- todo_external_array = true;
- }
- num_untransitionable_maps++;
- untransitionable_map = map;
+ untransitionable_maps.Add(map);
}
}
// If only one map is left after transitioning, handle this case
// monomorphically.
- ASSERT(num_untransitionable_maps >= 1);
- if (num_untransitionable_maps == 1) {
+ ASSERT(untransitionable_maps.length() >= 1);
+ if (untransitionable_maps.length() == 1) {
+ Handle<Map> untransitionable_map = untransitionable_maps[0];
HInstruction* instr = NULL;
if (untransitionable_map->has_slow_elements_kind()) {
instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
@@ -6566,113 +5672,63 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
HBasicBlock* join = graph()->CreateBasicBlock();
- HInstruction* elements_kind_instr = Add<HElementsKind>(object);
HInstruction* elements = AddLoadElements(object, checkspec);
- HLoadExternalArrayPointer* external_elements = NULL;
- HInstruction* checked_key = NULL;
-
- // Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds
- // are handled before external arrays.
- STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
-
- for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
- elements_kind <= LAST_ELEMENTS_KIND;
- elements_kind = ElementsKind(elements_kind + 1)) {
- // After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some
- // code that's executed for all external array cases.
- STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
- LAST_ELEMENTS_KIND);
- if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
- && todo_external_array) {
- HInstruction* length = AddLoadFixedArrayLength(elements);
- checked_key = Add<HBoundsCheck>(key, length);
- external_elements = Add<HLoadExternalArrayPointer>(elements);
- }
- if (type_todo[elements_kind]) {
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareConstantEqAndBranch* elements_kind_branch =
- new(zone()) HCompareConstantEqAndBranch(
- elements_kind_instr, elements_kind, Token::EQ_STRICT);
- elements_kind_branch->SetSuccessorAt(0, if_true);
- elements_kind_branch->SetSuccessorAt(1, if_false);
- current_block()->Finish(elements_kind_branch);
- set_current_block(if_true);
- HInstruction* access;
- if (IsFastElementsKind(elements_kind)) {
- if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
- AddInstruction(HCheckMaps::New(
- elements, isolate()->factory()->fixed_array_map(),
- zone(), elements_kind_branch));
- }
- // TODO(jkummerow): The need for these two blocks could be avoided
- // in one of two ways:
- // (1) Introduce ElementsKinds for JSArrays that are distinct from
- // those for fast objects.
- // (2) Put the common instructions into a third "join" block. This
- // requires additional AST IDs that we can deopt to from inside
- // that join block. They must be added to the Property class (when
- // it's a keyed property) and registered in the full codegen.
- HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
- HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
- HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(object, JS_ARRAY_TYPE);
- typecheck->SetSuccessorAt(0, if_jsarray);
- typecheck->SetSuccessorAt(1, if_fastobject);
- current_block()->Finish(typecheck);
-
- set_current_block(if_jsarray);
+ for (int i = 0; i < untransitionable_maps.length(); ++i) {
+ Handle<Map> map = untransitionable_maps[i];
+ ElementsKind elements_kind = map->elements_kind();
+ HBasicBlock* this_map = graph()->CreateBasicBlock();
+ HBasicBlock* other_map = graph()->CreateBasicBlock();
+ HCompareMap* mapcompare =
+ new(zone()) HCompareMap(object, map, this_map, other_map);
+ current_block()->Finish(mapcompare);
+
+ set_current_block(this_map);
+ HInstruction* checked_key = NULL;
+ HInstruction* access = NULL;
+ if (IsFastElementsKind(elements_kind)) {
+ if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
+ AddInstruction(HCheckMaps::New(
+ elements, isolate()->factory()->fixed_array_map(),
+ zone(), mapcompare));
+ }
+ if (map->IsJSArray()) {
HInstruction* length = AddLoad(object, HObjectAccess::ForArrayLength(),
- typecheck, Representation::Smi());
+ mapcompare, Representation::Smi());
length->set_type(HType::Smi());
-
checked_key = Add<HBoundsCheck>(key, length);
- access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
- if (!is_store) {
- Push(access);
- }
-
- *has_side_effects |= access->HasObservableSideEffects();
- // The caller will use has_side_effects and add correct Simulate.
- access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (position != -1) {
- access->set_position(position);
- }
- if_jsarray->GotoNoSimulate(join);
-
- set_current_block(if_fastobject);
- length = AddLoadFixedArrayLength(elements);
+ } else {
+ HInstruction* length = AddLoadFixedArrayLength(elements);
checked_key = Add<HBoundsCheck>(key, length);
- access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
- } else if (elements_kind == DICTIONARY_ELEMENTS) {
- if (is_store) {
- access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
- } else {
- access = AddInstruction(BuildLoadKeyedGeneric(object, key));
- }
- } else { // External array elements.
- access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val,
- elements_kind_branch, elements_kind, is_store));
}
- *has_side_effects |= access->HasObservableSideEffects();
- // The caller will use has_side_effects and add correct Simulate.
- access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (position != RelocInfo::kNoPosition) access->set_position(position);
- if (!is_store) {
- Push(access);
+ access = AddFastElementAccess(
+ elements, checked_key, val, mapcompare,
+ elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE);
+ } else if (IsDictionaryElementsKind(elements_kind)) {
+ if (is_store) {
+ access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
+ } else {
+ access = AddInstruction(BuildLoadKeyedGeneric(object, key));
}
- current_block()->GotoNoSimulate(join);
- set_current_block(if_false);
+ } else {
+ ASSERT(IsExternalArrayElementsKind(elements_kind));
+ HInstruction* length = AddLoadFixedArrayLength(elements);
+ checked_key = Add<HBoundsCheck>(key, length);
+ HLoadExternalArrayPointer* external_elements =
+ Add<HLoadExternalArrayPointer>(elements);
+ access = AddExternalArrayElementAccess(
+ external_elements, checked_key, val,
+ mapcompare, elements_kind, is_store);
}
+ *has_side_effects |= access->HasObservableSideEffects();
+ // The caller will use has_side_effects and add a correct Simulate.
+ access->SetFlag(HValue::kHasNoObservableSideEffects);
+ if (position != RelocInfo::kNoPosition) access->set_position(position);
+ if (!is_store) {
+ Push(access);
+ }
+ current_block()->GotoNoSimulate(join);
+ set_current_block(other_map);
}
// Deopt if none of the cases matched.
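
The loop above replaces the old elements-kind switch with one HCompareMap split per remaining receiver map, expanding each arm monomorphically. A compact sketch of the dispatch shape (maps reduced to plain ids purely for illustration):

    #include <cassert>
    #include <vector>

    struct Map { int id; int elements_kind; };

    // One this_map / other_map split per remaining map; falling off the
    // end models the deoptimizing default block.
    int AccessElement(const Map& actual, const std::vector<Map>& maps) {
      for (const Map& m : maps) {
        if (actual.id == m.id) {
          return m.elements_kind;  // specialized load/store goes here
        }
      }
      return -1;                   // no map matched: deoptimize
    }

    int main() {
      std::vector<Map> maps = {{1, 0}, {2, 5}};
      assert(AccessElement({2, 5}, maps) == 5);
      assert(AccessElement({3, 0}, maps) == -1);
      return 0;
    }
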
@@ -6711,8 +5767,14 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
expr->GetStoreMode(), has_side_effects);
} else {
if (is_store) {
+ if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
+ AddSoftDeoptimize();
+ }
instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
+ if (expr->AsProperty()->IsUninitialized()) {
+ AddSoftDeoptimize();
+ }
instr = BuildLoadKeyedGeneric(obj, key);
}
AddInstruction(instr);
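
Both soft deopts added here follow the same rule: an uninitialized IC means the access site has never run, so there is no feedback to specialize on, and optimized code should bail back to collect some first. A trivial model of that decision (enum values are illustrative):

    #include <cassert>

    enum class IcState { kUninitialized, kMonomorphic, kPolymorphic, kGeneric };

    bool ShouldSoftDeopt(IcState state) {
      return state == IcState::kUninitialized;
    }

    int main() {
      assert(ShouldSoftDeopt(IcState::kUninitialized));
      assert(!ShouldSoftDeopt(IcState::kGeneric));
      return 0;
    }
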
@@ -6956,14 +6018,60 @@ inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
}
+bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
+ Call* expr,
+ HValue* receiver,
+ SmallMapList* types,
+ Handle<String> name) {
+ if (types->length() > kMaxCallPolymorphism) return false;
+
+ Handle<Map> map(types->at(0));
+ LookupResult lookup(isolate());
+ if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return false;
+
+ Handle<Object> prototype(map->prototype(), isolate());
+ for (int count = 1; count < types->length(); ++count) {
+ Handle<Map> test_map(types->at(count));
+ if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return false;
+ if (test_map->prototype() != *prototype) return false;
+ }
+
+ if (!expr->ComputeTarget(map, name)) return false;
+
+ BuildCheckHeapObject(receiver);
+ AddInstruction(HCheckMaps::New(receiver, types, zone()));
+ AddCheckPrototypeMaps(expr->holder(), map);
+ if (FLAG_trace_inlining) {
+ Handle<JSFunction> caller = current_info()->closure();
+ SmartArrayPointer<char> caller_name =
+ caller->shared()->DebugName()->ToCString();
+ PrintF("Trying to inline the polymorphic call to %s from %s\n",
+ *name->ToCString(), *caller_name);
+ }
+
+ if (!TryInlineCall(expr)) {
+ int argument_count = expr->arguments()->length() + 1; // Includes receiver.
+ HCallConstantFunction* call =
+ new(zone()) HCallConstantFunction(expr->target(), argument_count);
+ call->set_position(expr->position());
+ PreProcessCall(call);
+ AddInstruction(call);
+ if (!ast_context()->IsEffect()) Push(call);
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ }
+
+ return true;
+}
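
TryCallPolymorphicAsMonomorphic hinges on one precondition: every receiver map must miss the property locally and resolve it on the same prototype, so a single check over the whole map set plus one prototype chain check covers all cases. A self-contained sketch of that precondition (field names invented here):

    #include <cassert>
    #include <vector>

    struct Map {
      const void* prototype;   // object the map's prototype slot points at
      bool has_own_property;   // does the receiver itself define the name?
    };

    bool CanCallAsMonomorphic(const std::vector<Map>& maps) {
      if (maps.empty()) return false;
      const void* proto = maps[0].prototype;
      for (const Map& m : maps) {
        if (m.has_own_property || m.prototype != proto) return false;
      }
      return true;  // safe: one map-set check + one prototype check
    }

    int main() {
      int proto_a = 0, proto_b = 0;
      std::vector<Map> same = {{&proto_a, false}, {&proto_a, false}};
      std::vector<Map> split = {{&proto_a, false}, {&proto_b, false}};
      assert(CanCallAsMonomorphic(same));
      assert(!CanCallAsMonomorphic(split));
      return 0;
    }
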
+
+
void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
Call* expr,
HValue* receiver,
SmallMapList* types,
Handle<String> name) {
- // TODO(ager): We should recognize when the prototype chains for different
- // maps are identical. In that case we can avoid repeatedly generating the
- // same prototype map checks.
+ if (TryCallPolymorphicAsMonomorphic(expr, receiver, types, name)) return;
+
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
HBasicBlock* join = NULL;
FunctionSorter order[kMaxCallPolymorphism];
@@ -7192,11 +6300,11 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
#endif
- // Don't inline deeper than kMaxInliningLevels calls.
+ // Don't inline deeper than the maximum number of inlining levels.
HEnvironment* env = environment();
int current_level = 1;
while (env->outer() != NULL) {
- if (current_level == Compiler::kMaxInliningLevels) {
+ if (current_level == FLAG_max_inlining_levels) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
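
The depth check itself keeps its shape: the current inlining level is the number of enclosing environments, and the walk now stops at a limit read from a flag rather than a compile-time constant. A minimal model (the Env type is a stand-in):

    #include <cassert>

    struct Env { Env* outer = nullptr; };

    bool TooDeepToInline(const Env* env, int max_inlining_levels) {
      int current_level = 1;
      while (env->outer != nullptr) {
        if (current_level == max_inlining_levels) return true;
        ++current_level;
        env = env->outer;
      }
      return false;
    }

    int main() {
      Env a, b{&a}, c{&b};          // c is nested two environments deep
      assert(TooDeepToInline(&c, 2));
      assert(!TooDeepToInline(&c, 3));
      return 0;
    }
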
@@ -7969,12 +7077,11 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
} else {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
-
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
return Bailout("possible direct call to eval");
}
+ bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) {
Variable* var = proxy->var();
bool known_global_function = false;
@@ -8097,7 +7204,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
- constructor->initial_map()->instance_size() < HAllocateObject::kMaxSize;
+ constructor->initial_map()->instance_size() < HAllocate::kMaxInlineSize &&
+ constructor->initial_map()->InitialPropertiesLength() == 0;
}
@@ -8107,6 +7215,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(current_block()->HasPredecessor());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
HValue* context = environment()->LookupContext();
+ Factory* factory = isolate()->factory();
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
@@ -8125,19 +7234,73 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
constructor->shared()->CompleteInobjectSlackTracking();
}
- // Replace the constructor function with a newly allocated receiver.
- HInstruction* receiver = Add<HAllocateObject>(context, constructor);
- // Index of the receiver from the top of the expression stack.
+ // Calculate instance size from initial map of constructor.
+ ASSERT(constructor->has_initial_map());
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->InitialPropertiesLength() == 0);
+
+ // Allocate an instance of the implicit receiver object.
+ HValue* size_in_bytes = Add<HConstant>(instance_size);
+ HAllocate::Flags flags = HAllocate::DefaultFlags();
+ if (FLAG_pretenuring_call_new &&
+ isolate()->heap()->ShouldGloballyPretenure()) {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ }
+ HAllocate* receiver =
+ Add<HAllocate>(context, size_in_bytes, HType::JSObject(), flags);
+ receiver->set_known_initial_map(initial_map);
+
+ // Load the initial map from the constructor.
+ HValue* constructor_value = Add<HConstant>(constructor);
+ HValue* initial_map_value =
+ AddLoad(constructor_value, HObjectAccess::ForJSObjectOffset(
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Initialize map and fields of the newly allocated object.
+ { NoObservableSideEffectsScope no_effects(this);
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ AddStore(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kMapOffset),
+ initial_map_value);
+ HValue* empty_fixed_array = Add<HConstant>(factory->empty_fixed_array());
+ AddStore(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kPropertiesOffset),
+ empty_fixed_array);
+ AddStore(receiver,
+ HObjectAccess::ForJSObjectOffset(JSObject::kElementsOffset),
+ empty_fixed_array);
+ if (initial_map->inobject_properties() != 0) {
+ HConstant* undefined = graph()->GetConstantUndefined();
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ AddStore(receiver,
+ HObjectAccess::ForJSObjectOffset(property_offset),
+ undefined);
+ }
+ }
+ }
+
+ // Replace the constructor function with a newly allocated receiver using
+ // the index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
environment()->SetExpressionStackAt(receiver_index, receiver);
if (TryInlineConstruct(expr, receiver)) return;
- // TODO(mstarzinger): For now we remove the previous HAllocateObject and
- // add HPushArgument for the arguments in case inlining failed. What we
- // actually should do is emit HInvokeFunction on the constructor instead
- // of using HCallNew as a fallback.
+ // TODO(mstarzinger): For now we remove the previous HAllocate and all
+ // corresponding instructions and instead add HPushArgument for the
+ // arguments in case inlining failed. What we actually should do is for
+ // inlining to try to build a subgraph without mutating the parent graph.
+ HInstruction* instr = current_block()->last();
+ while (instr != initial_map_value) {
+ HInstruction* prev_instr = instr->previous();
+ instr->DeleteAndReplaceWith(NULL);
+ instr = prev_instr;
+ }
+ initial_map_value->DeleteAndReplaceWith(NULL);
receiver->DeleteAndReplaceWith(NULL);
check->DeleteAndReplaceWith(NULL);
environment()->SetExpressionStackAt(receiver_index, function);
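
The rollback loop above walks the instruction list backwards, unlinking everything the inlined allocation emitted. A small doubly-linked-list model of the same walk, where Unlink stands in for DeleteAndReplaceWith(NULL):

    #include <cassert>

    struct Instr {
      Instr* prev = nullptr;
      Instr* next = nullptr;
      void Unlink() {
        if (prev != nullptr) prev->next = next;
        if (next != nullptr) next->prev = prev;
        prev = next = nullptr;
      }
    };

    // Unlink from `last` back down to and including `first_emitted`.
    void RollBack(Instr* last, Instr* first_emitted) {
      Instr* instr = last;
      while (instr != first_emitted) {
        Instr* prev_instr = instr->prev;
        instr->Unlink();
        instr = prev_instr;
      }
      first_emitted->Unlink();
    }

    int main() {
      Instr a, b, c, d;                       // a -> b -> c -> d
      a.next = &b; b.prev = &a; b.next = &c;
      c.prev = &b; c.next = &d; d.prev = &c;
      RollBack(&d, &b);                       // drop b, c, d
      assert(a.next == nullptr && a.prev == nullptr);
      return 0;
    }
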
@@ -8248,7 +7411,13 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
HValue* key = Pop();
HValue* obj = Pop();
HValue* context = environment()->LookupContext();
- HDeleteProperty* instr = new(zone()) HDeleteProperty(context, obj, key);
+ HValue* function = AddLoadJSBuiltin(Builtins::DELETE, context);
+ Add<HPushArgument>(obj);
+ Add<HPushArgument>(key);
+ Add<HPushArgument>(Add<HConstant>(function_strict_mode_flag()));
+ // TODO(olivf) InvokeFunction produces a check for the parameter count,
+ // even though we are certain to pass the correct number of arguments here.
+ HInstruction* instr = new(zone()) HInvokeFunction(context, function, 3);
return ast_context()->ReturnInstruction(instr, expr->id());
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -8292,18 +7461,8 @@ void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
- HValue* context = environment()->LookupContext();
- HInstruction* instr =
- HMul::New(zone(), context, value, graph()->GetConstantMinus1());
- Handle<Type> operand_type = expr->expression()->lower_type();
- Representation rep = ToRepresentation(operand_type);
- if (operand_type->Is(Type::None())) {
- AddSoftDeoptimize();
- }
- if (instr->IsBinaryOperation()) {
- HBinaryOperation::cast(instr)->set_observed_input_representation(1, rep);
- HBinaryOperation::cast(instr)->set_observed_input_representation(2, rep);
- }
+ Handle<Type> operand_type = expr->expression()->bounds().lower;
+ HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::SUB);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -8311,11 +7470,8 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
- Handle<Type> operand_type = expr->expression()->lower_type();
- if (operand_type->Is(Type::None())) {
- AddSoftDeoptimize();
- }
- HInstruction* instr = new(zone()) HBitNot(value);
+ Handle<Type> operand_type = expr->expression()->bounds().lower;
+ HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::BIT_NOT);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -8369,7 +7525,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
TypeInfo info = expr->type();
- Representation rep = ToRepresentation(info);
+ Representation rep = Representation::FromType(info);
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
}
@@ -8516,35 +7672,10 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
after = BuildIncrement(returns_original_input, expr);
- input = Pop();
-
- HInstruction* store;
- if (!monomorphic || map->is_observed()) {
- // If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, after));
- } else {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- store = BuildCallSetter(object, after, map, setter, holder);
- } else {
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
- name,
- after,
- map));
- }
- }
- AddInstruction(store);
-
- // Overwrite the receiver in the bailout environment with the result
- // of the operation, and the placeholder with the original value if
- // necessary.
- environment()->SetExpressionStackAt(0, after);
- if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
+ HValue* result = returns_original_input ? Pop() : after;
+ return BuildStoreNamed(prop, expr->id(), expr->position(),
+ expr->AssignmentId(), prop, object, after, result);
} else {
// Keyed property.
if (returns_original_input) Push(graph()->GetConstantUndefined());
@@ -8610,6 +7741,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
return new(zone()) HStringCharCodeAt(context, string, checked_index);
}
+
// Checks if the given shift amounts have the form (sa) and (32 - sa).
static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
HValue* const32_minus_sa) {
@@ -8673,13 +7805,14 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
HValue* left,
HValue* right) {
HValue* context = environment()->LookupContext();
- Handle<Type> left_type = expr->left()->lower_type();
- Handle<Type> right_type = expr->right()->lower_type();
- Handle<Type> result_type = expr->result_type();
+ Handle<Type> left_type = expr->left()->bounds().lower;
+ Handle<Type> right_type = expr->right()->bounds().lower;
+ Handle<Type> result_type = expr->bounds().lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
- Representation left_rep = ToRepresentation(left_type);
- Representation right_rep = ToRepresentation(right_type);
- Representation result_rep = ToRepresentation(result_type);
+ Representation left_rep = Representation::FromType(left_type);
+ Representation right_rep = Representation::FromType(right_type);
+ Representation result_rep = Representation::FromType(result_type);
+
if (left_type->Is(Type::None())) {
AddSoftDeoptimize();
// TODO(rossberg): we should be able to get rid of non-continuous defaults.
@@ -8840,8 +7973,8 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
HBasicBlock* eval_right = graph()->CreateBasicBlock();
ToBooleanStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
- ? new(zone()) HBranch(left_value, eval_right, empty_block, expected)
- : new(zone()) HBranch(left_value, empty_block, eval_right, expected);
+ ? new(zone()) HBranch(left_value, expected, eval_right, empty_block)
+ : new(zone()) HBranch(left_value, expected, empty_block, eval_right);
current_block()->Finish(test);
set_current_block(eval_right);
@@ -8907,89 +8040,17 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
}
-// TODO(rossberg): this should die eventually.
-Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
- if (info.IsUninitialized()) return Representation::None();
- // TODO(verwaest): Return Smi rather than Integer32.
- if (info.IsSmi()) return Representation::Integer32();
- if (info.IsInteger32()) return Representation::Integer32();
- if (info.IsDouble()) return Representation::Double();
- if (info.IsNumber()) return Representation::Double();
- return Representation::Tagged();
-}
-
-
-Representation HOptimizedGraphBuilder::ToRepresentation(Handle<Type> type) {
- if (type->Is(Type::None())) return Representation::None();
- if (type->Is(Type::Signed32())) return Representation::Integer32();
- if (type->Is(Type::Number())) return Representation::Double();
- return Representation::Tagged();
-}
-
-
void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
+ Expression* sub_expr,
Handle<String> check) {
- // Note: The HTypeof itself is removed during canonicalization, if possible.
- HValue* value = typeof_expr->value();
+ CHECK_ALIVE(VisitForTypeOf(sub_expr));
+ HValue* value = Pop();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
}
-static bool MatchLiteralCompareNil(HValue* left,
- Token::Value op,
- HValue* right,
- Handle<Object> nil,
- HValue** expr) {
- if (left->IsConstant() &&
- HConstant::cast(left)->handle().is_identical_to(nil) &&
- Token::IsEqualityOp(op)) {
- *expr = right;
- return true;
- }
- return false;
-}
-
-
-static bool MatchLiteralCompareTypeof(HValue* left,
- Token::Value op,
- HValue* right,
- HTypeof** typeof_expr,
- Handle<String>* check) {
- if (left->IsTypeof() &&
- Token::IsEqualityOp(op) &&
- right->IsConstant() &&
- HConstant::cast(right)->handle()->IsString()) {
- *typeof_expr = HTypeof::cast(left);
- *check = Handle<String>::cast(HConstant::cast(right)->handle());
- return true;
- }
- return false;
-}
-
-
-static bool IsLiteralCompareTypeof(HValue* left,
- Token::Value op,
- HValue* right,
- HTypeof** typeof_expr,
- Handle<String>* check) {
- return MatchLiteralCompareTypeof(left, op, right, typeof_expr, check) ||
- MatchLiteralCompareTypeof(right, op, left, typeof_expr, check);
-}
-
-
-static bool IsLiteralCompareNil(HValue* left,
- Token::Value op,
- HValue* right,
- Handle<Object> nil,
- HValue** expr) {
- return MatchLiteralCompareNil(left, op, right, nil, expr) ||
- MatchLiteralCompareNil(right, op, left, nil, expr);
-}
-
-
static bool IsLiteralCompareBool(HValue* left,
Token::Value op,
HValue* right) {
@@ -9003,6 +8064,22 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+
+ // Check for a few fast cases. The AST visiting behavior must be in sync
+ // with the full codegen: We don't push both left and right values onto
+ // the expression stack when one side is a special-case literal.
+ Expression* sub_expr = NULL;
+ Handle<String> check;
+ if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+ return HandleLiteralCompareTypeof(expr, sub_expr, check);
+ }
+ if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
+ return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+ }
+ if (expr->IsLiteralCompareNull(&sub_expr)) {
+ return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
+ }
+
if (IsClassOfTest(expr)) {
CallRuntime* call = expr->left()->AsCallRuntime();
ASSERT(call->arguments()->length() == 1);
@@ -9016,12 +8093,12 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- Handle<Type> left_type = expr->left()->lower_type();
- Handle<Type> right_type = expr->right()->lower_type();
+ Handle<Type> left_type = expr->left()->bounds().lower;
+ Handle<Type> right_type = expr->right()->bounds().lower;
Handle<Type> combined_type = expr->combined_type();
- Representation combined_rep = ToRepresentation(combined_type);
- Representation left_rep = ToRepresentation(left_type);
- Representation right_rep = ToRepresentation(right_type);
+ Representation combined_rep = Representation::FromType(combined_type);
+ Representation left_rep = Representation::FromType(left_type);
+ Representation right_rep = Representation::FromType(right_type);
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
@@ -9031,19 +8108,6 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* left = Pop();
Token::Value op = expr->op();
- HTypeof* typeof_expr = NULL;
- Handle<String> check;
- if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
- return HandleLiteralCompareTypeof(expr, typeof_expr, check);
- }
- HValue* sub_expr = NULL;
- Factory* f = isolate()->factory();
- if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
- return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
- }
- if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
- return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
- }
if (IsLiteralCompareBool(left, op, right)) {
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
@@ -9092,7 +8156,12 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// Code below assumes that we don't fall through.
UNREACHABLE();
} else if (op == Token::IN) {
- HIn* result = new(zone()) HIn(context, left, right);
+ HValue* function = AddLoadJSBuiltin(Builtins::IN, context);
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ // TODO(olivf) InvokeFunction produces a check for the parameter count,
+ // even though we are certain to pass the correct number of arguments here.
+ HInstruction* result = new(zone()) HInvokeFunction(context, function, 2);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
}
@@ -9150,12 +8219,12 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
- // TODO(verwaest): Remove once ToRepresentation properly returns Smi when
- // the IC measures Smi.
+ // TODO(verwaest): Remove once Representation::FromType properly
+ // returns Smi when the IC measures Smi.
if (left_type->Is(Type::Smi())) left_rep = Representation::Smi();
if (right_type->Is(Type::Smi())) right_rep = Representation::Smi();
- HCompareIDAndBranch* result =
- new(zone()) HCompareIDAndBranch(left, right, op);
+ HCompareNumericAndBranch* result =
+ new(zone()) HCompareNumericAndBranch(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
@@ -9165,12 +8234,14 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
+ Expression* sub_expr,
NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
+ CHECK_ALIVE(VisitForValue(sub_expr));
+ HValue* value = Pop();
HIfContinuation continuation;
if (expr->op() == Token::EQ_STRICT) {
IfBuilder if_nil(this);
@@ -9205,6 +8276,7 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
HValue* context,
Handle<JSObject> boilerplate_object,
Handle<JSObject> original_boilerplate_object,
+ Handle<Object> allocation_site,
int data_size,
int pointer_size,
AllocationSiteMode mode) {
@@ -9213,16 +8285,16 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
HInstruction* target = NULL;
HInstruction* data_target = NULL;
- HAllocate::Flags flags = HAllocate::DefaultFlags();
+ ElementsKind kind = boilerplate_object->map()->elements_kind();
if (isolate()->heap()->ShouldGloballyPretenure()) {
if (data_size != 0) {
HAllocate::Flags data_flags =
- static_cast<HAllocate::Flags>(HAllocate::DefaultFlags() |
+ static_cast<HAllocate::Flags>(HAllocate::DefaultFlags(kind) |
HAllocate::CAN_ALLOCATE_IN_OLD_DATA_SPACE);
HValue* size_in_bytes = Add<HConstant>(data_size);
- data_target = Add<HAllocate>(context, size_in_bytes,
- HType::JSObject(), data_flags);
+ data_target = Add<HAllocate>(context, size_in_bytes, HType::JSObject(),
+ data_flags);
Handle<Map> free_space_map = isolate()->factory()->free_space_map();
AddStoreMapConstant(data_target, free_space_map);
HObjectAccess access =
@@ -9230,20 +8302,24 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
AddStore(data_target, access, size_in_bytes);
}
if (pointer_size != 0) {
- flags = static_cast<HAllocate::Flags>(
- flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ HAllocate::Flags pointer_flags =
+ static_cast<HAllocate::Flags>(HAllocate::DefaultFlags() |
+ HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
HValue* size_in_bytes = Add<HConstant>(pointer_size);
- target = Add<HAllocate>(context, size_in_bytes, HType::JSObject(), flags);
+ target = Add<HAllocate>(context, size_in_bytes, HType::JSObject(),
+ pointer_flags);
}
} else {
+ HAllocate::Flags flags = HAllocate::DefaultFlags(kind);
HValue* size_in_bytes = Add<HConstant>(data_size + pointer_size);
target = Add<HAllocate>(context, size_in_bytes, HType::JSObject(), flags);
}
int offset = 0;
int data_offset = 0;
- BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object, target,
- &offset, data_target, &data_offset, mode);
+ BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object,
+ allocation_site, target, &offset, data_target,
+ &data_offset, mode);
return target;
}
@@ -9251,11 +8327,30 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
void HOptimizedGraphBuilder::BuildEmitDeepCopy(
Handle<JSObject> boilerplate_object,
Handle<JSObject> original_boilerplate_object,
+ Handle<Object> allocation_site_object,
HInstruction* target,
int* offset,
HInstruction* data_target,
int* data_offset,
AllocationSiteMode mode) {
+ Zone* zone = this->zone();
+
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
+ boilerplate_object->map()->CanTrackAllocationSite();
+
+ // If using allocation sites, then the payload on the site should already
+ // be filled in as a valid (boilerplate) array.
+ ASSERT(!create_allocation_site_info ||
+ AllocationSite::cast(*allocation_site_object)->IsLiteralSite());
+
+ HInstruction* allocation_site = NULL;
+
+ if (create_allocation_site_info) {
+ allocation_site = AddInstruction(new(zone) HConstant(
+ allocation_site_object, Representation::Tagged()));
+ }
+
+ // Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(boilerplate_object->elements());
Handle<FixedArrayBase> original_elements(
original_boilerplate_object->elements());
@@ -9299,11 +8394,9 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
// Create allocation site info.
if (mode == TRACK_ALLOCATION_SITE &&
boilerplate_object->map()->CanTrackAllocationSite()) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- HInstruction* original_boilerplate =
- Add<HConstant>(original_boilerplate_object);
- BuildCreateAllocationSiteInfo(target, JSArray::kSize, original_boilerplate);
+ elements_offset += AllocationMemento::kSize;
+ *offset += AllocationMemento::kSize;
+ BuildCreateAllocationMemento(target, JSArray::kSize, allocation_site);
}
}
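
The offset bookkeeping works the same way after the rename: when a site is tracked, the memento sits directly after the JSArray header, so both the elements offset and the running deep-copy offset grow by its size. Illustrative arithmetic with made-up sizes, not V8's real layout constants:

    #include <cassert>

    constexpr int kJSArraySize = 16;
    constexpr int kAllocationMementoSize = 8;

    int ElementsOffset(bool track_allocation_site) {
      int elements_offset = kJSArraySize;
      if (track_allocation_site) elements_offset += kAllocationMementoSize;
      return elements_offset;
    }

    int main() {
      assert(ElementsOffset(false) == 16);
      assert(ElementsOffset(true) == 24);
      return 0;
    }
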
@@ -9401,9 +8494,10 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
*offset);
AddStore(object_properties, access, value_instruction);
-
- BuildEmitDeepCopy(value_object, original_value_object, target,
- offset, data_target, data_offset, DONT_TRACK_ALLOCATION_SITE);
+ BuildEmitDeepCopy(value_object, original_value_object,
+ Handle<Object>::null(), target,
+ offset, data_target, data_offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else {
Representation representation = details.representation();
HInstruction* value_instruction = Add<HConstant>(value);
@@ -9510,8 +8604,10 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
*offset);
Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
- BuildEmitDeepCopy(value_object, original_value_object, target,
- offset, data_target, data_offset, DONT_TRACK_ALLOCATION_SITE);
+ BuildEmitDeepCopy(value_object, original_value_object,
+ Handle<Object>::null(), target,
+ offset, data_target, data_offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else {
HInstruction* value_instruction =
Add<HLoadKeyed>(boilerplate_elements, key_constant,
@@ -9522,6 +8618,7 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
}
}
+
void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -9972,10 +9069,13 @@ void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
// Fast support for StringAdd.
void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* right = Pop();
+ HValue* left = Pop();
HValue* context = environment()->LookupContext();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
- Drop(2);
+ HInstruction* result = HStringAdd::New(
+ zone(), context, left, right, STRING_ADD_CHECK_BOTH);
return ast_context()->ReturnInstruction(result, call->id());
}
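
The rewrite visits both arguments for value instead of pushing them as call arguments, so they sit on the expression stack in evaluation order and the right operand pops first. A toy demonstration of that stack discipline:

    #include <cassert>
    #include <stack>
    #include <string>

    int main() {
      std::stack<std::string> expr_stack;
      expr_stack.push("left");    // VisitForValue(arguments->at(0))
      expr_stack.push("right");   // VisitForValue(arguments->at(1))
      std::string right = expr_stack.top(); expr_stack.pop();
      std::string left = expr_stack.top(); expr_stack.pop();
      assert(left == "left" && right == "right");
      return 0;
    }
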
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index f80aca1e3c..797b444078 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -67,7 +67,6 @@ class HBasicBlock: public ZoneObject {
HInstruction* first() const { return first_; }
HInstruction* last() const { return last_; }
void set_last(HInstruction* instr) { last_ = instr; }
- HInstruction* GetLastInstruction();
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
@@ -233,14 +232,21 @@ class HPredecessorIterator BASE_EMBEDDED {
class HInstructionIterator BASE_EMBEDDED {
public:
- explicit HInstructionIterator(HBasicBlock* block) : instr_(block->first()) { }
+ explicit HInstructionIterator(HBasicBlock* block)
+ : instr_(block->first()) {
+ next_ = Done() ? NULL : instr_->next();
+ }
- bool Done() { return instr_ == NULL; }
- HInstruction* Current() { return instr_; }
- void Advance() { instr_ = instr_->next(); }
+ inline bool Done() const { return instr_ == NULL; }
+ inline HInstruction* Current() { return instr_; }
+ inline void Advance() {
+ instr_ = next_;
+ next_ = Done() ? NULL : instr_->next();
+ }
private:
HInstruction* instr_;
+ HInstruction* next_;
};
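
Caching next_ up front is what makes the iterator deletion-safe: Advance() no longer reads through a node the visitor may have removed. A generic, self-contained version of the same trick:

    #include <cassert>

    struct Node { Node* next = nullptr; int value = 0; };

    class NodeIterator {
     public:
      explicit NodeIterator(Node* first) : node_(first) {
        next_ = Done() ? nullptr : node_->next;
      }
      bool Done() const { return node_ == nullptr; }
      Node* Current() const { return node_; }
      void Advance() {
        node_ = next_;
        next_ = Done() ? nullptr : node_->next;
      }
     private:
      Node* node_;
      Node* next_;
    };

    int main() {
      Node c{nullptr, 3}, b{&c, 2}, a{&b, 1};
      int sum = 0;
      for (NodeIterator it(&a); !it.Done(); it.Advance()) {
        sum += it.Current()->value;
        it.Current()->next = nullptr;  // mutation is safe: next_ is cached
      }
      assert(sum == 6);
      return 0;
    }
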
@@ -291,24 +297,12 @@ class HGraph: public ZoneObject {
HEnvironment* start_environment() const { return start_environment_; }
void FinalizeUniqueValueIds();
- void InitializeInferredTypes();
- void InsertTypeConversions();
- void MergeRemovableSimulates();
- void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
- void ComputeMinusZeroChecks();
bool ProcessArgumentsObject();
- void EliminateRedundantPhis();
- void Canonicalize();
void OrderBlocks();
void AssignDominators();
void SetupInformativeDefinitions();
- void EliminateRedundantBoundsChecks();
- void DehoistSimpleArrayIndexComputations();
void RestoreActualValues();
- void DeadCodeElimination(const char *phase_name);
- void PropagateDeoptimizingMark();
- void AnalyzeAndPruneEnvironmentLiveness();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
@@ -333,6 +327,8 @@ class HGraph: public ZoneObject {
HConstant* GetConstantNull();
HConstant* GetInvalidContext();
+ bool IsStandardConstant(HConstant* constant);
+
HBasicBlock* CreateBasicBlock();
HArgumentsObject* GetArgumentsObject() const {
return arguments_object_.get();
@@ -449,25 +445,10 @@ class HGraph: public ZoneObject {
phase.Run();
}
- void MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist);
- void MarkLiveInstructions();
- void RemoveDeadInstructions();
- void MarkAsDeoptimizingRecursively(HBasicBlock* block);
- void NullifyUnreachableInstructions();
- void InsertTypeConversions(HInstruction* instr);
- void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
- void InsertRepresentationChangeForUse(HValue* value,
- HValue* use_value,
- int use_index,
- Representation to);
- void InsertRepresentationChangesForValue(HValue* value);
- void InferTypes(ZoneList<HValue*>* worklist);
- void InitializeInferredTypes(int from_inclusive, int to_inclusive);
void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
void SetupInformativeDefinitionsInBlock(HBasicBlock* block);
void SetupInformativeDefinitionsRecursively(HBasicBlock* block);
- void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);
Isolate* isolate_;
int next_block_id_;
@@ -1065,24 +1046,6 @@ class HGraphBuilder {
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
// Building common constructs
- HInstruction* BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
-
- HInstruction* BuildFastElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store,
- LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode);
-
HValue* BuildCheckForCapacityGrow(HValue* object,
HValue* elements,
ElementsKind kind,
@@ -1095,6 +1058,12 @@ class HGraphBuilder {
ElementsKind kind,
HValue* length);
+ void BuildTransitionElementsKind(HValue* object,
+ HValue* map,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ bool is_jsarray);
+
HInstruction* BuildUncheckedMonomorphicElementAccess(
HValue* object,
HValue* key,
@@ -1117,6 +1086,24 @@ class HGraphBuilder {
HObjectAccess access,
Representation representation);
+ HInstruction* AddExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ HInstruction* AddFastElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store,
+ LoadKeyedHoleMode load_mode,
+ KeyedAccessStoreMode store_mode);
+
HStoreNamedField* AddStore(
HValue *object,
HObjectAccess access,
@@ -1129,6 +1116,15 @@ class HGraphBuilder {
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
+ HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin, HValue* context);
+
+ enum SoftDeoptimizeMode {
+ MUST_EMIT_SOFT_DEOPT,
+ CAN_OMIT_SOFT_DEOPT
+ };
+
+ void AddSoftDeoptimize(SoftDeoptimizeMode mode = CAN_OMIT_SOFT_DEOPT);
+
class IfBuilder {
public:
explicit IfBuilder(HGraphBuilder* builder,
@@ -1140,13 +1136,6 @@ class HGraphBuilder {
if (!finished_) End();
}
- HInstruction* IfCompare(
- HValue* left,
- HValue* right,
- Token::Value token);
-
- HInstruction* IfCompareMap(HValue* left, Handle<Map> map);
-
template<class Condition>
HInstruction* If(HValue *p) {
HControlInstruction* compare = new(zone()) Condition(p);
@@ -1161,6 +1150,13 @@ class HGraphBuilder {
return compare;
}
+ template<class Condition, class P2, class P3>
+ HInstruction* If(HValue* p1, P2 p2, P3 p3) {
+ HControlInstruction* compare = new(zone()) Condition(p1, p2, p3);
+ AddCompare(compare);
+ return compare;
+ }
+
template<class Condition, class P2>
HInstruction* IfNot(HValue* p1, P2 p2) {
HControlInstruction* compare = new(zone()) Condition(p1, p2);
@@ -1172,17 +1168,15 @@ class HGraphBuilder {
return compare;
}
- HInstruction* OrIfCompare(
- HValue* p1,
- HValue* p2,
- Token::Value token) {
- Or();
- return IfCompare(p1, p2, token);
- }
-
- HInstruction* OrIfCompareMap(HValue* left, Handle<Map> map) {
- Or();
- return IfCompareMap(left, map);
+ template<class Condition, class P2, class P3>
+ HInstruction* IfNot(HValue* p1, P2 p2, P3 p3) {
+ HControlInstruction* compare = new(zone()) Condition(p1, p2, p3);
+ AddCompare(compare);
+ HBasicBlock* block0 = compare->SuccessorAt(0);
+ HBasicBlock* block1 = compare->SuccessorAt(1);
+ compare->SetSuccessorAt(0, block1);
+ compare->SetSuccessorAt(1, block0);
+ return compare;
}
template<class Condition>
@@ -1197,17 +1191,10 @@ class HGraphBuilder {
return If<Condition>(p1, p2);
}
- HInstruction* AndIfCompare(
- HValue* p1,
- HValue* p2,
- Token::Value token) {
- And();
- return IfCompare(p1, p2, token);
- }
-
- HInstruction* AndIfCompareMap(HValue* left, Handle<Map> map) {
- And();
- return IfCompareMap(left, map);
+ template<class Condition, class P2, class P3>
+ HInstruction* OrIf(HValue* p1, P2 p2, P3 p3) {
+ Or();
+ return If<Condition>(p1, p2, p3);
}
template<class Condition>
@@ -1222,6 +1209,12 @@ class HGraphBuilder {
return If<Condition>(p1, p2);
}
+ template<class Condition, class P2, class P3>
+ HInstruction* AndIf(HValue* p1, P2 p2, P3 p3) {
+ And();
+ return If<Condition>(p1, p2, p3);
+ }
+
void Or();
void And();
@@ -1374,7 +1367,7 @@ class HGraphBuilder {
HValue* capacity);
// array must have been allocated with enough room for
- // 1) the JSArray, 2) a AllocationSiteInfo if mode requires it,
+ // 1) the JSArray, 2) an AllocationMemento if mode requires it,
// 3) a FixedArray or FixedDoubleArray.
// A pointer to the Fixed(Double)Array is returned.
HInnerAllocatedObject* BuildJSArrayHeader(HValue* array,
@@ -1386,6 +1379,7 @@ class HGraphBuilder {
HValue* BuildGrowElementsCapacity(HValue* object,
HValue* elements,
ElementsKind kind,
+ ElementsKind new_kind,
HValue* length,
HValue* new_capacity);
@@ -1405,19 +1399,23 @@ class HGraphBuilder {
HValue* BuildCloneShallowArray(HContext* context,
HValue* boilerplate,
+ HValue* allocation_site,
AllocationSiteMode mode,
ElementsKind kind,
int length);
+ HInstruction* BuildUnaryMathOp(
+ HValue* value, Handle<Type> type, Token::Value token);
+
void BuildCompareNil(
HValue* value,
Handle<Type> type,
int position,
HIfContinuation* continuation);
- HValue* BuildCreateAllocationSiteInfo(HValue* previous_object,
- int previous_object_size,
- HValue* payload);
+ HValue* BuildCreateAllocationMemento(HValue* previous_object,
+ int previous_object_size,
+ HValue* payload);
HInstruction* BuildGetNativeContext(HValue* context);
HInstruction* BuildGetArrayFunction(HValue* context);
@@ -1495,8 +1493,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
bool inline_bailout() { return inline_bailout_; }
- void AddSoftDeoptimize();
-
void Bailout(const char* reason);
HBasicBlock* CreateJoin(HBasicBlock* first,
@@ -1676,9 +1672,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
- static Representation ToRepresentation(TypeInfo info);
- static Representation ToRepresentation(Handle<Type> type);
-
void SetUpScope(Scope* scope);
virtual void VisitStatements(ZoneList<Statement*>* statements);
@@ -1748,28 +1741,33 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* object,
SmallMapList* types,
Handle<String> name);
- void HandlePolymorphicStoreNamedField(BailoutId id,
- int position,
+ void HandlePolymorphicStoreNamedField(int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
+ HValue* result,
SmallMapList* types,
Handle<String> name);
bool TryStorePolymorphicAsMonomorphic(int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
+ HValue* result,
SmallMapList* types,
Handle<String> name);
void HandlePolymorphicCallNamed(Call* expr,
HValue* receiver,
SmallMapList* types,
Handle<String> name);
+ bool TryCallPolymorphicAsMonomorphic(Call* expr,
+ HValue* receiver,
+ SmallMapList* types,
+ Handle<String> name);
void HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
+ Expression* sub_expr,
Handle<String> check);
void HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
+ Expression* sub_expr,
NilValue nil);
HInstruction* BuildStringCharCodeAt(HValue* context,
@@ -1838,7 +1836,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
BailoutId assignment_id,
Property* prop,
HValue* object,
- HValue* value);
+ HValue* store_value,
+ HValue* result_value);
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
@@ -1848,11 +1847,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HInstruction* BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value);
- HInstruction* BuildCallSetter(HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder);
HInstruction* BuildStoreNamedMonomorphic(HValue* object,
Handle<String> name,
HValue* value,
@@ -1868,12 +1862,14 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HInstruction* BuildFastLiteral(HValue* context,
Handle<JSObject> boilerplate_object,
Handle<JSObject> original_boilerplate_object,
+ Handle<Object> allocation_site,
int data_size,
int pointer_size,
AllocationSiteMode mode);
void BuildEmitDeepCopy(Handle<JSObject> boilerplat_object,
Handle<JSObject> object,
+ Handle<Object> allocation_site,
HInstruction* target,
int* offset,
HInstruction* data_target,
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 7bb643a16d..e0ae006655 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -65,7 +65,7 @@ int IntelDoubleRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::kNumAllocatableRegisters;
} else {
- return X87TopOfStackRegister::kNumAllocatableRegisters;
+ return X87Register::kNumAllocatableRegisters;
}
}
@@ -74,7 +74,7 @@ int IntelDoubleRegister::NumRegisters() {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::kNumRegisters;
} else {
- return X87TopOfStackRegister::kNumRegisters;
+ return X87Register::kNumRegisters;
}
}
@@ -83,7 +83,7 @@ const char* IntelDoubleRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::AllocationIndexToString(index);
} else {
- return X87TopOfStackRegister::AllocationIndexToString(index);
+ return X87Register::AllocationIndexToString(index);
}
}
@@ -1055,6 +1055,7 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
}
+
void Assembler::ror(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
ASSERT(is_uint5(imm8)); // illegal shift count
@@ -1068,6 +1069,7 @@ void Assembler::ror(Register dst, uint8_t imm8) {
}
}
+
void Assembler::ror_cl(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
@@ -1782,6 +1784,12 @@ void Assembler::fisub_s(const Operand& adr) {
}
+void Assembler::fmul_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC8, i);
+}
+
+
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC8, i);
@@ -2137,6 +2145,7 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EMIT(static_cast<byte>(mode) | 0x8);
}
+
void Assembler::movmskpd(Register dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 353f265ab7..8380897f6f 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -229,30 +229,40 @@ struct XMMRegister : IntelDoubleRegister {
#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
-struct X87TopOfStackRegister : IntelDoubleRegister {
- static const int kNumAllocatableRegisters = 1;
- static const int kNumRegisters = 1;
+struct X87Register : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 5;
+ static const int kNumRegisters = 5;
- bool is(X87TopOfStackRegister reg) const {
+ bool is(X87Register reg) const {
return code_ == reg.code_;
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
- "st0",
+ "stX_0", "stX_1", "stX_2", "stX_3", "stX_4"
};
return names[index];
}
- static int ToAllocationIndex(X87TopOfStackRegister reg) {
- ASSERT(reg.code() == 0);
- return 0;
+ static X87Register FromAllocationIndex(int index) {
+ STATIC_ASSERT(sizeof(X87Register) == sizeof(IntelDoubleRegister));
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ X87Register result;
+ result.code_ = index;
+ return result;
+ }
+
+ static int ToAllocationIndex(X87Register reg) {
+ return reg.code_;
}
};
-#define x87tos \
- static_cast<const X87TopOfStackRegister&>(double_register_0)
+#define stX_0 static_cast<const X87Register&>(double_register_0)
+#define stX_1 static_cast<const X87Register&>(double_register_1)
+#define stX_2 static_cast<const X87Register&>(double_register_2)
+#define stX_3 static_cast<const X87Register&>(double_register_3)
+#define stX_4 static_cast<const X87Register&>(double_register_4)
typedef IntelDoubleRegister DoubleRegister;
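
With five allocatable x87 stack positions instead of one, the allocation index and the register code are the same small integer, making the two conversions mutual inverses. A toy model of that mapping (names and bounds invented, not V8's):

    #include <cassert>

    struct X87Reg { int code; };

    constexpr int kNumAllocatable = 5;

    X87Reg FromAllocationIndex(int index) {
      assert(index >= 0 && index < kNumAllocatable);
      return X87Reg{index};
    }

    int ToAllocationIndex(X87Reg reg) { return reg.code; }

    int main() {
      for (int i = 0; i < kNumAllocatable; ++i) {
        assert(ToAllocationIndex(FromAllocationIndex(i)) == i);
      }
      return 0;
    }
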
@@ -947,6 +957,7 @@ class Assembler : public AssemblerBase {
void fadd(int i);
void fsub(int i);
void fmul(int i);
+ void fmul_i(int i);
void fdiv(int i);
void fisub_s(const Operand& adr);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 8aa6e4a603..b90a17f6c3 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -1114,7 +1114,6 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
ebx, // Result.
ecx, // Scratch 1.
edx, // Scratch 2.
- false, // Input is known to be smi?
&not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 29a4be2140..548cbaace7 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -65,6 +65,16 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
}
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -230,6 +240,39 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
}
+void UnaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -619,462 +662,155 @@ class FloatingPointHelper : public AllStatic {
};
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-static void IntegerConvert(MacroAssembler* masm,
- Register source,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
- // If the exponent is above 83, the number contains no significant
- // bits in the range 0..2^31, so the result is zero.
- static const uint32_t kResultIsZeroExponent = 83;
- __ cmp(scratch2, Immediate(kResultIsZeroExponent));
- __ j(above, &done);
- if (use_sse3) {
- CpuFeatureScope scope(masm, SSE3);
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent = 63;
- __ cmp(scratch2, Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent = 30;
- __ cmp(scratch2, Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent, Label::kNear);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent, Label::kNear);
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Register input_reg = this->source();
+ Register final_result_reg = this->destination();
+ ASSERT(is_truncating());
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent = 31;
- __ cmp(scratch2, Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, scratch2);
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, scratch);
- __ j(positive, &done, Label::kNear);
- __ neg(ecx);
- __ jmp(&done, Label::kNear);
- }
+ Label check_negative, process_64_bits, done, done_no_stash;
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent in scratch2. Zero in ecx.
- // We know that 0 <= exponent < 30.
- __ mov(ecx, Immediate(30));
- __ sub(ecx, scratch2);
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits int he low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, scratch);
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, ecx);
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative, Label::kNear);
- __ mov(ecx, scratch2);
- __ jmp(&done, Label::kNear);
- __ bind(&negative);
- __ sub(ecx, scratch2);
- }
- __ bind(&done);
-}
+ int double_offset = offset();
+ // Account for return address and saved regs if input is esp.
+ if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
-// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
-// |conversion_failure| if the heap number did not contain an int32 value.
-// Result is in ecx. Trashes ebx, xmm0, and xmm1.
-static void ConvertHeapNumberToInt32(MacroAssembler* masm,
- Register source,
- Label* conversion_failure) {
- __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, conversion_failure, xmm0, ecx, ebx, xmm1);
-}
-
+ MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+ MemOperand exponent_operand(MemOperand(input_reg,
+ double_offset + kDoubleSize / 2));
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
+ Register scratch1;
+ {
+ Register scratch_candidates[3] = { ebx, edx, edi };
+ for (int i = 0; i < 3; i++) {
+ scratch1 = scratch_candidates[i];
+ if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
+ }
}
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
-
- __ push(eax); // the operand
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(mode_)));
- __ push(Immediate(Smi::FromInt(operand_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
+ // Since we must use ecx for shifts below, use some other register (eax)
+ // to calculate the result if ecx is the requested return register.
+ Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
+ // Save ecx if it isn't the return register (and therefore volatile); if it
+ // is the return register, save instead the temporary register that stands
+ // in for the result.
+ Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
+ __ push(scratch1);
+ __ push(save_reg);
+
+ bool stash_exponent_copy = !input_reg.is(esp);
+ __ mov(scratch1, mantissa_operand);
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(masm, SSE3);
+ // Load the double value onto the x87 stack.
+ __ fld_d(mantissa_operand);
}
-}
+ __ mov(ecx, exponent_operand);
+ if (stash_exponent_copy) __ push(ecx);
+ __ and_(ecx, HeapNumber::kExponentMask);
+ __ shr(ecx, HeapNumber::kExponentShift);
+ __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
+ __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits);
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
- Label::kNear, Label::kNear, Label::kNear);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance undo_near,
- Label::Distance slow_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // We can't handle -0 with smis, so use a type transition for that case.
- __ test(eax, eax);
- __ j(zero, slow, slow_near);
-
- // Try optimistic subtraction '0 - value', saving operand in eax for undo.
- __ mov(edx, eax);
- __ Set(eax, Immediate(0));
- __ sub(eax, edx);
- __ j(overflow, undo, undo_near);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(
- MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // Flip bits and revert inverted smi-tag.
- __ not_(eax);
- __ and_(eax, ~kSmiTagMask);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
- __ mov(eax, edx);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
+ // The result is entirely in the lower 32 bits of the mantissa.
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ if (CpuFeatures::IsSupported(SSE3)) {
+ __ fstp(0);
}
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
+ __ sub(ecx, Immediate(delta));
+ __ xor_(result_reg, result_reg);
+ __ cmp(ecx, Immediate(31));
+ __ j(above, &done);
+ __ shl_cl(scratch1);
+ __ jmp(&check_negative);
- if (mode_ == UNARY_OVERWRITE) {
- __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
- Immediate(HeapNumber::kSignMask)); // Flip sign.
+ __ bind(&process_64_bits);
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(masm, SSE3);
+ if (stash_exponent_copy) {
+ // Already a copy of the exponent on the stack, overwrite it.
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ __ sub(esp, Immediate(kDoubleSize / 2));
+ } else {
+ // Reserve space for 64 bit answer.
+ __ sub(esp, Immediate(kDoubleSize)); // Nolint.
+ }
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&done_no_stash);
} else {
- __ mov(edx, eax);
- // edx: operand
-
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated, Label::kNear);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(edx);
+ // Result must be extracted from shifted 32-bit mantissa
+ __ sub(ecx, Immediate(delta));
+ __ neg(ecx);
+ if (stash_exponent_copy) {
+ __ mov(result_reg, MemOperand(esp, 0));
+ } else {
+ __ mov(result_reg, exponent_operand);
}
-
- __ bind(&heapnumber_allocated);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, Label::kNear);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ ret(0);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ mov(ebx, eax);
- __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the original HeapNumber on the stack. The integer value can't
- // be stored since it's untagged and not in the smi range (so we can't
- // smi-tag it). We'll recalculate the value after the GC instead.
- __ push(ebx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- // New HeapNumber is in eax.
- __ pop(edx);
+ __ and_(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
+ __ add(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
+ __ shrd(result_reg, scratch1);
+ __ shr_cl(result_reg);
+ __ test(ecx, Immediate(32));
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatureScope use_cmov(masm, CMOV);
+ __ cmov(not_equal, scratch1, result_reg);
+ } else {
+ Label skip_mov;
+ __ j(equal, &skip_mov, Label::kNear);
+ __ mov(scratch1, result_reg);
+ __ bind(&skip_mov);
}
- // IntegerConvert uses ebx and edi as scratch registers.
- // This conversion won't go slow-case.
- IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
- __ not_(ecx);
+ }
- __ bind(&heapnumber_allocated);
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ mov(result_reg, scratch1);
+ __ neg(result_reg);
+ if (stash_exponent_copy) {
+ __ cmp(MemOperand(esp, 0), Immediate(0));
+ } else {
+ __ cmp(exponent_operand, Immediate(0));
}
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ecx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatureScope use_cmov(masm, CMOV);
+ __ cmov(greater, result_reg, scratch1);
} else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ Label skip_mov;
+ __ j(less_equal, &skip_mov, Label::kNear);
+ __ mov(result_reg, scratch1);
+ __ bind(&skip_mov);
}
- __ ret(0);
-}
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
+ // Restore registers
+ __ bind(&done);
+ if (stash_exponent_copy) {
+ __ add(esp, Immediate(kDoubleSize / 2));
}
+ __ bind(&done_no_stash);
+ if (!final_result_reg.is(result_reg)) {
+ ASSERT(final_result_reg.is(ecx));
+ __ mov(final_result_reg, result_reg);
+ }
+ __ pop(save_reg);
+ __ pop(scratch1);
+ __ ret(0);
}
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the corresponding JavaScript builtin.
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
+// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
+// |conversion_failure| if the heap number did not contain an int32 value.
+// Result is in ecx. Trashes ebx, xmm0, and xmm1.
+static void ConvertHeapNumberToInt32(MacroAssembler* masm,
+ Register source,
+ Label* conversion_failure) {
+ __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, conversion_failure, xmm0, ecx, ebx, xmm1);
}
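In portable terms, the new DoubleToIStub above performs an ECMAScript ToInt32-style truncation of an IEEE-754 double to a 32-bit integer modulo 2^32, working directly on the sign/exponent/mantissa bits when SSE3's fisttp is unavailable. A behavioral sketch of what it computes (plain C++, not the stub's register-level algorithm):

#include <cstdint>
#include <cstring>

int32_t TruncateDoubleToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int biased = static_cast<int>((bits >> 52) & 0x7FF);
  if (biased == 0) return 0;      // zero/subnormal: |value| < 1
  if (biased == 0x7FF) return 0;  // NaN and Infinity map to 0 under ToInt32
  uint64_t significand = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
  int shift = biased - 1075;      // value = +/- significand * 2^shift
  uint32_t magnitude;
  if (shift >= 32) {
    magnitude = 0;                // every significand bit lands above 2^32
  } else if (shift >= 0) {
    magnitude = static_cast<uint32_t>(significand) << shift;
  } else if (shift > -53) {
    magnitude = static_cast<uint32_t>(significand >> -shift);
  } else {
    magnitude = 0;                // |value| < 1 truncates to 0
  }
  if (bits >> 63) magnitude = 0u - magnitude;  // negate modulo 2^32
  return static_cast<int32_t>(magnitude);
}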
@@ -1617,8 +1353,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime, Label::kNear);
- StringAddStub string_add_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
@@ -2263,8 +1999,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
@@ -2274,8 +2010,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime, Label::kNear);
- StringAddStub string_add_right_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
@@ -2683,7 +2419,9 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
CpuFeatureScope use_sse2(masm, SSE2);
ConvertHeapNumberToInt32(masm, edx, conversion_failure);
} else {
- IntegerConvert(masm, edx, use_sse3, conversion_failure);
+ DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ mov(edx, ecx);
@@ -2718,7 +2456,9 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
CpuFeatureScope use_sse2(masm, SSE2);
ConvertHeapNumberToInt32(masm, eax, conversion_failure);
} else {
- IntegerConvert(masm, eax, use_sse3, conversion_failure);
+ DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ bind(&done);
@@ -3197,7 +2937,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -3217,7 +2958,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
support_wrapper_);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -3281,7 +3023,8 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -4229,7 +3972,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register result,
Register scratch1,
Register scratch2,
- bool object_is_smi,
Label* not_found) {
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
@@ -4254,52 +3996,46 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Heap::GetNumberStringCache.
Label smi_hash_calculated;
Label load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ __ jmp(&smi_hash_calculated, Label::kNear);
+ __ bind(&not_smi);
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ __ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(masm, SSE2);
+ __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
} else {
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
+ __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ __ FCmp();
}
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache, Label::kNear);
__ bind(&smi_hash_calculated);
// Object is smi and hash is now in scratch. Calculate cache index.
__ and_(scratch, mask);
- Register index = scratch;
// Check if the entry is the smi we are looking for.
__ cmp(object,
FieldOperand(number_string_cache,
@@ -4326,7 +4062,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ mov(ebx, Operand(esp, kPointerSize));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+ GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime);
__ ret(1 * kPointerSize);
__ bind(&runtime);
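The cache lookup above hashes a heap number by XOR-ing the two 32-bit halves of its IEEE-754 representation and masking with the cache size; smis just use the untagged value. A sketch of the heap-number hash (the mask is derived from the cache array's length at runtime, not hard-coded):

#include <cstdint>
#include <cstring>

uint32_t NumberStringCacheIndex(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);        // word at kValueOffset
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // word at kValueOffset + 4
  return (lo ^ hi) & mask;                          // the xor_/and_ pair above
}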
@@ -4369,9 +4105,9 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
__ JumpIfSmi(object, label);
__ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsInternalizedMask | kIsNotStringMask);
- __ cmp(scratch, kInternalizedTag | kStringTag);
- __ j(not_equal, label);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, label);
}
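The rewritten check exploits the new instance-type encoding: both "string" and "internalized" are now zero tags, so one masked test replaces the old and_/cmp pair. A standalone sketch (the mask bit positions are assumptions for illustration; the real values live in src/objects.h):

#include <cstdint>

const uint32_t kIsNotStringMask       = 1u << 7;  // assumed bit position
const uint32_t kIsNotInternalizedMask = 1u << 6;  // assumed bit position

// With kInternalizedTag == 0 and kStringTag == 0, an internalized string
// has *neither* "not" bit set, so a single test suffices.
bool IsInternalizedString(uint32_t instance_type) {
  return (instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
}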
@@ -4697,17 +4433,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
__ j(equal, &done);
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- __ JumpIfNotSmi(ecx, &miss);
- if (FLAG_debug_code) {
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
- __ cmp(ecx, Immediate(terminal_kind_sentinel));
- __ Assert(less_equal, "Array function sentinel is not an ElementsKind");
- }
+ // If we came here, we need to check whether we are the Array function.
+ // If we didn't have a matching function and we didn't find the megamorphic
+ // sentinel, then the cell holds either some other function or an
+ // AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
// Load the global or builtins object from the current context
__ LoadGlobalContext(ecx);
@@ -4739,14 +4473,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
__ j(not_equal, &not_array_function);
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- GetInitialFastElementsKind());
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(initial_kind_sentinel));
+ // The target function is the Array constructor. Create an AllocationSite
+ // if we don't already have one, and store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ __ push(eax);
+ __ push(edi);
+ __ push(ebx);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ pop(ebx);
+ __ pop(edi);
+ __ pop(eax);
+ }
__ jmp(&done);
__ bind(&not_array_function);
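Taken together, the transition logic that GenerateRecordCallTarget now implements can be summarized in plain C++; the names below are hypothetical stand-ins for the real heap machinery, not V8 API:

// Hypothetical model of the feedback cell's possible contents.
enum class CellState { kUninitialized, kMonomorphic, kAllocationSite,
                       kMegamorphic };

CellState RecordCallTarget(CellState state, bool same_function,
                           bool is_array_function) {
  if (state == CellState::kMegamorphic) return state;  // nothing more to learn
  if (state == CellState::kMonomorphic && same_function) return state;
  if (state == CellState::kAllocationSite && is_array_function) return state;
  if (state == CellState::kUninitialized) {
    // First call seen: the Array function gets an AllocationSite (built by
    // the new CreateAllocationSiteStub) so the initial ElementsKind can be
    // tracked; any other callee is cached directly.
    return is_array_function ? CellState::kAllocationSite
                             : CellState::kMonomorphic;
  }
  return CellState::kMegamorphic;  // disagreement: give up on precise feedback
}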
@@ -4912,6 +4654,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -5633,7 +5376,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfSmi(eax, &call_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &call_runtime);
@@ -5642,20 +5389,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(edx, &call_runtime);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
// Both arguments are strings.
@@ -5941,7 +5684,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Drop(2);
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm, ecx);
// Build a frame
{
@@ -5956,7 +5699,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm, ecx);
// Build a frame
{
@@ -6009,7 +5752,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch1,
scratch2,
scratch3,
- false,
&not_cached);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
@@ -6859,14 +6601,10 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, Immediate(kIsNotStringMask | kIsInternalizedMask));
- __ cmpb(tmp1, kInternalizedTag | kStringTag);
- __ j(not_equal, &miss, Label::kNear);
-
- __ and_(tmp2, Immediate(kIsNotStringMask | kIsInternalizedMask));
- __ cmpb(tmp2, kInternalizedTag | kStringTag);
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
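The or_/test pair above checks both operands at once: OR-ing the two instance types merges their "not a string"/"not internalized" bits, so a single zero test accepts the pair only when both are internalized strings. A sketch, reusing the assumed masks from the earlier example:

// Both operands pass only if neither contributes a "not" bit.
bool BothInternalizedStrings(uint32_t type_left, uint32_t type_right) {
  uint32_t merged = type_left | type_right;
  return (merged & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
}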
@@ -6905,7 +6643,6 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
__ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
@@ -6981,10 +6718,10 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// also know they are both strings.
if (equality) {
Label do_compare;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &do_compare, Label::kNear);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(eax));
@@ -7789,18 +7526,20 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ j(zero, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry
+ // Fix kind and retry (only if we have an allocation site in the cell).
__ inc(edx);
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &normal_sequence);
-
- // The type cell may have gone megamorphic, don't overwrite if so
- __ mov(ecx, FieldOperand(ebx, kPointerSize));
- __ JumpIfNotSmi(ecx, &normal_sequence);
+ __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &normal_sequence);
// Save the resulting elements kind in type info
__ SmiTag(edx);
- __ mov(FieldOperand(ebx, kPointerSize), edx);
+ __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
__ SmiUntag(edx);
__ bind(&normal_sequence);
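The "__ inc(edx)" above converts a packed ElementsKind to its holey twin, which relies on the kind numbering placing each holey kind immediately after its packed counterpart. A sketch of that assumption (a plausible subset of src/elements-kind.h, not the full enum):

enum ElementsKind {
  FAST_SMI_ELEMENTS       = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS           = 2,
  FAST_HOLEY_ELEMENTS     = 3
};

ElementsKind ToHoley(ElementsKind packed) {
  return static_cast<ElementsKind>(packed + 1);  // what "inc edx" does
}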
@@ -7829,7 +7568,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7901,7 +7640,17 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ JumpIfNotSmi(edx, &no_info);
+
+ // The type cell's value may be undefined.
+ __ cmp(edx, Immediate(undefined_sentinel));
+ __ j(equal, &no_info);
+
+ // The type cell has either an AllocationSite or a JSFunction
+ __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
+ masm->isolate()->heap()->allocation_site_map())));
+ __ j(not_equal, &no_info);
+
+ __ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(edx);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 6dc63bdd49..e80acc6ccf 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -86,80 +86,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance undo_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near = Label::kFar);
- void GenerateSmiCodeUndo(MacroAssembler* masm);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -218,20 +144,6 @@ class StringHelper : public AllStatic {
};
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 1 << 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
- // Stub needs a frame before calling the runtime
- ERECT_FRAME = 1 << 3,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
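The old ia32-local StringAddFlags enum above is deleted; the STRING_ADD_CHECK_* and STRING_ADD_ERECT_FRAME values used throughout this diff now come from the shared code-stubs header, presumably along these lines, with CHECK_BOTH as the union of the two single-side checks (a sketch, not the verbatim header):

enum StringAddFlags {
  STRING_ADD_CHECK_NONE  = 0,       // both sides already known to be strings
  STRING_ADD_CHECK_LEFT  = 1 << 0,  // left side needs a string check
  STRING_ADD_CHECK_RIGHT = 1 << 1,  // right side needs a string check
  STRING_ADD_CHECK_BOTH  = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
  STRING_ADD_ERECT_FRAME = 1 << 2   // build a frame before calling the runtime
};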
class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -319,7 +231,6 @@ class NumberToStringStub: public PlatformCodeStub {
Register result,
Register scratch1,
Register scratch2,
- bool object_is_smi,
Label* not_found);
private:
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index da32c504fc..f488718dc6 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -651,7 +651,7 @@ OS::MemMoveFunction CreateMemMoveFunction() {
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
@@ -660,9 +660,9 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- esp[0] : return address
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
- __ j(equal, allocation_site_info_found);
+ ASSERT(allocation_memento_found != NULL);
+ __ TestJSArrayForAllocationMemento(edx, edi);
+ __ j(equal, allocation_memento_found);
}
// Set transitioned map.
@@ -689,7 +689,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
+ __ TestJSArrayForAllocationMemento(edx, edi);
__ j(equal, fail);
}
@@ -828,7 +828,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map, success;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
+ __ TestJSArrayForAllocationMemento(edx, edi);
__ j(equal, fail);
}
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index db1d5a612a..68199f905b 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -91,6 +91,7 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
+
// All debug break stubs support padding for LiveEdit.
const bool Debug::FramePaddingLayout::kIsSupported = true;
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 6af2445f45..505cd4fc1c 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -566,15 +566,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible
+ // Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
- if (type() == EAGER || type() == SOFT) {
- __ Set(ecx, Immediate(0));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+
__ sub(edx, ebp);
__ neg(edx);
@@ -620,12 +616,8 @@ void Deoptimizer::EntryGenerator::Generate() {
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
- // Remove the bailout id and the double registers from the stack.
- if (type() == EAGER || type() == SOFT) {
- __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
- } else {
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
- }
+ // Remove the bailout id, return address and the double registers.
+ __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 14e580069f..c43f11c00e 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -575,6 +575,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
}
}
+
int DisassemblerIA32::D1D3C1Instruction(byte* data) {
byte op = *data;
ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index cf3132d33f..66a7c1c080 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -107,6 +107,7 @@ class JumpPatchSite BASE_EMBEDDED {
// formal parameter count expected by the function.
//
// The live registers are:
+// o ecx: CallKind
// o edi: the JS function object being called (i.e. ourselves)
// o esi: our context
// o ebp: our caller's frame pointer
@@ -3684,7 +3685,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(eax);
}
@@ -4363,10 +4364,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
const char* comment) {
Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
+ UnaryOpStub stub(expr->op());
// UnaryOpStub expects the argument to be in the
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index eb6ccd90e1..1e9146847b 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -319,9 +319,10 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
// Is the string internalized? We already know it's a string so a single
// bit test is enough.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsInternalizedMask);
- __ j(zero, not_unique);
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
+ kIsNotInternalizedMask);
+ __ j(not_zero, not_unique);
__ bind(&unique);
}
@@ -798,8 +799,8 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -811,7 +812,7 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -827,7 +828,7 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1421,8 +1422,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -- esp[0] : return address
// -----------------------------------
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::STUB, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
no_reg);
@@ -1598,8 +1600,8 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ mov(eax, edx);
__ Ret();
@@ -1626,8 +1628,8 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ mov(eax, edx);
__ Ret();
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index defae1c162..2c234d834c 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -353,7 +353,6 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
if (!CpuFeatures::IsSupported(SSE2)) {
- ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1);
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(x87_stack_depth_);
}
@@ -365,8 +364,7 @@ bool LCodeGen::GenerateBody() {
bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
@@ -382,56 +380,32 @@ bool LCodeGen::GenerateJumpTable() {
}
if (jump_table_[i].needs_frame) {
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push a PC inside the function so that the deopt code can find where
- // the deopt comes from. It doesn't have to be the precise return
- // address of a "calling" LAZY deopt, it only has to be somewhere
- // inside the code body.
- Label push_approx_pc;
- __ call(&push_approx_pc);
- __ bind(&push_approx_pc);
- // Push the continuation which was stashed were the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 3 * kPointerSize));
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push the continuation which was stashed were the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
+ __ bind(&needs_frame);
+ __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt, it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+ // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
}
} else {
- if (type == Deoptimizer::LAZY) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
return !is_aborted();
@@ -501,68 +475,181 @@ Register LCodeGen::ToRegister(int index) const {
}
+X87Register LCodeGen::ToX87Register(int index) const {
+ return X87Register::FromAllocationIndex(index);
+}
+
+
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
return XMMRegister::FromAllocationIndex(index);
}
-bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
- return op->IsDoubleRegister();
+void LCodeGen::X87LoadForUsage(X87Register reg) {
+ ASSERT(X87StackContains(reg));
+ X87Fxch(reg);
+ x87_stack_depth_--;
+}
+
+
+void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
+ ASSERT(X87StackContains(reg) && x87_stack_depth_ > other_slot);
+ int i = X87ArrayIndex(reg);
+ int st = x87_st2idx(i);
+ if (st != other_slot) {
+ int other_i = x87_st2idx(other_slot);
+ X87Register other = x87_stack_[other_i];
+ x87_stack_[other_i] = reg;
+ x87_stack_[i] = other;
+ if (st == 0) {
+ __ fxch(other_slot);
+ } else if (other_slot == 0) {
+ __ fxch(st);
+ } else {
+ __ fxch(st);
+ __ fxch(other_slot);
+ __ fxch(st);
+ }
+ }
}
-void LCodeGen::ReadX87Operand(Operand dst) {
- ASSERT(x87_stack_depth_ == 1);
- __ fst_d(dst);
+int LCodeGen::x87_st2idx(int pos) {
+ return x87_stack_depth_ - pos - 1;
}
-void LCodeGen::PushX87DoubleOperand(Operand src) {
- ASSERT(x87_stack_depth_ == 0);
- x87_stack_depth_++;
- __ fld_d(src);
+int LCodeGen::X87ArrayIndex(X87Register reg) {
+ for (int i = 0; i < x87_stack_depth_; i++) {
+ if (x87_stack_[i].is(reg)) return i;
+ }
+ UNREACHABLE();
+ return -1;
}
-void LCodeGen::PushX87FloatOperand(Operand src) {
- ASSERT(x87_stack_depth_ == 0);
- x87_stack_depth_++;
- __ fld_s(src);
+bool LCodeGen::X87StackContains(X87Register reg) {
+ for (int i = 0; i < x87_stack_depth_; i++) {
+ if (x87_stack_[i].is(reg)) return true;
+ }
+ return false;
}
-void LCodeGen::PopX87() {
- ASSERT(x87_stack_depth_ == 1);
+void LCodeGen::X87Free(X87Register reg) {
+ ASSERT(X87StackContains(reg));
+ int i = X87ArrayIndex(reg);
+ int st = x87_st2idx(i);
+ if (st > 0) {
+ // Keep track of how fstp(st) changes the order of the elements.
+ int tos_i = x87_st2idx(0);
+ x87_stack_[i] = x87_stack_[tos_i];
+ }
x87_stack_depth_--;
- __ fstp(0);
+ __ fstp(st);
}
-void LCodeGen::CurrentInstructionReturnsX87Result() {
- ASSERT(x87_stack_depth_ <= 1);
- if (x87_stack_depth_ == 0) {
- x87_stack_depth_ = 1;
+void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
+ if (X87StackContains(dst)) {
+ X87Fxch(dst);
+ __ fstp(0);
+ } else {
+ ASSERT(x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
+ x87_stack_[x87_stack_depth_] = dst;
+ x87_stack_depth_++;
}
+ X87Fld(src, opts);
+}
+
+
+void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
+ if (opts == kX87DoubleOperand) {
+ __ fld_d(src);
+ } else if (opts == kX87FloatOperand) {
+ __ fld_s(src);
+ } else if (opts == kX87IntOperand) {
+ __ fild_s(src);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::X87Mov(Operand dst, X87Register src) {
+ X87Fxch(src);
+ __ fst_d(dst);
+}
+
+
+void LCodeGen::X87PrepareToWrite(X87Register reg) {
+ if (X87StackContains(reg)) {
+ X87Free(reg);
+ }
+ // Mark this register as the next register to write to
+ x87_stack_[x87_stack_depth_] = reg;
+}
+
+
+void LCodeGen::X87CommitWrite(X87Register reg) {
+ // Assert the reg is prepared to write, but not on the virtual stack yet
+ ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg) &&
+ x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
+ x87_stack_depth_++;
+}
+
+
+void LCodeGen::X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result) {
+ // You need to use DefineSameAsFirst for x87 instructions
+ ASSERT(result.is(left));
+ X87Fxch(right, 1);
+ X87Fxch(left);
}
void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
- if (x87_stack_depth_ > 0) {
- if ((instr->ClobbersDoubleRegisters() ||
- instr->HasDoubleRegisterResult()) &&
- !instr->HasDoubleRegisterInput()) {
- PopX87();
+ if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
+ bool double_inputs = instr->HasDoubleRegisterInput();
+
+ // Flush the stack from tos down, since X87Free() will mess with tos
+ for (int i = x87_stack_depth_ - 1; i >= 0; i--) {
+ X87Register reg = x87_stack_[i];
+ // Skip registers that hold inputs of the instruction about to be
+ // emitted when flushing the stack.
+ if (double_inputs && instr->IsDoubleInput(reg, this)) {
+ continue;
+ }
+ X87Free(reg);
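+ // X87Free may have moved the old top-of-stack entry into slot i; bump i
+ // so the loop revisits that slot.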
+ if (i < x87_stack_depth_ - 1) i++;
+ }
+ }
+ if (instr->IsReturn()) {
+ while (x87_stack_depth_ > 0) {
+ __ fstp(0);
+ x87_stack_depth_--;
}
}
}
+void LCodeGen::EmitFlushX87ForDeopt() {
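+ // Only reached on the path that actually deoptimizes, so the virtual
+ // stack model is deliberately left untouched.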
+ for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0);
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
}
+X87Register LCodeGen::ToX87Register(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToX87Register(op->index());
+}
+
+
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
@@ -835,8 +922,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
- // It's an error to deoptimize with the x87 fp stack in use.
- ASSERT(x87_stack_depth_ == 0);
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
Address entry =
@@ -847,49 +932,47 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- Handle<SharedFunctionInfo> shared(info()->shared_info());
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
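+ // Decrement a per-isolate counter and force a deopt every
+ // FLAG_deopt_every_n_times calls.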
Label no_deopt;
__ pushfd();
__ push(eax);
- __ push(ebx);
- __ mov(ebx, shared);
- __ mov(eax,
- FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ mov(eax, Operand::StaticVariable(count));
+ __ sub(eax, Immediate(1));
__ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
- __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
+ __ mov(eax, Immediate(FLAG_deopt_every_n_times));
+ __ mov(Operand::StaticVariable(count), eax);
__ pop(eax);
__ popfd();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-
+ ASSERT(frame_is_built_);
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
+ __ mov(Operand::StaticVariable(count), eax);
__ pop(eax);
__ popfd();
}
+ // Before instructions that can deopt, we normally flush the x87 stack.
+ // But the current instruction may have inputs or outputs on that stack,
+ // so flush them from the physical stack here to leave it in a
+ // consistent state.
+ if (x87_stack_depth_ > 0) {
+ Label done;
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+ EmitFlushX87ForDeopt();
+ __ bind(&done);
+ }
+
if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
Label done;
- if (cc != no_condition) {
- __ j(NegateCondition(cc), &done, Label::kNear);
- }
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
__ bind(&done);
}
ASSERT(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
- if (bailout_type == Deoptimizer::LAZY) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
@@ -1139,11 +1222,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1721,11 +1799,10 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
- __ push(Immediate(lower));
__ push(Immediate(upper));
- PushX87DoubleOperand(Operand(esp, 0));
+ __ push(Immediate(lower));
+ X87Mov(ToX87Register(instr->result()), Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
- CurrentInstructionReturnsX87Result();
} else {
CpuFeatureScope scope1(masm(), SSE2);
ASSERT(instr->result()->IsDoubleRegister());
@@ -1990,48 +2067,63 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a mulsd depending on the result
- __ movaps(left, left);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister left = ToDoubleRegister(instr->left());
+ XMMRegister right = ToDoubleRegister(instr->right());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // Modulo uses a fixed result register.
+ ASSERT(instr->op() == Token::MOD || left.is(result));
+ switch (instr->op()) {
+ case Token::ADD:
+ __ addsd(left, right);
+ break;
+ case Token::SUB:
+ __ subsd(left, right);
+ break;
+ case Token::MUL:
+ __ mulsd(left, right);
+ break;
+ case Token::DIV:
+ __ divsd(left, right);
+ // Don't delete this mov. It may improve performance on some CPUs when
+ // there is a mulsd depending on the result.
+ __ movaps(left, left);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), left);
+ __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ // Store it into the (fixed) result register.
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(result, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ X87Register left = ToX87Register(instr->left());
+ X87Register right = ToX87Register(instr->right());
+ X87Register result = ToX87Register(instr->result());
+ X87PrepareBinaryOp(left, right, result);
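+ // left (== result) is now at st(0) and right at st(1); only MUL is
+ // implemented for the x87 path so far.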
+ switch (instr->op()) {
+ case Token::MUL:
+ __ fmul_i(1);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- default:
- UNREACHABLE();
- break;
}
}
@@ -2058,12 +2150,12 @@ int LCodeGen::GetNextEmittedBlock() const {
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
- int right_block = instr->FalseDestination(chunk_);
int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
+ if (right_block == left_block || cc == no_condition) {
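+ // Unconditional (or trivially equal) branches become a plain goto.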
EmitGoto(left_block);
} else if (left_block == next_block) {
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
@@ -2076,6 +2168,25 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
+void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsSmiOrInteger32() || r.IsDouble()) {
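+ // These representations can only hold numbers.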
+ EmitBranch(instr, no_condition);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsTaggedNumber()) {
+ EmitBranch(instr, no_condition);
+ }
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ EmitBranch(instr, equal);
+ }
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
@@ -2256,7 +2367,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Condition cc = TokenToCondition(instr->op(), instr->is_double());
@@ -2868,6 +2979,20 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
}
+void LCodeGen::DoLinkObjectInList(LLinkObjectInList* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ ExternalReference sites_list_address = instr->GetReference(isolate());
+
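+ // Prepend object to the list: copy the current list head into object's
+ // field, then install object as the new head.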
+ __ mov(temp, Immediate(sites_list_address));
+ __ mov(temp, Operand(temp, 0));
+ __ mov(FieldOperand(object, instr->hydrogen()->store_field().offset()),
+ temp);
+ __ mov(temp, Immediate(sites_list_address));
+ __ mov(Operand(temp, 0), object);
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2934,8 +3059,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
__ movdbl(result, FieldOperand(object, offset));
} else {
- PushX87DoubleOperand(FieldOperand(object, offset));
- CurrentInstructionReturnsX87Result();
+ X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
}
return;
}
@@ -3180,16 +3304,14 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movss(result, operand);
__ cvtss2sd(result, result);
} else {
- PushX87FloatOperand(operand);
- CurrentInstructionReturnsX87Result();
+ X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ movdbl(ToDoubleRegister(instr->result()), operand);
} else {
- PushX87DoubleOperand(operand);
- CurrentInstructionReturnsX87Result();
+ X87Mov(ToX87Register(instr->result()), operand);
}
} else {
Register result(ToRegister(instr->result()));
@@ -3260,8 +3382,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
__ movdbl(result, double_load_operand);
} else {
- PushX87DoubleOperand(double_load_operand);
- CurrentInstructionReturnsX87Result();
+ X87Mov(ToX87Register(instr->result()), double_load_operand);
}
}
@@ -3777,6 +3898,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
}
+
void LCodeGen::DoMathRound(LMathRound* instr) {
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
@@ -4168,7 +4290,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ mov(ebx, instr->hydrogen()->property_cell());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
@@ -4254,7 +4376,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
__ movdbl(FieldOperand(object, offset), value);
} else {
- __ fstp_d(FieldOperand(object, offset));
+ X87Register value = ToX87Register(instr->value());
+ X87Mov(FieldOperand(object, offset), value);
}
return;
}
@@ -4380,7 +4503,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
CpuFeatureScope scope(masm(), SSE2);
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
- __ fst_d(operand);
+ X87Mov(operand, ToX87Register(instr->value()));
}
} else {
Register value = ToRegister(instr->value());
@@ -4462,7 +4585,8 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ mov(double_store_operand2, Immediate(upper));
} else {
Label no_special_nan_handling;
- ASSERT(x87_stack_depth_ > 0);
+ X87Register value = ToX87Register(instr->value());
+ X87Fxch(value);
if (instr->NeedsCanonicalization()) {
__ fld(0);
@@ -4561,7 +4685,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
+ __ TestJSArrayForAllocationMemento(object, temp);
DeoptimizeIf(equal, instr->environment());
}
@@ -4744,7 +4868,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub stub(instr->hydrogen()->flags());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4932,10 +5056,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
convert_hole = load->UsesMustHandleHole();
}
+ bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+ if (!use_sse2) {
+ // Put the value on top of the x87 stack.
+ X87Register src = ToX87Register(instr->value());
+ X87LoadForUsage(src);
+ }
+
Label no_special_nan_handling;
Label done;
if (convert_hole) {
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -4989,12 +5119,12 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
- __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ bind(&done);
}
@@ -5045,12 +5175,14 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
Register temp_reg,
+ X87Register res_reg,
bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
+ X87PrepareToWrite(res_reg);
STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
NUMBER_CANDIDATE_IS_ANY_TAGGED);
if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -5111,6 +5243,7 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
__ pop(input_reg);
__ SmiTag(input_reg); // Retag smi.
__ bind(&done);
+ X87CommitWrite(res_reg);
}
@@ -5492,11 +5625,11 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
EmitNumberUntagDNoSSE2(input_reg,
temp_reg,
+ ToX87Register(instr->result()),
instr->hydrogen()->allow_undefined_as_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
- CurrentInstructionReturnsX87Result();
}
}
@@ -5511,93 +5644,22 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
+ __ cvttsd2si(result_reg, Operand(input_reg));
+
if (instr->truncating()) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- __ cvttsd2si(result_reg, Operand(input_reg));
+ Label fast_case_succeeded;
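+ // cvttsd2si yields 0x80000000 for NaN and out-of-range inputs; in that
+ // case fall back to DoubleToIStub on a stack copy of the input.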
__ cmp(result_reg, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE3)) {
- // This will deoptimize if the exponent of the input in out of range.
- CpuFeatureScope scope(masm(), SSE3);
- Label convert, done;
- __ j(not_equal, &done, Label::kNear);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- // Get exponent alone and check for too-big exponent.
- __ mov(result_reg, Operand(esp, sizeof(int32_t)));
- __ and_(result_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- __ add(Operand(esp), Immediate(kDoubleSize));
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&convert);
- // Do conversion, which cannot fail because we checked the exponent.
- __ fld_d(Operand(esp, 0));
- __ fisttp_d(Operand(esp, 0));
- __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
- } else {
- Label done;
- Register temp_reg = ToRegister(instr->temp());
- XMMRegister xmm_scratch = xmm0;
-
- // If cvttsd2si succeeded, we're done. Otherwise, we attempt
- // manual conversion.
- __ j(not_equal, &done, Label::kNear);
-
- // Get high 32 bits of the input in result_reg and temp_reg.
- __ pshufd(xmm_scratch, input_reg, 1);
- __ movd(Operand(temp_reg), xmm_scratch);
- __ mov(result_reg, temp_reg);
-
- // Prepare negation mask in temp_reg.
- __ sar(temp_reg, kBitsPerInt - 1);
-
- // Extract the exponent from result_reg and subtract adjusted
- // bias from it. The adjustment is selected in a way such that
- // when the difference is zero, the answer is in the low 32 bits
- // of the input, otherwise a shift has to be performed.
- __ shr(result_reg, HeapNumber::kExponentShift);
- __ and_(result_reg,
- HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
- __ sub(Operand(result_reg),
- Immediate(HeapNumber::kExponentBias +
- HeapNumber::kExponentBits +
- HeapNumber::kMantissaBits));
- // Don't handle big (> kMantissaBits + kExponentBits == 63) or
- // special exponents.
- DeoptimizeIf(greater, instr->environment());
-
- // Zero out the sign and the exponent in the input (by shifting
- // it to the left) and restore the implicit mantissa bit,
- // i.e. convert the input to unsigned int64 shifted left by
- // kExponentBits.
- ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
- // Minus zero has the most significant bit set and the other
- // bits cleared.
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
- __ psllq(input_reg, HeapNumber::kExponentBits);
- __ por(input_reg, xmm_scratch);
-
- // Get the amount to shift the input right in xmm_scratch.
- __ neg(result_reg);
- __ movd(xmm_scratch, Operand(result_reg));
-
- // Shift the input right and extract low 32 bits.
- __ psrlq(input_reg, xmm_scratch);
- __ movd(Operand(result_reg), input_reg);
-
- // Use the prepared mask in temp_reg to negate the result if necessary.
- __ xor_(result_reg, Operand(temp_reg));
- __ sub(result_reg, Operand(temp_reg));
- __ bind(&done);
- }
+ __ j(not_equal, &fast_case_succeeded);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movdbl(MemOperand(esp, 0), input_reg);
+ DoubleToIStub stub(esp, result_reg, 0, true);
+ __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ __ add(esp, Immediate(kDoubleSize));
+ __ bind(&fast_case_succeeded);
} else {
Label done;
- __ cvttsd2si(result_reg, Operand(input_reg));
__ cvtsi2sd(xmm0, Operand(result_reg));
__ ucomisd(xmm0, input_reg);
DeoptimizeIf(not_equal, instr->environment());
@@ -5946,95 +6008,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- instance_size >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- initial_map->pre_allocated_property_fields());
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- initial_map->unused_property_fields());
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- initial_map->inobject_properties());
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ mov(FieldOperand(result, JSObject::kMapOffset), map);
- __ mov(scratch, factory()->empty_fixed_array());
- __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ mov(scratch, factory()->undefined_value());
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ mov(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(Immediate(Smi::FromInt(instance_size)));
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -6346,24 +6319,6 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- __ push(ToOperand(obj));
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -6444,20 +6399,6 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
}
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(eax, isolate()->factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index d05da8a084..eb75225b99 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -105,7 +105,7 @@ class LCodeGen BASE_EMBEDDED {
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsX87TopOfStack(LOperand* op) const;
+ X87Register ToX87Register(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
@@ -115,16 +115,23 @@ class LCodeGen BASE_EMBEDDED {
Immediate ToSmiImmediate(LOperand* op) const {
return Immediate(Smi::FromInt(ToInteger32(LConstantOperand::cast(op))));
}
+ double ToDouble(LConstantOperand* op) const;
// Support for non-sse2 (x87) floating point stack handling.
- // These functions maintain the depth of the stack (either 0 or 1)
- void PushX87DoubleOperand(Operand src);
- void PushX87FloatOperand(Operand src);
- void ReadX87Operand(Operand dst);
- bool X87StackNonEmpty() const { return x87_stack_depth_ > 0; }
- void PopX87();
- void CurrentInstructionReturnsX87Result();
- void FlushX87StackIfNecessary(LInstruction* instr);
+ // These functions maintain the mapping of physical stack registers to our
+ // virtual registers between instructions.
+ enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
+
+ void X87Mov(X87Register reg, Operand src,
+ X87OperandType operand = kX87DoubleOperand);
+ void X87Mov(Operand src, X87Register reg);
+
+ void X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result);
+
+ void X87LoadForUsage(X87Register reg);
+ void X87PrepareToWrite(X87Register reg);
+ void X87CommitWrite(X87Register reg);
Handle<Object> ToHandle(LConstantOperand* op) const;
@@ -156,7 +163,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
@@ -291,9 +297,9 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
+ X87Register ToX87Register(int index) const;
int ToInteger32(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
Representation key_representation,
@@ -331,6 +337,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitNumberUntagDNoSSE2(
Register input,
Register temp,
+ X87Register res_reg,
bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
@@ -392,6 +399,16 @@ class LCodeGen BASE_EMBEDDED {
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
+ void X87Fxch(X87Register reg, int other_slot = 0);
+ void X87Fld(Operand src, X87OperandType opts);
+ void X87Free(X87Register reg);
+
+ void FlushX87StackIfNecessary(LInstruction* instr);
+ void EmitFlushX87ForDeopt();
+ bool X87StackContains(X87Register reg);
+ int X87ArrayIndex(X87Register reg);
+ int x87_st2idx(int pos);
+
Zone* zone_;
LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
@@ -413,6 +430,7 @@ class LCodeGen BASE_EMBEDDED {
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
+ X87Register x87_stack_[X87Register::kNumAllocatableRegisters];
int x87_stack_depth_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 86bfe2fbf2..e884a9dbce 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -313,6 +313,29 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
+ } else if (destination->IsDoubleRegister()) {
+ double v = cgen_->ToDouble(constant_source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
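+ // Materialize the double constant through memory: push both 32-bit
+ // halves and load from [esp] (SSE2 uses the cheaper xorps for +0.0).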
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ if (int_val == 0) {
+ __ xorps(dst, dst);
+ } else {
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ __ movdbl(dst, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
+ } else {
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
@@ -342,10 +365,10 @@ void LGapResolver::EmitMove(int index) {
} else {
// load from the register onto the stack, store in destination, which must
// be a double stack slot in the non-SSE2 case.
- ASSERT(source->index() == 0); // source is on top of the stack
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- cgen_->ReadX87Operand(dst);
+ X87Register src = cgen_->ToX87Register(source);
+ cgen_->X87Mov(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
if (CpuFeatures::IsSupported(SSE2)) {
@@ -378,10 +401,8 @@ void LGapResolver::EmitMove(int index) {
__ mov(dst1, tmp);
} else {
Operand src = cgen_->ToOperand(source);
- if (cgen_->X87StackNonEmpty()) {
- cgen_->PopX87();
- }
- cgen_->PushX87DoubleOperand(src);
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, src);
}
}
} else {
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 8231c4e8b7..aebe26b785 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -82,6 +82,17 @@ bool LInstruction::HasDoubleRegisterInput() {
}
+bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op != NULL && op->IsDoubleRegister()) {
+ if (cgen->ToX87Register(op).is(reg)) return true;
+ }
+ }
+ return false;
+}
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -201,7 +212,7 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -291,6 +302,24 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
+ExternalReference LLinkObjectInList::GetReference(Isolate* isolate) {
+ switch (hydrogen()->known_list()) {
+ case HLinkObjectInList::ALLOCATION_SITE_LIST:
+ return ExternalReference::allocation_sites_list_address(isolate);
+ }
+
+ UNREACHABLE();
+ // Return a dummy value.
+ return ExternalReference::isolate_address(isolate);
+}
+
+
+void LLinkObjectInList::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" offset %d", hydrogen()->store_field().offset());
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -350,7 +379,6 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add(" ");
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -495,12 +523,6 @@ LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
}
-LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- X87TopOfStackRegister::ToAllocationIndex(reg));
-}
-
-
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
return Use(value, ToUnallocated(fixed_register));
}
@@ -511,11 +533,6 @@ LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
}
-LOperand* LChunkBuilder::UseX87TopOfStack(HValue* value) {
- return Use(value, ToUnallocated(x87tos));
-}
-
-
LOperand* LChunkBuilder::UseRegister(HValue* value) {
return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
@@ -643,13 +660,6 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
}
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineX87TOS(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, ToUnallocated(x87tos));
-}
-
-
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
@@ -1695,8 +1705,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
@@ -1704,7 +1714,7 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1718,7 +1728,7 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
@@ -1928,11 +1938,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
? TempRegister()
: NULL;
LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- return AssignEnvironment(DefineX87TOS(res));
- }
+ return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1967,9 +1973,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = CpuFeatures::IsSupported(SSE2)
- ? UseRegisterAtStart(instr->value())
- : UseAtStart(instr->value());
+ LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
// Make sure that temp and result_temp are different registers.
@@ -2038,6 +2042,18 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
+ return new(zone())
+ LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
+}
+
+
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -2119,12 +2135,8 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- LOperand* temp = value_is_zero ? NULL : TempRegister();
- return DefineAsRegister(new(zone()) LConstantD(temp));
- } else {
- return DefineX87TOS(new(zone()) LConstantD(NULL));
- }
+ LOperand* temp = value_is_zero ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LConstantD(temp));
} else if (r.IsTagged()) {
return DefineAsRegister(new(zone()) LConstantT);
} else {
@@ -2168,6 +2180,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLinkObjectInList(HLinkObjectInList* instr) {
+ LOperand* object = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LLinkObjectInList* result = new(zone()) LLinkObjectInList(object, temp);
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2316,11 +2336,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
LOperand* object = UseRegisterAtStart(instr->elements());
LOperand* val = NULL;
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- val = UseRegisterAtStart(instr->value());
- } else if (!instr->IsConstantHoleStore()) {
- val = UseX87TopOfStack(instr->value());
- }
+ val = UseRegisterAtStart(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyed(object, key, val);
} else {
@@ -2450,11 +2466,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseTempRegister(instr->value());
} else if (FLAG_track_double_fields &&
instr->field_representation().IsDouble()) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseX87TopOfStack(instr->value());
- }
+ val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
}
@@ -2524,15 +2536,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp = TempRegister();
- LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
@@ -2559,15 +2562,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2748,15 +2742,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(context, key, object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->enumerable(), eax);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index e48e881eb5..a938ee56ba 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -44,7 +44,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -75,7 +74,7 @@ class LCodeGen;
V(ClampTToUint8) \
V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
- V(CmpIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -87,7 +86,6 @@ class LCodeGen;
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
@@ -101,7 +99,6 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
@@ -114,9 +111,11 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
+ V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
@@ -265,7 +264,11 @@ class LInstruction: public ZoneObject {
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
virtual bool ClobbersDoubleRegisters() const {
- return is_call_ || !CpuFeatures::IsSupported(SSE2);
+ return is_call_ ||
+ (!CpuFeatures::IsSupported(SSE2) &&
+ // We only have rudimentary x87 stack tracking, so in general we
+ // cannot handle deoptimization or phi nodes.
+ (HasEnvironment() || IsControl()));
}
virtual bool HasResult() const = 0;
@@ -273,6 +276,7 @@ class LInstruction: public ZoneObject {
bool HasDoubleRegisterResult();
bool HasDoubleRegisterInput();
+ bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -377,7 +381,6 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
class LInstructionGap: public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool ClobbersDoubleRegisters() const { return false; }
virtual bool HasInterestingComment(LCodeGen* gen) const {
return !IsRedundant();
@@ -676,9 +679,9 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -686,8 +689,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -883,6 +887,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
+class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LIsNumberAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+};
+
+
class LIsStringAndBranch: public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1196,10 +1213,6 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
temps_[0] = temp;
}
- virtual bool ClobbersDoubleRegisters() const {
- return false;
- }
-
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
@@ -1694,6 +1707,25 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
};
+class LLinkObjectInList: public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LLinkObjectInList(LOperand* object, LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ ExternalReference GetReference(Isolate* isolate);
+
+ DECLARE_CONCRETE_INSTRUCTION(LinkObjectInList, "link-object-in-list")
+ DECLARE_HYDROGEN_ACCESSOR(LinkObjectInList)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2180,9 +2212,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- virtual bool ClobbersDoubleRegisters() const {
- return false;
- }
+ virtual bool ClobbersDoubleRegisters() const { return false; }
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
@@ -2561,21 +2591,6 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
- public:
- LAllocateObject(LOperand* context, LOperand* temp) {
- inputs_[0] = context;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2663,22 +2678,6 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
- public:
- LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2706,22 +2705,6 @@ class LStackCheck: public LTemplateInstruction<0, 1, 0> {
};
-class LIn: public LTemplateInstruction<1, 3, 0> {
- public:
- LIn(LOperand* context, LOperand* key, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = key;
- inputs_[2] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* object() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
@@ -2858,14 +2841,13 @@ class LChunkBuilder BASE_EMBEDDED {
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
- LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
+ LUnallocated* ToUnallocated(X87Register reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
XMMRegister fixed_register);
- MUST_USE_RESULT LOperand* UseX87TopOfStack(HValue* value);
// A value that is guaranteed to be allocated to a register.
// Operand created by UseRegister is guaranteed to be live until the end of
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index a9a0268aef..ef90c10df0 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -842,6 +842,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
LeaveExitFrameEpilogue();
}
+
void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
@@ -2811,11 +2812,14 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
void MacroAssembler::JumpIfNotUniqueName(Operand operand,
Label* not_unique_name,
Label::Distance distance) {
- STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
- cmp(operand, Immediate(kInternalizedTag));
- j(less, not_unique_name, distance);
- cmp(operand, Immediate(SYMBOL_TYPE));
- j(greater, not_unique_name, distance);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
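+ // Unique names are internalized strings (both mask bits clear below) or
+ // symbols (explicit instance-type check).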
+ Label succeed;
+ test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ j(zero, &succeed);
+ cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+ j(not_equal, not_unique_name, distance);
+
+ bind(&succeed);
}
@@ -3162,10 +3166,10 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
}
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg) {
- Label no_info_available;
+ Label no_memento_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
@@ -3173,14 +3177,14 @@ void MacroAssembler::TestJSArrayForAllocationSiteInfo(
ExternalReference::new_space_allocation_top_address(isolate());
lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_info_available);
+ j(less, &no_memento_available);
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_info_available);
- cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
+ j(greater, &no_memento_available);
+ cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+ bind(&no_memento_available);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 5cb8286bae..3bca930d66 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -905,14 +905,14 @@ class MacroAssembler: public Assembler {
// in eax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, conditional code is set to equal
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg);
private:
bool generating_stub_;
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index f478e574f5..dfcc869567 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1030,6 +1030,7 @@ void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
__ bind(&after_position);
}
+
void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
__ mov(register_location(register_index), Immediate(to));
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 28e043d641..2b391e0b33 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -779,87 +779,53 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* miss_restore_name,
- Label* slow) {
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
- }
-
+void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
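+ // Global objects keep properties in property cells, so check the cell;
+ // dictionary-mode holders need an explicit negative lookup.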
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// store is successful.
+void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- // Ensure no transitions to deprecated maps are followed.
- __ CheckMapDeprecated(transition, scratch1, miss_label);
-
- // Check that we are allowed to write this.
- if (object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- // holder == object indicates that no property was found.
- if (lookup->holder() != *object) {
- holder = lookup->holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- Register holder_reg = CheckPrototypes(
- object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
- // If no property was found, and the holder (the last object in the
- // prototype chain) is in slow mode, we need to do a negative lookup on the
- // holder.
- if (lookup->holder() == *object) {
- if (holder->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(
- masm,
- Handle<GlobalObject>(GlobalObject::cast(holder)),
- name,
- scratch1,
- miss_restore_name);
- } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
- }
- }
- }
-
- Register storage_reg = name_reg;
-
if (details.type() == CONSTANT_FUNCTION) {
Handle<HeapObject> constant(
HeapObject::cast(descriptors->GetValue(descriptor)));
__ LoadHeapObject(scratch1, constant);
__ cmp(value_reg, scratch1);
- __ j(not_equal, miss_restore_name);
+ __ j(not_equal, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_restore_name);
+ __ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_restore_name);
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
@@ -879,7 +845,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_restore_name, DONT_DO_SMI_CHECK);
+ miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
@@ -959,15 +925,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ mov(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
offset,
- name_reg,
+ storage_reg,
scratch1,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@@ -986,15 +949,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ mov(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
offset,
- name_reg,
+ storage_reg,
receiver_reg,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@@ -1010,24 +970,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
- }
-
+void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1181,6 +1132,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ mov(scratch1, Handle<Map>(object->map()));
+
Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1286,7 +1241,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
@@ -1296,6 +1252,17 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
}
+void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
+
+
Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
@@ -1351,7 +1318,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
return reg;
}
@@ -1372,7 +1339,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
}
@@ -1724,11 +1691,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
- Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
- Handle<Cell> kind_feedback_cell =
- isolate()->factory()->NewCell(kind);
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
__ mov(eax, Immediate(argc));
- __ mov(ebx, kind_feedback_cell);
+ __ mov(ebx, site_feedback_cell);
__ mov(edi, function);
ArrayConstructorStub stub(isolate());
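
For context on this hunk: the Array call stub's feedback cell now holds a mutable AllocationSite (with the elements kind in transition_info) instead of a bare Smi encoding the kind. A toy illustration of why a mutable site object makes better feedback (hypothetical names, not V8's API):

```cpp
// Toy model: feedback stored as a mutable object can be refined in place
// by later executions at the same allocation site; an immutable tagged
// integer in the cell would have to be replaced wholesale.
#include <cstdio>

struct AllocationSiteToy {
  int transition_info;  // starts at the initial fast elements kind
};

int main() {
  AllocationSiteToy site{0};   // e.g. FAST_SMI_ELEMENTS
  // A later allocation at this site stores a double, so the recorded
  // kind is widened in place:
  site.transition_info = 4;    // e.g. FAST_DOUBLE_ELEMENTS
  std::printf("kind=%d\n", site.transition_info);
  return 0;
}
```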
@@ -2899,19 +2866,13 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label miss, miss_restore_name;
- // Check that the maps haven't changed, preserving the value register.
- __ JumpIfSmi(receiver(), &miss);
- CheckPrototypes(object, receiver(), holder,
- scratch1(), this->name(), scratch2(),
- name, &miss_restore_name);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
__ pop(scratch1()); // remove the return address
__ push(receiver());
@@ -2925,13 +2886,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
- // Handle store cache miss.
- GenerateRestoreName(masm(), &miss_restore_name, name);
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetICCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -2985,20 +2941,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), scratch2(), &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ pop(scratch1()); // remove the return address
__ push(receiver());
__ push(this->name());
@@ -3011,12 +2953,8 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetICCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
@@ -3220,7 +3158,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- HandlerFrontendFooter(&success, &miss);
+ HandlerFrontendFooter(name, &success, &miss);
__ bind(&success);
Counters* counters = isolate()->counters();
@@ -3233,7 +3171,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
}
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
CodeHandleList* handlers,
Handle<Name> name,
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index ff3a94d18c..f0f5c302b3 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -159,7 +159,7 @@ Address IC::OriginalCodeAddress() const {
JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
// Find the function on the stack and both the active code for the
// function and the original code.
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
Code* code = shared->code();
ASSERT(Debug::HasDebugInfo(shared));
@@ -217,9 +217,11 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
map->RemoveFromCodeCache(String::cast(name), target, index);
- // For loads, handlers are stored in addition to the ICs on the map. Remove
- // those, too.
- if (target->is_load_stub() || target->is_keyed_load_stub()) {
+ // For loads and stores, handlers are stored in addition to the ICs on the
+ // map. Remove those, too.
+ if ((target->is_load_stub() || target->is_keyed_load_stub() ||
+ target->is_store_stub() || target->is_keyed_store_stub()) &&
+ target->type() != Code::NORMAL) {
Code* handler = target->FindFirstCode();
index = map->IndexInCodeCache(name, handler);
if (index >= 0) {
@@ -229,18 +231,32 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
return true;
}
+  // The stub is not in the cache. We've ruled out all other kinds of failure
+  // except for prototype chain changes, a deprecated map, a map that's
+  // different from the one that the stub expects, or a constant global
+  // property that will become mutable. Treat all those situations as
+  // prototype failures (stay monomorphic if possible).
+
// If the IC is shared between multiple receivers (slow dictionary mode), then
// the map cannot be deprecated and the stub invalidated.
- if (cache_holder != OWN_MAP) return false;
+ if (cache_holder == OWN_MAP) {
+ Map* old_map = target->FindFirstMap();
+ if (old_map == map) return true;
+ if (old_map != NULL && old_map->is_deprecated()) return true;
+ }
- // The stub is not in the cache. We've ruled out all other kinds of failure
- // except for proptotype chain changes, a deprecated map, or a map that's
- // different from the one that the stub expects. If the map hasn't changed,
- // assume it's a prototype failure. Treat deprecated maps in the same way as
- // prototype failures (stay monomorphic if possible).
- Map* old_map = target->FindFirstMap();
- if (old_map == NULL) return false;
- return old_map == map || old_map->is_deprecated();
+ if (receiver->IsGlobalObject()) {
+ if (!name->IsName()) return false;
+ Isolate* isolate = target->GetIsolate();
+ LookupResult lookup(isolate);
+ GlobalObject* global = GlobalObject::cast(receiver);
+ global->LocalLookupRealNamedProperty(Name::cast(name), &lookup);
+ if (!lookup.IsFound()) return false;
+ PropertyCell* cell = global->GetPropertyCell(&lookup);
+ return cell->type()->IsConstant();
+ }
+
+ return false;
}
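
The rewritten tail of TryRemoveInvalidPrototypeDependentStub is easier to follow condensed into one predicate; the sketch below paraphrases the hunk above with the same names used in ic.cc and adds no behavior:

```cpp
// Sketch only: true means "treat the miss as a prototype failure and stay
// monomorphic", false means "give up and go generic".
static bool StayMonomorphic(Code* target, Map* map, Object* receiver,
                            Object* name, InlineCacheHolderFlag cache_holder) {
  if (cache_holder == OWN_MAP) {
    Map* old_map = target->FindFirstMap();
    if (old_map == map) return true;                  // prototype chain changed
    if (old_map != NULL && old_map->is_deprecated()) return true;
  }
  if (receiver->IsGlobalObject() && name->IsName()) {
    // A constant global property about to become mutable: the stub baked
    // the constant in, so it must be thrown away.
    LookupResult lookup(target->GetIsolate());
    GlobalObject* global = GlobalObject::cast(receiver);
    global->LocalLookupRealNamedProperty(Name::cast(name), &lookup);
    if (!lookup.IsFound()) return false;
    return global->GetPropertyCell(&lookup)->type()->IsConstant();
  }
  return false;
}
```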
@@ -277,7 +293,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
RelocInfo::Mode IC::ComputeMode() {
Address addr = address();
- Code* code = Code::cast(isolate()->heap()->FindCodeObject(addr));
+ Code* code = Code::cast(isolate()->FindCodeObject(addr));
for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
!it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -972,10 +988,10 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
bool IC::UpdatePolymorphicIC(State state,
- StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
- Handle<Code> code) {
+ Handle<Code> code,
+ StrictModeFlag strict_mode) {
if (code->type() == Code::NORMAL) return false;
if (target()->ic_state() == MONOMORPHIC &&
target()->type() == Code::NORMAL) {
@@ -1026,18 +1042,39 @@ bool IC::UpdatePolymorphicIC(State state,
handlers.Add(code);
}
- Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &receiver_maps, &handlers, number_of_valid_maps, name);
+ Handle<Code> ic = ComputePolymorphicIC(
+ &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode);
set_target(*ic);
return true;
}
+Handle<Code> LoadIC::ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ return isolate()->stub_cache()->ComputePolymorphicLoadIC(
+ receiver_maps, handlers, number_of_valid_maps, name);
+}
+
+
+Handle<Code> StoreIC::ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ return isolate()->stub_cache()->ComputePolymorphicStoreIC(
+ receiver_maps, handlers, number_of_valid_maps, name, strict_mode);
+}
+
+
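
The UpdatePolymorphicIC/ComputePolymorphicIC split is a textbook template method: the base class keeps the shared map-and-handler bookkeeping, and LoadIC/StoreIC contribute only the stub-cache call. A self-contained toy of the pattern (toy types, not V8's):

```cpp
#include <cstdio>

// Base class drives the update; subclasses choose the stub to compile.
struct ICBase {
  void UpdatePolymorphic() {
    // ...shared bookkeeping: collect receiver maps and handlers...
    Compute();  // analogue of the virtual ComputePolymorphicIC
  }
  virtual void Compute() = 0;
  virtual ~ICBase() {}
};

struct LoadICToy : ICBase {
  void Compute() override { std::puts("compute polymorphic load IC"); }
};
struct StoreICToy : ICBase {
  void Compute() override { std::puts("compute polymorphic store IC"); }
};

int main() {
  LoadICToy load;
  StoreICToy store;
  load.UpdatePolymorphic();   // prints the load variant
  store.UpdatePolymorphic();  // prints the store variant
  return 0;
}
```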
void LoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
Handle<Code> handler,
- Handle<String> name) {
+ Handle<String> name,
+ StrictModeFlag strict_mode) {
if (handler->type() == Code::NORMAL) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
+ Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicLoadIC(
receiver, handler, name);
set_target(*ic);
}
@@ -1045,14 +1082,37 @@ void LoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
void KeyedLoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
Handle<Code> handler,
- Handle<String> name) {
+ Handle<String> name,
+ StrictModeFlag strict_mode) {
if (handler->type() == Code::NORMAL) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedMonomorphicIC(
+ Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedLoadIC(
receiver, handler, name);
set_target(*ic);
}
+void StoreIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name,
+ StrictModeFlag strict_mode) {
+ if (handler->type() == Code::NORMAL) return set_target(*handler);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicStoreIC(
+ receiver, handler, name, strict_mode);
+ set_target(*ic);
+}
+
+
+void KeyedStoreIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name,
+ StrictModeFlag strict_mode) {
+ if (handler->type() == Code::NORMAL) return set_target(*handler);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedStoreIC(
+ receiver, handler, name, strict_mode);
+ set_target(*ic);
+}
+
+
void IC::CopyICToMegamorphicCache(Handle<String> name) {
MapHandleList receiver_maps;
CodeHandleList handlers;
@@ -1094,12 +1154,12 @@ void IC::PatchCache(State state,
case UNINITIALIZED:
case PREMONOMORPHIC:
case MONOMORPHIC_PROTOTYPE_FAILURE:
- UpdateMonomorphicIC(receiver, code, name);
+ UpdateMonomorphicIC(receiver, code, name, strict_mode);
break;
case MONOMORPHIC:
// Only move to megamorphic if the target changes.
if (target() != *code) {
- if (target()->is_load_stub()) {
+ if (target()->is_load_stub() || target()->is_store_stub()) {
bool is_same_handler = false;
{
DisallowHeapAllocation no_allocation;
@@ -1108,10 +1168,10 @@ void IC::PatchCache(State state,
}
if (is_same_handler
&& IsTransitionedMapOfMonomorphicTarget(receiver->map())) {
- UpdateMonomorphicIC(receiver, code, name);
+ UpdateMonomorphicIC(receiver, code, name, strict_mode);
break;
}
- if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
+ if (UpdatePolymorphicIC(state, receiver, name, code, strict_mode)) {
break;
}
@@ -1131,13 +1191,15 @@ void IC::PatchCache(State state,
UpdateMegamorphicCache(receiver->map(), *name, *code);
break;
case POLYMORPHIC:
- if (target()->is_load_stub()) {
- if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
+ if (target()->is_load_stub() || target()->is_store_stub()) {
+ if (UpdatePolymorphicIC(state, receiver, name, code, strict_mode)) {
break;
}
CopyICToMegamorphicCache(name);
UpdateMegamorphicCache(receiver->map(), *name, *code);
- set_target(*megamorphic_stub());
+ set_target((strict_mode == kStrictMode)
+ ? *megamorphic_stub_strict()
+ : *megamorphic_stub());
} else {
// When trying to patch a polymorphic keyed load/store element stub
// with anything other than another polymorphic stub, go generic.
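
For orientation, PatchCache walks the usual IC state lattice; this change simply lets store ICs take the same monomorphic-to-polymorphic branch that loads already used. A toy model of the progression (state names are V8's, the transition rule is a simplification):

```cpp
#include <cstdio>

enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, POLYMORPHIC, MEGAMORPHIC };

// Simplified: each miss either re-specializes for the same map or widens
// the IC one step toward the generic (megamorphic) end state.
State NextState(State s, bool same_map_as_before) {
  switch (s) {
    case UNINITIALIZED:
    case PREMONOMORPHIC: return MONOMORPHIC;
    case MONOMORPHIC:    return same_map_as_before ? MONOMORPHIC : POLYMORPHIC;
    case POLYMORPHIC:    return same_map_as_before ? POLYMORPHIC : MEGAMORPHIC;
    default:             return MEGAMORPHIC;   // terminal: generic dispatch
  }
}

int main() {
  State s = UNINITIALIZED;
  s = NextState(s, false);  // first store site seen: MONOMORPHIC
  s = NextState(s, false);  // a second map arrives:  POLYMORPHIC
  std::printf("state=%d\n", s);
  return 0;
}
```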
@@ -1603,12 +1665,14 @@ MaybeObject* StoreIC::Store(State state,
// Use specialized code for setting the length of arrays with fast
// properties. Slow properties might indicate redefinition of the length
- // property.
+ // property. Note that when redefined using Object.freeze, it's possible
+ // to have fast properties but a read-only length.
if (FLAG_use_ic &&
receiver->IsJSArray() &&
name->Equals(isolate()->heap()->length_string()) &&
Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
- receiver->HasFastProperties()) {
+ receiver->HasFastProperties() &&
+ !receiver->map()->is_frozen()) {
Handle<Code> stub =
StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate());
set_target(*stub);
@@ -1668,7 +1732,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
ASSERT(!lookup->IsHandler());
Handle<Code> code = ComputeStoreMonomorphic(
- lookup, strict_mode, receiver, name);
+ lookup, strict_mode, receiver, name, value);
if (code.is_null()) {
Handle<Code> stub = strict_mode == kStrictMode
? generic_stub_strict() : generic_stub();
@@ -1684,7 +1748,8 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<Object> value) {
Handle<JSObject> holder(lookup->holder());
switch (lookup->type()) {
case FIELD:
@@ -1699,7 +1764,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
Handle<PropertyCell> cell(
global->GetPropertyCell(lookup), isolate());
return isolate()->stub_cache()->ComputeStoreGlobal(
- name, global, cell, strict_mode);
+ name, global, cell, value, strict_mode);
}
ASSERT(holder.is_identical_to(receiver));
return isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
@@ -1811,7 +1876,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
KeyedAccessStoreMode old_store_mode =
Code::GetKeyedAccessStoreMode(target()->extra_ic_state());
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
- if (ic_state == MONOMORPHIC && old_store_mode == STANDARD_STORE) {
+ if (ic_state == MONOMORPHIC) {
// If the "old" and "new" maps are in the same elements map family, stay
// MONOMORPHIC and use the map for the most generic ElementsKind.
Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1824,16 +1889,16 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
transitioned_receiver_map, strict_mode, store_mode);
- } else if (*previous_receiver_map == receiver->map()) {
- if (IsGrowStoreMode(store_mode) ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
- // A "normal" IC that handles stores can switch to a version that can
- // grow at the end of the array, handle OOB accesses or copy COW arrays
- // and still stay MONOMORPHIC.
- return isolate()->stub_cache()->ComputeKeyedStoreElement(
- receiver_map, strict_mode, store_mode);
- }
+ } else if (*previous_receiver_map == receiver->map() &&
+ old_store_mode == STANDARD_STORE &&
+ (IsGrowStoreMode(store_mode) ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
+ // A "normal" IC that handles stores can switch to a version that can
+ // grow at the end of the array, handle OOB accesses or copy COW arrays
+ // and still stay MONOMORPHIC.
+ return isolate()->stub_cache()->ComputeKeyedStoreElement(
+ receiver_map, strict_mode, store_mode);
}
}
@@ -2093,7 +2158,8 @@ MaybeObject* KeyedStoreIC::Store(State state,
Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<Object> value) {
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
// current state.
@@ -2245,6 +2311,20 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
}
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ return ic.Store(state,
+ Code::GetStrictMode(extra_ic_state),
+ args.at<Object>(0),
+ args.at<String>(1),
+ args.at<Object>(2));
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
SealHandleScope shs(isolate);
@@ -2401,83 +2481,21 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
}
-void UnaryOpIC::patch(Code* code) {
- set_target(code);
-}
-
-
-const char* UnaryOpIC::GetName(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED: return "Uninitialized";
- case SMI: return "Smi";
- case NUMBER: return "Number";
- case GENERIC: return "Generic";
- default: return "Invalid";
- }
-}
-
-
-UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return v8::internal::UNINITIALIZED;
- case SMI:
- case NUMBER:
- return MONOMORPHIC;
- case GENERIC:
- return v8::internal::GENERIC;
- }
- UNREACHABLE();
- return v8::internal::UNINITIALIZED;
-}
-
-
-Handle<Type> UnaryOpIC::TypeInfoToType(TypeInfo type_info, Isolate* isolate) {
- switch (type_info) {
- case UNINITIALIZED:
- return handle(Type::None(), isolate);
- case SMI:
- return handle(Type::Smi(), isolate);
- case NUMBER:
- return handle(Type::Number(), isolate);
- case GENERIC:
- return handle(Type::Any(), isolate);
- }
- UNREACHABLE();
- return handle(Type::Any(), isolate);
-}
-
-
-UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
- v8::internal::TypeInfo operand_type =
- v8::internal::TypeInfo::FromValue(operand);
- if (operand_type.IsSmi()) {
- return SMI;
- } else if (operand_type.IsNumber()) {
- return NUMBER;
- } else {
- return GENERIC;
- }
-}
-
-
-UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType(
- TypeInfo current_type,
- TypeInfo previous_type) {
- switch (previous_type) {
- case UNINITIALIZED:
- return current_type;
- case SMI:
- return (current_type == GENERIC) ? GENERIC : NUMBER;
- case NUMBER:
- return GENERIC;
- case GENERIC:
- // We should never do patching if we are in GENERIC state.
- UNREACHABLE();
- return GENERIC;
- }
- UNREACHABLE();
- return GENERIC;
+RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
+ SealHandleScope scope(isolate);
+ ASSERT(args.length() == 4);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(2);
+ Handle<Object> object = args.at<Object>(3);
+ StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+ return Runtime::SetObjectProperty(isolate,
+ object,
+ key,
+ value,
+ NONE,
+ strict_mode);
}
@@ -2558,57 +2576,24 @@ void BinaryOpIC::StubInfoToType(int minor_key,
}
-RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
- ASSERT(args.length() == 4);
-
- HandleScope scope(isolate);
- Handle<Object> operand = args.at<Object>(0);
- Token::Value op = static_cast<Token::Value>(args.smi_at(1));
- UnaryOverwriteMode mode = static_cast<UnaryOverwriteMode>(args.smi_at(2));
- UnaryOpIC::TypeInfo previous_type =
- static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
+MaybeObject* UnaryOpIC::Transition(Handle<Object> object) {
+ Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ UnaryOpStub stub(extra_ic_state);
- UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
- type = UnaryOpIC::ComputeNewType(type, previous_type);
+ stub.UpdateStatus(object);
- UnaryOpStub stub(op, mode, type);
- Handle<Code> code = stub.GetCode(isolate);
- if (!code.is_null()) {
- if (FLAG_trace_ic) {
- PrintF("[UnaryOpIC in ");
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- PrintF(" %s => %s #%s @ %p]\n",
- UnaryOpIC::GetName(previous_type),
- UnaryOpIC::GetName(type),
- Token::Name(op),
- static_cast<void*>(*code));
- }
- UnaryOpIC ic(isolate);
- ic.patch(*code);
- }
+ Handle<Code> code = stub.GetCode(isolate());
+ set_target(*code);
- Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::UNARY_MINUS);
- break;
- case Token::BIT_NOT:
- builtin = builtins->javascript_builtin(Builtins::BIT_NOT);
- break;
- default:
- UNREACHABLE();
- }
+ return stub.Result(object, isolate());
+}
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
- bool caught_exception;
- Handle<Object> result = Execution::Call(builtin_function, operand, 0, NULL,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
+RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ UnaryOpIC ic(isolate);
+ return ic.Transition(object);
}
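
The replacement miss handler follows a record, regenerate, retry shape: fold the operand into the stub's recorded state, install the regenerated code, and compute this one result in the runtime. A self-contained toy of that shape (not V8 code):

```cpp
#include <cstdio>

// Toy self-patching unary op: each miss widens the recorded status, and in
// V8 the regenerated stub (stub.GetCode / set_target) would then handle the
// widened case directly on the next call.
struct SelfPatchingNegate {
  enum Status { kSmi, kNumber } status = kSmi;

  void UpdateStatus(double operand) {
    if (operand != static_cast<long long>(operand)) status = kNumber;
  }

  double Transition(double operand) {  // the "miss" entry point
    UpdateStatus(operand);
    // ...regenerate and install specialized code here...
    return -operand;                   // analogue of stub.Result(...)
  }
};

int main() {
  SelfPatchingNegate op;
  std::printf("%f (status=%d)\n", op.Transition(1.5), op.status);
  return 0;
}
```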
@@ -3069,9 +3054,7 @@ MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
// types must be supported as a result of the miss.
bool already_monomorphic = stub.IsMonomorphic();
- CompareNilICStub::State old_state = stub.GetState();
- stub.Record(object);
- old_state.TraceTransition(stub.GetState());
+ stub.UpdateStatus(object);
NilValue nil = stub.GetNilValue();
@@ -3108,7 +3091,7 @@ RUNTIME_FUNCTION(MaybeObject*, Unreachable) {
MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
Code::ExtraICState extra_ic_state) {
ToBooleanStub stub(extra_ic_state);
- bool to_boolean_value = stub.Record(object);
+ bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode(isolate());
set_target(*code);
return Smi::FromInt(to_boolean_value ? 1 : 0);
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 829c6b1547..c9f521f145 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -57,7 +57,6 @@ namespace internal {
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
- ICU(UnaryOp_Patch) \
ICU(BinaryOp_Patch) \
ICU(CompareIC_Miss) \
ICU(CompareNilIC_Miss) \
@@ -170,14 +169,25 @@ class IC {
virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
Handle<Code> handler,
- Handle<String> name) {
+ Handle<String> name,
+ StrictModeFlag strict_mode) {
set_target(*handler);
}
bool UpdatePolymorphicIC(State state,
- StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
- Handle<Code> code);
+ Handle<Code> code,
+ StrictModeFlag strict_mode);
+
+ virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ UNREACHABLE();
+ return Handle<Code>::null();
+  }
+
void CopyICToMegamorphicCache(Handle<String> name);
bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
void PatchCache(State state,
@@ -392,9 +402,18 @@ class LoadIC: public IC {
State state,
Handle<Object> object,
Handle<String> name);
+
virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
Handle<Code> handler,
- Handle<String> name);
+ Handle<String> name,
+ StrictModeFlag strict_mode);
+
+ virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
+
virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
Handle<JSObject> receiver,
Handle<String> name);
@@ -468,7 +487,8 @@ class KeyedLoadIC: public LoadIC {
// Update the inline cache.
virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
Handle<Code> handler,
- Handle<String> name);
+ Handle<String> name,
+ StrictModeFlag strict_mode);
virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
Handle<JSObject> receiver,
Handle<String> name);
@@ -545,6 +565,16 @@ class StoreIC: public IC {
return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
}
+ virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name,
+ StrictModeFlag strict_mode);
+
+ virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -560,7 +590,8 @@ class StoreIC: public IC {
virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
- Handle<String> name);
+ Handle<String> name,
+ Handle<Object> value);
private:
void set_target(Code* code) {
@@ -627,7 +658,8 @@ class KeyedStoreIC: public StoreIC {
virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
- Handle<String> name);
+ Handle<String> name,
+ Handle<Object> value);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
virtual Handle<Code> megamorphic_stub() {
@@ -641,6 +673,11 @@ class KeyedStoreIC: public StoreIC {
KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode);
+ virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name,
+ StrictModeFlag strict_mode);
+
private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
@@ -681,28 +718,9 @@ class KeyedStoreIC: public StoreIC {
class UnaryOpIC: public IC {
public:
- // sorted: increasingly more unspecific (ignoring UNINITIALIZED)
- // TODO(svenpanne) Using enums+switch is an antipattern, use a class instead.
- enum TypeInfo {
- UNINITIALIZED,
- SMI,
- NUMBER,
- GENERIC
- };
-
- static Handle<Type> TypeInfoToType(TypeInfo info, Isolate* isolate);
-
- explicit UnaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- void patch(Code* code);
-
- static const char* GetName(TypeInfo type_info);
-
- static State ToState(TypeInfo type_info);
-
- static TypeInfo GetTypeInfo(Handle<Object> operand);
+ explicit UnaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- static TypeInfo ComputeNewType(TypeInfo type, TypeInfo previous);
+ MUST_USE_RESULT MaybeObject* Transition(Handle<Object> object);
};
@@ -838,6 +856,9 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
new file mode 100644
index 0000000000..91f45278ee
--- /dev/null
+++ b/deps/v8/src/icu_util.cc
@@ -0,0 +1,62 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "icu_util.h"
+
+#if defined(_WIN32) && defined(ENABLE_I18N_SUPPORT)
+#include <windows.h>
+
+#include "unicode/putil.h"
+#include "unicode/udata.h"
+
+#define ICU_UTIL_DATA_SYMBOL "icudt" U_ICU_VERSION_SHORT "_dat"
+#define ICU_UTIL_DATA_SHARED_MODULE_NAME "icudt.dll"
+#endif
+
+namespace v8 {
+
+namespace internal {
+
+bool InitializeICU() {
+#if defined(_WIN32) && defined(ENABLE_I18N_SUPPORT)
+ // We expect to find the ICU data module alongside the current module.
+ HMODULE module = LoadLibraryA(ICU_UTIL_DATA_SHARED_MODULE_NAME);
+ if (!module) return false;
+
+ FARPROC addr = GetProcAddress(module, ICU_UTIL_DATA_SYMBOL);
+ if (!addr) return false;
+
+ UErrorCode err = U_ZERO_ERROR;
+ udata_setCommonData(reinterpret_cast<void*>(addr), &err);
+ return err == U_ZERO_ERROR;
+#else
+ // Mac/Linux bundle the ICU data in.
+ return true;
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/icu_util.h b/deps/v8/src/icu_util.h
new file mode 100644
index 0000000000..478abce508
--- /dev/null
+++ b/deps/v8/src/icu_util.h
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_ICU_UTIL_H_
+#define V8_ICU_UTIL_H_
+
+namespace v8 {
+
+namespace internal {
+
+// Call this function to load ICU's data tables for the current process. This
+// function should be called before ICU is used.
+bool InitializeICU();
+
+} } // namespace v8::internal
+
+#endif // V8_ICU_UTIL_H_
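
A minimal sketch of the intended call pattern, assuming an I18N-enabled build. Note that InitializeICU lives in v8::internal, so in practice V8's own startup calls it; the sketch only illustrates the contract documented above:

```cpp
// Hypothetical startup sketch. On Windows with ENABLE_I18N_SUPPORT this
// loads icudt.dll from next to the executable; on other platforms the data
// is compiled in and the call trivially succeeds.
#include "icu_util.h"

int main() {
  if (!v8::internal::InitializeICU()) {
    // icudt.dll or its data symbol was not found.
    return 1;
  }
  // ...safe to initialize V8 and use i18n features from here on...
  return 0;
}
```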
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index 80dc8eaca0..df0f14a74c 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -273,11 +273,12 @@ class IncrementalMarkingMarkingVisitor
VisitNativeContext(map, context);
}
- static void VisitJSWeakMap(Map* map, HeapObject* object) {
+ static void VisitWeakCollection(Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
VisitPointers(heap,
- HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
- HeapObject::RawField(object, JSWeakMap::kSize));
+ HeapObject::RawField(object,
+ JSWeakCollection::kPropertiesOffset),
+ HeapObject::RawField(object, JSWeakCollection::kSize));
}
static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
@@ -563,6 +564,7 @@ void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
}
}
+
void IncrementalMarking::UncommitMarkingDeque() {
if (state_ == STOPPED && marking_deque_memory_committed_) {
bool success = marking_deque_memory_->Uncommit(
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 6a8758026a..4adcd69d3f 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -509,6 +509,7 @@ void Isolate::Iterate(ObjectVisitor* v) {
Iterate(v, current_t);
}
+
void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
for (DeferredHandles* deferred = deferred_handles_head_;
deferred != NULL;
@@ -621,10 +622,8 @@ static bool IsVisibleInStackTrace(StackFrame* raw_frame,
// Only display JS frames.
if (!raw_frame->is_java_script()) return false;
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- Object* raw_fun = frame->function();
- // Not sure when this can happen but skip it just in case.
- if (!raw_fun->IsJSFunction()) return false;
- if ((raw_fun == caller) && !(*seen_caller)) {
+ JSFunction* fun = frame->function();
+ if ((fun == caller) && !(*seen_caller)) {
*seen_caller = true;
return false;
}
@@ -636,7 +635,6 @@ static bool IsVisibleInStackTrace(StackFrame* raw_frame,
// The --builtins-in-stack-traces command line flag allows including
// internal call sites in the stack trace for debugging purposes.
if (!FLAG_builtins_in_stack_traces) {
- JSFunction* fun = JSFunction::cast(raw_fun);
if (frame->receiver()->IsJSBuiltinsObject() ||
(fun->IsBuiltin() && !fun->shared()->native())) {
return false;
@@ -671,7 +669,7 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
// Set initial size to the maximum inlining level + 1 for the outermost
// function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
if (cursor + 4 > elements->length()) {
@@ -754,7 +752,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
JavaScriptFrame* frame = it.frame();
// Set initial size to the maximum inlining level + 1 for the outermost
// function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
// Create a JSObject to hold the information for the StackFrame.
@@ -1200,7 +1198,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
int pos = frame->LookupCode()->SourcePosition(frame->pc());
Handle<Object> pos_obj(Smi::FromInt(pos), this);
// Fetch function and receiver.
- Handle<JSFunction> fun(JSFunction::cast(frame->function()));
+ Handle<JSFunction> fun(frame->function());
Handle<Object> recv(frame->receiver(), this);
// Advance to the next JavaScript frame and determine if the
// current frame is the top-level frame.
@@ -1224,7 +1222,7 @@ void Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (!it.done()) {
JavaScriptFrame* frame = it.frame();
- JSFunction* fun = JSFunction::cast(frame->function());
+ JSFunction* fun = frame->function();
Object* script = fun->shared()->script();
if (script->IsScript() &&
!(Script::cast(script)->source()->IsUndefined())) {
@@ -1788,7 +1786,8 @@ Isolate::Isolate()
optimizing_compiler_thread_(this),
marking_thread_(NULL),
sweeper_thread_(NULL),
- callback_table_(NULL) {
+ callback_table_(NULL),
+ stress_deopt_count_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1876,6 +1875,10 @@ void Isolate::Deinit() {
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ debugger()->UnloadDebugger();
+#endif
+
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
if (FLAG_sweeper_threads > 0) {
@@ -1896,6 +1899,10 @@ void Isolate::Deinit() {
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
+ if (FLAG_print_deopt_stress) {
+ PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
+ }
+
// We must stop the logger before we tear down other components.
Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
@@ -2130,6 +2137,8 @@ bool Isolate::Init(Deserializer* des) {
ASSERT(Isolate::Current() == this);
TRACE_ISOLATE(init);
+ stress_deopt_count_ = FLAG_deopt_every_n_times;
+
if (function_entry_hook() != NULL) {
// When function entry hooking is in effect, we have to create the code
// stubs from scratch to get entry hooks, rather than loading the previously
@@ -2501,6 +2510,11 @@ CodeStubInterfaceDescriptor*
}
+Object* Isolate::FindCodeObject(Address a) {
+ return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
+}
+
+
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index a0aecd8b27..6e5d5c674a 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -124,6 +124,15 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
} \
} while (false)
+#define RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, T) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if (__isolate__->has_scheduled_exception()) { \
+ __isolate__->PromoteScheduledException(); \
+ return Handle<T>::null(); \
+ } \
+ } while (false)
+
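
A hypothetical use of the new macro inside the tree, mirroring how the neighboring RETURN_IF_SCHEDULED_EXCEPTION is used by MaybeObject*-returning code; RunUserCallbacks is an invented stand-in for any operation that can leave a scheduled exception behind:

```cpp
// Invented helper; stands in for e.g. API callback invocation.
Handle<JSArray> RunUserCallbacks(Isolate* isolate);

Handle<JSArray> CollectSomething(Isolate* isolate) {
  Handle<JSArray> result = RunUserCallbacks(isolate);
  // Propagate a scheduled exception by returning the null handle.
  RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, JSArray);
  return result;
}
```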
#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
do { \
if ((call).is_null()) { \
@@ -1121,6 +1130,11 @@ class Isolate {
function_entry_hook_ = function_entry_hook;
}
+ void* stress_deopt_count_address() { return &stress_deopt_count_; }
+
+ // Given an address occupied by a live code object, return that object.
+ Object* FindCodeObject(Address a);
+
private:
Isolate();
@@ -1356,6 +1370,9 @@ class Isolate {
SweeperThread** sweeper_thread_;
CallbackTable* callback_table_;
+ // Counts deopt points if deopt_every_n_times is enabled.
+ unsigned int stress_deopt_count_;
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 31aebd6ddb..6e414ccee0 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -434,6 +434,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
return UNCHANGED;
}
case JS_ARRAY_TYPE:
+ if (object->IsAccessCheckNeeded()) break;
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeJSArray(Handle<JSArray>::cast(object));
case JS_VALUE_TYPE:
@@ -447,12 +448,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
SerializeString(Handle<String>::cast(object));
return SUCCESS;
} else if (object->IsJSObject()) {
+ if (object->IsAccessCheckNeeded()) break;
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeJSObject(Handle<JSObject>::cast(object));
- } else {
- return SerializeGeneric(object, key, comma, deferred_string_key);
}
}
+
+ return SerializeGeneric(object, key, comma, deferred_string_key);
}
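
Besides the access-check guards, this hunk restructures the fallthrough: every case the fast serializer cannot handle now breaks out of the switch to a single SerializeGeneric call. A runnable toy of that control-flow shape (invented names):

```cpp
#include <cstdio>

enum Kind { kArray, kPlainObject, kOther };

// Toy: unhandled fast-path cases break to one shared generic fallback,
// which in the real code runs the full (access-checked) property machinery.
const char* Serialize(Kind kind, bool needs_access_check) {
  switch (kind) {
    case kArray:
      if (needs_access_check) break;   // bail out of the fast path
      return "fast-array";
    case kPlainObject:
      if (needs_access_check) break;
      return "fast-object";
    default:
      break;
  }
  return "generic";                    // the single fallback path
}

int main() {
  std::printf("%s\n", Serialize(kArray, true));  // prints "generic"
  return 0;
}
```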
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 5da73985de..666866ed32 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -2477,7 +2477,8 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
QuickCheckDetails* details,
bool fall_through_on_failure) {
if (details->characters() == 0) return false;
- GetQuickCheckDetails(details, compiler, 0, trace->at_start() == Trace::FALSE);
+ GetQuickCheckDetails(
+ details, compiler, 0, trace->at_start() == Trace::FALSE_VALUE);
if (details->cannot_match()) return false;
if (!details->Rationalize(compiler->ascii())) return false;
ASSERT(details->characters() == 1 ||
@@ -3066,7 +3067,7 @@ static void EmitHat(RegExpCompiler* compiler,
void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
RegExpMacroAssembler* assembler = compiler->macro_assembler();
Trace::TriBool next_is_word_character = Trace::UNKNOWN;
- bool not_at_start = (trace->at_start() == Trace::FALSE);
+ bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
BoyerMooreLookahead* lookahead = bm_info(not_at_start);
if (lookahead == NULL) {
int eats_at_least =
@@ -3077,12 +3078,15 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
BoyerMooreLookahead* bm =
new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
- if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
- if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
+ if (bm->at(0)->is_non_word())
+ next_is_word_character = Trace::FALSE_VALUE;
+ if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
}
} else {
- if (lookahead->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
- if (lookahead->at(0)->is_word()) next_is_word_character = Trace::TRUE;
+ if (lookahead->at(0)->is_non_word())
+ next_is_word_character = Trace::FALSE_VALUE;
+ if (lookahead->at(0)->is_word())
+ next_is_word_character = Trace::TRUE_VALUE;
}
bool at_boundary = (assertion_type_ == AssertionNode::AT_BOUNDARY);
if (next_is_word_character == Trace::UNKNOWN) {
@@ -3102,10 +3106,10 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
assembler->Bind(&before_word);
BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
assembler->Bind(&ok);
- } else if (next_is_word_character == Trace::TRUE) {
+ } else if (next_is_word_character == Trace::TRUE_VALUE) {
BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
} else {
- ASSERT(next_is_word_character == Trace::FALSE);
+ ASSERT(next_is_word_character == Trace::FALSE_VALUE);
BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
}
}
@@ -3169,7 +3173,7 @@ void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
break;
}
case AT_START: {
- if (trace->at_start() == Trace::FALSE) {
+ if (trace->at_start() == Trace::FALSE_VALUE) {
assembler->GoTo(trace->backtrack());
return;
}
@@ -3986,7 +3990,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
int first_normal_choice = greedy_loop ? 1 : 0;
- bool not_at_start = current_trace->at_start() == Trace::FALSE;
+ bool not_at_start = current_trace->at_start() == Trace::FALSE_VALUE;
const int kEatsAtLeastNotYetInitialized = -1;
int eats_at_least = kEatsAtLeastNotYetInitialized;
@@ -4057,7 +4061,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
new_trace.set_bound_checked_up_to(preload_characters);
}
new_trace.quick_check_performed()->Clear();
- if (not_at_start_) new_trace.set_at_start(Trace::FALSE);
+ if (not_at_start_) new_trace.set_at_start(Trace::FALSE_VALUE);
alt_gen->expects_preload = preload_is_current;
bool generate_full_check_inline = false;
if (FLAG_regexp_optimization &&
@@ -4157,7 +4161,7 @@ void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
Trace out_of_line_trace(*trace);
out_of_line_trace.set_characters_preloaded(preload_characters);
out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE);
+ if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE_VALUE);
ZoneList<Guard*>* guards = alternative.guards();
int guard_count = (guards == NULL) ? 0 : guards->length();
if (next_expects_preload) {
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 528a9a2f46..20c0ac416f 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -1330,7 +1330,7 @@ class Trace {
   // A value for a property that is either known to be true, known to be false,
// or not known.
enum TriBool {
- UNKNOWN = -1, FALSE = 0, TRUE = 1
+ UNKNOWN = -1, FALSE_VALUE = 0, TRUE_VALUE = 1
};
class DeferredAction {
@@ -1426,7 +1426,9 @@ class Trace {
at_start_ == UNKNOWN;
}
TriBool at_start() { return at_start_; }
- void set_at_start(bool at_start) { at_start_ = at_start ? TRUE : FALSE; }
+ void set_at_start(bool at_start) {
+ at_start_ = at_start ? TRUE_VALUE : FALSE_VALUE;
+ }
Label* backtrack() { return backtrack_; }
Label* loop_label() { return loop_label_; }
RegExpNode* stop_node() { return stop_node_; }
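
The FALSE/TRUE to FALSE_VALUE/TRUE_VALUE rename is presumably defensive: platform headers commonly define TRUE and FALSE as object-like macros, which would rewrite bare enumerators before the compiler ever sees them. A minimal reproduction of the hazard:

```cpp
// Stand-ins for the macros a platform header such as <windows.h> provides.
#define FALSE 0
#define TRUE  1

// With the macros in scope, the old enum would preprocess into
//   enum TriBool { UNKNOWN = -1, 0 = 0, 1 = 1 };   // syntax error
// The suffixed names are immune:
enum TriBool { UNKNOWN = -1, FALSE_VALUE = 0, TRUE_VALUE = 1 };

int main() { return UNKNOWN == -1 ? 0 : 1; }
```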
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index b22fdf6e28..3df8d6cc29 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -306,6 +306,7 @@ Label* LChunk::GetAssemblyLabel(int block_id) const {
return label->label();
}
+
void LChunk::MarkEmptyBlocks() {
LPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 3ec2da3327..bab2e101bc 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -61,6 +61,7 @@ void SetElementNonStrict(Handle<JSObject> object,
USE(no_failure);
}
+
 // A simple implementation of a dynamic programming algorithm. It solves
// the problem of finding the difference of 2 arrays. It uses a table of results
// of subproblems. Each cell contains a number together with 2-bit flag
@@ -1456,6 +1457,7 @@ class RelocInfoBuffer {
static const int kMaximalBufferSize = 512*MB;
};
+
// Patch positions in code (changes relocation info section) and possibly
// returns new instance of code.
static Handle<Code> PatchPositionsInCode(
@@ -1619,8 +1621,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
LiveEdit::FunctionPatchabilityStatus status) {
if (!frame->is_java_script()) return false;
- Handle<JSFunction> function(
- JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
+ Handle<JSFunction> function(JavaScriptFrame::cast(frame)->function());
Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
@@ -1829,6 +1830,7 @@ class MultipleFunctionTarget {
Handle<JSArray> m_result;
};
+
// Drops all call frame matched by target and all frames above them.
template<typename TARGET>
static const char* DropActivationsInActiveThreadImpl(
@@ -1925,6 +1927,7 @@ static const char* DropActivationsInActiveThreadImpl(
return NULL;
}
+
// Fills result array with statuses of functions. Modifies the stack
// removing all listed function if possible and if do_drop is true.
static const char* DropActivationsInActiveThread(
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index a733b523ea..d8d8f5934f 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -35,26 +35,19 @@ namespace internal {
const char* const Log::kLogToTemporaryFile = "&";
+const char* const Log::kLogToConsole = "-";
Log::Log(Logger* logger)
: is_stopped_(false),
output_handle_(NULL),
- ll_output_handle_(NULL),
mutex_(NULL),
message_buffer_(NULL),
logger_(logger) {
}
-static void AddIsolateIdIfNeeded(StringStream* stream) {
- Isolate* isolate = Isolate::Current();
- if (isolate->IsDefaultIsolate()) return;
- stream->Add("isolate-%p-", isolate);
-}
-
-
-void Log::Initialize() {
+void Log::Initialize(const char* log_file_name) {
mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
@@ -81,55 +74,12 @@ void Log::Initialize() {
// If we're logging anything, we need to open the log file.
if (Log::InitLogAtStart()) {
- if (strcmp(FLAG_logfile, "-") == 0) {
+ if (strcmp(log_file_name, kLogToConsole) == 0) {
OpenStdout();
- } else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) {
+ } else if (strcmp(log_file_name, kLogToTemporaryFile) == 0) {
OpenTemporaryFile();
} else {
- if (strchr(FLAG_logfile, '%') != NULL ||
- !Isolate::Current()->IsDefaultIsolate()) {
- // If there's a '%' in the log file name we have to expand
- // placeholders.
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- AddIsolateIdIfNeeded(&stream);
- for (const char* p = FLAG_logfile; *p; p++) {
- if (*p == '%') {
- p++;
- switch (*p) {
- case '\0':
- // If there's a % at the end of the string we back up
- // one character so we can escape the loop properly.
- p--;
- break;
- case 'p':
- stream.Add("%d", OS::GetCurrentProcessId());
- break;
- case 't': {
- // %t expands to the current time in milliseconds.
- double time = OS::TimeCurrentMillis();
- stream.Add("%.0f", FmtElm(time));
- break;
- }
- case '%':
- // %% expands (contracts really) to %.
- stream.Put('%');
- break;
- default:
- // All other %'s expand to themselves.
- stream.Put('%');
- stream.Put(*p);
- break;
- }
- } else {
- stream.Put(*p);
- }
- }
- SmartArrayPointer<const char> expanded = stream.ToCString();
- OpenFile(*expanded);
- } else {
- OpenFile(FLAG_logfile);
- }
+ OpenFile(log_file_name);
}
}
}
@@ -147,27 +97,9 @@ void Log::OpenTemporaryFile() {
}
-// Extension added to V8 log file name to get the low-level log name.
-static const char kLowLevelLogExt[] = ".ll";
-
-// File buffer size of the low-level log. We don't use the default to
-// minimize the associated overhead.
-static const int kLowLevelLogBufferSize = 2 * MB;
-
-
void Log::OpenFile(const char* name) {
ASSERT(!IsEnabled());
output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
- if (FLAG_ll_prof) {
- // Open the low-level log file.
- size_t len = strlen(name);
- ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLowLevelLogExt)));
- OS::MemCopy(ll_name.start(), name, len);
- OS::MemCopy(ll_name.start() + len,
- kLowLevelLogExt, sizeof(kLowLevelLogExt));
- ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
- setvbuf(ll_output_handle_, NULL, _IOFBF, kLowLevelLogBufferSize);
- }
}
@@ -181,8 +113,6 @@ FILE* Log::Close() {
}
}
output_handle_ = NULL;
- if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
- ll_output_handle_ = NULL;
DeleteArray(message_buffer_);
message_buffer_ = NULL;
@@ -195,15 +125,15 @@ FILE* Log::Close() {
}
-LogMessageBuilder::LogMessageBuilder(Logger* logger)
- : log_(logger->log_),
+Log::MessageBuilder::MessageBuilder(Log* log)
+ : log_(log),
sl(log_->mutex_),
pos_(0) {
ASSERT(log_->message_buffer_ != NULL);
}
-void LogMessageBuilder::Append(const char* format, ...) {
+void Log::MessageBuilder::Append(const char* format, ...) {
Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
va_list args;
@@ -214,7 +144,7 @@ void LogMessageBuilder::Append(const char* format, ...) {
}
-void LogMessageBuilder::AppendVA(const char* format, va_list args) {
+void Log::MessageBuilder::AppendVA(const char* format, va_list args) {
Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
int result = v8::internal::OS::VSNPrintF(buf, format, args);
@@ -229,7 +159,7 @@ void LogMessageBuilder::AppendVA(const char* format, va_list args) {
}
-void LogMessageBuilder::Append(const char c) {
+void Log::MessageBuilder::Append(const char c) {
if (pos_ < Log::kMessageBufferSize) {
log_->message_buffer_[pos_++] = c;
}
@@ -237,7 +167,7 @@ void LogMessageBuilder::Append(const char c) {
}
-void LogMessageBuilder::AppendDoubleQuotedString(const char* string) {
+void Log::MessageBuilder::AppendDoubleQuotedString(const char* string) {
Append('"');
for (const char* p = string; *p != '\0'; p++) {
if (*p == '"') {
@@ -249,7 +179,7 @@ void LogMessageBuilder::AppendDoubleQuotedString(const char* string) {
}
-void LogMessageBuilder::Append(String* str) {
+void Log::MessageBuilder::Append(String* str) {
   DisallowHeapAllocation no_gc;  // Ensure the string stays valid.
int length = str->length();
for (int i = 0; i < length; i++) {
@@ -258,12 +188,24 @@ void LogMessageBuilder::Append(String* str) {
}
-void LogMessageBuilder::AppendAddress(Address addr) {
+void Log::MessageBuilder::AppendAddress(Address addr) {
Append("0x%" V8PRIxPTR, addr);
}
-void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
+ ASSERT(symbol);
+ Append("symbol(");
+ if (!symbol->name()->IsUndefined()) {
+ Append("\"");
+ AppendDetailed(String::cast(symbol->name()), false);
+ Append("\" ");
+ }
+ Append("hash %x)", symbol->Hash());
+}
+
+
+void Log::MessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
if (str == NULL) return;
DisallowHeapAllocation no_gc; // Ensure string stays valid.
int len = str->length();
@@ -296,7 +238,7 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
}
-void LogMessageBuilder::AppendStringPart(const char* str, int len) {
+void Log::MessageBuilder::AppendStringPart(const char* str, int len) {
if (pos_ + len > Log::kMessageBufferSize) {
len = Log::kMessageBufferSize - pos_;
ASSERT(len >= 0);
@@ -310,7 +252,7 @@ void LogMessageBuilder::AppendStringPart(const char* str, int len) {
}
-void LogMessageBuilder::WriteToLogFile() {
+void Log::MessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
const int written = log_->WriteToFile(log_->message_buffer_, pos_);
if (written != pos_) {
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index c4995402ca..861a8263b8 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -39,7 +39,7 @@ class Logger;
class Log {
public:
// Performs process-wide initialization.
- void Initialize();
+ void Initialize(const char* log_file_name);
// Disables logging, but preserves acquired resources.
void stop() { is_stopped_ = true; }
@@ -66,6 +66,50 @@ class Log {
// This mode is only used in tests, as temporary files are automatically
// deleted on close and thus can't be accessed afterwards.
static const char* const kLogToTemporaryFile;
+ static const char* const kLogToConsole;
+
+ // Utility class for formatting log messages. It fills the message into the
+ // static buffer in Log.
+ class MessageBuilder BASE_EMBEDDED {
+ public:
+ // Create a message builder starting from position 0.
+ // This acquires the mutex in the log as well.
+ explicit MessageBuilder(Log* log);
+ ~MessageBuilder() { }
+
+ // Append string data to the log message.
+ void Append(const char* format, ...);
+
+ // Append string data to the log message.
+ void AppendVA(const char* format, va_list args);
+
+ // Append a character to the log message.
+ void Append(const char c);
+
+ // Append double quoted string to the log message.
+ void AppendDoubleQuotedString(const char* string);
+
+ // Append a heap string.
+ void Append(String* str);
+
+ // Appends an address.
+ void AppendAddress(Address addr);
+
+ void AppendSymbolName(Symbol* symbol);
+
+ void AppendDetailed(String* str, bool show_impl_info);
+
+ // Append a portion of a string.
+ void AppendStringPart(const char* str, int len);
+
+ // Write the log message to the log file currently opened.
+ void WriteToLogFile();
+
+ private:
+ Log* log_;
+ ScopedLock sl;
+ int pos_;
+ };
private:
explicit Log(Logger* logger);
@@ -96,9 +140,6 @@ class Log {
// destination. mutex_ should be acquired before using output_handle_.
FILE* output_handle_;
- // Used when low-level profiling is active.
- FILE* ll_output_handle_;
-
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
Mutex* mutex_;
@@ -110,51 +151,9 @@ class Log {
Logger* logger_;
friend class Logger;
- friend class LogMessageBuilder;
};
-// Utility class for formatting log messages. It fills the message into the
-// static buffer in Log.
-class LogMessageBuilder BASE_EMBEDDED {
- public:
- // Create a message builder starting from position 0. This acquires the mutex
- // in the log as well.
- explicit LogMessageBuilder(Logger* logger);
- ~LogMessageBuilder() { }
-
- // Append string data to the log message.
- void Append(const char* format, ...);
-
- // Append string data to the log message.
- void AppendVA(const char* format, va_list args);
-
- // Append a character to the log message.
- void Append(const char c);
-
- // Append double quoted string to the log message.
- void AppendDoubleQuotedString(const char* string);
-
- // Append a heap string.
- void Append(String* str);
-
- // Appends an address.
- void AppendAddress(Address addr);
-
- void AppendDetailed(String* str, bool show_impl_info);
-
- // Append a portion of a string.
- void AppendStringPart(const char* str, int len);
-
- // Write the log message to the log file currently opened.
- void WriteToLogFile();
-
- private:
- Log* log_;
- ScopedLock sl;
- int pos_;
-};
-
} } // namespace v8::internal
#endif // V8_LOG_UTILS_H_
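
With MessageBuilder nested inside Log, call sites construct the builder from the Log instance instead of the Logger, as the log.cc hunks below show. A minimal sketch of the resulting pattern (ExampleEvent is a hypothetical method; the calls mirror the real ones in log.cc):

    void Logger::ExampleEvent(const char* name) {
      if (!log_->IsEnabled()) return;
      Log::MessageBuilder msg(log_);   // ScopedLock sl acquires log_->mutex_
      msg.Append("example,%s\n", name);
      msg.WriteToLogFile();            // flushed under the same mutex
    }                                  // mutex released when msg is destroyed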
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index e95b96332e..d26279bb24 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -35,6 +35,7 @@
#include "deoptimizer.h"
#include "global-handles.h"
#include "log.h"
+#include "log-utils.h"
#include "macro-assembler.h"
#include "platform.h"
#include "runtime-profiler.h"
@@ -45,6 +46,617 @@
namespace v8 {
namespace internal {
+
+#define DECLARE_EVENT(ignore1, name) name,
+static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+ LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
+};
+#undef DECLARE_EVENT
+
+
+// ComputeMarker must only be used when SharedFunctionInfo is known.
+static const char* ComputeMarker(Code* code) {
+ switch (code->kind()) {
+ case Code::FUNCTION: return code->optimizable() ? "~" : "";
+ case Code::OPTIMIZED_FUNCTION: return "*";
+ default: return "";
+ }
+}
+
+
+class CodeEventLogger {
+ public:
+ virtual ~CodeEventLogger() { }
+
+ void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ const char* comment);
+ void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ Name* name);
+ void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ int args_count);
+ void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* name);
+ void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* source,
+ int line);
+ void RegExpCodeCreateEvent(Code* code, String* source);
+
+ protected:
+ class NameBuffer {
+ public:
+ NameBuffer() { Reset(); }
+
+ void Reset() {
+ utf8_pos_ = 0;
+ }
+
+ void Init(Logger::LogEventsAndTags tag) {
+ Reset();
+ AppendBytes(kLogEventsNames[tag]);
+ AppendByte(':');
+ }
+
+ void AppendName(Name* name) {
+ if (name->IsString()) {
+ AppendString(String::cast(name));
+ } else {
+ Symbol* symbol = Symbol::cast(name);
+ AppendBytes("symbol(");
+ if (!symbol->name()->IsUndefined()) {
+ AppendBytes("\"");
+ AppendString(String::cast(symbol->name()));
+ AppendBytes("\" ");
+ }
+ AppendBytes("hash ");
+ AppendHex(symbol->Hash());
+ AppendByte(')');
+ }
+ }
+
+ void AppendString(String* str) {
+ if (str == NULL) return;
+ int uc16_length = Min(str->length(), kUtf16BufferSize);
+ String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
+ int previous = unibrow::Utf16::kNoPreviousCharacter;
+ for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
+ uc16 c = utf16_buffer[i];
+ if (c <= unibrow::Utf8::kMaxOneByteChar) {
+ utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
+ } else {
+ int char_length = unibrow::Utf8::Length(c, previous);
+ if (utf8_pos_ + char_length > kUtf8BufferSize) break;
+ unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
+ utf8_pos_ += char_length;
+ }
+ previous = c;
+ }
+ }
+
+ void AppendBytes(const char* bytes, int size) {
+ size = Min(size, kUtf8BufferSize - utf8_pos_);
+ OS::MemCopy(utf8_buffer_ + utf8_pos_, bytes, size);
+ utf8_pos_ += size;
+ }
+
+ void AppendBytes(const char* bytes) {
+ AppendBytes(bytes, StrLength(bytes));
+ }
+
+ void AppendByte(char c) {
+ if (utf8_pos_ >= kUtf8BufferSize) return;
+ utf8_buffer_[utf8_pos_++] = c;
+ }
+
+ void AppendInt(int n) {
+ Vector<char> buffer(utf8_buffer_ + utf8_pos_,
+ kUtf8BufferSize - utf8_pos_);
+ int size = OS::SNPrintF(buffer, "%d", n);
+ if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
+ utf8_pos_ += size;
+ }
+ }
+
+ void AppendHex(uint32_t n) {
+ Vector<char> buffer(utf8_buffer_ + utf8_pos_,
+ kUtf8BufferSize - utf8_pos_);
+ int size = OS::SNPrintF(buffer, "%x", n);
+ if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
+ utf8_pos_ += size;
+ }
+ }
+
+ const char* get() { return utf8_buffer_; }
+ int size() const { return utf8_pos_; }
+
+ private:
+ static const int kUtf8BufferSize = 512;
+ static const int kUtf16BufferSize = 128;
+
+ int utf8_pos_;
+ char utf8_buffer_[kUtf8BufferSize];
+ uc16 utf16_buffer[kUtf16BufferSize];
+ };
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ NameBuffer* name_buffer) = 0;
+
+ NameBuffer name_buffer_;
+};
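+
CodeEventLogger is a template-method base class: the public CodeCreateEvent overloads fill the shared name_buffer_ with "<event>:<marker><name>" and then defer to the subclass's LogRecordedBuffer. A hypothetical minimal subclass, for illustration only:

    class ExampleLogger : public CodeEventLogger {
     private:
      virtual void LogRecordedBuffer(Code* code,
                                     SharedFunctionInfo* shared,
                                     NameBuffer* name_buffer) {
        // name_buffer was already assembled by the base class.
        PrintF("%p %.*s\n", static_cast<void*>(code->instruction_start()),
               name_buffer->size(), name_buffer->get());
      }
    };

The three concrete subclasses below (LowLevelLogger, CodeAddressMap, JitLogger) differ only in what they do with the finished buffer.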
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ const char* comment) {
+ name_buffer_.Init(tag);
+ name_buffer_.AppendBytes(comment);
+ LogRecordedBuffer(code, NULL, &name_buffer_);
+}
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ Name* name) {
+ name_buffer_.Init(tag);
+ name_buffer_.AppendName(name);
+ LogRecordedBuffer(code, NULL, &name_buffer_);
+}
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* name) {
+ name_buffer_.Init(tag);
+ name_buffer_.AppendBytes(ComputeMarker(code));
+ name_buffer_.AppendName(name);
+ LogRecordedBuffer(code, shared, &name_buffer_);
+}
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* source, int line) {
+ name_buffer_.Init(tag);
+ name_buffer_.AppendBytes(ComputeMarker(code));
+ name_buffer_.AppendString(shared->DebugName());
+ name_buffer_.AppendByte(' ');
+ if (source->IsString()) {
+ name_buffer_.AppendString(String::cast(source));
+ } else {
+ name_buffer_.AppendBytes("symbol(hash ");
+ name_buffer_.AppendHex(Name::cast(source)->Hash());
+ name_buffer_.AppendByte(')');
+ }
+ name_buffer_.AppendByte(':');
+ name_buffer_.AppendInt(line);
+ LogRecordedBuffer(code, shared, &name_buffer_);
+}
+
+
+void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ int args_count) {
+ name_buffer_.Init(tag);
+ name_buffer_.AppendInt(args_count);
+ LogRecordedBuffer(code, NULL, &name_buffer_);
+}
+
+
+void CodeEventLogger::RegExpCodeCreateEvent(Code* code, String* source) {
+ name_buffer_.Init(Logger::REG_EXP_TAG);
+ name_buffer_.AppendString(source);
+ LogRecordedBuffer(code, NULL, &name_buffer_);
+}
+
+
+// Low-level logging support.
+class LowLevelLogger : public CodeEventLogger {
+ public:
+ explicit LowLevelLogger(const char* file_name);
+ virtual ~LowLevelLogger();
+
+ void CodeMoveEvent(Address from, Address to);
+ void CodeDeleteEvent(Address from);
+ void SnapshotPositionEvent(Address addr, int pos);
+ void CodeMovingGCEvent();
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ NameBuffer* name_buffer);
+
+ // Low-level profiling event structures.
+ struct CodeCreateStruct {
+ static const char kTag = 'C';
+
+ int32_t name_size;
+ Address code_address;
+ int32_t code_size;
+ };
+
+
+ struct CodeMoveStruct {
+ static const char kTag = 'M';
+
+ Address from_address;
+ Address to_address;
+ };
+
+
+ struct CodeDeleteStruct {
+ static const char kTag = 'D';
+
+ Address address;
+ };
+
+
+ struct SnapshotPositionStruct {
+ static const char kTag = 'P';
+
+ Address address;
+ int32_t position;
+ };
+
+
+ static const char kCodeMovingGCTag = 'G';
+
+
+ // Extension added to V8 log file name to get the low-level log name.
+ static const char kLogExt[];
+
+ // File buffer size of the low-level log. We don't use the default to
+ // minimize the associated overhead.
+ static const int kLogBufferSize = 2 * MB;
+
+ void LogCodeInfo();
+ void LogWriteBytes(const char* bytes, int size);
+
+ template <typename T>
+ void LogWriteStruct(const T& s) {
+ char tag = T::kTag;
+ LogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
+ LogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
+ }
+
+ FILE* ll_output_handle_;
+};
+
+const char LowLevelLogger::kLogExt[] = ".ll";
+
+LowLevelLogger::LowLevelLogger(const char* name)
+ : ll_output_handle_(NULL) {
+ // Open the low-level log file.
+ size_t len = strlen(name);
+ ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt)));
+ OS::MemCopy(ll_name.start(), name, len);
+ OS::MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
+ ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
+ setvbuf(ll_output_handle_, NULL, _IOFBF, kLogBufferSize);
+
+ LogCodeInfo();
+}
+
+
+LowLevelLogger::~LowLevelLogger() {
+ fclose(ll_output_handle_);
+ ll_output_handle_ = NULL;
+}
+
+
+void LowLevelLogger::LogCodeInfo() {
+#if V8_TARGET_ARCH_IA32
+ const char arch[] = "ia32";
+#elif V8_TARGET_ARCH_X64
+ const char arch[] = "x64";
+#elif V8_TARGET_ARCH_ARM
+ const char arch[] = "arm";
+#elif V8_TARGET_ARCH_MIPS
+ const char arch[] = "mips";
+#else
+ const char arch[] = "unknown";
+#endif
+ LogWriteBytes(arch, sizeof(arch));
+}
+
+
+void LowLevelLogger::LogRecordedBuffer(Code* code,
+ SharedFunctionInfo*,
+ NameBuffer* name_buffer) {
+ CodeCreateStruct event;
+ event.name_size = name_buffer->size();
+ event.code_address = code->instruction_start();
+ ASSERT(event.code_address == code->address() + Code::kHeaderSize);
+ event.code_size = code->instruction_size();
+ LogWriteStruct(event);
+ LogWriteBytes(name_buffer->get(), name_buffer->size());
+ LogWriteBytes(
+ reinterpret_cast<const char*>(code->instruction_start()),
+ code->instruction_size());
+}
+
+
+void LowLevelLogger::CodeMoveEvent(Address from, Address to) {
+ CodeMoveStruct event;
+ event.from_address = from + Code::kHeaderSize;
+ event.to_address = to + Code::kHeaderSize;
+ LogWriteStruct(event);
+}
+
+
+void LowLevelLogger::CodeDeleteEvent(Address from) {
+ CodeDeleteStruct event;
+ event.address = from + Code::kHeaderSize;
+ LogWriteStruct(event);
+}
+
+
+void LowLevelLogger::SnapshotPositionEvent(Address addr, int pos) {
+ SnapshotPositionStruct event;
+ event.address = addr + Code::kHeaderSize;
+ event.position = pos;
+ LogWriteStruct(event);
+}
+
+
+void LowLevelLogger::LogWriteBytes(const char* bytes, int size) {
+ size_t rv = fwrite(bytes, 1, size, ll_output_handle_);
+ ASSERT(static_cast<size_t>(size) == rv);
+ USE(rv);
+}
+
+
+void LowLevelLogger::CodeMovingGCEvent() {
+ const char tag = kCodeMovingGCTag;
+
+ LogWriteBytes(&tag, sizeof(tag));
+}
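+
The on-disk format of the .ll file is unchanged by moving this code into LowLevelLogger: an architecture string (including its NUL terminator) followed by tag-prefixed fixed-size records. A reader-side summary, inferred from the structs above rather than from any published spec:

    // <arch, e.g. "ia32\0">, then repeated records:
    //   'C' CodeCreateStruct, then name_size name bytes, then code_size instruction bytes
    //   'M' CodeMoveStruct
    //   'D' CodeDeleteStruct
    //   'P' SnapshotPositionStruct
    //   'G' (code-moving GC marker, no payload)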
+
+
+#define LL_LOG(Call) if (ll_logger_) ll_logger_->Call;
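+
LL_LOG, and the JIT_LOG and CODE_ADDRESS_MAP_LOG macros defined further down, all use the same guard-then-forward shape, so every former `if (FLAG_ll_prof) ...` call site shrinks to one line. For example:

    LL_LOG(CodeMoveEvent(from, to));
    // expands to:
    if (ll_logger_) ll_logger_->CodeMoveEvent(from, to);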
+
+
+class CodeAddressMap: public CodeEventLogger {
+ public:
+ CodeAddressMap() { }
+ virtual ~CodeAddressMap() { }
+
+ void CodeMoveEvent(Address from, Address to) {
+ address_to_name_map_.Move(from, to);
+ }
+
+ void CodeDeleteEvent(Address from) {
+ address_to_name_map_.Remove(from);
+ }
+
+ const char* Lookup(Address address) {
+ return address_to_name_map_.Lookup(address);
+ }
+
+ private:
+ class NameMap {
+ public:
+ NameMap() : impl_(&PointerEquals) {}
+
+ ~NameMap() {
+ for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+ DeleteArray(static_cast<const char*>(p->value));
+ }
+ }
+
+ void Insert(Address code_address, const char* name, int name_size) {
+ HashMap::Entry* entry = FindOrCreateEntry(code_address);
+ if (entry->value == NULL) {
+ entry->value = CopyName(name, name_size);
+ }
+ }
+
+ const char* Lookup(Address code_address) {
+ HashMap::Entry* entry = FindEntry(code_address);
+ return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+ }
+
+ void Remove(Address code_address) {
+ HashMap::Entry* entry = FindEntry(code_address);
+ if (entry != NULL) {
+ DeleteArray(static_cast<char*>(entry->value));
+ RemoveEntry(entry);
+ }
+ }
+
+ void Move(Address from, Address to) {
+ if (from == to) return;
+ HashMap::Entry* from_entry = FindEntry(from);
+ ASSERT(from_entry != NULL);
+ void* value = from_entry->value;
+ RemoveEntry(from_entry);
+ HashMap::Entry* to_entry = FindOrCreateEntry(to);
+ ASSERT(to_entry->value == NULL);
+ to_entry->value = value;
+ }
+
+ private:
+ static bool PointerEquals(void* lhs, void* rhs) {
+ return lhs == rhs;
+ }
+
+ static char* CopyName(const char* name, int name_size) {
+ char* result = NewArray<char>(name_size + 1);
+ for (int i = 0; i < name_size; ++i) {
+ char c = name[i];
+ if (c == '\0') c = ' ';
+ result[i] = c;
+ }
+ result[name_size] = '\0';
+ return result;
+ }
+
+ HashMap::Entry* FindOrCreateEntry(Address code_address) {
+ return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
+ }
+
+ HashMap::Entry* FindEntry(Address code_address) {
+ return impl_.Lookup(code_address,
+ ComputePointerHash(code_address),
+ false);
+ }
+
+ void RemoveEntry(HashMap::Entry* entry) {
+ impl_.Remove(entry->key, entry->hash);
+ }
+
+ HashMap impl_;
+
+ DISALLOW_COPY_AND_ASSIGN(NameMap);
+ };
+
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo*,
+ NameBuffer* name_buffer) {
+ address_to_name_map_.Insert(code->address(),
+ name_buffer->get(),
+ name_buffer->size());
+ }
+
+ NameMap address_to_name_map_;
+};
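+
CodeAddressMap subsumes the old Logger::NameMap and RegisterSnapshotCodeName (both deleted below) and is consulted only while serializing a snapshot. A hedged sketch of the flow, using the guard that SnapshotPositionEvent applies:

    // All guarded by Serializer::enabled():
    code_address_map_->CodeCreateEvent(tag, code, name);   // record name at code->address()
    code_address_map_->CodeMoveEvent(from, to);            // follow the object across GC moves
    const char* code_name = code_address_map_->Lookup(addr);
    if (code_name == NULL) return;  // not a code object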
+
+
+#define CODE_ADDRESS_MAP_LOG(Call)\
+ if (Serializer::enabled()) code_address_map_->Call;
+
+
+class JitLogger : public CodeEventLogger {
+ public:
+ explicit JitLogger(JitCodeEventHandler code_event_handler);
+
+ void CodeMovedEvent(Address from, Address to);
+ void CodeDeleteEvent(Address from);
+ void AddCodeLinePosInfoEvent(
+ void* jit_handler_data,
+ int pc_offset,
+ int position,
+ JitCodeEvent::PositionType position_type);
+ void* StartCodePosInfoEvent();
+ void EndCodePosInfoEvent(Code* code, void* jit_handler_data);
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ CodeEventLogger::NameBuffer* name_buffer);
+
+ JitCodeEventHandler code_event_handler_;
+};
+
+#define JIT_LOG(Call) if (jit_logger_) jit_logger_->Call;
+
+
+JitLogger::JitLogger(JitCodeEventHandler code_event_handler)
+ : code_event_handler_(code_event_handler) {
+}
+
+
+void JitLogger::LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ CodeEventLogger::NameBuffer* name_buffer) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADDED;
+ event.code_start = code->instruction_start();
+ event.code_len = code->instruction_size();
+ Handle<Script> script_handle;
+ if (shared && shared->script()->IsScript()) {
+ script_handle = Handle<Script>(Script::cast(shared->script()));
+ }
+ event.script = ToApiHandle<v8::Script>(script_handle);
+ event.name.str = name_buffer->get();
+ event.name.len = name_buffer->size();
+ code_event_handler_(&event);
+}
+
+
+void JitLogger::CodeMovedEvent(Address from, Address to) {
+ Code* from_code = Code::cast(HeapObject::FromAddress(from));
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_MOVED;
+ event.code_start = from_code->instruction_start();
+ event.code_len = from_code->instruction_size();
+
+ // Calculate the header size.
+ const size_t header_size =
+ from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
+
+ // Calculate the new start address of the instructions.
+ event.new_code_start =
+ reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
+
+ code_event_handler_(&event);
+}
+
+
+void JitLogger::CodeDeleteEvent(Address from) {
+ Code* from_code = Code::cast(HeapObject::FromAddress(from));
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_REMOVED;
+ event.code_start = from_code->instruction_start();
+ event.code_len = from_code->instruction_size();
+
+ code_event_handler_(&event);
+}
+
+void JitLogger::AddCodeLinePosInfoEvent(
+ void* jit_handler_data,
+ int pc_offset,
+ int position,
+ JitCodeEvent::PositionType position_type) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO;
+ event.user_data = jit_handler_data;
+ event.line_info.offset = pc_offset;
+ event.line_info.pos = position;
+ event.line_info.position_type = position_type;
+
+ code_event_handler_(&event);
+}
+
+
+void* JitLogger::StartCodePosInfoEvent() {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
+
+ code_event_handler_(&event);
+ return event.user_data;
+}
+
+
+void JitLogger::EndCodePosInfoEvent(Code* code, void* jit_handler_data) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
+ event.code_start = code->instruction_start();
+ event.user_data = jit_handler_data;
+
+ code_event_handler_(&event);
+}
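+
JitLogger is the internal half of the public JIT code-event API; every event above funnels into the embedder-supplied JitCodeEventHandler. A hedged embedder-side sketch of such a handler (HandleJitEvent is hypothetical; the fields match the v8::JitCodeEvent struct populated above):

    static void HandleJitEvent(const v8::JitCodeEvent* event) {
      if (event->type == v8::JitCodeEvent::CODE_ADDED) {
        fprintf(stderr, "jit: %.*s at %p (%d bytes)\n",
                static_cast<int>(event->name.len), event->name.str,
                event->code_start, static_cast<int>(event->code_len));
      }
    }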
+
+
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
@@ -210,193 +822,10 @@ void Profiler::Run() {
}
-// Low-level profiling event structures.
-
-struct LowLevelCodeCreateStruct {
- static const char kTag = 'C';
-
- int32_t name_size;
- Address code_address;
- int32_t code_size;
-};
-
-
-struct LowLevelCodeMoveStruct {
- static const char kTag = 'M';
-
- Address from_address;
- Address to_address;
-};
-
-
-struct LowLevelCodeDeleteStruct {
- static const char kTag = 'D';
-
- Address address;
-};
-
-
-struct LowLevelSnapshotPositionStruct {
- static const char kTag = 'P';
-
- Address address;
- int32_t position;
-};
-
-
-static const char kCodeMovingGCTag = 'G';
-
-
//
// Logger class implementation.
//
-class Logger::NameMap {
- public:
- NameMap() : impl_(&PointerEquals) {}
-
- ~NameMap() {
- for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
- DeleteArray(static_cast<const char*>(p->value));
- }
- }
-
- void Insert(Address code_address, const char* name, int name_size) {
- HashMap::Entry* entry = FindOrCreateEntry(code_address);
- if (entry->value == NULL) {
- entry->value = CopyName(name, name_size);
- }
- }
-
- const char* Lookup(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
- return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
- }
-
- void Remove(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
- if (entry != NULL) {
- DeleteArray(static_cast<char*>(entry->value));
- RemoveEntry(entry);
- }
- }
-
- void Move(Address from, Address to) {
- if (from == to) return;
- HashMap::Entry* from_entry = FindEntry(from);
- ASSERT(from_entry != NULL);
- void* value = from_entry->value;
- RemoveEntry(from_entry);
- HashMap::Entry* to_entry = FindOrCreateEntry(to);
- ASSERT(to_entry->value == NULL);
- to_entry->value = value;
- }
-
- private:
- static bool PointerEquals(void* lhs, void* rhs) {
- return lhs == rhs;
- }
-
- static char* CopyName(const char* name, int name_size) {
- char* result = NewArray<char>(name_size + 1);
- for (int i = 0; i < name_size; ++i) {
- char c = name[i];
- if (c == '\0') c = ' ';
- result[i] = c;
- }
- result[name_size] = '\0';
- return result;
- }
-
- HashMap::Entry* FindOrCreateEntry(Address code_address) {
- return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
- }
-
- HashMap::Entry* FindEntry(Address code_address) {
- return impl_.Lookup(code_address, ComputePointerHash(code_address), false);
- }
-
- void RemoveEntry(HashMap::Entry* entry) {
- impl_.Remove(entry->key, entry->hash);
- }
-
- HashMap impl_;
-
- DISALLOW_COPY_AND_ASSIGN(NameMap);
-};
-
-
-class Logger::NameBuffer {
- public:
- NameBuffer() { Reset(); }
-
- void Reset() {
- utf8_pos_ = 0;
- }
-
- void AppendString(String* str) {
- if (str == NULL) return;
- int uc16_length = Min(str->length(), kUtf16BufferSize);
- String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
- uc16 c = utf16_buffer[i];
- if (c <= unibrow::Utf8::kMaxOneByteChar) {
- utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
- } else {
- int char_length = unibrow::Utf8::Length(c, previous);
- if (utf8_pos_ + char_length > kUtf8BufferSize) break;
- unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
- utf8_pos_ += char_length;
- }
- previous = c;
- }
- }
-
- void AppendBytes(const char* bytes, int size) {
- size = Min(size, kUtf8BufferSize - utf8_pos_);
- OS::MemCopy(utf8_buffer_ + utf8_pos_, bytes, size);
- utf8_pos_ += size;
- }
-
- void AppendBytes(const char* bytes) {
- AppendBytes(bytes, StrLength(bytes));
- }
-
- void AppendByte(char c) {
- if (utf8_pos_ >= kUtf8BufferSize) return;
- utf8_buffer_[utf8_pos_++] = c;
- }
-
- void AppendInt(int n) {
- Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_);
- int size = OS::SNPrintF(buffer, "%d", n);
- if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
- utf8_pos_ += size;
- }
- }
-
- void AppendHex(uint32_t n) {
- Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_);
- int size = OS::SNPrintF(buffer, "%x", n);
- if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
- utf8_pos_ += size;
- }
- }
-
- const char* get() { return utf8_buffer_; }
- int size() const { return utf8_pos_; }
-
- private:
- static const int kUtf8BufferSize = 512;
- static const int kUtf16BufferSize = 128;
-
- int utf8_pos_;
- char utf8_buffer_[kUtf8BufferSize];
- uc16 utf16_buffer[kUtf16BufferSize];
-};
-
-
Logger::Logger(Isolate* isolate)
: isolate_(isolate),
ticker_(NULL),
@@ -405,10 +834,10 @@ Logger::Logger(Isolate* isolate)
logging_nesting_(0),
cpu_profiler_nesting_(0),
log_(new Log(this)),
- name_buffer_(new NameBuffer),
- address_to_name_map_(NULL),
+ ll_logger_(NULL),
+ jit_logger_(NULL),
+ code_address_map_(new CodeAddressMap),
is_initialized_(false),
- code_event_handler_(NULL),
last_address_(NULL),
prev_sp_(NULL),
prev_function_(NULL),
@@ -419,107 +848,14 @@ Logger::Logger(Isolate* isolate)
Logger::~Logger() {
- delete address_to_name_map_;
- delete name_buffer_;
+ delete code_address_map_;
delete log_;
}
-void Logger::IssueCodeAddedEvent(Code* code,
- Script* script,
- const char* name,
- size_t name_len) {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_ADDED;
- event.code_start = code->instruction_start();
- event.code_len = code->instruction_size();
- Handle<Script> script_handle =
- script != NULL ? Handle<Script>(script) : Handle<Script>();
- event.script = ToApiHandle<v8::Script>(script_handle);
- event.name.str = name;
- event.name.len = name_len;
-
- code_event_handler_(&event);
-}
-
-
-void Logger::IssueCodeMovedEvent(Address from, Address to) {
- Code* from_code = Code::cast(HeapObject::FromAddress(from));
-
- JitCodeEvent event;
- event.type = JitCodeEvent::CODE_MOVED;
- event.code_start = from_code->instruction_start();
- event.code_len = from_code->instruction_size();
-
- // Calculate the header size.
- const size_t header_size =
- from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
-
- // Calculate the new start address of the instructions.
- event.new_code_start =
- reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
-
- code_event_handler_(&event);
-}
-
-
-void Logger::IssueCodeRemovedEvent(Address from) {
- Code* from_code = Code::cast(HeapObject::FromAddress(from));
-
- JitCodeEvent event;
- event.type = JitCodeEvent::CODE_REMOVED;
- event.code_start = from_code->instruction_start();
- event.code_len = from_code->instruction_size();
-
- code_event_handler_(&event);
-}
-
-void Logger::IssueAddCodeLinePosInfoEvent(
- void* jit_handler_data,
- int pc_offset,
- int position,
- JitCodeEvent::PositionType position_type) {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO;
- event.user_data = jit_handler_data;
- event.line_info.offset = pc_offset;
- event.line_info.pos = position;
- event.line_info.position_type = position_type;
-
- code_event_handler_(&event);
-}
-
-void* Logger::IssueStartCodePosInfoEvent() {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
-
- code_event_handler_(&event);
- return event.user_data;
-}
-
-void Logger::IssueEndCodePosInfoEvent(Code* code, void* jit_handler_data) {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
- event.code_start = code->instruction_start();
- event.user_data = jit_handler_data;
-
- code_event_handler_(&event);
-}
-
-#define DECLARE_EVENT(ignore1, name) name,
-static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
-};
-#undef DECLARE_EVENT
-
-
void Logger::ProfilerBeginEvent() {
if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
msg.WriteToLogFile();
}
@@ -532,7 +868,7 @@ void Logger::StringEvent(const char* name, const char* value) {
void Logger::UncheckedStringEvent(const char* name, const char* value) {
if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,\"%s\"\n", name, value);
msg.WriteToLogFile();
}
@@ -550,7 +886,7 @@ void Logger::IntPtrTEvent(const char* name, intptr_t value) {
void Logger::UncheckedIntEvent(const char* name, int value) {
if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
}
@@ -558,7 +894,7 @@ void Logger::UncheckedIntEvent(const char* name, int value) {
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
msg.WriteToLogFile();
}
@@ -566,7 +902,7 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
void Logger::HandleEvent(const char* name, Object** location) {
if (!log_->IsEnabled() || !FLAG_log_handles) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
msg.WriteToLogFile();
}
@@ -577,7 +913,7 @@ void Logger::HandleEvent(const char* name, Object** location) {
// FLAG_log_api is true.
void Logger::ApiEvent(const char* format, ...) {
ASSERT(log_->IsEnabled() && FLAG_log_api);
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
va_list ap;
va_start(ap, format);
msg.AppendVA(format, ap);
@@ -616,7 +952,7 @@ void Logger::SharedLibraryEvent(const char* library_path,
uintptr_t start,
uintptr_t end) {
if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
library_path,
start,
@@ -629,7 +965,7 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
uintptr_t start,
uintptr_t end) {
if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
library_path,
start,
@@ -641,7 +977,7 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
void Logger::CodeDeoptEvent(Code* code) {
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
msg.WriteToLogFile();
@@ -651,7 +987,7 @@ void Logger::CodeDeoptEvent(Code* code) {
void Logger::TimerEvent(StartEnd se, const char* name) {
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n"
: "timer-event-end,\"%s\",%ld\n";
@@ -692,7 +1028,7 @@ const char* Logger::TimerEventScope::v8_external = "V8.External";
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
Handle<Object> source = GetProperty(regexp, "source");
if (!source->IsString()) {
@@ -733,7 +1069,7 @@ void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
if (!log_->IsEnabled() || !FLAG_log_regexp) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("regexp-compile,");
LogRegExpSource(regexp);
msg.Append(in_cache ? ",hit\n" : ",miss\n");
@@ -745,7 +1081,7 @@ void Logger::LogRuntime(Vector<const char> format,
JSArray* args) {
if (!log_->IsEnabled() || !FLAG_log_runtime) return;
HandleScope scope(isolate_);
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
for (int i = 0; i < format.length(); i++) {
char c = format[i];
if (c == '%' && i <= format.length() - 2) {
@@ -828,6 +1164,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
}
+
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
@@ -845,7 +1182,7 @@ void Logger::ApiEntryCall(const char* name) {
void Logger::NewEvent(const char* name, void* object, size_t size) {
if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
static_cast<unsigned int>(size));
msg.WriteToLogFile();
@@ -854,7 +1191,7 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
void Logger::DeleteEvent(const char* name, void* object) {
if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
msg.WriteToLogFile();
}
@@ -869,10 +1206,11 @@ void Logger::DeleteEventStatic(const char* name, void* object) {
Isolate::Current()->logger()->DeleteEvent(name, object);
}
+
void Logger::CallbackEventInternal(const char* prefix, Name* name,
Address entry_point) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,%s,-2,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[CALLBACK_TAG]);
@@ -914,56 +1252,12 @@ void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
}
-void Logger::AppendName(Name* name) {
- if (name->IsString()) {
- name_buffer_->AppendString(String::cast(name));
- } else {
- Symbol* symbol = Symbol::cast(name);
- name_buffer_->AppendBytes("symbol(");
- if (!symbol->name()->IsUndefined()) {
- name_buffer_->AppendBytes("\"");
- name_buffer_->AppendString(String::cast(symbol->name()));
- name_buffer_->AppendBytes("\" ");
- }
- name_buffer_->AppendBytes("hash ");
- name_buffer_->AppendHex(symbol->Hash());
- name_buffer_->AppendByte(')');
- }
-}
-
-
-void Logger::InitNameBuffer(LogEventsAndTags tag) {
- name_buffer_->Reset();
- name_buffer_->AppendBytes(kLogEventsNames[tag]);
- name_buffer_->AppendByte(':');
-}
-
-
-void Logger::LogRecordedBuffer(Code* code, SharedFunctionInfo* shared) {
- if (code_event_handler_ != NULL) {
- Script* script = shared && shared->script()->IsScript() ?
- Script::cast(shared->script()) : NULL;
- IssueCodeAddedEvent(code,
- script,
- name_buffer_->get(),
- name_buffer_->size());
- }
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) {
- LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
- }
- if (Serializer::enabled()) {
- RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
- }
-}
-
-
-void Logger::AppendCodeCreateHeader(LogMessageBuilder* msg,
- LogEventsAndTags tag,
- Code* code) {
+static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
+ Logger::LogEventsAndTags tag,
+ Code* code) {
ASSERT(msg);
msg->Append("%s,%s,%d,",
- kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[Logger::CODE_CREATION_EVENT],
kLogEventsNames[tag],
code->kind());
msg->AppendAddress(code->address());
@@ -971,31 +1265,17 @@ void Logger::AppendCodeCreateHeader(LogMessageBuilder* msg,
}
-void Logger::AppendSymbolName(LogMessageBuilder* msg,
- Symbol* symbol) {
- ASSERT(symbol);
- msg->Append("symbol(");
- if (!symbol->name()->IsUndefined()) {
- msg->Append("\"");
- msg->AppendDetailed(String::cast(symbol->name()), false);
- msg->Append("\" ");
- }
- msg->Append("hash %x)", symbol->Hash());
-}
-
-
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- InitNameBuffer(tag);
- name_buffer_->AppendBytes(comment);
- LogRecordedBuffer(code, NULL);
- }
+
+ JIT_LOG(CodeCreateEvent(tag, code, comment));
+ LL_LOG(CodeCreateEvent(tag, code, comment));
+ CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, comment));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
msg.AppendDoubleQuotedString(comment);
msg.Append('\n');
@@ -1007,63 +1287,50 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
Name* name) {
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- InitNameBuffer(tag);
- AppendName(name);
- LogRecordedBuffer(code, NULL);
- }
+
+ JIT_LOG(CodeCreateEvent(tag, code, name));
+ LL_LOG(CodeCreateEvent(tag, code, name));
+ CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, name));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
if (name->IsString()) {
msg.Append('"');
msg.AppendDetailed(String::cast(name), false);
msg.Append('"');
} else {
- AppendSymbolName(&msg, Symbol::cast(name));
+ msg.AppendSymbolName(Symbol::cast(name));
}
msg.Append('\n');
msg.WriteToLogFile();
}
-// ComputeMarker must only be used when SharedFunctionInfo is known.
-static const char* ComputeMarker(Code* code) {
- switch (code->kind()) {
- case Code::FUNCTION: return code->optimizable() ? "~" : "";
- case Code::OPTIMIZED_FUNCTION: return "*";
- default: return "";
- }
-}
-
-
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* name) {
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- InitNameBuffer(tag);
- name_buffer_->AppendBytes(ComputeMarker(code));
- AppendName(name);
- LogRecordedBuffer(code, shared);
- }
+
+ JIT_LOG(CodeCreateEvent(tag, code, shared, info, name));
+ LL_LOG(CodeCreateEvent(tag, code, shared, info, name));
+ CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, shared, info, name));
if (!FLAG_log_code || !log_->IsEnabled()) return;
if (code == isolate_->builtins()->builtin(
Builtins::kLazyCompile))
return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
if (name->IsString()) {
SmartArrayPointer<char> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("\"%s\"", *str);
} else {
- AppendSymbolName(&msg, Symbol::cast(name));
+ msg.AppendSymbolName(Symbol::cast(name));
}
msg.Append(',');
msg.AppendAddress(shared->address());
@@ -1082,25 +1349,13 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
CompilationInfo* info,
Name* source, int line) {
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- InitNameBuffer(tag);
- name_buffer_->AppendBytes(ComputeMarker(code));
- name_buffer_->AppendString(shared->DebugName());
- name_buffer_->AppendByte(' ');
- if (source->IsString()) {
- name_buffer_->AppendString(String::cast(source));
- } else {
- name_buffer_->AppendBytes("symbol(hash ");
- name_buffer_->AppendHex(Name::cast(source)->Hash());
- name_buffer_->AppendByte(')');
- }
- name_buffer_->AppendByte(':');
- name_buffer_->AppendInt(line);
- LogRecordedBuffer(code, shared);
- }
+
+ JIT_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
+ LL_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
+ CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
SmartArrayPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -1110,7 +1365,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s", *sourcestr);
} else {
- AppendSymbolName(&msg, Symbol::cast(source));
+ msg.AppendSymbolName(Symbol::cast(source));
}
msg.Append(":%d\",", line);
msg.AppendAddress(shared->address());
@@ -1120,16 +1375,17 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
-void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ int args_count) {
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- InitNameBuffer(tag);
- name_buffer_->AppendInt(args_count);
- LogRecordedBuffer(code, NULL);
- }
+
+ JIT_LOG(CodeCreateEvent(tag, code, args_count));
+ LL_LOG(CodeCreateEvent(tag, code, args_count));
+ CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, args_count));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
msg.Append("\"args_count: %d\"", args_count);
msg.Append('\n');
@@ -1139,21 +1395,20 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
void Logger::CodeMovingGCEvent() {
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
- LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
+ LL_LOG(CodeMovingGCEvent());
OS::SignalCodeMovingGC();
}
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
if (!is_logging_code_events()) return;
- if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
- InitNameBuffer(REG_EXP_TAG);
- name_buffer_->AppendString(source);
- LogRecordedBuffer(code, NULL);
- }
+
+ JIT_LOG(RegExpCodeCreateEvent(code, source));
+ LL_LOG(RegExpCodeCreateEvent(code, source));
+ CODE_ADDRESS_MAP_LOG(RegExpCodeCreateEvent(code, source));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, REG_EXP_TAG, code);
msg.Append('"');
msg.AppendDetailed(source, false);
@@ -1164,75 +1419,76 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
void Logger::CodeMoveEvent(Address from, Address to) {
- if (code_event_handler_ != NULL) IssueCodeMovedEvent(from, to);
+ JIT_LOG(CodeMovedEvent(from, to));
if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- address_to_name_map_->Move(from, to);
- }
+ LL_LOG(CodeMoveEvent(from, to));
+ CODE_ADDRESS_MAP_LOG(CodeMoveEvent(from, to));
MoveEventInternal(CODE_MOVE_EVENT, from, to);
}
void Logger::CodeDeleteEvent(Address from) {
- if (code_event_handler_ != NULL) IssueCodeRemovedEvent(from);
+ JIT_LOG(CodeDeleteEvent(from));
if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- address_to_name_map_->Remove(from);
- }
- DeleteEventInternal(CODE_DELETE_EVENT, from);
+ LL_LOG(CodeDeleteEvent(from));
+ CODE_ADDRESS_MAP_LOG(CodeDeleteEvent(from));
+
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("%s,", kLogEventsNames[CODE_DELETE_EVENT]);
+ msg.AppendAddress(from);
+ msg.Append('\n');
+ msg.WriteToLogFile();
}
+
void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
int pc_offset,
int position) {
- if (code_event_handler_ != NULL) {
- IssueAddCodeLinePosInfoEvent(jit_handler_data,
- pc_offset,
- position,
- JitCodeEvent::POSITION);
- }
+ JIT_LOG(AddCodeLinePosInfoEvent(jit_handler_data,
+ pc_offset,
+ position,
+ JitCodeEvent::POSITION));
}
+
void Logger::CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
int pc_offset,
int position) {
- if (code_event_handler_ != NULL) {
- IssueAddCodeLinePosInfoEvent(jit_handler_data,
- pc_offset,
- position,
- JitCodeEvent::STATEMENT_POSITION);
- }
+ JIT_LOG(AddCodeLinePosInfoEvent(jit_handler_data,
+ pc_offset,
+ position,
+ JitCodeEvent::STATEMENT_POSITION));
}
+
void Logger::CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder) {
- if (code_event_handler_ != NULL) {
- pos_recorder->AttachJITHandlerData(IssueStartCodePosInfoEvent());
+ if (jit_logger_ != NULL) {
+ pos_recorder->AttachJITHandlerData(jit_logger_->StartCodePosInfoEvent());
}
}
+
void Logger::CodeEndLinePosInfoRecordEvent(Code* code,
void* jit_handler_data) {
- if (code_event_handler_ != NULL) {
- IssueEndCodePosInfoEvent(code, jit_handler_data);
- }
+ JIT_LOG(EndCodePosInfoEvent(code, jit_handler_data));
}
+
void Logger::SnapshotPositionEvent(Address addr, int pos) {
if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
- if (Serializer::enabled() && address_to_name_map_ != NULL) {
- const char* code_name = address_to_name_map_->Lookup(addr);
+ LL_LOG(SnapshotPositionEvent(addr, pos));
+ if (Serializer::enabled()) {
+ const char* code_name = code_address_map_->Lookup(addr);
if (code_name == NULL) return; // Not a code object.
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,%d,", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
msg.AppendDoubleQuotedString(code_name);
msg.Append("\n");
msg.WriteToLogFile();
}
if (!FLAG_log_snapshot_positions) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
msg.AppendAddress(addr);
msg.Append(",%d", pos);
@@ -1250,7 +1506,7 @@ void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
msg.Append(',');
@@ -1260,19 +1516,9 @@ void Logger::MoveEventInternal(LogEventsAndTags event,
}
-void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
- if (!log_->IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg(this);
- msg.Append("%s,", kLogEventsNames[event]);
- msg.AppendAddress(from);
- msg.Append('\n');
- msg.WriteToLogFile();
-}
-
-
void Logger::ResourceEvent(const char* name, const char* tag) {
if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,%s,", name, tag);
uint32_t sec, usec;
@@ -1288,7 +1534,7 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
void Logger::SuspectReadEvent(Name* name, Object* obj) {
if (!log_->IsEnabled() || !FLAG_log_suspect) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
String* class_name = obj->IsJSObject()
? JSObject::cast(obj)->class_name()
: isolate_->heap()->empty_string();
@@ -1300,7 +1546,7 @@ void Logger::SuspectReadEvent(Name* name, Object* obj) {
msg.Append(String::cast(name));
msg.Append('"');
} else {
- AppendSymbolName(&msg, Symbol::cast(name));
+ msg.AppendSymbolName(Symbol::cast(name));
}
msg.Append('\n');
msg.WriteToLogFile();
@@ -1309,7 +1555,7 @@ void Logger::SuspectReadEvent(Name* name, Object* obj) {
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
// Using non-relative system time in order to be able to synchronize with
// external memory profiling events (e.g. DOM memory size).
msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
@@ -1320,7 +1566,7 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
msg.WriteToLogFile();
}
@@ -1328,7 +1574,7 @@ void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
msg.WriteToLogFile();
}
@@ -1336,7 +1582,7 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
void Logger::DebugTag(const char* call_site_tag) {
if (!log_->IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("debug-tag,%s\n", call_site_tag);
msg.WriteToLogFile();
}
@@ -1349,7 +1595,7 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
s.AddCharacter(static_cast<char>(parameter[i]));
}
char* parameter_string = s.Finalize();
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("debug-queue-event,%s,%15.3f,%s\n",
event_type,
OS::TimeCurrentMillis(),
@@ -1361,7 +1607,7 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!log_->IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg(this);
+ Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
@@ -1559,84 +1805,6 @@ void Logger::LogCodeObject(Object* object) {
}
-void Logger::LogCodeInfo() {
- if (!log_->IsEnabled() || !FLAG_ll_prof) return;
-#if V8_TARGET_ARCH_IA32
- const char arch[] = "ia32";
-#elif V8_TARGET_ARCH_X64
- const char arch[] = "x64";
-#elif V8_TARGET_ARCH_ARM
- const char arch[] = "arm";
-#elif V8_TARGET_ARCH_MIPS
- const char arch[] = "mips";
-#else
- const char arch[] = "unknown";
-#endif
- LowLevelLogWriteBytes(arch, sizeof(arch));
-}
-
-
-void Logger::RegisterSnapshotCodeName(Code* code,
- const char* name,
- int name_size) {
- ASSERT(Serializer::enabled());
- if (address_to_name_map_ == NULL) {
- address_to_name_map_ = new NameMap;
- }
- address_to_name_map_->Insert(code->address(), name, name_size);
-}
-
-
-void Logger::LowLevelCodeCreateEvent(Code* code,
- const char* name,
- int name_size) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeCreateStruct event;
- event.name_size = name_size;
- event.code_address = code->instruction_start();
- ASSERT(event.code_address == code->address() + Code::kHeaderSize);
- event.code_size = code->instruction_size();
- LowLevelLogWriteStruct(event);
- LowLevelLogWriteBytes(name, name_size);
- LowLevelLogWriteBytes(
- reinterpret_cast<const char*>(code->instruction_start()),
- code->instruction_size());
-}
-
-
-void Logger::LowLevelCodeMoveEvent(Address from, Address to) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeMoveStruct event;
- event.from_address = from + Code::kHeaderSize;
- event.to_address = to + Code::kHeaderSize;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelCodeDeleteEvent(Address from) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelCodeDeleteStruct event;
- event.address = from + Code::kHeaderSize;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelSnapshotPositionEvent(Address addr, int pos) {
- if (log_->ll_output_handle_ == NULL) return;
- LowLevelSnapshotPositionStruct event;
- event.address = addr + Code::kHeaderSize;
- event.position = pos;
- LowLevelLogWriteStruct(event);
-}
-
-
-void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
- size_t rv = fwrite(bytes, 1, size, log_->ll_output_handle_);
- ASSERT(static_cast<size_t>(size) == rv);
- USE(rv);
-}
-
-
void Logger::LogCodeObjects() {
Heap* heap = isolate_->heap();
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
@@ -1654,15 +1822,15 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
+ int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
if (script->name()->IsString()) {
Handle<String> script_name(String::cast(script->name()));
- int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) {
PROFILE(isolate_,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
*code, *shared, NULL,
- *script_name, line_num + 1));
+ *script_name, line_num));
} else {
// Can't distinguish eval and script here, so always use Script.
PROFILE(isolate_,
@@ -1674,7 +1842,8 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
PROFILE(isolate_,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared, NULL, *func_name));
+ *code, *shared, NULL,
+ isolate_->heap()->empty_string(), line_num));
}
} else if (shared->IsApiFunction()) {
// API function.
@@ -1738,6 +1907,63 @@ void Logger::LogAccessorCallbacks() {
}
+static void AddIsolateIdIfNeeded(StringStream* stream) {
+ Isolate* isolate = Isolate::Current();
+ if (isolate->IsDefaultIsolate()) return;
+ stream->Add("isolate-%p-", isolate);
+}
+
+
+static SmartArrayPointer<const char> PrepareLogFileName(const char* file_name) {
+ if (strchr(file_name, '%') != NULL ||
+ !Isolate::Current()->IsDefaultIsolate()) {
+ // If there's a '%' in the log file name we have to expand
+ // placeholders.
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ AddIsolateIdIfNeeded(&stream);
+ for (const char* p = file_name; *p; p++) {
+ if (*p == '%') {
+ p++;
+ switch (*p) {
+ case '\0':
+ // If there's a % at the end of the string we back up
+ // one character so we can escape the loop properly.
+ p--;
+ break;
+ case 'p':
+ stream.Add("%d", OS::GetCurrentProcessId());
+ break;
+ case 't': {
+ // %t expands to the current time in milliseconds.
+ double time = OS::TimeCurrentMillis();
+ stream.Add("%.0f", FmtElm(time));
+ break;
+ }
+ case '%':
+ // %% expands (contracts really) to %.
+ stream.Put('%');
+ break;
+ default:
+ // All other %'s expand to themselves.
+ stream.Put('%');
+ stream.Put(*p);
+ break;
+ }
+ } else {
+ stream.Put(*p);
+ }
+ }
+ return SmartArrayPointer<const char>(stream.ToCString());
+ }
+ int length = StrLength(file_name);
+ char* str = NewArray<char>(length + 1);
+ OS::MemCopy(str, file_name, length);
+ str[length] = '\0';
+ return SmartArrayPointer<const char>(str);
+}
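+
For reference, the placeholder expansion implemented above (all values hypothetical):

    // --logfile="v8-%p-%t.log" -> "v8-1234-1372700000000.log"  (%p = pid, %t = ms since epoch)
    // --logfile="50%%.log"     -> "50%.log"                    (%% = a literal '%')
    // A non-default isolate additionally gets an "isolate-<address>-" prefix.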
+
+
bool Logger::SetUp(Isolate* isolate) {
// Tests and EnsureInitialize() can call this twice in a row. It's harmless.
if (is_initialized_) return true;
@@ -1754,9 +1980,13 @@ bool Logger::SetUp(Isolate* isolate) {
FLAG_prof_auto = false;
}
- log_->Initialize();
+ SmartArrayPointer<const char> log_file_name =
+ PrepareLogFileName(FLAG_logfile);
+ log_->Initialize(*log_file_name);
- if (FLAG_ll_prof) LogCodeInfo();
+ if (FLAG_ll_prof) {
+ ll_logger_ = new LowLevelLogger(*log_file_name);
+ }
ticker_ = new Ticker(isolate, kSamplingIntervalMs);
@@ -1784,9 +2014,16 @@ bool Logger::SetUp(Isolate* isolate) {
void Logger::SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler) {
- code_event_handler_ = event_handler;
+ if (jit_logger_) {
+ delete jit_logger_;
+ jit_logger_ = NULL;
+ }
+
+ if (event_handler) {
+ jit_logger_ = new JitLogger(event_handler);
+ }
- if (code_event_handler_ != NULL && (options & kJitCodeEventEnumExisting)) {
+ if (jit_logger_ != NULL && (options & kJitCodeEventEnumExisting)) {
HandleScope scope(isolate_);
LogCodeObjects();
LogCompiledFunctions();
@@ -1813,6 +2050,16 @@ FILE* Logger::TearDown() {
delete ticker_;
ticker_ = NULL;
+ if (ll_logger_) {
+ delete ll_logger_;
+ ll_logger_ = NULL;
+ }
+
+ if (jit_logger_) {
+ delete jit_logger_;
+ jit_logger_ = NULL;
+ }
+
return log_->Close();
}
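
Taken together, ownership after this patch looks roughly like this (a hedged lifecycle sketch, not literal code from the tree):

    Logger* logger = isolate->logger();
    logger->SetUp(isolate);                    // creates Log; with --ll-prof, also a LowLevelLogger
    logger->SetCodeEventHandler(opts, fn);     // wraps fn in a heap-allocated JitLogger
    FILE* log_file = logger->TearDown();       // deletes ll_logger_ and jit_logger_, closes the log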
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 07ecd0efe7..194ad9d015 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -31,7 +31,6 @@
#include "allocation.h"
#include "objects.h"
#include "platform.h"
-#include "log-utils.h"
namespace v8 {
namespace internal {
@@ -71,15 +70,16 @@ namespace internal {
// tick profiler requires code events, so --prof implies --log-code.
// Forward declarations.
-class LogMessageBuilder;
+class CodeAddressMap;
+class CompilationInfo;
+class CpuProfiler;
+class Isolate;
+class Log;
+class PositionsRecorder;
class Profiler;
class Semaphore;
-struct TickSample;
class Ticker;
-class Isolate;
-class PositionsRecorder;
-class CpuProfiler;
-class CompilationInfo;
+struct TickSample;
#undef LOG
#define LOG(isolate, Call) \
@@ -151,6 +151,8 @@ class CompilationInfo;
// original tags when writing to the log.
+class JitLogger;
+class LowLevelLogger;
class Sampler;
@@ -336,12 +338,8 @@ class Logger {
return logging_nesting_ > 0;
}
- bool is_code_event_handler_enabled() {
- return code_event_handler_ != NULL;
- }
-
bool is_logging_code_events() {
- return is_logging() || code_event_handler_ != NULL;
+ return is_logging() || jit_logger_ != NULL;
}
// Pause/Resume collection of profiling data.
@@ -376,25 +374,9 @@ class Logger {
void LogFailure();
private:
- class NameBuffer;
- class NameMap;
-
explicit Logger(Isolate* isolate);
~Logger();
- // Issue code notifications.
- void IssueCodeAddedEvent(Code* code,
- Script* script,
- const char* name,
- size_t name_len);
- void IssueCodeMovedEvent(Address from, Address to);
- void IssueCodeRemovedEvent(Address from);
- void IssueAddCodeLinePosInfoEvent(void* jit_handler_data,
- int pc_offset,
- int position,
- JitCodeEvent::PositionType position_Type);
- void* IssueStartCodePosInfoEvent();
- void IssueEndCodePosInfoEvent(Code* code, void* jit_handler_data);
// Emits the profiler's first message.
void ProfilerBeginEvent();
@@ -406,9 +388,6 @@ class Logger {
// Internal configurable move event.
void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
- // Internal configurable move event.
- void DeleteEventInternal(LogEventsAndTags event, Address from);
-
// Emits the source code of a regexp. Used by regexp events.
void LogRegExpSource(Handle<JSRegExp> regexp);
@@ -418,42 +397,6 @@ class Logger {
// Helper method. It resets name_buffer_ and adds the tag name into it.
void InitNameBuffer(LogEventsAndTags tag);
- // Helper method. It push recorded buffer into different handlers.
- void LogRecordedBuffer(Code*, SharedFunctionInfo*);
-
- // Helper method. It dumps name into name_buffer_.
- void AppendName(Name* name);
-
- // Appends standard code header.
- void AppendCodeCreateHeader(LogMessageBuilder*, LogEventsAndTags, Code*);
-
- // Appends symbol for the name.
- void AppendSymbolName(LogMessageBuilder*, Symbol*);
-
- // Emits general information about generated code.
- void LogCodeInfo();
-
- void RegisterSnapshotCodeName(Code* code, const char* name, int name_size);
-
- // Low-level logging support.
-
- void LowLevelCodeCreateEvent(Code* code, const char* name, int name_size);
-
- void LowLevelCodeMoveEvent(Address from, Address to);
-
- void LowLevelCodeDeleteEvent(Address from);
-
- void LowLevelSnapshotPositionEvent(Address addr, int pos);
-
- void LowLevelLogWriteBytes(const char* bytes, int size);
-
- template <typename T>
- void LowLevelLogWriteStruct(const T& s) {
- char tag = T::kTag;
- LowLevelLogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
- LowLevelLogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
- }
-
// Emits a profiler tick event. Used by the profiler thread.
void TickEvent(TickSample* sample, bool overflow);
@@ -483,7 +426,6 @@ class Logger {
// private members.
friend class EventLog;
friend class Isolate;
- friend class LogMessageBuilder;
friend class TimeLog;
friend class Profiler;
template <StateTag Tag> friend class VMState;
@@ -495,18 +437,14 @@ class Logger {
int cpu_profiler_nesting_;
Log* log_;
-
- NameBuffer* name_buffer_;
-
- NameMap* address_to_name_map_;
+ LowLevelLogger* ll_logger_;
+ JitLogger* jit_logger_;
+ CodeAddressMap* code_address_map_;
// Guards against multiple calls to TearDown() that can happen in some tests.
// 'true' between SetUp() and TearDown().
bool is_initialized_;
- // The code event handler - if any.
- JitCodeEventHandler code_event_handler_;
-
// Support for 'incremental addresses' in compressed logs:
// LogMessageBuilder::AppendAddress(Address addr)
Address last_address_;
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index e442b4413a..d50231dcef 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -107,6 +107,7 @@ macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
+macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
@@ -145,6 +146,7 @@ const kBoundArgumentsStartIndex = 2;
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
+macro TO_INTEGER_FOR_SIDE_EFFECT(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToNumber(arg));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 638968506d..95f673c2a4 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -73,7 +73,7 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
- encountered_weak_maps_(NULL) { }
+ encountered_weak_collections_(NULL) { }
#ifdef VERIFY_HEAP
@@ -396,14 +396,14 @@ void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
ASSERT(state_ == PREPARE_GC);
- ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
+ ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
if (FLAG_collect_maps) ClearNonLiveReferences();
- ClearWeakMaps();
+ ClearWeakCollections();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -781,10 +781,12 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
- PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
+ PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
+ "evacuation candidate limit: %d\n",
static_cast<double>(over_reserved) / MB,
static_cast<double>(reserved) / MB,
- static_cast<int>(kFreenessThreshold));
+ static_cast<int>(kFreenessThreshold),
+ max_evacuation_candidates);
}
intptr_t estimated_release = 0;
@@ -811,7 +813,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
} else if (mode == REDUCE_MEMORY_FOOTPRINT) {
// Don't try to release too many pages.
- if (estimated_release >= ((over_reserved * 3) / 4)) {
+ if (estimated_release >= over_reserved) {
continue;
}
@@ -828,7 +830,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
if (free_pct >= kFreenessThreshold) {
- estimated_release += 2 * p->area_size() - free_bytes;
+ estimated_release += free_bytes;
fragmentation = free_pct;
} else {
fragmentation = 0;
@@ -1447,35 +1449,36 @@ class MarkCompactMarkingVisitor
shared->BeforeVisitingPointers();
}
- static void VisitJSWeakMap(Map* map, HeapObject* object) {
+ static void VisitWeakCollection(Map* map, HeapObject* object) {
MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
+ JSWeakCollection* weak_collection =
+ reinterpret_cast<JSWeakCollection*>(object);
// Enqueue weak collection in linked list of encountered weak collections.
- if (weak_map->next() == Smi::FromInt(0)) {
- weak_map->set_next(collector->encountered_weak_maps());
- collector->set_encountered_weak_maps(weak_map);
+ if (weak_collection->next() == Smi::FromInt(0)) {
+ weak_collection->set_next(collector->encountered_weak_collections());
+ collector->set_encountered_weak_collections(weak_collection);
}
// Skip visiting the backing hash table containing the mappings.
- int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
+ int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
map->GetHeap(),
object,
- JSWeakMap::BodyDescriptor::kStartOffset,
- JSWeakMap::kTableOffset);
+ JSWeakCollection::BodyDescriptor::kStartOffset,
+ JSWeakCollection::kTableOffset);
BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
map->GetHeap(),
object,
- JSWeakMap::kTableOffset + kPointerSize,
+ JSWeakCollection::kTableOffset + kPointerSize,
object_size);
// Mark the backing hash table without pushing it on the marking stack.
- Object* table_object = weak_map->table();
+ Object* table_object = weak_collection->table();
if (!table_object->IsHashTable()) return;
ObjectHashTable* table = ObjectHashTable::cast(table_object);
Object** table_slot =
- HeapObject::RawField(weak_map, JSWeakMap::kTableOffset);
+ HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
MarkBit table_mark = Marking::MarkBitFrom(table);
collector->RecordSlot(table_slot, table_slot, table);
if (!table_mark.Get()) collector->SetMark(table, table_mark);
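The enqueue above threads each weak collection through its own next field, using Smi zero both as the "not yet encountered" marker and as the end-of-list sentinel; this is safe because marking visits each object at most once per GC cycle. A small standalone analogue, using a dedicated tail sentinel instead of the dual-purpose Smi (mock types, not V8's):

#include <cstdio>

struct WeakCollection {
  WeakCollection* next = nullptr;  // nullptr = "not enqueued this cycle"
};

WeakCollection kTail;  // end-of-list marker, so head->next is never nullptr

struct Collector {
  WeakCollection* head = &kTail;

  void Encounter(WeakCollection* wc) {
    if (wc->next == nullptr) {  // enqueue at most once
      wc->next = head;
      head = wc;
    }
  }

  void ClearAll() {  // mirrors ClearWeakCollections: unlink and reset
    for (WeakCollection* wc = head; wc != &kTail;) {
      WeakCollection* next = wc->next;
      wc->next = nullptr;
      wc = next;
    }
    head = &kTail;
  }
};

int main() {
  WeakCollection a, b;
  Collector c;
  c.Encounter(&a);
  c.Encounter(&b);
  c.Encounter(&a);  // ignored: already on the list
  int n = 0;
  for (WeakCollection* wc = c.head; wc != &kTail; wc = wc->next) n++;
  std::printf("%d collections encountered\n", n);  // 2
}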
@@ -1632,11 +1635,18 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
TRANSITION_ARRAY_SUB_TYPE,
fixed_array_size);
}
- if (map_obj->code_cache() != heap->empty_fixed_array()) {
+ if (map_obj->has_code_cache()) {
+ CodeCache* cache = CodeCache::cast(map_obj->code_cache());
heap->RecordObjectStats(
FIXED_ARRAY_TYPE,
MAP_CODE_CACHE_SUB_TYPE,
- FixedArray::cast(map_obj->code_cache())->Size());
+ cache->default_cache()->Size());
+ if (!cache->normal_type_cache()->IsUndefined()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ MAP_CODE_CACHE_SUB_TYPE,
+ FixedArray::cast(cache->normal_type_cache())->Size());
+ }
}
ObjectStatsVisitBase(kVisitMap, map, obj);
}
@@ -1929,47 +1939,36 @@ static void DiscoverGreyObjectsWithIterator(Heap* heap,
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
+ MemoryChunk* p) {
ASSERT(!marking_deque->IsFull());
ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- MarkBit::CellType* cells = p->markbits()->cells();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- Address cell_base = p->area_start();
- int cell_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base)));
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
-
- for (;
- cell_index < last_cell_index;
- cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT(static_cast<unsigned>(cell_index) ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
-
- const MarkBit::CellType current_cell = cells[cell_index];
+ const MarkBit::CellType current_cell = *cell;
if (current_cell == 0) continue;
- const MarkBit::CellType next_cell = cells[cell_index + 1];
- MarkBit::CellType grey_objects = current_cell &
- ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+ MarkBit::CellType grey_objects;
+ if (it.HasNext()) {
+ const MarkBit::CellType next_cell = *(cell+1);
+ grey_objects = current_cell &
+ ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+ } else {
+ grey_objects = current_cell & (current_cell >> 1);
+ }
int offset = 0;
while (grey_objects != 0) {
int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
grey_objects >>= trailing_zeros;
offset += trailing_zeros;
- MarkBit markbit(&cells[cell_index], 1 << offset, false);
+ MarkBit markbit(cell, 1 << offset, false);
ASSERT(Marking::IsGrey(markbit));
Marking::GreyToBlack(markbit);
Address addr = cell_base + offset * kPointerSize;
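The bit arithmetic above is worth unpacking. Marking encodes each object's color in two consecutive bitmap bits (white 00, black 10, grey 11, low bit first), so the object whose mark bit sits at position i is grey exactly when bits i and i+1 are both set; the (next_cell << 31) term carries bit 0 of the following cell in for the pair that straddles a cell boundary. A worked example with made-up cell values, where __builtin_ctz stands in for CompilerIntrinsics::CountTrailingZeros and assumes GCC/Clang:

#include <cstdint>
#include <cstdio>

int main() {
  const int kBitsPerCell = 32;
  // Bits 1,2 set: object at offset 1 is grey. Bit 31 set with bit 0 of the
  // next cell set: that pair straddles the cell boundary and is also grey.
  uint32_t current_cell = (1u << 1) | (1u << 2) | (1u << 31);
  uint32_t next_cell = 1u;

  uint32_t grey_objects = current_cell &
      ((current_cell >> 1) | (next_cell << (kBitsPerCell - 1)));

  // Walk the set bits with count-trailing-zeros, as the collector does
  // (see the MarkWordToObjectStarts sketch further down).
  int offset = 0;
  while (grey_objects != 0) {
    int trailing_zeros = __builtin_ctz(grey_objects);
    grey_objects >>= trailing_zeros;
    offset += trailing_zeros;
    std::printf("grey object at word offset %d\n", offset);  // 1, then 31
    grey_objects >>= 1;
    offset += 1;
  }
}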
@@ -1986,6 +1985,67 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
}
+int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
+ NewSpace* new_space,
+ NewSpacePage* p) {
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ MarkBit::CellType* cells = p->markbits()->cells();
+ int survivors_size = 0;
+
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
+
+ MarkBit::CellType current_cell = *cell;
+ if (current_cell == 0) continue;
+
+ int offset = 0;
+ while (current_cell != 0) {
+ int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
+ current_cell >>= trailing_zeros;
+ offset += trailing_zeros;
+ Address address = cell_base + offset * kPointerSize;
+ HeapObject* object = HeapObject::FromAddress(address);
+
+ int size = object->Size();
+ survivors_size += size;
+
+ offset++;
+ current_cell >>= 1;
+ // Aggressively promote young survivors to the old space.
+ if (TryPromoteObject(object, size)) {
+ continue;
+ }
+
+ // Promotion failed. Just migrate the object to the other semispace.
+ MaybeObject* allocation = new_space->AllocateRaw(size);
+ if (allocation->IsFailure()) {
+ if (!new_space->AddFreshPage()) {
+ // Shouldn't happen. We are sweeping linearly, and to-space
+ // has the same number of pages as from-space, so there is
+ // always room.
+ UNREACHABLE();
+ }
+ allocation = new_space->AllocateRaw(size);
+ ASSERT(!allocation->IsFailure());
+ }
+ Object* target = allocation->ToObjectUnchecked();
+
+ MigrateObject(HeapObject::cast(target)->address(),
+ object->address(),
+ size,
+ NEW_SPACE);
+ }
+ *cells = 0;
+ }
+ return survivors_size;
+}
+
+
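The promotion policy in the new function is: try to move each live object straight to old space; only if that fails, copy it within the new space, adding one fresh to-space page when the current one is full (which, as the comment notes, should always succeed). A compressed sketch of that decision chain with mock spaces; the names and byte-budget allocator are assumptions of this sketch:

#include <cstdio>

struct Space {
  int free_bytes;
  bool Allocate(int size) {
    if (size > free_bytes) return false;
    free_bytes -= size;
    return true;
  }
};

// Returns true once the object found a home; mirrors the promote-then-migrate
// order in DiscoverAndPromoteBlackObjectsOnPage.
bool Evacuate(Space* old_space, Space* to_space, int size) {
  if (old_space->Allocate(size)) return true;  // aggressive promotion first
  if (to_space->Allocate(size)) return true;   // fall back to the semispace
  to_space->free_bytes += 1 << 20;             // "AddFreshPage": one more page
  return to_space->Allocate(size);             // must succeed, else UNREACHABLE
}

int main() {
  Space old_space = {64}, to_space = {128};
  std::printf("%d\n", Evacuate(&old_space, &to_space, 48));   // 1: promoted
  std::printf("%d\n", Evacuate(&old_space, &to_space, 100));  // 1: migrated
}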
static void DiscoverGreyObjectsInSpace(Heap* heap,
MarkingDeque* marking_deque,
PagedSpace* space) {
@@ -2003,6 +2063,18 @@ static void DiscoverGreyObjectsInSpace(Heap* heap,
}
+static void DiscoverGreyObjectsInNewSpace(Heap* heap,
+ MarkingDeque* marking_deque) {
+ NewSpace* space = heap->new_space();
+ NewSpacePageIterator it(space->bottom(), space->top());
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ DiscoverGreyObjectsOnPage(marking_deque, page);
+ if (marking_deque->IsFull()) return;
+ }
+}
+
+
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
Object* o = *p;
if (!o->IsHeapObject()) return false;
@@ -2109,8 +2181,7 @@ void MarkCompactCollector::EmptyMarkingDeque() {
void MarkCompactCollector::RefillMarkingDeque() {
ASSERT(marking_deque_.overflowed());
- SemiSpaceIterator new_it(heap()->new_space());
- DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
+ DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
if (marking_deque_.IsFull()) return;
DiscoverGreyObjectsInSpace(heap(),
@@ -2175,7 +2246,7 @@ void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
isolate()->global_handles()->IterateObjectGroups(
visitor, &IsUnmarkedHeapObjectWithHeap);
MarkImplicitRefGroups();
- ProcessWeakMaps();
+ ProcessWeakCollections();
work_to_do = !marking_deque_.IsEmpty();
ProcessMarkingDeque();
}
@@ -2584,13 +2655,15 @@ void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
}
-void MarkCompactCollector::ProcessWeakMaps() {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKMAP_PROCESS);
- Object* weak_map_obj = encountered_weak_maps();
- while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
- ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+void MarkCompactCollector::ProcessWeakCollections() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
+ Object* weak_collection_obj = encountered_weak_collections();
+ while (weak_collection_obj != Smi::FromInt(0)) {
+ ASSERT(MarkCompactCollector::IsMarked(
+ HeapObject::cast(weak_collection_obj)));
+ JSWeakCollection* weak_collection =
+ reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+ ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
Object** anchor = reinterpret_cast<Object**>(table->address());
for (int i = 0; i < table->Capacity(); i++) {
if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
@@ -2605,27 +2678,29 @@ void MarkCompactCollector::ProcessWeakMaps() {
this, anchor, value_slot);
}
}
- weak_map_obj = weak_map->next();
+ weak_collection_obj = weak_collection->next();
}
}
-void MarkCompactCollector::ClearWeakMaps() {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKMAP_CLEAR);
- Object* weak_map_obj = encountered_weak_maps();
- while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
- JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
- ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+void MarkCompactCollector::ClearWeakCollections() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
+ Object* weak_collection_obj = encountered_weak_collections();
+ while (weak_collection_obj != Smi::FromInt(0)) {
+ ASSERT(MarkCompactCollector::IsMarked(
+ HeapObject::cast(weak_collection_obj)));
+ JSWeakCollection* weak_collection =
+ reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+ ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
for (int i = 0; i < table->Capacity(); i++) {
if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
table->RemoveEntry(i);
}
}
- weak_map_obj = weak_map->next();
- weak_map->set_next(Smi::FromInt(0));
+ weak_collection_obj = weak_collection->next();
+ weak_collection->set_next(Smi::FromInt(0));
}
- set_encountered_weak_maps(Smi::FromInt(0));
+ set_encountered_weak_collections(Smi::FromInt(0));
}
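ProcessWeakCollections and ClearWeakCollections split the ephemeron work in two: while marking, values whose keys are already marked get marked too; after marking, entries whose keys never became live are dropped. The clearing half is easy to model with ordinary containers (mock types; V8 edits its ObjectHashTable in place):

#include <cstdio>
#include <iterator>
#include <unordered_map>
#include <unordered_set>

void ClearDeadEntries(std::unordered_map<int, int>* table,
                      const std::unordered_set<int>& marked_keys) {
  for (auto it = table->begin(); it != table->end();) {
    // Entries with an unmarked (unreachable) key are removed.
    it = marked_keys.count(it->first) ? std::next(it) : table->erase(it);
  }
}

int main() {
  std::unordered_map<int, int> table{{1, 10}, {2, 20}, {3, 30}};
  ClearDeadEntries(&table, {1, 3});                    // key 2 was never marked
  std::printf("%zu entries survive\n", table.size());  // 2
}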
@@ -2881,45 +2956,10 @@ void MarkCompactCollector::EvacuateNewSpace() {
// migrate live objects and write forwarding addresses. This stage puts
// new entries in the store buffer and may cause some pages to be marked
// scan-on-scavenge.
- SemiSpaceIterator from_it(from_bottom, from_top);
- for (HeapObject* object = from_it.Next();
- object != NULL;
- object = from_it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) {
- mark_bit.Clear();
- // Don't bother decrementing live bytes count. We'll discard the
- // entire page at the end.
- int size = object->Size();
- survivors_size += size;
-
- // Aggressively promote young survivors to the old space.
- if (TryPromoteObject(object, size)) {
- continue;
- }
-
- // Promotion failed. Just migrate object to another semispace.
- MaybeObject* allocation = new_space->AllocateRaw(size);
- if (allocation->IsFailure()) {
- if (!new_space->AddFreshPage()) {
- // Shouldn't happen. We are sweeping linearly, and to-space
- // has the same number of pages as from-space, so there is
- // always room.
- UNREACHABLE();
- }
- allocation = new_space->AllocateRaw(size);
- ASSERT(!allocation->IsFailure());
- }
- Object* target = allocation->ToObjectUnchecked();
-
- MigrateObject(HeapObject::cast(target)->address(),
- object->address(),
- size,
- NEW_SPACE);
- } else {
- // Mark dead objects in the new space with null in their map field.
- Memory::Address_at(object->address()) = NULL;
- }
+ NewSpacePageIterator it(from_bottom, from_top);
+ while (it.has_next()) {
+ NewSpacePage* p = it.next();
+ survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
}
heap_->IncrementYoungSurvivorsCounter(survivors_size);
@@ -2931,31 +2971,17 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
AlwaysAllocateScope always_allocate;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
- MarkBit::CellType* cells = p->markbits()->cells();
p->MarkSweptPrecisely();
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- Address cell_base = p->area_start();
- int cell_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base)));
-
int offsets[16];
- for (;
- cell_index < last_cell_index;
- cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT(static_cast<unsigned>(cell_index) ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
- if (cells[cell_index] == 0) continue;
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
+
+ if (*cell == 0) continue;
- int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+ int live_objects = MarkWordToObjectStarts(*cell, offsets);
for (int i = 0; i < live_objects; i++) {
Address object_addr = cell_base + offsets[i] * kPointerSize;
HeapObject* object = HeapObject::FromAddress(object_addr);
@@ -2980,7 +3006,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
}
// Clear marking bits for current cell.
- cells[cell_index] = 0;
+ *cell = 0;
}
p->ResetLiveBytes();
}
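MarkWordToObjectStarts, declared earlier and used throughout the sweepers, turns one 32-bit mark cell into the word offsets of up to 16 object starts (by sweep time every live object is black, pattern 10, so each set bit marks a start; objects span at least two words, hence the cap of 16). A plausible sketch of what it computes, not V8's actual implementation:

#include <cstdint>
#include <cstdio>

static int MarkWordToStarts(uint32_t cell, int* offsets) {
  int count = 0;
  int offset = 0;
  while (cell != 0) {
    int trailing_zeros = __builtin_ctz(cell);
    cell >>= trailing_zeros;
    offset += trailing_zeros;
    offsets[count++] = offset;  // a set mark bit is an object start
    cell >>= 1;
    offset += 1;
  }
  return count;
}

int main() {
  int offsets[16];
  int live = MarkWordToStarts((1u << 0) | (1u << 2) | (1u << 31), offsets);
  for (int i = 0; i < live; i++)
    std::printf("object starts at word %d\n", offsets[i]);  // 0, 2, 31
}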
@@ -3101,22 +3127,10 @@ static void SweepPrecisely(PagedSpace* space,
start_time = OS::TimeCurrentMillis();
}
- MarkBit::CellType* cells = p->markbits()->cells();
p->MarkSweptPrecisely();
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
Address free_start = p->area_start();
- int cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(free_start)));
-
ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- Address object_address = free_start;
int offsets[16];
SkipList* skip_list = p->skip_list();
@@ -3125,17 +3139,13 @@ static void SweepPrecisely(PagedSpace* space,
skip_list->Clear();
}
- for (;
- cell_index < last_cell_index;
- cell_index++, object_address += 32 * kPointerSize) {
- ASSERT(static_cast<unsigned>(cell_index) ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(object_address))));
- int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
+ int live_objects = MarkWordToObjectStarts(*cell, offsets);
int live_index = 0;
for ( ; live_objects != 0; live_objects--) {
- Address free_end = object_address + offsets[live_index++] * kPointerSize;
+ Address free_end = cell_base + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
space->Free(free_start, static_cast<int>(free_end - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
@@ -3165,7 +3175,7 @@ static void SweepPrecisely(PagedSpace* space,
free_start = free_end + size;
}
// Clear marking bits for current cell.
- cells[cell_index] = 0;
+ *cell = 0;
}
if (free_start != p->area_end()) {
space->Free(free_start, static_cast<int>(p->area_end() - free_start));
@@ -3340,7 +3350,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
StoreBufferRebuildScope scope(heap_,
heap_->store_buffer(),
&Heap::ScavengeStoreBufferCallback);
- heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+ heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
+ &UpdatePointer);
}
{ GCTracer::Scope gc_scope(tracer_,
@@ -3831,40 +3842,32 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
(mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
free_list == NULL));
- MarkBit::CellType* cells = p->markbits()->cells();
p->MarkSweptConservatively();
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_end())));
-
- int cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->area_start())));
-
intptr_t freed_bytes = 0;
-
- // This is the start of the 32 word block that we are currently looking at.
- Address block_address = p->area_start();
+ size_t size = 0;
// Skip over all the dead objects at the start of the page and mark them free.
- for (;
- cell_index < last_cell_index;
- cell_index++, block_address += 32 * kPointerSize) {
- if (cells[cell_index] != 0) break;
+ Address cell_base = 0;
+ MarkBit::CellType* cell = NULL;
+ MarkBitCellIterator it(p);
+ for (; !it.Done(); it.Advance()) {
+ cell_base = it.CurrentCellBase();
+ cell = it.CurrentCell();
+ if (*cell != 0) break;
}
- size_t size = block_address - p->area_start();
- if (cell_index == last_cell_index) {
+
+ if (it.Done()) {
+ size = p->area_end() - p->area_start();
freed_bytes += Free<mode>(space, free_list, p->area_start(),
static_cast<int>(size));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
+
// Grow the size of the start-of-page free space a little to get up to the
// first live object.
- Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
+ Address free_end = StartOfLiveObject(cell_base, *cell);
// Free the first free space.
size = free_end - p->area_start();
freed_bytes += Free<mode>(space, free_list, p->area_start(),
@@ -3876,45 +3879,40 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
// started. Unless we find a large free space in the bitmap we will not
// digest this pair into a real address. We start the iteration here at the
// first word in the marking bit map that indicates a live object.
- Address free_start = block_address;
- uint32_t free_start_cell = cells[cell_index];
-
- for ( ;
- cell_index < last_cell_index;
- cell_index++, block_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(block_address))));
- uint32_t cell = cells[cell_index];
- if (cell != 0) {
+ Address free_start = cell_base;
+ MarkBit::CellType free_start_cell = *cell;
+
+ for (; !it.Done(); it.Advance()) {
+ cell_base = it.CurrentCellBase();
+ cell = it.CurrentCell();
+ if (*cell != 0) {
// We have a live object. Check approximately whether it is more than 32
// words since the last live object.
- if (block_address - free_start > 32 * kPointerSize) {
+ if (cell_base - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
- if (block_address - free_start > 32 * kPointerSize) {
+ if (cell_base - free_start > 32 * kPointerSize) {
// Now that we know the exact start of the free space it still looks
// like we have a large enough free space to be worth bothering with,
// so now we need to find the start of the first live object at the
// end of the free space.
- free_end = StartOfLiveObject(block_address, cell);
+ free_end = StartOfLiveObject(cell_base, *cell);
freed_bytes += Free<mode>(space, free_list, free_start,
static_cast<int>(free_end - free_start));
}
}
// Update our undigested record of where the current free area started.
- free_start = block_address;
- free_start_cell = cell;
+ free_start = cell_base;
+ free_start_cell = *cell;
// Clear marking bits for current cell.
- cells[cell_index] = 0;
+ *cell = 0;
}
}
// Handle the free space at the end of the page.
- if (block_address - free_start > 32 * kPointerSize) {
+ if (cell_base - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
freed_bytes += Free<mode>(space, free_list, free_start,
- static_cast<int>(block_address - free_start));
+ static_cast<int>(p->area_end() - free_start));
}
p->ResetLiveBytes();
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index ab3711a386..4063bde2d3 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -695,9 +695,11 @@ class MarkCompactCollector {
bool TryPromoteObject(HeapObject* object, int object_size);
- inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
- inline void set_encountered_weak_maps(Object* weak_map) {
- encountered_weak_maps_ = weak_map;
+ inline Object* encountered_weak_collections() {
+ return encountered_weak_collections_;
+ }
+ inline void set_encountered_weak_collections(Object* weak_collection) {
+ encountered_weak_collections_ = weak_collection;
}
void InvalidateCode(Code* code);
@@ -893,15 +895,15 @@ class MarkCompactCollector {
// ClearNonLiveTransitions pass or by calling this function.
void ReattachInitialMaps();
- // Mark all values associated with reachable keys in weak maps encountered
- // so far. This might push new object or even new weak maps onto the
- // marking stack.
- void ProcessWeakMaps();
+ // Mark all values associated with reachable keys in weak collections
+ // encountered so far. This might push new objects or even new weak
+ // collections onto the marking stack.
+ void ProcessWeakCollections();
// After all reachable objects have been marked, those weak collection
// entries with an unreachable key are removed from all encountered weak
// collections. The linked list of all encountered weak collections is
// destroyed.
- void ClearWeakMaps();
+ void ClearWeakCollections();
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
@@ -919,6 +921,9 @@ class MarkCompactCollector {
// regions to each space's free list.
void SweepSpaces();
+ int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
+ NewSpacePage* p);
+
void EvacuateNewSpace();
void EvacuateLiveObjectsFromPage(Page* p);
@@ -940,7 +945,7 @@ class MarkCompactCollector {
Heap* heap_;
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
- Object* encountered_weak_maps_;
+ Object* encountered_weak_collections_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
@@ -949,6 +954,50 @@ class MarkCompactCollector {
};
+class MarkBitCellIterator BASE_EMBEDDED {
+ public:
+ explicit MarkBitCellIterator(MemoryChunk* chunk)
+ : chunk_(chunk) {
+ last_cell_index_ = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+ cell_base_ = chunk_->area_start();
+ cell_index_ = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_)));
+ cells_ = chunk_->markbits()->cells();
+ }
+
+ inline bool Done() { return cell_index_ == last_cell_index_; }
+
+ inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
+
+ inline MarkBit::CellType* CurrentCell() {
+ ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
+ return &cells_[cell_index_];
+ }
+
+ inline Address CurrentCellBase() {
+ ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
+ return cell_base_;
+ }
+
+ inline void Advance() {
+ cell_index_++;
+ cell_base_ += 32 * kPointerSize;
+ }
+
+ private:
+ MemoryChunk* chunk_;
+ MarkBit::CellType* cells_;
+ unsigned int last_cell_index_;
+ unsigned int cell_index_;
+ Address cell_base_;
+};
+
+
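The new iterator replaces four copies of the same cell-index arithmetic with a single cursor abstraction. Its shape is easy to see in miniature; the standalone analogue below uses mock bitmap data, whereas V8's version derives everything from the MemoryChunk:

#include <cstdint>
#include <cstdio>

class CellIterator {
 public:
  CellIterator(uint32_t* cells, int count, uintptr_t area_start)
      : cells_(cells), count_(count), index_(0), cell_base_(area_start) {}

  bool Done() const { return index_ == count_; }
  bool HasNext() const { return index_ < count_ - 1; }
  uint32_t* CurrentCell() { return &cells_[index_]; }
  uintptr_t CurrentCellBase() const { return cell_base_; }

  void Advance() {
    index_++;
    cell_base_ += 32 * sizeof(void*);  // each cell covers 32 words
  }

 private:
  uint32_t* cells_;
  int count_;
  int index_;
  uintptr_t cell_base_;
};

int main() {
  uint32_t cells[3] = {0, 1u << 4, 0};
  for (CellIterator it(cells, 3, 0x10000); !it.Done(); it.Advance()) {
    if (*it.CurrentCell() == 0) continue;  // the common fast path in the sweepers
    std::printf("live mark bits under base %#lx\n",
                (unsigned long)it.CurrentCellBase());
  }
}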
class SequentialSweepingScope BASE_EMBEDDED {
public:
explicit SequentialSweepingScope(MarkCompactCollector *collector) :
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 137d98fe7b..761b311371 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -73,7 +73,7 @@ var kMessages = {
invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"],
- null_to_object: ["Cannot convert null to object"],
+ undefined_or_null_to_object: ["Cannot convert undefined or null to object"],
reduce_no_initial: ["Reduce of empty array with no initial value"],
getter_must_be_callable: ["Getter must be a function: ", "%0"],
setter_must_be_callable: ["Setter must be a function: ", "%0"],
@@ -94,6 +94,7 @@ var kMessages = {
proxy_non_object_prop_names: ["Trap '", "%1", "' returned non-object ", "%0"],
proxy_repeated_prop_name: ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
invalid_weakmap_key: ["Invalid value used as weak map key"],
+ invalid_weakset_value: ["Invalid value used in weak set"],
not_date_object: ["this is not a Date object."],
observe_non_object: ["Object.", "%0", " cannot ", "%0", " non-object"],
observe_non_function: ["Object.", "%0", " cannot deliver to non-function"],
@@ -104,8 +105,6 @@ var kMessages = {
observe_perform_non_function: ["Cannot perform non-function"],
observe_notify_non_notifier: ["notify called on non-notifier object"],
proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
- parameterless_typed_array_constr:
- ["%0"," constructor should have at least one argument."],
not_typed_array: ["this is not a typed array."],
invalid_argument: ["invalid_argument"],
data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"],
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index c4fefcc512..a04d456ae9 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -501,11 +501,13 @@ bool Assembler::IsBranch(Instr instr) {
(opcode == COP1 && rs_field == BC1); // Coprocessor branch.
}
+
bool Assembler::IsEmittedConstant(Instr instr) {
uint32_t label_constant = GetLabelConst(instr);
return label_constant == 0; // Emitted label const in reg-exp engine.
}
+
bool Assembler::IsBeq(Instr instr) {
return GetOpcodeField(instr) == BEQ;
}
@@ -539,10 +541,12 @@ bool Assembler::IsJal(Instr instr) {
return GetOpcodeField(instr) == JAL;
}
+
bool Assembler::IsJr(Instr instr) {
return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}
+
bool Assembler::IsJalr(Instr instr) {
return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}
@@ -825,6 +829,7 @@ void Assembler::next(Label* L) {
}
}
+
bool Assembler::is_near(Label* L) {
if (L->is_bound()) {
return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
@@ -832,6 +837,7 @@ bool Assembler::is_near(Label* L) {
return false;
}
+
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
@@ -1669,6 +1675,7 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CFC1, rt, fs);
}
+
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
OS::MemCopy(&i, &d, 8);
@@ -1677,6 +1684,7 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
*hi = i >> 32;
}
+
// Arithmetic.
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
@@ -2257,6 +2265,7 @@ void Assembler::set_target_address_at(Address pc, Address target) {
CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
}
+
void Assembler::JumpLabelToJumpRegister(Address pc) {
// Address pc points to lui/ori instructions.
// Jump to label may follow at pc + 2 * kInstrSize.
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index d12c0dace4..8d533b36f4 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -583,7 +583,8 @@ class Assembler : public AssemblerBase {
LAST_CODE_MARKER,
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
// Code aging
- CODE_AGE_MARKER_NOP = 6
+ CODE_AGE_MARKER_NOP = 6,
+ CODE_AGE_SEQUENCE_NOP
};
// Type == 0 is the default non-marking nop. For mips this is a
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 35d21f05e6..3f5dca0009 100755..100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -208,7 +208,6 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
a3, // Scratch.
t0, // Scratch.
t1, // Scratch.
- false, // Is it a Smi?
&not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
__ bind(&argument_is_string);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 69b957afa8..f984b3a7b7 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -61,6 +61,16 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
}
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -227,8 +237,42 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
+void UnaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a2, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0, a3, a1, a2 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
#define __ ACCESS_MASM(masm)
+
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc);
@@ -1181,17 +1225,10 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Now that we have the types we might as well check for
// internalized-internalized.
- Label not_internalized;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ And(t2, a2, Operand(kIsNotStringMask | kIsInternalizedMask));
- __ Branch(&not_internalized, ne, t2,
- Operand(kInternalizedTag | kStringTag));
-
- __ And(a3, a3, Operand(kIsNotStringMask | kIsInternalizedMask));
- __ Branch(&return_not_equal, eq, a3,
- Operand(kInternalizedTag | kStringTag));
-
- __ bind(&not_internalized);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ Or(a2, a2, Operand(a3));
+ __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}
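The rewritten check leans on the new tag layout: with kInternalizedTag == 0 and kStringTag == 0, "internalized string" is the all-clear state of the two relevant bits, so OR-ing the two instance-type bytes and masking tests both objects with a single compare against zero. A worked example; the mask values mirror V8's layout at this revision but should be treated as assumptions of the sketch:

#include <cstdint>
#include <cstdio>

const uint32_t kIsNotStringMask = 0x80;
const uint32_t kIsNotInternalizedMask = 0x40;

bool BothInternalizedStrings(uint32_t type_a, uint32_t type_b) {
  // Any set bit in either type byte disqualifies the pair in one branch.
  return ((type_a | type_b) & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
}

int main() {
  std::printf("%d\n", BothInternalizedStrings(0x00, 0x04));  // 1: both internalized
  std::printf("%d\n", BothInternalizedStrings(0x00, 0x40));  // 0: right one is not
}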
@@ -1227,15 +1264,15 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
// a2 is object type of rhs.
Label object_test;
- STATIC_ASSERT(kInternalizedTag != 0);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
- __ And(at, a2, Operand(kIsInternalizedMask));
- __ Branch(possible_strings, eq, at, Operand(zero_reg));
+ __ And(at, a2, Operand(kIsNotInternalizedMask));
+ __ Branch(possible_strings, ne, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
__ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
- __ And(at, a3, Operand(kIsInternalizedMask));
- __ Branch(possible_strings, eq, at, Operand(zero_reg));
+ __ And(at, a3, Operand(kIsNotInternalizedMask));
+ __ Branch(possible_strings, ne, at, Operand(zero_reg));
// Both are internalized strings. We already checked they weren't the same
// pointer so they are not equal.
@@ -1266,7 +1303,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- bool object_is_smi,
Label* not_found) {
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
@@ -1289,37 +1325,35 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
Label is_smi;
Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
+ __ JumpIfSmi(object, &is_smi);
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
- }
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ __ lw(scratch1, MemOperand(scratch1, 0));
+ __ Xor(scratch1, scratch1, Operand(scratch2));
+ __ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer-sized fields.
+ __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ __ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ __ lw(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ __ Branch(not_found);
__ bind(&is_smi);
Register scratch = scratch1;
@@ -1332,7 +1366,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ Addu(scratch, number_string_cache, scratch);
// Check if the entry is the smi we are looking for.
- Register probe = mask;
__ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Branch(not_found, ne, object, Operand(probe));
@@ -1354,7 +1387,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ lw(a1, MemOperand(sp, 0));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
+ GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime);
__ DropAndRet(1);
__ bind(&runtime);
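The cache probe the stub now always emits hashes a heap number by XOR-ing the two 32-bit halves of its IEEE-754 representation and masking by the cache size; the shift by kPointerSizeLog2 + 1 reflects that each cache entry is a (number, string) pair. In portable C++ the index computation looks roughly like this (the mask value is illustrative):

#include <cstdint>
#include <cstdio>
#include <cstring>

int CacheEntryIndex(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);        // same reinterpretation as the two lw loads
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return static_cast<int>((lo ^ hi) & mask) * 2;  // two pointer-sized fields per entry
}

int main() {
  std::printf("entry index: %d\n", CacheEntryIndex(3.25, 0x3F));
}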
@@ -1586,294 +1619,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Argument is in a0 and v0 at this point, so we can overwrite a0.
- __ li(a2, Operand(Smi::FromInt(op_)));
- __ li(a1, Operand(Smi::FromInt(mode_)));
- __ li(a0, Operand(Smi::FromInt(operand_type_)));
- __ Push(v0, a2, a1, a0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow) {
- __ JumpIfNotSmi(a0, non_smi);
-
- // The result of negating zero or the smallest negative smi is not a smi.
- __ And(t0, a0, ~0x80000000);
- __ Branch(slow, eq, t0, Operand(zero_reg));
-
- // Return '0 - value'.
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, zero_reg, a0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi) {
- __ JumpIfNotSmi(a0, non_smi);
-
- // Flip bits and revert inverted smi-tag.
- __ Neg(v0, a0);
- __ And(v0, v0, ~kSmiTagMask);
- __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
- // a0 is a heap number. Get a new heap number in a1.
- if (mode_ == UNARY_OVERWRITE) {
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ Ret(USE_DELAY_SLOT);
- __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- } else {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a1, v0);
- __ pop(a0);
- }
-
- __ bind(&heapnumber_allocated);
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
- __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a1);
- }
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
- MacroAssembler* masm,
- Label* slow) {
- Label impossible;
-
- EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
- // Convert the heap number in a0 to an untagged integer in a1.
- __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ Neg(a1, a1);
- __ Addu(a2, a1, Operand(0x40000000));
- __ Branch(&try_float, lt, a2, Operand(zero_reg));
-
- // Tag the result as a smi and we're done.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a1);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- // Allocate a new heap number without zapping v0, which we need if it fails.
- __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(v0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a2, v0); // Move the new heap number into a2.
- // Get the heap number into v0, now that the new heap number is in a2.
- __ pop(v0);
- }
-
- // Convert the heap number in v0 to an untagged integer in a1.
- // This can't go slow-case because it's the same number we already
- // converted once again.
- __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
- // Negate the result.
- __ Xor(a1, a1, -1);
-
- __ bind(&heapnumber_allocated);
- __ mov(v0, a2); // Move newly allocated heap number to v0.
- }
-
- // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
- __ mtc1(a1, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&impossible);
- if (FLAG_debug_code) {
- __ stop("Incorrect assumption in bit-not stub");
- }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(
- MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ push(a0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
void BinaryOpStub::Initialize() {
platform_specific_bit_ = true; // FPU is a base requirement for V8.
}
@@ -2388,8 +2133,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ GetObjectType(right, a2, a2);
__ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
- StringAddStub string_add_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
@@ -2806,8 +2551,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ GetObjectType(left, a2, a2);
__ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
- StringAddStub string_add_left_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
@@ -2817,8 +2562,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ GetObjectType(right, a2, a2);
__ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
- StringAddStub string_add_right_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
@@ -3344,6 +3089,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -3987,7 +3733,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -4018,7 +3765,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
support_wrapper_);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -4088,7 +3836,8 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -5043,20 +4792,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ Branch(&done, eq, a3, Operand(a1));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, eq, a3, Operand(at));
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- __ JumpIfNotSmi(a3, &miss);
- if (FLAG_debug_code) {
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
- __ Assert(le, "Array function sentinel is not an ElementsKind",
- a3, Operand(terminal_kind_sentinel));
- }
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the
+ // megamorphic sentinel, then the cell holds either some other function
+ // or an AllocationSite. Do a map check on the object in a3.
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ lw(t1, FieldMemOperand(a3, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, t1, Operand(at));
// Make sure the function is the Array() function
__ LoadArrayFunction(a3);
@@ -5083,14 +4829,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadArrayFunction(a3);
__ Branch(&not_array_function, ne, a1, Operand(a3));
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- GetInitialFastElementsKind());
- __ li(a3, Operand(initial_kind_sentinel));
- __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ // The target function is the Array constructor.
+ // Create an AllocationSite if we don't already have one; store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ const RegList kSavedRegs =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6; // a2
+
+ __ MultiPush(kSavedRegs);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ MultiPop(kSavedRegs);
+ }
__ Branch(&done);
__ bind(&not_array_function);
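After this change the type-feedback cell can be in one of four states: uninitialized, caching a single function, caching an AllocationSite (the Array-constructor case, recognized by the map check above), or megamorphic. Stripped of assembly, the transition logic is roughly the following; the enum and helper are an illustration of the state machine, not V8 types:

enum class FeedbackState {
  kUninitialized,
  kMonomorphicFunction,
  kAllocationSite,
  kMegamorphic
};

FeedbackState RecordCallTarget(FeedbackState state,
                               bool same_target_as_cached,
                               bool target_is_array_function) {
  switch (state) {
    case FeedbackState::kUninitialized:
      // First call: Array() gets an AllocationSite, others cache the function.
      return target_is_array_function ? FeedbackState::kAllocationSite
                                      : FeedbackState::kMonomorphicFunction;
    case FeedbackState::kMonomorphicFunction:
    case FeedbackState::kAllocationSite:
      // A hit keeps the state; a miss degrades to megamorphic.
      return same_target_as_cached ? state : FeedbackState::kMegamorphic;
    case FeedbackState::kMegamorphic:
      return state;  // terminal
  }
  return state;
}

int main() { return 0; }  // transition table only; nothing to run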
@@ -6111,7 +5865,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfEitherSmi(a0, a1, &call_runtime);
// Load instance types.
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
@@ -6123,20 +5881,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Or(t4, t0, Operand(t1));
__ And(t4, t4, Operand(kIsNotStringMask));
__ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
// Both arguments are strings.
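[Annotation: the flag tests above rely on STRING_ADD_CHECK_BOTH being the bitwise OR of the LEFT and RIGHT bits, so the fully-masked comparison is true only when both checks are requested. A self-contained sketch of that dispatch; the bit values are assumptions inferred from usage, not copied from the header.

enum StringAddFlags {
  STRING_ADD_CHECK_NONE  = 0,
  STRING_ADD_CHECK_LEFT  = 1 << 0,  // assumed bit layout
  STRING_ADD_CHECK_RIGHT = 1 << 1,
  STRING_ADD_CHECK_BOTH  = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
};

// Which argument(s) still need a string check, mirroring the stub's branches.
const char* DescribeChecks(int flags) {
  if ((flags & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH)
    return "check both arguments";
  if ((flags & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT)
    return "convert left; right is known to be a string";
  if ((flags & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT)
    return "convert right; left is known to be a string";
  return "no checks; both are known to be strings";
}
]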
@@ -6187,7 +5941,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Branch(&longer_than_two, ne, t2, Operand(2));
// Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6231,7 +5985,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6314,7 +6068,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// t2: sum of lengths.
Label first_prepared, second_prepared;
__ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6400,7 +6154,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm);
// Build a frame.
{
@@ -6415,7 +6169,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm);
// Build a frame.
{
@@ -6467,7 +6221,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch2,
scratch3,
scratch4,
- false,
&not_cached);
__ mov(arg, scratch1);
__ sw(arg, MemOperand(sp, stack_offset));
@@ -6623,13 +6376,10 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
-
- __ And(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask));
- __ Branch(&miss, ne, tmp1, Operand(kInternalizedTag | kStringTag));
-
- __ And(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask));
- __ Branch(&miss, ne, tmp2, Operand(kInternalizedTag | kStringTag));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ Or(tmp1, tmp1, Operand(tmp2));
+ __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ Branch(&miss, ne, at, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
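[Annotation: with kInternalizedTag and kStringTag both zero, an internalized string has neither the "not a string" bit nor the "not internalized" bit set, so OR-ing the two instance types lets a single masked test cover both operands. A standalone model of the Or/And/Branch triple above; the bit positions are illustrative.

#include <cstdint>

const uint32_t kIsNotStringMask       = 1u << 6;  // illustrative positions
const uint32_t kIsNotInternalizedMask = 1u << 5;

// One test covers both operands: any offending bit from either side survives
// the OR and trips the mask.
bool BothInternalizedStrings(uint32_t type_left, uint32_t type_right) {
  uint32_t combined = type_left | type_right;      // __ Or(tmp1, tmp1, tmp2)
  return (combined & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
}
]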
@@ -6664,7 +6414,6 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
__ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
@@ -6738,11 +6487,11 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// strings.
if (equality) {
ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ And(tmp3, tmp1, Operand(tmp2));
- __ And(tmp5, tmp3, Operand(kIsInternalizedMask));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ Or(tmp3, tmp1, Operand(tmp2));
+ __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
Label is_symbol;
- __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
+ __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(a0));
@@ -6815,6 +6564,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
GenerateMiss(masm);
}
+
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
@@ -7145,6 +6895,7 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
+
#define REG(Name) { kRegister_ ## Name ## _Code }
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
@@ -7207,6 +6958,9 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
stub1.GetCode(isolate)->set_is_pregenerated(true);
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7612,10 +7366,6 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
ASSERT(FAST_DOUBLE_ELEMENTS == 4);
ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
// is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
__ And(at, a3, Operand(1));
@@ -7626,17 +7376,19 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry
+ // Fix kind and retry (only if we have an allocation site in the cell).
__ Addu(a3, a3, Operand(1));
- __ Branch(&normal_sequence, eq, a2, Operand(undefined_sentinel));
-
- // The type cell may have gone megamorphic, don't overwrite if so.
- __ lw(t1, FieldMemOperand(a2, kPointerSize));
- __ JumpIfNotSmi(t1, &normal_sequence);
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&normal_sequence, eq, a2, Operand(at));
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ lw(t1, FieldMemOperand(t1, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&normal_sequence, ne, t1, Operand(at));
// Save the resulting elements kind in type info
__ SmiTag(a3);
- __ sw(a3, FieldMemOperand(a2, kPointerSize));
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
__ bind(&normal_sequence);
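[Annotation: the holey-kind fixup now writes through to the AllocationSite's transition info, and only when the feedback cell really holds an AllocationSite. A scalar model of that fast path; the struct layout and smi-tagging details are stand-ins for the real heap objects.

struct AllocationSite { int transition_info; };  // smi-tagged in the real heap
struct Cell { AllocationSite* value; };

// Bump the packed kind to its holey variant and persist it only when the
// feedback cell actually holds an AllocationSite.
void FixKindAndRecord(Cell* cell, bool cell_holds_allocation_site,
                      int* elements_kind) {
  *elements_kind += 1;                      // packed -> holey (sets the low bit)
  if (!cell_holds_allocation_site) return;  // undefined/megamorphic: no write
  cell->value->transition_info = *elements_kind;
}
]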
@@ -7664,7 +7416,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7705,10 +7457,6 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@@ -7723,10 +7471,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Assert(eq, "Unexpected initial map for Array function",
t0, Operand(MAP_TYPE));
- // We should either have undefined in a2 or a valid cell
+ // We should either have undefined in a2 or a valid cell.
Label okay_here;
Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&okay_here, eq, a2, Operand(at));
__ lw(a3, FieldMemOperand(a2, 0));
__ Assert(eq, "Expected property cell in register a2",
a3, Operand(cell_map));
@@ -7735,9 +7484,20 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
- __ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&no_info, eq, a2, Operand(at));
__ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
- __ JumpIfNotSmi(a3, &no_info);
+
+ // The type cell's value may be undefined.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&no_info, eq, a3, Operand(at));
+
+ // The type cell has either an AllocationSite or a JSFunction.
+ __ lw(t0, FieldMemOperand(a3, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&no_info, ne, t0, Operand(at));
+
+ __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index bf5db10f63..1ae1d3454f 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -81,71 +81,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
- void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -210,21 +145,6 @@ class StringHelper : public AllStatic {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 1 << 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
- // Stub needs a frame before calling the runtime
- ERECT_FRAME = 1 << 3,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -363,7 +283,6 @@ class NumberToStringStub: public PlatformCodeStub {
Register scratch1,
Register scratch2,
Register scratch3,
- bool object_is_smi,
Label* not_found);
private:
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 7a95bc426b..3f74154f58 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -120,6 +120,7 @@ UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -136,6 +137,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->set_has_frame(false);
}
+
// -------------------------------------------------------------------------
// Code generators
@@ -143,7 +145,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -153,9 +155,9 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- t0 : scratch (elements)
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq,
- allocation_site_info_found);
+ ASSERT(allocation_memento_found != NULL);
+ masm->TestJSArrayForAllocationMemento(a2, t0, eq,
+ allocation_memento_found);
}
// Set transitioned map.
@@ -186,7 +188,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register scratch = t6;
if (mode == TRACK_ALLOCATION_SITE) {
- masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
+ masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -314,7 +316,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
+ masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -601,7 +603,7 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
if (!initialized) {
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
patcher.masm()->Push(ra, fp, cp, a1);
- patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
initialized = true;
}
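[Annotation: the young-code prologue must be byte-for-byte identical everywhere the aging machinery might see it, which is why the LoadRoot gives way to a dedicated marker nop here and in the codegen prologues below. A sketch of the recognition check, with the instruction words reduced to placeholders; only the shape of the comparison is taken from the code above.

#include <cstdint>
#include <cstring>

const uint32_t kNoCodeAgeSequence[3] = {
  0x00000001,  // Push(ra, fp, cp, a1)
  0x00000002,  // nop(CODE_AGE_SEQUENCE_NOP): the dedicated marker slot
  0x00000003,  // Addu(fp, sp, Operand(2 * kPointerSize))
};

bool IsYoungSequence(const uint32_t* code_start) {
  return std::memcmp(code_start, kNoCodeAgeSequence,
                     sizeof(kNoCodeAgeSequence)) == 0;
}
]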
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index a20ec5479a..2dd7a31f38 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -58,6 +58,7 @@ const char* Registers::names_[kNumSimuRegisters] = {
"pc"
};
+
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"},
@@ -67,6 +68,7 @@ const Registers::RegisterAlias Registers::aliases_[] = {
{kInvalidRegister, NULL}
};
+
const char* Registers::Name(int reg) {
const char* result;
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
@@ -106,11 +108,13 @@ const char* FPURegisters::names_[kNumFPURegisters] = {
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
+
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, NULL}
};
+
const char* FPURegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegisters)) {
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 6978cde52b..840462e43f 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -457,22 +457,12 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible (a3) (return
+ // Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register t0.
- if (type() == EAGER || type() == SOFT) {
- __ mov(a3, zero_reg);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(a3, ra);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(a3, ra);
- // Correct two words for bailout id and return address.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ __ mov(a3, ra);
+ // Correct one word for bailout id.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ Subu(t0, fp, t0);
@@ -521,13 +511,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ sdc1(f0, MemOperand(a1, dst_offset));
}
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == SOFT || type() == OSR) {
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ // Remove the bailout id and the saved registers from the stack.
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
@@ -628,25 +613,19 @@ void Deoptimizer::EntryGenerator::Generate() {
// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 7 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
Label table_start;
__ bind(&table_start);
for (int i = 0; i < count(); i++) {
Label start;
__ bind(&start);
- if (type() != EAGER && type() != SOFT) {
- // Emulate ia32 like call by pushing return address to stack.
- __ addiu(sp, sp, -2 * kPointerSize);
- __ sw(ra, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ addiu(sp, sp, -1 * kPointerSize);
- }
+ __ addiu(sp, sp, -1 * kPointerSize);
// Jump over the remaining deopt entries (including this one).
// This code is always reached by calling Jump, which puts the target (label
// start) into t9.
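[Annotation: dropping the two-word push shrinks every table entry from 9 to 7 instructions, and fixed-size entries are what let the deoptimizer turn an entry id into an address arithmetically. A minimal sketch using the sizes from the code above.

#include <cstdint>

const int kInstrSize = 4;                    // one MIPS instruction word
const int kTableEntrySize = 7 * kInstrSize;  // table_entry_size_ above

// One multiply-add instead of a side table of labels.
uintptr_t EntryAddress(uintptr_t table_start, int entry_id) {
  return table_start + static_cast<uintptr_t>(entry_id) * kTableEntrySize;
}
]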
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 032c1f5e5b..9c610c32f9 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -174,9 +174,7 @@ void FullCodeGenerator::Generate() {
// The following three instructions must remain together and unmodified for
// code aging to work properly.
__ Push(ra, fp, cp, a1);
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
// Adjust fp to point to caller's fp.
__ Addu(fp, sp, Operand(2 * kPointerSize));
info->AddNoFrameRange(0, masm_->pc_offset());
@@ -185,8 +183,11 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
- for (int i = 0; i < locals_count; i++) {
- __ push(at);
+ if (locals_count > 0) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(at);
+ }
}
}
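[Annotation: the locals-initialization change is a classic loop-invariant hoist: load the undefined root once, and skip even that when there are no stack locals. A scalar model of the same transformation; LoadUndefinedRoot is a hypothetical stand-in for the root load.

#include <vector>

static int LoadUndefinedRoot() { return -1; }  // stand-in for LoadRoot(at, ...)

void InitLocals(std::vector<int>* stack, int locals_count) {
  if (locals_count > 0) {
    const int undefined = LoadUndefinedRoot();  // one load, not one per local
    for (int i = 0; i < locals_count; i++) {
      stack->push_back(undefined);              // __ push(at), repeated
    }
  }
}
]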
@@ -3745,7 +3746,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -4400,10 +4401,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
const char* comment) {
// TODO(svenpanne): Allowing format strings in Comment would be nice here...
Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
+ UnaryOpStub stub(expr->op());
// GenericUnaryOpStub expects the argument to be in a0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
@@ -4472,7 +4470,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ JumpIfSmi(v0, &no_conversion);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ JumpIfSmi(v0, &no_conversion);
+ }
__ mov(a0, v0);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 896e03007b..ed67e829e3 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -330,9 +330,9 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
// bit test is enough.
// map: key map
__ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ And(at, hash, Operand(kIsInternalizedMask));
- __ Branch(not_unique, eq, at, Operand(zero_reg));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ And(at, hash, Operand(kIsNotInternalizedMask));
+ __ Branch(not_unique, ne, at, Operand(zero_reg));
__ bind(&unique);
}
@@ -1261,8 +1261,8 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -1275,7 +1275,7 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -1291,7 +1291,7 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1495,8 +1495,8 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
@@ -1518,8 +1518,8 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
@@ -1541,8 +1541,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::STUB, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 8109e8a288..65b4a575f7 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -159,9 +159,9 @@ bool LCodeGen::GeneratePrologue() {
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ Push(ra, fp, cp, a1);
- // Add unused load of ip to ensure prologue sequence is identical for
+ // Add unused nop to ensure prologue sequence is identical for
// full-codegen and lithium-codegen.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
// Adj. FP to point to saved FP.
__ Addu(fp, sp, Operand(2 * kPointerSize));
}
@@ -332,8 +332,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Label table_start;
__ bind(&table_start);
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
Address entry = deopt_jump_table_[i].address;
@@ -346,43 +345,22 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (deopt_jump_table_[i].needs_frame) {
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ Branch(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- __ Call(t9);
- }
+ if (needs_frame.is_bound()) {
+ __ Branch(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ Branch(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- __ Jump(t9);
- }
- }
- } else {
- if (type == Deoptimizer::LAZY) {
+ __ bind(&needs_frame);
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
__ Call(t9);
- } else {
- __ Jump(t9);
}
+ } else {
+ __ Call(t9);
}
}
__ RecordComment("]");
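[Annotation: since every deopt entry is now reached via Call, the former LAZY/non-LAZY split collapses into one shared frame-building tail that is bound on first use and branched to afterwards. A control-flow sketch; the helpers are hypothetical stand-ins for the emitted machine code sequences.

static void BranchToSharedFrameTail() {}  // __ Branch(&needs_frame)
static void BindAndEmitFrameTail() {}     // push cp/fp/ra + STUB marker, Call(t9)
static void CallEntryDirectly() {}        // __ Call(t9)

void EmitTableEntryTail(bool needs_frame, bool* frame_tail_bound) {
  if (needs_frame) {
    if (*frame_tail_bound) {
      BranchToSharedFrameTail();  // reuse the single bound label
    } else {
      *frame_tail_bound = true;
      BindAndEmitFrameTail();
    }
  } else {
    CallEntryDirectly();
  }
}
]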
@@ -766,7 +744,8 @@ void LCodeGen::DeoptimizeIf(Condition cc,
if (FLAG_deopt_every_n_times == 1 &&
!info()->IsStub() &&
info()->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ ASSERT(frame_is_built_);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
@@ -780,13 +759,8 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
if (cc == al && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
- }
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
@@ -1050,11 +1024,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1971,11 +1940,11 @@ int LCodeGen::GetNextEmittedBlock() const {
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
Condition cc, Register src1, const Operand& src2) {
- int right_block = instr->FalseDestination(chunk_);
int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
+ if (right_block == left_block || cc == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ Branch(chunk_->GetAssemblyLabel(right_block),
@@ -2015,6 +1984,25 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
+void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsSmiOrInteger32() || r.IsDouble()) {
+ EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsTaggedNumber()) {
+ EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+ }
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ __ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ EmitBranch(instr, eq, scratch0(), Operand(at));
+ }
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -2183,7 +2171,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Condition cond = TokenToCondition(instr->op(), false);
@@ -2801,6 +2789,19 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
}
+void LCodeGen::DoLinkObjectInList(LLinkObjectInList* instr) {
+ Register object = ToRegister(instr->object());
+ ExternalReference sites_list_address = instr->GetReference(isolate());
+
+ __ li(at, Operand(sites_list_address));
+ __ lw(at, MemOperand(at));
+ __ sw(at, FieldMemOperand(object,
+ instr->hydrogen()->store_field().offset()));
+ __ li(at, Operand(sites_list_address));
+ __ sw(object, MemOperand(at));
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
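[Annotation: DoLinkObjectInList above is a head insertion into an intrusive singly-linked list whose head pointer lives at an external address: the object's field takes the old head, then the head slot takes the object. A plain-C++ model; the field name is illustrative.

struct Site { Site* weak_next; };

void LinkObjectInList(Site** list_head_address, Site* object) {
  object->weak_next = *list_head_address;  // lw at,[head]; sw at,[object+off]
  *list_head_address = object;             // sw object,[head]
}
]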
@@ -3360,6 +3361,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ bind(&receiver_ok);
}
+
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
@@ -3886,6 +3888,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
__ sub_d(f0, f12, f14);
}
+
void LCodeGen::DoDeferredRandom(LRandom* instr) {
__ PrepareCallCFunction(1, scratch0());
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@@ -4043,7 +4046,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ li(a2, Operand(instr->hydrogen()->property_cell()));
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
@@ -4461,7 +4464,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
Label fail;
- __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
+ __ TestJSArrayForAllocationMemento(object, temp, ne, &fail);
DeoptimizeIf(al, instr->environment());
__ bind(&fail);
}
@@ -4470,7 +4473,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub stub(instr->hydrogen()->flags());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -5277,80 +5280,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ sw(scratch, FieldMemOperand(result, property_offset));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ li(a0, Operand(Smi::FromInt(instance_size)));
- __ push(a0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5713,33 +5642,6 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- Register obj = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- __ Push(key, obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 3d31ef10ba..1cba8cf468 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -148,7 +148,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 23a8f32f76..9705e1f41a 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -222,7 +222,6 @@ void LGapResolver::EmitMove(int index) {
ASSERT(destination->IsStackSlot());
__ sw(source_register, cgen_->ToMemOperand(destination));
}
-
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
@@ -259,6 +258,10 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ Move(result, v);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
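[Annotation: the new arm handles a constant source whose destination is a double register by materializing the literal directly instead of bouncing through a general register. A scalar model of the three-way dispatch; the stores stand in for the emitted moves.

void EmitConstantMove(int dst_kind,  // 0 = register, 1 = double reg, 2 = slot
                      double constant, long* greg, double* dreg, long* slot) {
  if (dst_kind == 0) {
    *greg = static_cast<long>(constant);   // li/LoadObject path
  } else if (dst_kind == 1) {
    *dreg = constant;                      // new arm: __ Move(result, v)
  } else {
    *slot = static_cast<long>(constant);   // stack-slot path, after cycles
  }
}
]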
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 638eaa4e8b..c64533cdfc 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -186,7 +186,8 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
return new(zone()) LDebugBreak();
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -276,6 +277,24 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
+ExternalReference LLinkObjectInList::GetReference(Isolate* isolate) {
+ switch (hydrogen()->known_list()) {
+ case HLinkObjectInList::ALLOCATION_SITE_LIST:
+ return ExternalReference::allocation_sites_list_address(isolate);
+ }
+
+ UNREACHABLE();
+ // Return a dummy value
+ return ExternalReference::isolate_address(isolate);
+}
+
+
+void LLinkObjectInList::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" offset %d", hydrogen()->store_field().offset());
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -329,7 +348,6 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -1605,8 +1623,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
@@ -1614,14 +1632,14 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
@@ -1918,6 +1936,18 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
+ return new(zone())
+ LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
+}
+
+
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -2023,6 +2053,13 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLinkObjectInList(HLinkObjectInList* instr) {
+ LOperand* object = UseRegister(instr->value());
+ LLinkObjectInList* result = new(zone()) LLinkObjectInList(object);
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2311,14 +2348,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2341,14 +2370,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), a0);
- LOperand* key = UseFixed(instr->key(), a1);
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2521,14 +2542,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* object = UseFixed(instr->enumerable(), a0);
LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 06d30d03de..83a37c6230 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -49,7 +49,6 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -81,7 +80,7 @@ class LCodeGen;
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -92,7 +91,6 @@ class LCodeGen;
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
@@ -106,7 +104,6 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
@@ -118,10 +115,12 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
+ V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
@@ -711,9 +710,9 @@ class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -721,8 +720,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -919,6 +919,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
+class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LIsNumberAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+};
+
+
class LIsStringAndBranch: public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1650,6 +1663,23 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
+class LLinkObjectInList: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LLinkObjectInList(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ ExternalReference GetReference(Isolate* isolate);
+
+ DECLARE_CONCRETE_INSTRUCTION(LinkObjectInList, "link-object-in-list")
+ DECLARE_HYDROGEN_ACCESSOR(LinkObjectInList)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2423,21 +2453,6 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
- public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
@@ -2524,20 +2539,6 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2559,20 +2560,6 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 47e6ff93c7..8a44185ed7 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -768,6 +768,7 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
+
//------------Pseudo-instructions-------------
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
@@ -1021,6 +1022,7 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kLoongson && fd.is(fs)) {
mfc1(t8, FPURegister::from_code(fs.code() + 1));
@@ -1031,6 +1033,7 @@ void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
}
}
+
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kLoongson && fd.is(fs)) {
mfc1(t8, FPURegister::from_code(fs.code() + 1));
@@ -2639,6 +2642,7 @@ void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
nop();
}
+
void MacroAssembler::DropAndRet(int drop) {
Ret(USE_DELAY_SLOT);
addiu(sp, sp, drop * kPointerSize);
@@ -3205,9 +3209,13 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
void MacroAssembler::JumpIfNotUniqueName(Register reg,
Label* not_unique_name) {
- STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
- Branch(not_unique_name, lt, reg, Operand(kIsInternalizedMask));
- Branch(not_unique_name, gt, reg, Operand(SYMBOL_TYPE));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ Branch(&succeed, eq, at, Operand(zero_reg));
+ Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
+
+ bind(&succeed);
}
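[Annotation: a unique name is an internalized string or a symbol; with both relevant tags zero, one masked test accepts the former and a single comparison accepts the latter. A standalone model of the rewritten check; the bit positions and symbol type value are illustrative placeholders.

#include <cstdint>

const uint32_t kIsNotStringMask       = 1u << 6;
const uint32_t kIsNotInternalizedMask = 1u << 5;
const uint32_t kSymbolType            = 0x80;  // stand-in for SYMBOL_TYPE

bool IsUniqueName(uint32_t instance_type) {
  if ((instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0)
    return true;                        // internalized string
  return instance_type == kSymbolType;  // otherwise it must be a symbol
}
]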
@@ -5474,26 +5482,26 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
}
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
Condition cond,
- Label* allocation_info_present) {
- Label no_info_available;
+ Label* allocation_memento_present) {
+ Label no_memento_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
Addu(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
- Branch(&no_info_available, lt, scratch_reg, Operand(new_space_start));
+ Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ Branch(&no_memento_available, lt, scratch_reg, Operand(new_space_start));
li(at, Operand(new_space_allocation_top));
lw(at, MemOperand(at));
- Branch(&no_info_available, gt, scratch_reg, Operand(at));
- lw(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
- Branch(allocation_info_present, cond, scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
+ Branch(&no_memento_available, gt, scratch_reg, Operand(at));
+ lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+ Branch(allocation_memento_present, cond, scratch_reg,
+ Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+ bind(&no_memento_available);
}
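[Annotation: an AllocationMemento, if present, sits immediately after the JSArray in new space, so the check computes the candidate address, rejects anything outside the currently-allocated part of new space, then compares the map word. A pointer-level model; the struct sizes and types are stand-ins for the real heap layout.

struct Map {};
struct Memento { const Map* map; };  // sizeof stands in for AllocationMemento::kSize

bool HasAllocationMemento(const char* array_end_addr,  // array + JSArray::kSize
                          const char* new_space_start,
                          const char* allocation_top,
                          const Map* memento_map) {
  const char* memento_end = array_end_addr + sizeof(Memento);
  if (memento_end < new_space_start || memento_end > allocation_top)
    return false;                        // not (or no longer) in new space
  const Memento* m = reinterpret_cast<const Memento*>(array_end_addr);
  return m->map == memento_map;          // the map word identifies a memento
}
]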
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index ffae2fd69e..bc3e7c48b4 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -1460,16 +1460,16 @@ class MacroAssembler: public Assembler {
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, jump to allocation_info_present
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg,
- Condition cond,
- Label* allocation_info_present);
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Condition cond,
+ Label* allocation_memento_present);
private:
void CallCFunctionHelper(Register function,
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 52211904d9..89d8e68d5e 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -429,89 +429,56 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
+void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
// Generate StoreTransition code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
-void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* miss_restore_name,
- Label* slow) {
+void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
// a0 : value.
Label exit;
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- // Ensure no transitions to deprecated maps are followed.
- __ CheckMapDeprecated(transition, scratch1, miss_label);
-
- // Check that we are allowed to write this.
- if (object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- // holder == object indicates that no property was found.
- if (lookup->holder() != *object) {
- holder = lookup->holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- Register holder_reg = CheckPrototypes(
- object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
- // If no property was found, and the holder (the last object in the
- // prototype chain) is in slow mode, we need to do a negative lookup on the
- // holder.
- if (lookup->holder() == *object) {
- if (holder->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(
- masm,
- Handle<GlobalObject>(GlobalObject::cast(holder)),
- name,
- scratch1,
- miss_restore_name);
- } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
- }
- }
- }
-
- Register storage_reg = name_reg;
-
if (details.type() == CONSTANT_FUNCTION) {
Handle<HeapObject> constant(
HeapObject::cast(descriptors->GetValue(descriptor)));
__ LoadHeapObject(scratch1, constant);
- __ Branch(miss_restore_name, ne, value_reg, Operand(scratch1));
+ __ Branch(miss_label, ne, value_reg, Operand(scratch1));
} else if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_restore_name);
+ __ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_restore_name);
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
@@ -525,7 +492,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_restore_name, DONT_DO_SMI_CHECK);
+ miss_label, DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
@@ -555,8 +522,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ li(scratch1, Operand(transition));
__ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
+ // Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
@@ -594,19 +560,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
if (!FLAG_track_fields || !representation.IsSmi()) {
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
// Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ mov(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
offset,
- name_reg,
+ storage_reg,
scratch1,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
@@ -626,19 +586,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
if (!FLAG_track_fields || !representation.IsSmi()) {
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
// Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ mov(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
offset,
- name_reg,
+ storage_reg,
receiver_reg,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
@@ -659,27 +613,18 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// a0 : value
Label exit;
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1247,6 +1192,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ li(scratch1, Operand(Handle<Map>(object->map())));
+
Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1348,7 +1297,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ Branch(success);
@@ -1358,6 +1308,17 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
}
+void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ b(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
+
+
Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
@@ -1399,7 +1360,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ Branch(&miss, ne, scratch2(), Operand(callback));
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
return reg;
}
@@ -1420,7 +1381,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
}
@@ -1744,11 +1705,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
- Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
- Handle<Cell> kind_feedback_cell =
- isolate()->factory()->NewCell(kind);
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
__ li(a0, Operand(argc));
- __ li(a2, Operand(kind_feedback_cell));
+ __ li(a2, Operand(site_feedback_cell));
__ li(a1, Operand(function));
ArrayConstructorStub stub(isolate());
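The Array-call stub's feedback cell changes payload here: instead of a bare Smi encoding the initial elements kind, the cell now holds an AllocationSite whose transition_info starts at that kind and can be generalized in place as the site observes transitions. Modeled with stand-in types:

    // Stand-in types; mirrors the three lines that build the feedback cell.
    struct AllocationSiteModel { int transition_info; };
    struct CellModel { AllocationSiteModel* site; };

    CellModel MakeSiteFeedbackCell(int initial_fast_elements_kind) {
      // was: the cell held Smi::FromInt(initial_fast_elements_kind) directly
      return CellModel{new AllocationSiteModel{initial_fast_elements_kind}};
    }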
@@ -2866,15 +2827,13 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(receiver(), &miss);
- CheckPrototypes(object, receiver(), holder,
- scratch1(), scratch2(), scratch3(), name, &miss);
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
// Stub never generated for non-global objects that require access
// checks.
@@ -2882,19 +2841,17 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ push(receiver()); // Receiver.
__ li(at, Operand(callback)); // Callback info.
- __ Push(at, this->name(), value());
+ __ push(at);
+ __ li(at, Operand(name));
+ __ Push(at, value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetICCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -3144,7 +3101,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Branch(&miss, eq, t0, Operand(at));
}
- HandlerFrontendFooter(&success, &miss);
+ HandlerFrontendFooter(name, &success, &miss);
__ bind(&success);
Counters* counters = isolate()->counters();
@@ -3157,7 +3114,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
}
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
CodeHandleList* handlers,
Handle<Name> name,
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 28b8fc81ba..28ef24de0f 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -1699,30 +1699,12 @@ FrameMirror.prototype.stepInPositions = function() {
FrameMirror.prototype.evaluate = function(source, disable_break,
opt_context_object) {
- var result_array = %DebugEvaluate(this.break_id_,
- this.details_.frameId(),
- this.details_.inlinedFrameIndex(),
- source,
- Boolean(disable_break),
- opt_context_object);
- // Silently ignore local variables changes if the frame is optimized.
- if (!this.isOptimizedFrame()) {
- var local_scope_on_stack = result_array[1];
- var local_scope_modifed = result_array[2];
- for (var n in local_scope_modifed) {
- var value_on_stack = local_scope_on_stack[n];
- var value_modifed = local_scope_modifed[n];
- if (value_on_stack !== value_modifed) {
- %SetScopeVariableValue(this.break_id_,
- this.details_.frameId(),
- this.details_.inlinedFrameIndex(),
- 0,
- n,
- value_modifed);
- }
- }
- }
- return MakeMirror(result_array[0]);
+ return MakeMirror(%DebugEvaluate(this.break_id_,
+ this.details_.frameId(),
+ this.details_.inlinedFrameIndex(),
+ source,
+ Boolean(disable_break),
+ opt_context_object));
};
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index a8d9b35f3b..c1edcb1b3a 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -309,6 +309,8 @@ void DumpException(Handle<Message> message) {
int main(int argc, char** argv) {
+ V8::InitializeICU();
+
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index ada7919d6d..a5c12bf009 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -28,12 +28,12 @@
"use strict";
var observationState = %GetObservationState();
-if (IS_UNDEFINED(observationState.observerInfoMap)) {
- observationState.observerInfoMap = %ObservationWeakMapCreate();
+if (IS_UNDEFINED(observationState.callbackInfoMap)) {
+ observationState.callbackInfoMap = %ObservationWeakMapCreate();
observationState.objectInfoMap = %ObservationWeakMapCreate();
observationState.notifierTargetMap = %ObservationWeakMapCreate();
observationState.pendingObservers = new InternalArray;
- observationState.observerPriority = 0;
+ observationState.nextCallbackPriority = 0;
}
function ObservationWeakMap(map) {
@@ -44,20 +44,20 @@ ObservationWeakMap.prototype = {
get: function(key) {
key = %UnwrapGlobalProxy(key);
if (!IS_SPEC_OBJECT(key)) return void 0;
- return %WeakMapGet(this.map_, key);
+ return %WeakCollectionGet(this.map_, key);
},
set: function(key, value) {
key = %UnwrapGlobalProxy(key);
if (!IS_SPEC_OBJECT(key)) return void 0;
- %WeakMapSet(this.map_, key, value);
+ %WeakCollectionSet(this.map_, key, value);
},
has: function(key) {
return !IS_UNDEFINED(this.get(key));
}
};
-var observerInfoMap =
- new ObservationWeakMap(observationState.observerInfoMap);
+var callbackInfoMap =
+ new ObservationWeakMap(observationState.callbackInfoMap);
var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
var notifierTargetMap =
new ObservationWeakMap(observationState.notifierTargetMap);
@@ -198,6 +198,22 @@ function AcceptArgIsValid(arg) {
return true;
}
+function EnsureCallbackPriority(callback) {
+ if (!callbackInfoMap.has(callback))
+ callbackInfoMap.set(callback, observationState.nextCallbackPriority++);
+}
+
+function NormalizeCallbackInfo(callback) {
+ var callbackInfo = callbackInfoMap.get(callback);
+ if (IS_NUMBER(callbackInfo)) {
+ var priority = callbackInfo;
+ callbackInfo = new InternalArray;
+ callbackInfo.priority = priority;
+ callbackInfoMap.set(callback, callbackInfo);
+ }
+ return callbackInfo;
+}
+
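The callback bookkeeping switches to a two-state encoding: while a callback has nothing pending, callbackInfoMap stores just its priority number (the cheap "optimized" state); the first enqueued record expands that into an InternalArray of pending records carrying the priority as a property, and delivery (CallbackDeliverPending below) collapses it back to the bare number. A self-contained C++ model of the state machine, with assumed simplifications (callbacks as ints, change records as strings):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Two-state info: 'optimized' means "bare priority, nothing pending".
    struct CallbackInfo {
      int priority = 0;
      bool optimized = true;
      std::vector<std::string> pending;
    };

    static std::map<int, CallbackInfo> callback_info_map;
    static int next_callback_priority = 0;

    void EnsureCallbackPriority(int callback) {
      if (!callback_info_map.count(callback))
        callback_info_map[callback] = {next_callback_priority++, true, {}};
    }

    // Assumes EnsureCallbackPriority already ran for this callback.
    void EnqueueToCallback(int callback, const std::string& record) {
      CallbackInfo& info = callback_info_map[callback];  // NormalizeCallbackInfo
      info.optimized = false;
      info.pending.push_back(record);
    }

    bool CallbackDeliverPending(int callback) {
      auto it = callback_info_map.find(callback);
      if (it == callback_info_map.end() || it->second.optimized) return false;
      std::vector<std::string> delivered;
      delivered.swap(it->second.pending);  // plays the %MoveArrayContents role
      it->second.optimized = true;         // back to the bare-priority state
      for (const auto& r : delivered) std::printf("deliver %s\n", r.c_str());
      return true;
    }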
function ObjectObserve(object, callback, accept) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["observe"]);
@@ -208,16 +224,13 @@ function ObjectObserve(object, callback, accept) {
if (!AcceptArgIsValid(accept))
throw MakeTypeError("observe_accept_invalid");
- if (!observerInfoMap.has(callback)) {
- observerInfoMap.set(callback, {
- pendingChangeRecords: null,
- priority: observationState.observerPriority++,
- });
- }
+ EnsureCallbackPriority(callback);
var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
- %SetIsObserved(object, true);
+ if (IS_UNDEFINED(objectInfo)) {
+ objectInfo = CreateObjectInfo(object);
+ %SetIsObserved(object);
+ }
EnsureObserverRemoved(objectInfo, callback);
@@ -241,12 +254,6 @@ function ObjectUnobserve(object, callback) {
return object;
EnsureObserverRemoved(objectInfo, callback);
-
- if (objectInfo.changeObservers.length === 0 &&
- objectInfo.inactiveObservers.length === 0) {
- %SetIsObserved(object, false);
- }
-
return object;
}
@@ -261,6 +268,13 @@ function ArrayUnobserve(object, callback) {
return ObjectUnobserve(object, callback);
}
+function EnqueueToCallback(callback, changeRecord) {
+ var callbackInfo = NormalizeCallbackInfo(callback);
+ observationState.pendingObservers[callbackInfo.priority] = callback;
+ callbackInfo.push(changeRecord);
+ %SetObserverDeliveryPending();
+}
+
function EnqueueChangeRecord(changeRecord, observers) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(changeRecord.name)) return;
@@ -270,15 +284,7 @@ function EnqueueChangeRecord(changeRecord, observers) {
if (IS_UNDEFINED(observer.accept[changeRecord.type]))
continue;
- var callback = observer.callback;
- var observerInfo = observerInfoMap.get(callback);
- observationState.pendingObservers[observerInfo.priority] = callback;
- %SetObserverDeliveryPending();
- if (IS_NULL(observerInfo.pendingChangeRecords)) {
- observerInfo.pendingChangeRecords = new InternalArray(changeRecord);
- } else {
- observerInfo.pendingChangeRecords.push(changeRecord);
- }
+ EnqueueToCallback(observer.callback, changeRecord);
}
}
@@ -398,21 +404,22 @@ function ObjectGetNotifier(object) {
return objectInfo.notifier;
}
-function DeliverChangeRecordsForObserver(observer) {
- var observerInfo = observerInfoMap.get(observer);
- if (IS_UNDEFINED(observerInfo))
+function CallbackDeliverPending(callback) {
+ var callbackInfo = callbackInfoMap.get(callback);
+ if (IS_UNDEFINED(callbackInfo) || IS_NUMBER(callbackInfo))
return false;
- var pendingChangeRecords = observerInfo.pendingChangeRecords;
- if (IS_NULL(pendingChangeRecords))
- return false;
+ // Clear the pending change records from callback and return it to its
+ // "optimized" state.
+ var priority = callbackInfo.priority;
+ callbackInfoMap.set(callback, priority);
- observerInfo.pendingChangeRecords = null;
- delete observationState.pendingObservers[observerInfo.priority];
+ delete observationState.pendingObservers[priority];
var delivered = [];
- %MoveArrayContents(pendingChangeRecords, delivered);
+ %MoveArrayContents(callbackInfo, delivered);
+
try {
- %Call(void 0, delivered, observer);
+ %Call(void 0, delivered, callback);
} catch (ex) {}
return true;
}
@@ -421,7 +428,7 @@ function ObjectDeliverChangeRecords(callback) {
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["deliverChangeRecords"]);
- while (DeliverChangeRecordsForObserver(callback)) {}
+ while (CallbackDeliverPending(callback)) {}
}
function DeliverChangeRecords() {
@@ -429,7 +436,7 @@ function DeliverChangeRecords() {
var pendingObservers = observationState.pendingObservers;
observationState.pendingObservers = new InternalArray;
for (var i in pendingObservers) {
- DeliverChangeRecordsForObserver(pendingObservers[i]);
+ CallbackDeliverPending(pendingObservers[i]);
}
}
}
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index c0c0e477bf..cb5f2b7900 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -181,6 +181,9 @@ void HeapObject::HeapObjectVerify() {
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapVerify();
break;
+ case JS_WEAK_SET_TYPE:
+ JSWeakSet::cast(this)->JSWeakSetVerify();
+ break;
case JS_REGEXP_TYPE:
JSRegExp::cast(this)->JSRegExpVerify();
break;
@@ -699,6 +702,14 @@ void JSWeakMap::JSWeakMapVerify() {
}
+void JSWeakSet::JSWeakSetVerify() {
+ CHECK(IsJSWeakSet());
+ JSObjectVerify();
+ VerifyHeapPointer(table());
+ CHECK(table()->IsHashTable() || table()->IsUndefined());
+}
+
+
void JSRegExp::JSRegExpVerify() {
JSObjectVerify();
CHECK(data()->IsUndefined() || data()->IsFixedArray());
@@ -755,6 +766,7 @@ void JSFunctionProxy::JSFunctionProxyVerify() {
VerifyPointer(construct_trap());
}
+
void JSArrayBuffer::JSArrayBufferVerify() {
CHECK(IsJSArrayBuffer());
JSObjectVerify();
@@ -875,6 +887,7 @@ void TemplateInfo::TemplateInfoVerify() {
VerifyPointer(property_list());
}
+
void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
CHECK(IsFunctionTemplateInfo());
TemplateInfoVerify();
@@ -912,10 +925,15 @@ void TypeSwitchInfo::TypeSwitchInfoVerify() {
}
-void AllocationSiteInfo::AllocationSiteInfoVerify() {
- CHECK(IsAllocationSiteInfo());
- VerifyHeapPointer(payload());
- CHECK(payload()->IsObject());
+void AllocationSite::AllocationSiteVerify() {
+ CHECK(IsAllocationSite());
+}
+
+
+void AllocationMemento::AllocationMementoVerify() {
+ CHECK(IsAllocationMemento());
+ VerifyHeapPointer(allocation_site());
+ CHECK(!IsValid() || GetAllocationSite()->IsAllocationSite());
}
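The verifier reflects the split of the old AllocationSiteInfo into two objects: a long-lived AllocationSite holding the transition feedback plus a weak_next link into a heap-global list, and a small AllocationMemento planted behind a new-space array that merely points back at its site. Roughly, with the authoritative layout living in objects.h:

    // Rough shape only; field names follow the ACCESSORS added in
    // objects-inl.h further down.
    struct AllocationSite {
      void* transition_info;      // elements-kind Smi or boilerplate JSArray
      AllocationSite* weak_next;  // weak list threaded through the heap
    };

    struct AllocationMemento {
      AllocationSite* allocation_site;  // the back-pointer verified above
    };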
@@ -1070,6 +1088,7 @@ void JSObject::SpillInformation::Clear() {
number_of_slow_unused_elements_ = 0;
}
+
void JSObject::SpillInformation::Print() {
PrintF("\n JSObject Spill Statistics (#%d):\n", number_of_objects_);
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index fe054dad4b..c12a12a6bc 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -221,9 +221,9 @@ bool Object::IsSpecFunction() {
bool Object::IsInternalizedString() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
- STATIC_ASSERT(kInternalizedTag != 0);
- return (type & (kIsNotStringMask | kIsInternalizedMask)) ==
- (kInternalizedTag | kStringTag);
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+ (kStringTag | kInternalizedTag);
}
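The tag flip works because one masked compare can test "is a string" and "is internalized" at once; with kNotInternalizedTag as the nonzero bit, internalized strings are exactly those whose not-internalized bit is clear. With illustrative constant values (the real ones live in src/objects.h):

    #include <cstdint>

    // Illustrative values; only the relationships matter here.
    constexpr uint32_t kIsNotStringMask       = 0x80;
    constexpr uint32_t kStringTag             = 0x00;
    constexpr uint32_t kIsNotInternalizedMask = 0x40;
    constexpr uint32_t kInternalizedTag       = 0x00;
    constexpr uint32_t kNotInternalizedTag    = 0x40;  // must be nonzero

    bool IsInternalized(uint32_t type) {
      // After the flip, the masked compare succeeds only for values that
      // are strings AND have the not-internalized bit clear.
      return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
             (kStringTag | kInternalizedTag);
    }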
@@ -319,9 +319,9 @@ StringShape::StringShape(InstanceType t)
bool StringShape::IsInternalized() {
ASSERT(valid());
- STATIC_ASSERT(kInternalizedTag != 0);
- return (type_ & (kIsNotStringMask | kIsInternalizedMask)) ==
- (kInternalizedTag | kStringTag);
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+ (kStringTag | kInternalizedTag);
}
@@ -567,12 +567,18 @@ TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+bool Object::IsJSWeakCollection() {
+ return IsJSWeakMap() || IsJSWeakSet();
+}
+
+
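JSWeakSet arrives as a sibling of JSWeakMap, and both now share a JSWeakCollection layout — the table/next accessors move to the shared base further down in this file, and the visitor tables in objects-visiting-inl.h route both through VisitWeakCollection. In miniature:

    // Field layout shared by both weak collection types; the real accessors
    // are generated by ACCESSORS(JSWeakCollection, ...) below.
    struct JSWeakCollection { void* table; void* next; };
    struct JSWeakMap : JSWeakCollection {};
    struct JSWeakSet : JSWeakCollection {};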
bool Object::IsDescriptorArray() {
return IsFixedArray();
}
@@ -1311,7 +1317,7 @@ bool JSObject::ShouldTrackAllocationInfo() {
return true;
}
- return AllocationSiteInfo::GetMode(GetElementsKind()) ==
+ return AllocationSite::GetMode(GetElementsKind()) ==
TRACK_ALLOCATION_SITE;
}
return false;
@@ -1320,7 +1326,7 @@ bool JSObject::ShouldTrackAllocationInfo() {
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
-AllocationSiteMode AllocationSiteInfo::GetMode(
+AllocationSiteMode AllocationSite::GetMode(
ElementsKind boilerplate_elements_kind) {
if (FLAG_track_allocation_sites &&
IsFastSmiElementsKind(boilerplate_elements_kind)) {
@@ -1331,11 +1337,11 @@ AllocationSiteMode AllocationSiteInfo::GetMode(
}
-AllocationSiteMode AllocationSiteInfo::GetMode(ElementsKind from,
- ElementsKind to) {
+AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
+ ElementsKind to) {
if (FLAG_track_allocation_sites &&
IsFastSmiElementsKind(from) &&
- (IsFastObjectElementsKind(to) || IsFastDoubleElementsKind(to))) {
+ IsMoreGeneralElementsKindTransition(from, to)) {
return TRACK_ALLOCATION_SITE;
}
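The predicate is both broadened and tightened: instead of enumerating object/double targets, any strictly more general transition out of a fast-Smi kind now tracks the allocation site. A compilable sketch over a simplified kind lattice (the real IsMoreGeneralElementsKindTransition also covers the holey variants):

    enum ElementsKind { FAST_SMI, FAST_HOLEY_SMI, FAST_DOUBLE, FAST_OBJECT };
    enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE,
                              TRACK_ALLOCATION_SITE };

    bool IsFastSmiElementsKind(ElementsKind k) {
      return k == FAST_SMI || k == FAST_HOLEY_SMI;
    }

    // Simplified: a transition is "more general" when the target can hold
    // a superset of the source's values; the ordering above encodes that.
    bool IsMoreGeneralTransition(ElementsKind from, ElementsKind to) {
      return to > from;
    }

    AllocationSiteMode GetMode(ElementsKind from, ElementsKind to) {
      return IsFastSmiElementsKind(from) && IsMoreGeneralTransition(from, to)
                 ? TRACK_ALLOCATION_SITE
                 : DONT_TRACK_ALLOCATION_SITE;
    }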
@@ -1688,6 +1694,8 @@ int JSObject::GetHeaderSize() {
return JSMap::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
+ case JS_WEAK_SET_TYPE:
+ return JSWeakSet::kSize;
case JS_REGEXP_TYPE:
return JSRegExp::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -2569,6 +2577,7 @@ CAST_ACCESSOR(JSFunctionProxy)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSMap)
CAST_ACCESSOR(JSWeakMap)
+CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FreeSpace)
@@ -3617,6 +3626,11 @@ bool Map::is_frozen() {
}
+bool Map::has_code_cache() {
+ return code_cache() != GetIsolate()->heap()->empty_fixed_array();
+}
+
+
bool Map::CanBeDeprecated() {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
@@ -3745,7 +3759,8 @@ InlineCacheState Code::ic_state() {
Code::ExtraICState Code::extra_ic_state() {
- ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+ ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
+ || ic_state() == DEBUG_STUB);
return ExtractExtraICStateFromFlags(flags());
}
@@ -3787,6 +3802,7 @@ int Code::major_key() {
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
kind() == COMPARE_NIL_IC ||
+ kind() == STORE_IC ||
kind() == LOAD_IC ||
kind() == KEYED_LOAD_IC ||
kind() == TO_BOOLEAN_IC);
@@ -4447,7 +4463,9 @@ ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
-ACCESSORS(AllocationSiteInfo, payload, Object, kPayloadOffset)
+ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
+ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
+ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
ACCESSORS(Script, source, Object, kSourceOffset)
ACCESSORS(Script, name, Object, kNameOffset)
@@ -4538,9 +4556,7 @@ SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
-SMI_ACCESSORS(SharedFunctionInfo,
- stress_deopt_counter,
- kStressDeoptCounterOffset)
+
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
@@ -4590,9 +4606,7 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, opt_count, kOptCountOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset)
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- stress_deopt_counter,
- kStressDeoptCounterOffset)
+
#endif
@@ -5087,8 +5101,8 @@ void JSProxy::InitializeBody(int object_size, Object* value) {
ACCESSORS(JSSet, table, Object, kTableOffset)
ACCESSORS(JSMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, next, Object, kNextOffset)
+ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
+ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
Address Foreign::foreign_address() {
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index f1616da1aa..91b1c2ec43 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -40,6 +40,11 @@ namespace internal {
static const char* TypeToString(InstanceType type);
+void MaybeObject::Print() {
+ Print(stdout);
+}
+
+
void MaybeObject::Print(FILE* out) {
Object* this_as_object;
if (ToObject(&this_as_object)) {
@@ -55,6 +60,11 @@ void MaybeObject::Print(FILE* out) {
}
+void MaybeObject::PrintLn() {
+ PrintLn(stdout);
+}
+
+
void MaybeObject::PrintLn(FILE* out) {
Print(out);
PrintF(out, "\n");
@@ -173,6 +183,9 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapPrint(out);
break;
+ case JS_WEAK_SET_TYPE:
+ JSWeakSet::cast(this)->JSWeakSetPrint(out);
+ break;
case FOREIGN_TYPE:
Foreign::cast(this)->ForeignPrint(out);
break;
@@ -549,6 +562,7 @@ static const char* TypeToString(InstanceType type) {
case JS_ARRAY_TYPE: return "JS_ARRAY";
case JS_PROXY_TYPE: return "JS_PROXY";
case JS_WEAK_MAP_TYPE: return "JS_WEAK_MAP";
+ case JS_WEAK_SET_TYPE: return "JS_WEAK_SET";
case JS_REGEXP_TYPE: return "JS_REGEXP";
case JS_VALUE_TYPE: return "JS_VALUE";
case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
@@ -760,6 +774,7 @@ static const char* const weekdays[] = {
"???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
};
+
void JSDate::JSDatePrint(FILE* out) {
HeapObject::PrintHeader(out, "JSDate");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
@@ -813,6 +828,15 @@ void JSWeakMap::JSWeakMapPrint(FILE* out) {
}
+void JSWeakSet::JSWeakSetPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSWeakSet");
+ PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - table = ");
+ table()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
void JSArrayBuffer::JSArrayBufferPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSArrayBuffer");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
@@ -1116,11 +1140,15 @@ void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
}
-void AllocationSiteInfo::AllocationSiteInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AllocationSiteInfo");
- PrintF(out, " - payload: ");
- if (payload()->IsCell()) {
- Cell* cell = Cell::cast(payload());
+void AllocationSite::AllocationSitePrint(FILE* out) {
+ HeapObject::PrintHeader(out, "AllocationSite");
+ PrintF(out, " - weak_next: ");
+ weak_next()->ShortPrint(out);
+ PrintF(out, "\n");
+
+ PrintF(out, " - transition_info: ");
+ if (transition_info()->IsCell()) {
+ Cell* cell = Cell::cast(transition_info());
Object* cell_contents = cell->value();
if (cell_contents->IsSmi()) {
ElementsKind kind = static_cast<ElementsKind>(
@@ -1130,19 +1158,30 @@ void AllocationSiteInfo::AllocationSiteInfoPrint(FILE* out) {
PrintF(out, "\n");
return;
}
- } else if (payload()->IsJSArray()) {
+ } else if (transition_info()->IsJSArray()) {
PrintF(out, "Array literal ");
- payload()->ShortPrint(out);
+ transition_info()->ShortPrint(out);
PrintF(out, "\n");
return;
}
- PrintF(out, "unknown payload ");
- payload()->ShortPrint(out);
+ PrintF(out, "unknown transition_info");
+ transition_info()->ShortPrint(out);
PrintF(out, "\n");
}
+void AllocationMemento::AllocationMementoPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "AllocationMemento");
+ PrintF(out, " - allocation site: ");
+ if (IsValid()) {
+ GetAllocationSite()->Print();
+ } else {
+ PrintF(out, "<invalid>\n");
+ }
+}
+
+
void Script::ScriptPrint(FILE* out) {
HeapObject::PrintHeader(out, "Script");
PrintF(out, "\n - source: ");
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index cfb7d4461f..9398d6dfea 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -89,6 +89,8 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
+ table_.Register(kVisitJSWeakSet, &JSObjectVisitor::Visit);
+
table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
table_.template RegisterSpecializations<DataObjectVisitor,
@@ -136,8 +138,8 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
map->GetHeap(),
HeapObject::RawField(object,
JSTypedArray::kWeakNextOffset + kPointerSize),
- HeapObject::RawField(object, JSTypedArray::kSize));
- return JSTypedArray::kSize;
+ HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
+ return JSTypedArray::kSizeWithInternalFields;
}
@@ -152,8 +154,8 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(
map->GetHeap(),
HeapObject::RawField(object,
JSDataView::kWeakNextOffset + kPointerSize),
- HeapObject::RawField(object, JSDataView::kSize));
- return JSDataView::kSize;
+ HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
+ return JSDataView::kSizeWithInternalFields;
}
@@ -185,6 +187,11 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitNativeContext, &VisitNativeContext);
+ table_.Register(kVisitAllocationSite,
+ &FixedBodyVisitor<StaticVisitor,
+ AllocationSite::BodyDescriptor,
+ void>::Visit);
+
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
@@ -193,7 +200,9 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
- table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitJSWeakMap);
+ table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitWeakCollection);
+
+ table_.Register(kVisitJSWeakSet, &StaticVisitor::VisitWeakCollection);
table_.Register(kVisitOddball,
&FixedBodyVisitor<StaticVisitor,
@@ -522,7 +531,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
map->GetHeap(),
HeapObject::RawField(object,
JSTypedArray::kWeakNextOffset + kPointerSize),
- HeapObject::RawField(object, JSTypedArray::kSize));
+ HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
}
@@ -537,7 +546,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(
map->GetHeap(),
HeapObject::RawField(object,
JSDataView::kWeakNextOffset + kPointerSize),
- HeapObject::RawField(object, JSDataView::kSize));
+ HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
}
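The four typed-array and data-view hunks above all make the same fix: the pointer-visiting range grows from kSize to kSizeWithInternalFields, because these objects now reserve embedder internal fields past their fixed header and those slots hold tagged values the GC must trace. The visitor pattern, schematically:

    // Schematic only: visit every tagged slot in [from, to), one pointer
    // at a time, as VisitPointers does over the RawField ranges above.
    void VisitTaggedRange(void** from, void** to, void (*visit)(void**)) {
      for (void** slot = from; slot < to; ++slot) visit(slot);
    }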
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 6502209798..cd46013398 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -110,6 +110,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_WEAK_MAP_TYPE:
return kVisitJSWeakMap;
+ case JS_WEAK_SET_TYPE:
+ return kVisitJSWeakSet;
+
case JS_REGEXP_TYPE:
return kVisitJSRegExp;
@@ -182,6 +185,10 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ if (instance_type == ALLOCATION_SITE_TYPE) {
+ return kVisitAllocationSite;
+ }
+
return GetVisitorIdForSize(kVisitStruct,
kVisitStructGeneric,
instance_size);
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index c2ab45df1d..32e457b869 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -55,6 +55,7 @@ class StaticVisitorBase : public AllStatic {
V(FixedArray) \
V(FixedDoubleArray) \
V(NativeContext) \
+ V(AllocationSite) \
V(DataObject2) \
V(DataObject3) \
V(DataObject4) \
@@ -93,6 +94,7 @@ class StaticVisitorBase : public AllStatic {
V(SharedFunctionInfo) \
V(JSFunction) \
V(JSWeakMap) \
+ V(JSWeakSet) \
V(JSArrayBuffer) \
V(JSTypedArray) \
V(JSDataView) \
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index aa678765dd..1967b1324d 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -551,7 +551,9 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
// No accessible property found.
*attributes = ABSENT;
Heap* heap = name->GetHeap();
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+ Isolate* isolate = heap->isolate();
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return heap->undefined_value();
}
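This RETURN_IF_SCHEDULED_EXCEPTION is the first of many in this file: the embedder callback run by ReportFailedAccessCheck may schedule an exception, and previously these call sites fell through and returned undefined (or false), silently swallowing it. The same line is added after every ReportFailedAccessCheck below. A toy model of the before/after behavior:

    #include <optional>

    struct IsolateModel { bool scheduled_exception = false; };

    // The embedder callback run on a failed access check may throw;
    // model that as a flag on the isolate.
    void ReportFailedAccessCheck(IsolateModel& isolate, bool callback_throws) {
      isolate.scheduled_exception = callback_throws;
    }

    // An empty optional propagates the scheduled exception (the job the
    // macro does for MaybeObject* results); the old code returned the
    // "undefined" fallback regardless.
    std::optional<int> GetPropertyChecked(IsolateModel& isolate, bool allowed,
                                          bool callback_throws) {
      if (!allowed) {
        ReportFailedAccessCheck(isolate, callback_throws);
        if (isolate.scheduled_exception) return std::nullopt;
        return 0;  // stands in for undefined
      }
      return 42;  // the property value
    }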
@@ -630,12 +632,23 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
}
-Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) {
+Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetNormalizedProperty(result, *value),
+ Object);
+}
+
+
+MaybeObject* JSObject::SetNormalizedProperty(LookupResult* result,
+ Object* value) {
ASSERT(!HasFastProperties());
if (IsGlobalObject()) {
PropertyCell* cell = PropertyCell::cast(
property_dictionary()->ValueAt(result->GetDictionaryEntry()));
- cell->set_value(value);
+ MaybeObject* maybe_type = cell->SetValueInferType(value);
+ if (maybe_type->IsFailure()) return maybe_type;
} else {
property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
}
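cell->set_value() becomes cell->SetValueInferType() throughout this file: property cells now track a type for their contents, and widening that type can allocate, so every store site must propagate a possible failure — hence the maybe_type->IsFailure() checks threaded through these hunks. A toy widening lattice:

    // Toy lattice only; the real cell types live on PropertyCell, and
    // widening there may allocate, which is why the store can now fail.
    enum class CellType { kUninitialized, kSmi, kAny };

    CellType Widen(CellType current, bool stored_value_is_smi) {
      switch (current) {
        case CellType::kUninitialized:
        case CellType::kSmi:
          return stored_value_is_smi ? CellType::kSmi : CellType::kAny;
        case CellType::kAny:
          return CellType::kAny;
      }
      return CellType::kAny;  // unreachable
    }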
@@ -691,7 +704,8 @@ MaybeObject* JSObject::SetNormalizedProperty(Name* name,
if (IsGlobalObject()) {
PropertyCell* cell =
PropertyCell::cast(property_dictionary()->ValueAt(entry));
- cell->set_value(value);
+ MaybeObject* maybe_type = cell->SetValueInferType(value);
+ if (maybe_type->IsFailure()) return maybe_type;
// Please note we have to update the property details.
property_dictionary()->DetailsAtPut(entry, details);
} else {
@@ -701,44 +715,54 @@ MaybeObject* JSObject::SetNormalizedProperty(Name* name,
}
-MaybeObject* JSObject::DeleteNormalizedProperty(Name* name, DeleteMode mode) {
- ASSERT(!HasFastProperties());
- NameDictionary* dictionary = property_dictionary();
- int entry = dictionary->FindEntry(name);
+// TODO(mstarzinger): Temporary wrapper until target is handlified.
+Handle<NameDictionary> NameDictionaryShrink(Handle<NameDictionary> dict,
+ Handle<Name> name) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(), dict->Shrink(*name), NameDictionary);
+}
+
+
+static void CellSetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION_VOID(cell->GetIsolate(), cell->SetValueInferType(*value));
+}
+
+
+Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode) {
+ ASSERT(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+ Handle<NameDictionary> dictionary(object->property_dictionary());
+ int entry = dictionary->FindEntry(*name);
if (entry != NameDictionary::kNotFound) {
// If we have a global object set the cell to the hole.
- if (IsGlobalObject()) {
+ if (object->IsGlobalObject()) {
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsDontDelete()) {
- if (mode != FORCE_DELETION) return GetHeap()->false_value();
+ if (mode != FORCE_DELETION) return isolate->factory()->false_value();
// When forced to delete global properties, we have to make a
// map change to invalidate any ICs that think they can load
// from the DontDelete cell without checking if it contains
// the hole value.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
+ Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
ASSERT(new_map->is_dictionary_map());
- set_map(new_map);
+ object->set_map(*new_map);
}
- PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(entry));
- cell->set_value(cell->GetHeap()->the_hole_value());
+ Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
+ CellSetValueInferType(cell, isolate->factory()->the_hole_value());
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
- Object* deleted = dictionary->DeleteProperty(entry, mode);
- if (deleted == GetHeap()->true_value()) {
- FixedArray* new_properties = NULL;
- MaybeObject* maybe_properties = dictionary->Shrink(name);
- if (!maybe_properties->To(&new_properties)) {
- return maybe_properties;
- }
- set_properties(new_properties);
+ Handle<Object> deleted(dictionary->DeleteProperty(entry, mode), isolate);
+ if (*deleted == isolate->heap()->true_value()) {
+ Handle<NameDictionary> new_properties =
+ NameDictionaryShrink(dictionary, name);
+ object->set_properties(*new_properties);
}
return deleted;
}
}
- return GetHeap()->true_value();
+ return isolate->factory()->true_value();
}
@@ -911,6 +935,7 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
Isolate* isolate = heap->isolate();
if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return heap->undefined_value();
}
}
@@ -1318,6 +1343,10 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JS WeakMap>");
break;
}
+ case JS_WEAK_SET_TYPE: {
+ accumulator->Add("<JS WeakSet>");
+ break;
+ }
case JS_REGEXP_TYPE: {
accumulator->Add("<JS RegExp>");
break;
@@ -1628,6 +1657,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_SET_TYPE:
case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
@@ -1689,7 +1719,11 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
- StructBodyDescriptor::IterateBody(this, object_size, v);
+ if (type == ALLOCATION_SITE_TYPE) {
+ AllocationSite::BodyDescriptor::IterateBody(this, v);
+ } else {
+ StructBodyDescriptor::IterateBody(this, object_size, v);
+ }
break;
default:
PrintF("Unknown type: %d\n", type);
@@ -1930,7 +1964,9 @@ MaybeObject* JSObject::AddSlowProperty(Name* name,
int entry = dict->FindEntry(name);
if (entry != NameDictionary::kNotFound) {
store_value = dict->ValueAt(entry);
- PropertyCell::cast(store_value)->set_value(value);
+ MaybeObject* maybe_type =
+ PropertyCell::cast(store_value)->SetValueInferType(value);
+ if (maybe_type->IsFailure()) return maybe_type;
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
@@ -1944,7 +1980,9 @@ MaybeObject* JSObject::AddSlowProperty(Name* name,
heap->AllocatePropertyCell(value);
if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
}
- PropertyCell::cast(store_value)->set_value(value);
+ MaybeObject* maybe_type =
+ PropertyCell::cast(store_value)->SetValueInferType(value);
+ if (maybe_type->IsFailure()) return maybe_type;
}
PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
Object* result;
@@ -3225,7 +3263,6 @@ void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- // A GlobalProxy's prototype should always be a proper JSObject.
return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
}
@@ -3347,6 +3384,7 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
HandleScope scope(isolate);
Handle<Object> value_handle(value, isolate);
isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
}
@@ -3510,43 +3548,38 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
}
-MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
- Name* name_raw, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> receiver(this);
- Handle<Object> name(name_raw, isolate);
+Handle<Object> JSProxy::DeletePropertyWithHandler(
+ Handle<JSProxy> object, Handle<Name> name, DeleteMode mode) {
+ Isolate* isolate = object->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return isolate->heap()->false_value();
+ if (name->IsSymbol()) return isolate->factory()->false_value();
Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "delete", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ Handle<Object> result = object->CallTrap(
+ "delete", Handle<Object>(), ARRAY_SIZE(args), args);
+ if (isolate->has_pending_exception()) return Handle<Object>();
bool result_bool = result->BooleanValue();
if (mode == STRICT_DELETION && !result_bool) {
- Handle<Object> handler(receiver->handler(), isolate);
+ Handle<Object> handler(object->handler(), isolate);
Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("delete"));
Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_failed", HandleVector(args, ARRAY_SIZE(args)));
isolate->Throw(*error);
- return Failure::Exception();
+ return Handle<Object>();
}
- return isolate->heap()->ToBoolean(result_bool);
+ return isolate->factory()->ToBoolean(result_bool);
}
-MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
- uint32_t index,
- DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSProxy::DeleteElementWithHandler(
+ Handle<JSProxy> object, uint32_t index, DeleteMode mode) {
+ Isolate* isolate = object->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return JSProxy::DeletePropertyWithHandler(*name, mode);
+ return JSProxy::DeletePropertyWithHandler(object, name, mode);
}
@@ -3579,17 +3612,24 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
// Convert result to PropertyAttributes.
Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("enumerable"));
+ STATIC_ASCII_VECTOR("enumerable_"));
Handle<Object> enumerable(v8::internal::GetProperty(isolate, desc, enum_n));
if (isolate->has_pending_exception()) return NONE;
Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("configurable"));
+ STATIC_ASCII_VECTOR("configurable_"));
Handle<Object> configurable(v8::internal::GetProperty(isolate, desc, conf_n));
if (isolate->has_pending_exception()) return NONE;
Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("writable"));
+ STATIC_ASCII_VECTOR("writable_"));
Handle<Object> writable(v8::internal::GetProperty(isolate, desc, writ_n));
if (isolate->has_pending_exception()) return NONE;
+ if (!writable->BooleanValue()) {
+ Handle<String> set_n = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("set_"));
+ Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_n));
+ if (isolate->has_pending_exception()) return NONE;
+ writable = isolate->factory()->ToBoolean(!setter->IsUndefined());
+ }
if (configurable->IsFalse()) {
Handle<String> trap = isolate->factory()->InternalizeOneByteString(
@@ -3997,7 +4037,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
PropertyAttributes old_attributes = ABSENT;
bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
- if (is_observed) {
+ if (is_observed && lookup.IsProperty()) {
if (lookup.IsDataProperty()) old_value = Object::GetProperty(self, name);
old_attributes = lookup.GetAttributes();
}
@@ -4947,52 +4987,52 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
}
-MaybeObject* JSObject::DeletePropertyPostInterceptor(Name* name,
- DeleteMode mode) {
+Handle<Object> JSObject::DeletePropertyPostInterceptor(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode) {
// Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsFound()) return GetHeap()->true_value();
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LocalLookupRealNamedProperty(*name, &result);
+ if (!result.IsFound()) return isolate->factory()->true_value();
// Normalize object if needed.
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
- return DeleteNormalizedProperty(name, mode);
+ return DeleteNormalizedProperty(object, name, mode);
}
-MaybeObject* JSObject::DeletePropertyWithInterceptor(Name* name) {
+Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
+ Handle<Name> name) {
+ Isolate* isolate = object->GetIsolate();
+
// TODO(rossberg): Support symbols in the API.
- if (name->IsSymbol()) return GetHeap()->false_value();
+ if (name->IsSymbol()) return isolate->factory()->false_value();
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<String> name_handle(String::cast(name));
- Handle<JSObject> this_handle(this);
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
if (!interceptor->deleter()->IsUndefined()) {
v8::NamedPropertyDeleter deleter =
v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ ApiNamedPropertyAccess("interceptor-named-delete", *object, *name));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
v8::Handle<v8::Boolean> result =
- args.Call(deleter, v8::Utils::ToLocal(name_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name)));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox CustomArguments::kReturnValueOffset before returning.
+ return handle(*result_internal, isolate);
}
}
- MaybeObject* raw_result =
- this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> result =
+ DeletePropertyPostInterceptor(object, name, NORMAL_DELETION);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return result;
}
@@ -5029,9 +5069,10 @@ MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
- uint32_t index) {
+ uint32_t index,
+ DeleteMode mode) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+ obj->DeleteElement(index, mode),
Object);
}
@@ -5042,6 +5083,7 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
if (IsAccessCheckNeeded() &&
!isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
}
@@ -5102,107 +5144,99 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
}
-Handle<Object> JSObject::DeleteProperty(Handle<JSObject> obj,
- Handle<Name> prop) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
- Object);
-}
-
-
-MaybeObject* JSObject::DeleteProperty(Name* name, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
+Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode) {
+ Isolate* isolate = object->GetIsolate();
// ECMA-262, 3rd, 8.6.2.5
ASSERT(name->IsName());
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return isolate->heap()->false_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
+ if (object->IsJSGlobalProxy()) {
+ Object* proto = object->GetPrototype();
+ if (proto->IsNull()) return isolate->factory()->false_value();
ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
+ return JSGlobalObject::DeleteProperty(
+ handle(JSGlobalObject::cast(proto)), name, mode);
}
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- return DeleteElement(index, mode);
+ return DeleteElement(object, index, mode);
}
LookupResult lookup(isolate);
- LocalLookup(name, &lookup, true);
- if (!lookup.IsFound()) return isolate->heap()->true_value();
+ object->LocalLookup(*name, &lookup, true);
+ if (!lookup.IsFound()) return isolate->factory()->true_value();
// Ignore attributes if forcing a deletion.
if (lookup.IsDontDelete() && mode != FORCE_DELETION) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> args[2] = { Handle<Object>(name, isolate),
- Handle<Object>(this, isolate) };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, 2)));
+ Handle<Object> args[2] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_delete_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- return isolate->heap()->false_value();
+ return isolate->factory()->false_value();
}
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Name> hname(name);
-
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation && object->map()->is_observed();
if (is_observed && lookup.IsDataProperty()) {
- old_value = Object::GetProperty(self, hname);
+ old_value = Object::GetProperty(object, name);
}
- MaybeObject* result;
+ Handle<Object> result;
// Check for interceptor.
if (lookup.IsInterceptor()) {
// Skip interceptor if forcing a deletion.
if (mode == FORCE_DELETION) {
- result = self->DeletePropertyPostInterceptor(*hname, mode);
+ result = DeletePropertyPostInterceptor(object, name, mode);
} else {
- result = self->DeletePropertyWithInterceptor(*hname);
+ result = DeletePropertyWithInterceptor(object, name);
}
} else {
// Normalize object if needed.
- Object* obj;
- result = self->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!result->To(&obj)) return result;
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
// Make sure the properties are normalized before removing the entry.
- result = self->DeleteNormalizedProperty(*hname, mode);
+ result = DeleteNormalizedProperty(object, name, mode);
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (is_observed && !self->HasLocalProperty(*hname)) {
- EnqueueChangeRecord(self, "deleted", hname, old_value);
+ if (is_observed && !object->HasLocalProperty(*name)) {
+ EnqueueChangeRecord(object, "deleted", name, old_value);
}
- return *hresult;
+ return result;
}
-MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
+Handle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
+ uint32_t index,
+ DeleteMode mode) {
+ if (object->IsJSProxy()) {
+ return JSProxy::DeleteElementWithHandler(
+ Handle<JSProxy>::cast(object), index, mode);
}
- return JSObject::cast(this)->DeleteElement(index, mode);
+ return JSObject::DeleteElement(Handle<JSObject>::cast(object), index, mode);
}
-MaybeObject* JSReceiver::DeleteProperty(Name* name, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
+Handle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ DeleteMode mode) {
+ if (object->IsJSProxy()) {
+ return JSProxy::DeletePropertyWithHandler(
+ Handle<JSProxy>::cast(object), name, mode);
}
- return JSObject::cast(this)->DeleteProperty(name, mode);
+ return JSObject::DeleteProperty(Handle<JSObject>::cast(object), name, mode);
}
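The deletion paths above all follow one handlification recipe: static methods taking Handle<> parameters replace instance methods returning MaybeObject*, and an empty Handle<Object>() takes over from Failure::Exception() as the "exception pending, unwind now" signal, checked via RETURN_HANDLE_IF_SCHEDULED_EXCEPTION. The shape of it, modeled with std::optional:

    #include <optional>
    #include <string>

    // An engaged optional models a live handle; an empty one models the
    // empty Handle<Object>() that signals a pending exception.
    std::optional<std::string> CallTrap(bool throws) {
      if (throws) return std::nullopt;   // was: return Failure::Exception()
      return std::string("trap result");
    }

    std::optional<bool> DeleteWithHandler(bool trap_throws) {
      auto result = CallTrap(trap_throws);
      if (!result) return std::nullopt;  // propagate the pending exception
      return !result->empty();
    }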
@@ -5349,6 +5383,7 @@ MaybeObject* JSObject::PreventExtensions() {
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
}
@@ -5427,6 +5462,7 @@ MUST_USE_RESULT MaybeObject* JSObject::Freeze(Isolate* isolate) {
heap->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return heap->false_value();
}
@@ -5532,6 +5568,40 @@ MUST_USE_RESULT MaybeObject* JSObject::Freeze(Isolate* isolate) {
}
+MUST_USE_RESULT MaybeObject* JSObject::SetObserved(Isolate* isolate) {
+ if (map()->is_observed())
+ return isolate->heap()->undefined_value();
+
+ Heap* heap = isolate->heap();
+
+ if (!HasExternalArrayElements()) {
+ // Go to dictionary mode, so that we don't skip map checks.
+ MaybeObject* maybe = NormalizeElements();
+ if (maybe->IsFailure()) return maybe;
+ ASSERT(!HasFastElements());
+ }
+
+ LookupResult result(isolate);
+ map()->LookupTransition(this, heap->observed_symbol(), &result);
+
+ Map* new_map;
+ if (result.IsTransition()) {
+ new_map = result.GetTransitionTarget();
+ ASSERT(new_map->is_observed());
+ } else if (map()->CanHaveMoreTransitions()) {
+ MaybeObject* maybe_new_map = map()->CopyForObserved();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ } else {
+ MaybeObject* maybe_copy = map()->Copy();
+ if (!maybe_copy->To(&new_map)) return maybe_copy;
+ new_map->set_is_observed(true);
+ }
+ set_map(new_map);
+
+ return heap->undefined_value();
+}
+
+
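SetObserved prefers sharing maps: if an "observed" transition already hangs off the current map it is reused, so objects observed from the same starting map converge on one observed map; CopyForObserved (below) wires a fresh map in as a FULL_TRANSITION under the observed symbol, transferring descriptor ownership; only when the transition array is full does it fall back to an unshared copy. The ladder, reduced to its decision:

    enum class Strategy { kReuseTransition, kCopyForObserved, kPlainCopy };

    // Same branch order as JSObject::SetObserved above.
    Strategy ChooseObservedMap(bool has_observed_transition,
                               bool can_have_more_transitions) {
      if (has_observed_transition) return Strategy::kReuseTransition;
      if (can_have_more_transitions) return Strategy::kCopyForObserved;
      return Strategy::kPlainCopy;
    }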
MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
StackLimitCheck check(isolate);
if (check.HasOverflowed()) return isolate->StackOverflow();
@@ -6240,6 +6310,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
if (IsAccessCheckNeeded() &&
!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
@@ -6315,7 +6386,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
}
-Object* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
+MaybeObject* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
Heap* heap = GetHeap();
// Make sure that the top context does not change when doing callbacks or
@@ -6326,6 +6397,7 @@ Object* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
if (IsAccessCheckNeeded() &&
!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ RETURN_IF_SCHEDULED_EXCEPTION(heap->isolate());
return heap->undefined_value();
}
@@ -6446,6 +6518,11 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
}
+Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(), map->CopyDropDescriptors(), Map);
+}
+
+
MaybeObject* Map::CopyDropDescriptors() {
Map* result;
MaybeObject* maybe_result = RawCopy(instance_size());
@@ -6653,6 +6730,39 @@ MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
}
+MaybeObject* Map::CopyForObserved() {
+ ASSERT(!is_observed());
+
+ // If the map owns its own descriptors, share the descriptors and
+ // transfer ownership to the new map.
+ Map* new_map;
+ MaybeObject* maybe_new_map;
+ if (owns_descriptors()) {
+ maybe_new_map = CopyDropDescriptors();
+ } else {
+ maybe_new_map = Copy();
+ }
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions = AddTransition(GetHeap()->observed_symbol(),
+ new_map,
+ FULL_TRANSITION);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ set_transitions(transitions);
+
+ new_map->set_is_observed(true);
+
+ if (owns_descriptors()) {
+ new_map->InitializeDescriptors(instance_descriptors());
+ set_owns_descriptors(false);
+ }
+
+ new_map->SetBackPointer(this);
+ return new_map;
+}
+
+
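CopyForObserved's descriptor handling is a share-and-transfer: when the source map owns its descriptor array, the new map points at the same array and takes over the ownership bit, so the descriptors are never duplicated. A rough sketch of that idea with simplified types (SimpleMap and Descriptors are illustrative):

    #include <string>
    #include <vector>

    struct Descriptors { std::vector<std::string> names; };

    struct SimpleMap {
      Descriptors* descriptors = nullptr;
      bool owns_descriptors = false;
    };

    // Share one descriptor array between parent and child, handing the
    // ownership bit to the child, as Map::CopyForObserved does.
    SimpleMap MakeChildSharingDescriptors(SimpleMap* parent) {
      SimpleMap child;
      child.descriptors = parent->descriptors;  // share, don't copy
      if (parent->owns_descriptors) {
        child.owns_descriptors = true;          // transfer ownership
        parent->owns_descriptors = false;
      }
      return child;
    }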
MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
if (pre_allocated_property_fields() == 0) return CopyDropDescriptors();
@@ -8822,24 +8932,24 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
}
-AllocationSiteInfo* AllocationSiteInfo::FindForJSObject(JSObject* object) {
- // Currently, AllocationSiteInfo objects are only allocated immediately
+AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) {
+ // Currently, AllocationMemento objects are only allocated immediately
// after JSArrays in NewSpace, and detecting whether a JSArray has one
// involves carefully checking the object immediately after the JSArray
- // (if there is one) to see if it's an AllocationSiteInfo.
+ // (if there is one) to see if it's an AllocationMemento.
if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
object->Size();
- if ((ptr_end + AllocationSiteInfo::kSize) <=
+ if ((ptr_end + AllocationMemento::kSize) <=
object->GetHeap()->NewSpaceTop()) {
// There is room in newspace for allocation info. Do we have some?
- Map** possible_allocation_site_info_map =
+ Map** possible_allocation_memento_map =
reinterpret_cast<Map**>(ptr_end);
- if (*possible_allocation_site_info_map ==
- object->GetHeap()->allocation_site_info_map()) {
- AllocationSiteInfo* info = AllocationSiteInfo::cast(
+ if (*possible_allocation_memento_map ==
+ object->GetHeap()->allocation_memento_map()) {
+ AllocationMemento* memento = AllocationMemento::cast(
reinterpret_cast<Object*>(ptr_end + 1));
- return info;
+ return memento;
}
}
}
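The probe above is pure address arithmetic: take the first byte past the object, confirm a memento would still fit below the new-space allocation top, and test whether the word at that address is the memento map. The same probe can be sketched over a plain byte buffer (kMementoTag and FindMemento are hypothetical; V8 really compares heap map pointers):

    #include <cstdint>
    #include <cstring>

    const uint32_t kMementoTag = 0xA110CA7E;  // hypothetical marker

    // Returns the memento payload placed directly after an object of
    // |object_size| bytes, or NULL if it would not fit below the
    // allocation top or the tag at that address does not match.
    const uint8_t* FindMemento(const uint8_t* object, size_t object_size,
                               const uint8_t* space_top) {
      const uint8_t* ptr_end = object + object_size;
      const size_t kMementoSize = 2 * sizeof(uint32_t);  // tag + payload
      if (ptr_end + kMementoSize > space_top) return NULL;
      uint32_t tag;
      memcpy(&tag, ptr_end, sizeof(tag));  // the "map" check
      return tag == kMementoTag ? ptr_end + sizeof(tag) : NULL;
    }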
@@ -8847,21 +8957,6 @@ AllocationSiteInfo* AllocationSiteInfo::FindForJSObject(JSObject* object) {
}
-bool AllocationSiteInfo::GetElementsKindPayload(ElementsKind* kind) {
- ASSERT(kind != NULL);
- if (payload()->IsCell()) {
- Cell* cell = Cell::cast(payload());
- Object* cell_contents = cell->value();
- if (cell_contents->IsSmi()) {
- *kind = static_cast<ElementsKind>(
- Smi::cast(cell_contents)->value());
- return true;
- }
- }
- return false;
-}
-
-
uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
// For array indexes mix the length into the hash as an array index could
// be zero.
@@ -9157,7 +9252,6 @@ void JSFunction::MarkForParallelRecompilation() {
void JSFunction::MarkForInstallingRecompiledCode() {
// The debugger could have switched the builtin to lazy compile.
// In that case, simply carry on. It will be dealt with later.
- ASSERT(IsInRecompileQueue() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(FLAG_parallel_recompilation);
@@ -9450,57 +9544,53 @@ Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
}
-MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
+void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
+ Handle<Object> value) {
ASSERT(value->IsJSReceiver());
- Heap* heap = GetHeap();
// First some logic for the map of the prototype to make sure it is in fast
// mode.
if (value->IsJSObject()) {
- MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
- if (ok->IsFailure()) return ok;
+ JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
}
// Now some logic for the maps of the objects that are created by using this
// function as a constructor.
- if (has_initial_map()) {
+ if (function->has_initial_map()) {
// If the function has allocated the initial map replace it with a
// copy containing the new prototype. Also complete any in-object
// slack tracking that is in progress at this point because it is
// still tracking the old copy.
- if (shared()->IsInobjectSlackTrackingInProgress()) {
- shared()->CompleteInobjectSlackTracking();
+ if (function->shared()->IsInobjectSlackTrackingInProgress()) {
+ function->shared()->CompleteInobjectSlackTracking();
}
- Map* new_map;
- MaybeObject* maybe_object = initial_map()->Copy();
- if (!maybe_object->To(&new_map)) return maybe_object;
- new_map->set_prototype(value);
+ Handle<Map> new_map = Map::Copy(handle(function->initial_map()));
+ new_map->set_prototype(*value);
// If the function is used as the global Array function, cache the
// initial map (and transitioned versions) in the native context.
- Context* native_context = context()->native_context();
+ Context* native_context = function->context()->native_context();
Object* array_function = native_context->get(Context::ARRAY_FUNCTION_INDEX);
if (array_function->IsJSFunction() &&
- this == JSFunction::cast(array_function)) {
- MaybeObject* ok = CacheInitialJSArrayMaps(native_context, new_map);
- if (ok->IsFailure()) return ok;
+ *function == JSFunction::cast(array_function)) {
+ CacheInitialJSArrayMaps(handle(native_context), new_map);
}
- set_initial_map(new_map);
+ function->set_initial_map(*new_map);
} else {
// Put the value in the initial map field until an initial map is
// needed. At that point, a new initial map is created and the
// prototype is put into the initial map where it belongs.
- set_prototype_or_initial_map(value);
+ function->set_prototype_or_initial_map(*value);
}
- heap->ClearInstanceofCache();
- return value;
+ function->GetHeap()->ClearInstanceofCache();
}
-MaybeObject* JSFunction::SetPrototype(Object* value) {
- ASSERT(should_have_prototype());
- Object* construct_prototype = value;
+void JSFunction::SetPrototype(Handle<JSFunction> function,
+ Handle<Object> value) {
+ ASSERT(function->should_have_prototype());
+ Handle<Object> construct_prototype = value;
// If the value is not a JSReceiver, store the value in the map's
// constructor field so it can be accessed. Also, set the prototype
@@ -9510,22 +9600,20 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
// Copy the map so this does not affect unrelated functions.
// Remove map transitions because they point to maps with a
// different prototype.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->Copy();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ Handle<Map> new_map = Map::Copy(handle(function->map()));
- Heap* heap = new_map->GetHeap();
- set_map(new_map);
- new_map->set_constructor(value);
+ function->set_map(*new_map);
+ new_map->set_constructor(*value);
new_map->set_non_instance_prototype(true);
- construct_prototype =
- heap->isolate()->context()->native_context()->
- initial_object_prototype();
+ Isolate* isolate = new_map->GetIsolate();
+ construct_prototype = handle(
+ isolate->context()->native_context()->initial_object_prototype(),
+ isolate);
} else {
- map()->set_non_instance_prototype(false);
+ function->map()->set_non_instance_prototype(false);
}
- return SetInstancePrototype(construct_prototype);
+ return SetInstancePrototype(function, construct_prototype);
}
@@ -9564,8 +9652,16 @@ Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
bool JSFunction::PassesHydrogenFilter() {
String* name = shared()->DebugName();
- if (*FLAG_hydrogen_filter != '\0') {
+ // The filter string is a pattern that matches functions in this way:
+ // "*" all; the default
+ // "-" all but the top-level function
+ // "-name" all but the function "name"
+ // "" only the top-level function
+ // "name" only the function "name"
+ // "name*" only functions starting with "name"
+ if (*FLAG_hydrogen_filter != '*') {
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+ if (filter.length() == 0) return name->length() == 0;
if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
if (filter[0] == '-' &&
!name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
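The comment block above fully specifies the --hydrogen-filter pattern language, and it translates almost line for line into string tests. A standalone sketch of the documented semantics (std::string in place of V8's String and Vector types; PassesFilter is illustrative):

    #include <string>

    // Returns true if |name| passes |filter| under the rules documented
    // in JSFunction::PassesHydrogenFilter; the top-level function has an
    // empty name.
    bool PassesFilter(const std::string& name, const std::string& filter) {
      if (filter == "*") return true;               // all; the default
      if (filter.empty()) return name.empty();      // top-level only
      if (filter[0] == '-')                         // all but "name"
        return name != filter.substr(1);
      if (filter[filter.size() - 1] == '*')         // prefix match
        return name.compare(0, filter.size() - 1,
                            filter, 0, filter.size() - 1) == 0;
      return name == filter;                        // exact match
    }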
@@ -9961,28 +10057,18 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
}
+
void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
VisitPointer(rinfo->target_object_address());
}
+
void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
Address* p = rinfo->target_reference_address();
VisitExternalReferences(p, p + 1);
}
-byte Code::compare_nil_state() {
- ASSERT(is_compare_nil_ic_stub());
- return CompareNilICStub::ExtractTypesFromExtraICState(
- extended_extra_ic_state());
-}
-
-byte Code::compare_nil_value() {
- ASSERT(is_compare_nil_ic_stub());
- return CompareNilICStub::ExtractNilValueFromExtraICState(
- extended_extra_ic_state());
-}
-
void Code::InvalidateRelocation() {
set_relocation_info(GetHeap()->empty_byte_array());
@@ -10248,7 +10334,11 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
Cell* cell = type_feedback_cells->GetCell(i);
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+ // Don't clear cells that hold AllocationSites.
+ Object* value = cell->value();
+ if (value == NULL || !value->IsAllocationSite()) {
+ cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+ }
}
}
}
@@ -10610,6 +10700,7 @@ const char* Code::StubType2String(StubType type) {
void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
+ PrintF(out, "extra_ic_state = ");
const char* name = NULL;
switch (kind) {
case CALL_IC:
@@ -10627,9 +10718,9 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
break;
}
if (name != NULL) {
- PrintF(out, "extra_ic_state = %s\n", name);
+ PrintF(out, "%s\n", name);
} else {
- PrintF(out, "extra_ic_state = %d\n", extra);
+ PrintF(out, "%d\n", extra);
}
}
@@ -10638,7 +10729,8 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "kind = %s\n", Kind2String(kind()));
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), extra_ic_state());
+ PrintExtraICState(out, kind(), needs_extended_extra_ic_state(kind()) ?
+ extended_extra_ic_state() : extra_ic_state());
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", StubType2String(type()));
}
@@ -10712,7 +10804,8 @@ void Code::Disassemble(const char* name, FILE* out) {
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t ast_id = Memory::uint32_at(back_edge_cursor);
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
- uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
+ uint32_t loop_depth = Memory::uint32_at(back_edge_cursor +
+ 2 * kIntSize);
PrintF(out, "%6u %9u %10u\n", ast_id, pc_offset, loop_depth);
back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
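Each back-edge table entry is now three 32-bit fields read at fixed offsets: ast id, pc offset, and a full-width loop depth, with the cursor advanced by the entry size. A sketch of walking such a packed table (the layout is assumed from this loop, not the full FullCodeGenerator encoding):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // One table entry: three consecutive 32-bit values.
    const size_t kEntrySize = 3 * sizeof(uint32_t);

    void PrintBackEdgeTable(const uint8_t* cursor, uint32_t table_length) {
      for (uint32_t i = 0; i < table_length; ++i) {
        uint32_t ast_id, pc_offset, loop_depth;
        memcpy(&ast_id, cursor, 4);
        memcpy(&pc_offset, cursor + 4, 4);
        memcpy(&loop_depth, cursor + 8, 4);  // was one byte before this change
        printf("%6u %9u %10u\n", ast_id, pc_offset, loop_depth);
        cursor += kEntrySize;
      }
    }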
@@ -11711,7 +11804,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
? FAST_HOLEY_DOUBLE_ELEMENTS
: FAST_DOUBLE_ELEMENTS;
- MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind);
+ MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
if (maybe_failure->IsFailure()) return maybe_failure;
MaybeObject* maybe =
@@ -11728,7 +11821,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
- MaybeObject* maybe_failure = UpdateAllocationSiteInfo(kind);
+ MaybeObject* maybe_failure = UpdateAllocationSite(kind);
if (maybe_failure->IsFailure()) return maybe_failure;
MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
@@ -12068,6 +12161,7 @@ MaybeObject* JSObject::SetElement(uint32_t index,
if (IsAccessCheckNeeded()) {
if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return value_raw;
}
}
@@ -12291,50 +12385,55 @@ Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
}
-MaybeObject* JSObject::UpdateAllocationSiteInfo(ElementsKind to_kind) {
+MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
if (!FLAG_track_allocation_sites || !IsJSArray()) {
return this;
}
- AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(this);
- if (info == NULL) {
+ AllocationMemento* memento = AllocationMemento::FindForJSObject(this);
+ if (memento == NULL || !memento->IsValid()) {
return this;
}
- if (info->payload()->IsJSArray()) {
- JSArray* payload = JSArray::cast(info->payload());
- ElementsKind kind = payload->GetElementsKind();
- if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ // Walk through the memento to the AllocationSite.
+ AllocationSite* site = memento->GetAllocationSite();
+ if (site->IsLiteralSite()) {
+ JSArray* transition_info = JSArray::cast(site->transition_info());
+ ElementsKind kind = transition_info->GetElementsKind();
+ // If kind is holey, ensure that to_kind is holey as well.
+ if (IsHoleyElementsKind(kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
// If the array is huge, it's not likely to be defined in a local
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
- CHECK(payload->length()->ToArrayIndex(&length));
- if (length <= AllocationSiteInfo::kMaximumArrayBytesToPretransition) {
+ CHECK(transition_info->length()->ToArrayIndex(&length));
+ if (length <= AllocationSite::kMaximumArrayBytesToPretransition) {
if (FLAG_trace_track_allocation_sites) {
PrintF(
- "AllocationSiteInfo: JSArray %p boilerplate updated %s->%s\n",
+ "AllocationSite: JSArray %p boilerplate updated %s->%s\n",
reinterpret_cast<void*>(this),
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- return payload->TransitionElementsKind(to_kind);
+ return transition_info->TransitionElementsKind(to_kind);
}
}
- } else if (info->payload()->IsCell()) {
- Cell* cell = Cell::cast(info->payload());
- Object* cell_contents = cell->value();
- if (cell_contents->IsSmi()) {
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(cell_contents)->value());
- if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSiteInfo: JSArray %p info updated %s->%s\n",
- reinterpret_cast<void*>(this),
- ElementsKindToString(kind),
- ElementsKindToString(to_kind));
- }
- cell->set_value(Smi::FromInt(to_kind));
+ } else {
+ ElementsKind kind = site->GetElementsKind();
+ // If kind is holey, ensure that to_kind is holey as well.
+ if (IsHoleyElementsKind(kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
+ reinterpret_cast<void*>(this),
+ ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
}
+ site->set_transition_info(Smi::FromInt(to_kind));
}
}
return this;
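Both branches above apply the same widening rule before deciding whether to retrack: a holey recorded kind forces the requested kind to its holey variant, so a site can never transition back toward a packed kind. The rule in isolation, with a stand-in kind enum (Kind, ToHoley, and WidenForSite are illustrative):

    enum Kind { SMI, SMI_HOLEY, DOUBLE, DOUBLE_HOLEY, OBJECT, OBJECT_HOLEY };

    bool IsHoley(Kind k) {
      return k == SMI_HOLEY || k == DOUBLE_HOLEY || k == OBJECT_HOLEY;
    }

    Kind ToHoley(Kind k) {
      switch (k) {
        case SMI:    return SMI_HOLEY;
        case DOUBLE: return DOUBLE_HOLEY;
        case OBJECT: return OBJECT_HOLEY;
        default:     return k;  // already holey
      }
    }

    // Mirrors the widening in UpdateAllocationSite: a holey recorded
    // kind makes the target kind holey too.
    Kind WidenForSite(Kind recorded, Kind requested) {
      return IsHoley(recorded) ? ToHoley(requested) : requested;
    }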
@@ -12351,7 +12450,7 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
if (from_kind == to_kind) return this;
- MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind);
+ MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
if (maybe_failure->IsFailure()) return maybe_failure;
Isolate* isolate = GetIsolate();
@@ -12812,6 +12911,13 @@ bool JSObject::HasRealElementProperty(Isolate* isolate, uint32_t index) {
}
}
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return false;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->HasRealElementProperty(isolate, index);
+ }
+
return GetElementAttributeWithoutInterceptor(this, index, false) != ABSENT;
}
@@ -13299,6 +13405,7 @@ class RegExpKey : public HashTableKey {
Smi* flags_;
};
+
// Utf8StringKey carries a vector of chars as key.
class Utf8StringKey : public HashTableKey {
public:
@@ -13665,6 +13772,7 @@ uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
return entry;
}
+
// Force instantiation of template instances class.
// Please note this list is compiler dependent.
@@ -14027,6 +14135,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
return result_double;
}
+
ExternalArrayType JSTypedArray::type() {
switch (elements()->map()->instance_type()) {
case EXTERNAL_BYTE_ARRAY_TYPE:
@@ -14241,39 +14350,36 @@ PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
}
-Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
- Handle<GlobalObject> global,
- Handle<Name> name) {
- Isolate* isolate = global->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- global->EnsurePropertyCell(*name),
- PropertyCell);
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(),
+ dict->Add(*name, *value, details),
+ NameDictionary);
}
-MaybeObject* GlobalObject::EnsurePropertyCell(Name* name) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
+Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
+ Handle<GlobalObject> global,
+ Handle<Name> name) {
+ ASSERT(!global->HasFastProperties());
+ int entry = global->property_dictionary()->FindEntry(*name);
if (entry == NameDictionary::kNotFound) {
- Heap* heap = GetHeap();
- Object* cell;
- { MaybeObject* maybe_cell =
- heap->AllocatePropertyCell(heap->the_hole_value());
- if (!maybe_cell->ToObject(&cell)) return maybe_cell;
- }
+ Isolate* isolate = global->GetIsolate();
+ Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(
+ isolate->factory()->the_hole_value());
PropertyDetails details(NONE, NORMAL, 0);
details = details.AsDeleted();
- Object* dictionary;
- { MaybeObject* maybe_dictionary =
- property_dictionary()->Add(name, cell, details);
- if (!maybe_dictionary->ToObject(&dictionary)) return maybe_dictionary;
- }
- set_properties(NameDictionary::cast(dictionary));
+ Handle<NameDictionary> dictionary = NameDictionaryAdd(
+ handle(global->property_dictionary()), name, cell, details);
+ global->set_properties(*dictionary);
return cell;
} else {
- Object* value = property_dictionary()->ValueAt(entry);
+ Object* value = global->property_dictionary()->ValueAt(entry);
ASSERT(value->IsPropertyCell());
- return value;
+ return handle(PropertyCell::cast(value));
}
}
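EnsurePropertyCell is a classic find-or-insert: a dictionary miss allocates a hole-valued cell and records it (marked deleted), while a hit returns the cell already stored, so every caller ends up sharing one cell per name. The same shape with std::map standing in for NameDictionary (Cell and EnsureCell are illustrative):

    #include <map>
    #include <memory>
    #include <string>

    struct Cell { bool is_hole = true; int value = 0; };

    typedef std::map<std::string, std::shared_ptr<Cell> > Dictionary;

    // Find-or-create, mirroring GlobalObject::EnsurePropertyCell.
    std::shared_ptr<Cell> EnsureCell(Dictionary* dict,
                                     const std::string& name) {
      Dictionary::iterator it = dict->find(name);
      if (it != dict->end()) return it->second;   // existing cell
      std::shared_ptr<Cell> cell(new Cell());     // starts as "the hole"
      (*dict)[name] = cell;
      return cell;
    }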
@@ -14406,6 +14512,7 @@ MaybeObject* StringTable::LookupTwoByteString(Vector<const uc16> str,
return LookupKey(&key, s);
}
+
MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) {
int entry = FindEntry(key);
@@ -15802,10 +15909,52 @@ Type* PropertyCell::type() {
void PropertyCell::set_type(Type* type, WriteBarrierMode ignored) {
+ ASSERT(IsPropertyCell());
set_type_raw(type, ignored);
}
+Type* PropertyCell::UpdateType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
+ Isolate* isolate = cell->GetIsolate();
+ Handle<Type> old_type(cell->type(), isolate);
+ Handle<Type> new_type((value->IsSmi() || value->IsJSFunction() ||
+ value->IsUndefined())
+ ? Type::Constant(value, isolate)
+ : Type::Any(), isolate);
+
+ if (new_type->Is(old_type)) {
+ return *old_type;
+ }
+
+ cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kPropertyCellChangedGroup);
+
+ if (old_type->Is(Type::None()) || old_type->Is(Type::Undefined())) {
+ return *new_type;
+ }
+
+ return Type::Any();
+}
+
+
+MaybeObject* PropertyCell::SetValueInferType(Object* value,
+ WriteBarrierMode ignored) {
+ set_value(value, ignored);
+ if (!Type::Any()->Is(type())) {
+ IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate());
+ MaybeObject* maybe_type = trampoline.CallWithReturnValue(
+ &PropertyCell::UpdateType,
+ Handle<PropertyCell>(this),
+ Handle<Object>(value, GetIsolate()));
+ Type* new_type = NULL;
+ if (!maybe_type->To(&new_type)) return maybe_type;
+ set_type(new_type);
+ }
+ return value;
+}
+
+
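UpdateType walks a tiny lattice, with None/Undefined at the bottom, a single Constant in the middle, and Any on top; SetValueInferType deoptimizes dependent code whenever a store forces the type upward. A simplified sketch over an int-valued cell (CellType, TypedCell, and the deopt callback are illustrative placeholders):

    #include <functional>

    enum class CellType { kNone, kConstant, kAny };

    struct TypedCell {
      CellType type = CellType::kNone;
      int constant = 0;  // meaningful only when type == kConstant
    };

    // Mirrors PropertyCell::UpdateType: keep the type when the new
    // value fits, otherwise deoptimize dependents and widen.
    void StoreInferType(TypedCell* cell, int value,
                        const std::function<void()>& deopt_dependents) {
      if (cell->type == CellType::kAny) return;  // already at the top
      if (cell->type == CellType::kConstant && cell->constant == value) {
        return;  // new type lies within the old type; nothing changes
      }
      deopt_dependents();  // the cell's type is about to change
      if (cell->type == CellType::kNone) {
        cell->type = CellType::kConstant;  // first value: stay precise
        cell->constant = value;
      } else {
        cell->type = CellType::kAny;       // second distinct value: widen
      }
    }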
void PropertyCell::AddDependentCompilationInfo(CompilationInfo* info) {
Handle<DependentCode> dep(dependent_code());
Handle<DependentCode> codes =
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 416ed7fcda..f197b238ff 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -60,11 +60,13 @@
// - JSArray
// - JSArrayBuffer
// - JSArrayBufferView
-// - JSTypedArray
-// - JSDataView
+// - JSTypedArray
+// - JSDataView
// - JSSet
// - JSMap
-// - JSWeakMap
+// - JSWeakCollection
+// - JSWeakMap
+// - JSWeakSet
// - JSRegExp
// - JSFunction
// - JSGeneratorObject
@@ -386,7 +388,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(OBJECT_TEMPLATE_INFO_TYPE) \
V(SIGNATURE_INFO_TYPE) \
V(TYPE_SWITCH_INFO_TYPE) \
- V(ALLOCATION_SITE_INFO_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(ALLOCATION_SITE_TYPE) \
V(SCRIPT_TYPE) \
V(CODE_CACHE_TYPE) \
V(POLYMORPHIC_CODE_CACHE_TYPE) \
@@ -414,6 +417,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(JS_DATA_VIEW_TYPE) \
V(JS_PROXY_TYPE) \
V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
V(JS_REGEXP_TYPE) \
\
V(JS_FUNCTION_TYPE) \
@@ -550,7 +554,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(SIGNATURE_INFO, SignatureInfo, signature_info) \
V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script) \
- V(ALLOCATION_SITE_INFO, AllocationSiteInfo, allocation_site_info) \
+ V(ALLOCATION_SITE, AllocationSite, allocation_site) \
+ V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
V(CODE_CACHE, CodeCache, code_cache) \
V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
@@ -577,9 +582,9 @@ const uint32_t kNotStringTag = 0x80;
// Bit 6 indicates that the object is an internalized string (if set) or not.
// Bit 7 has to be clear as well.
-const uint32_t kIsInternalizedMask = 0x40;
-const uint32_t kNotInternalizedTag = 0x0;
-const uint32_t kInternalizedTag = 0x40;
+const uint32_t kIsNotInternalizedMask = 0x40;
+const uint32_t kNotInternalizedTag = 0x40;
+const uint32_t kInternalizedTag = 0x0;
// If bit 7 is clear then bit 2 indicates whether the string consists of
// two-byte characters or one-byte characters.
@@ -628,45 +633,62 @@ const uint32_t kShortExternalStringTag = 0x10;
// See heap.cc and mark-compact.cc.
const uint32_t kShortcutTypeMask =
kIsNotStringMask |
- kIsInternalizedMask |
+ kIsNotInternalizedMask |
kStringRepresentationMask;
-const uint32_t kShortcutTypeTag = kConsStringTag;
+const uint32_t kShortcutTypeTag = kConsStringTag | kNotInternalizedTag;
enum InstanceType {
// String types.
- STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
- ASCII_STRING_TYPE = kOneByteStringTag | kSeqStringTag,
- CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
- CONS_ASCII_STRING_TYPE = kOneByteStringTag | kConsStringTag,
- SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
- SLICED_ASCII_STRING_TYPE = kOneByteStringTag | kSlicedStringTag,
- EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
- EXTERNAL_ASCII_STRING_TYPE = kOneByteStringTag | kExternalStringTag,
- EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_STRING_TYPE | kOneByteDataHintTag,
- SHORT_EXTERNAL_STRING_TYPE = EXTERNAL_STRING_TYPE | kShortExternalStringTag,
- SHORT_EXTERNAL_ASCII_STRING_TYPE =
- EXTERNAL_ASCII_STRING_TYPE | kShortExternalStringTag,
- SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE | kShortExternalStringTag,
-
- INTERNALIZED_STRING_TYPE = STRING_TYPE | kInternalizedTag,
- ASCII_INTERNALIZED_STRING_TYPE = ASCII_STRING_TYPE | kInternalizedTag,
- CONS_INTERNALIZED_STRING_TYPE = CONS_STRING_TYPE | kInternalizedTag,
- CONS_ASCII_INTERNALIZED_STRING_TYPE =
- CONS_ASCII_STRING_TYPE | kInternalizedTag,
- EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_STRING_TYPE | kInternalizedTag,
- EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
- EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag,
+ INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag
+ | kInternalizedTag,
+ ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kSeqStringTag
+ | kInternalizedTag,
+ CONS_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kConsStringTag
+ | kInternalizedTag,
+ CONS_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kConsStringTag
+ | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kExternalStringTag
+ | kInternalizedTag,
+ EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag
+ | kExternalStringTag | kInternalizedTag,
EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag
+ | kInternalizedTag,
SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE =
- SHORT_EXTERNAL_STRING_TYPE | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kShortExternalStringTag
+ | kInternalizedTag,
SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
- SHORT_EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag,
+ EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kShortExternalStringTag
+ | kInternalizedTag,
SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
- SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
+ | kShortExternalStringTag | kInternalizedTag,
+
+ STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ ASCII_STRING_TYPE = ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ CONS_STRING_TYPE = CONS_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ CONS_ASCII_STRING_TYPE =
+ CONS_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+
+ SLICED_STRING_TYPE =
+ kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag,
+ SLICED_ASCII_STRING_TYPE =
+ kOneByteStringTag | kSlicedStringTag | kNotInternalizedTag,
+ EXTERNAL_STRING_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ EXTERNAL_ASCII_STRING_TYPE =
+ EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
+ | kNotInternalizedTag,
+ SHORT_EXTERNAL_STRING_TYPE =
+ SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ SHORT_EXTERNAL_ASCII_STRING_TYPE =
+ SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
+ | kNotInternalizedTag,
// Non-string names
SYMBOL_TYPE = kNotStringTag, // LAST_NAME_TYPE, FIRST_NONSTRING_TYPE
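With the inverted encoding, internalized string types are the bare representation bits and every regular type is its internalized counterpart with the 0x40 bit OR-ed in, which is exactly why kShortcutTypeTag above must now include kNotInternalizedTag. A couple of compile-time checks spelling out the bit algebra (constants mirrored from this header; only sequential two-byte strings shown):

    #include <cstdint>

    const uint32_t kIsNotInternalizedMask = 0x40;
    const uint32_t kNotInternalizedTag = 0x40;
    const uint32_t kInternalizedTag = 0x0;
    const uint32_t kSeqStringTag = 0x0;      // representation bits
    const uint32_t kTwoByteStringTag = 0x0;  // encoding bits

    const uint32_t INTERNALIZED_STRING_TYPE =
        kTwoByteStringTag | kSeqStringTag | kInternalizedTag;  // 0x00
    const uint32_t STRING_TYPE =
        INTERNALIZED_STRING_TYPE | kNotInternalizedTag;        // 0x40

    static_assert((INTERNALIZED_STRING_TYPE & kIsNotInternalizedMask) == 0,
                  "internalized strings have bit 6 clear");
    static_assert((STRING_TYPE & kIsNotInternalizedMask) != 0,
                  "regular strings have bit 6 set");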
@@ -709,7 +731,8 @@ enum InstanceType {
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
- ALLOCATION_SITE_INFO_TYPE,
+ ALLOCATION_SITE_TYPE,
+ ALLOCATION_MEMENTO_TYPE,
SCRIPT_TYPE,
CODE_CACHE_TYPE,
POLYMORPHIC_CODE_CACHE_TYPE,
@@ -751,6 +774,7 @@ enum InstanceType {
JS_SET_TYPE,
JS_MAP_TYPE,
JS_WEAK_MAP_TYPE,
+ JS_WEAK_SET_TYPE,
JS_REGEXP_TYPE,
@@ -919,13 +943,9 @@ class MaybeObject BASE_EMBEDDED {
#ifdef OBJECT_PRINT
// Prints this object with details.
- inline void Print() {
- Print(stdout);
- }
- inline void PrintLn() {
- PrintLn(stdout);
- }
+ void Print();
void Print(FILE* out);
+ void PrintLn();
void PrintLn(FILE* out);
#endif
#ifdef VERIFY_HEAP
@@ -1004,7 +1024,9 @@ class MaybeObject BASE_EMBEDDED {
V(JSFunctionProxy) \
V(JSSet) \
V(JSMap) \
+ V(JSWeakCollection) \
V(JSWeakMap) \
+ V(JSWeakSet) \
V(JSRegExp) \
V(HashTable) \
V(Dictionary) \
@@ -1663,8 +1685,12 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
Object* value);
- MUST_USE_RESULT MaybeObject* DeleteProperty(Name* name, DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
+ static Handle<Object> DeleteProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ DeleteMode mode = NORMAL_DELETION);
+ static Handle<Object> DeleteElement(Handle<JSReceiver> object,
+ uint32_t index,
+ DeleteMode mode);
// Set the index'th array element.
// Can cause GC, or return failure if GC is required.
@@ -1886,9 +1912,16 @@ class JSObject: public JSReceiver {
// Handles the special representation of JS global objects.
Object* GetNormalizedProperty(LookupResult* result);
+ // Sets the property value in a normalized object given (key, value).
+ // Handles the special representation of JS global objects.
+ static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value);
+
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- Object* SetNormalizedProperty(LookupResult* result, Object* value);
+ MUST_USE_RESULT MaybeObject* SetNormalizedProperty(LookupResult* result,
+ Object* value);
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
@@ -1901,10 +1934,6 @@ class JSObject: public JSReceiver {
Object* value,
PropertyDetails details);
- // Deletes the named property in a normalized object.
- MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(Name* name,
- DeleteMode mode);
-
static void OptimizeAsPrototype(Handle<JSObject> object);
MUST_USE_RESULT MaybeObject* OptimizeAsPrototype();
@@ -1934,7 +1963,7 @@ class JSObject: public JSReceiver {
Handle<Object> setter,
PropertyAttributes attributes);
- Object* LookupAccessor(Name* name, AccessorComponent component);
+ MaybeObject* LookupAccessor(Name* name, AccessorComponent component);
MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
@@ -1995,12 +2024,9 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag);
- static Handle<Object> DeleteProperty(Handle<JSObject> obj,
- Handle<Name> name);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* DeleteProperty(Name* name, DeleteMode mode);
-
- static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
+ static Handle<Object> DeleteElement(Handle<JSObject> obj,
+ uint32_t index,
+ DeleteMode mode = NORMAL_DELETION);
MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
inline void ValidateElements();
@@ -2203,8 +2229,7 @@ class JSObject: public JSReceiver {
ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* UpdateAllocationSiteInfo(
- ElementsKind to_kind);
+ MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
// Replaces an existing transition with a transition to a map with a FIELD.
MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition(
@@ -2324,6 +2349,10 @@ class JSObject: public JSReceiver {
// ES5 Object.freeze
MUST_USE_RESULT MaybeObject* Freeze(Isolate* isolate);
+
+ // Called the first time an object is observed with ES7 Object.observe.
+ MUST_USE_RESULT MaybeObject* SetObserved(Isolate* isolate);
+
// Copy object
MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate);
@@ -2430,6 +2459,7 @@ class JSObject: public JSReceiver {
private:
friend class DictionaryElementsAccessor;
+ friend class JSReceiver;
MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
Object* structure,
@@ -2475,9 +2505,19 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode,
bool* done);
- MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(Name* name,
- DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(Name* name);
+ static Handle<Object> DeleteProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode);
+ static Handle<Object> DeletePropertyPostInterceptor(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode);
+ static Handle<Object> DeletePropertyWithInterceptor(Handle<JSObject> object,
+ Handle<Name> name);
+
+ // Deletes the named property in a normalized object.
+ static Handle<Object> DeleteNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ DeleteMode mode);
MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
@@ -4567,7 +4607,8 @@ class Code: public HeapObject {
// TODO(danno): This is a bit of a hack right now since there are still
// clients of this API that pass "extra" values in for argc. These clients
// should be retrofitted to use ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC;
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
+ kind == UNARY_OP_IC;
}
inline StubType type(); // Only valid for monomorphic IC stubs.
@@ -4663,10 +4704,6 @@ class Code: public HeapObject {
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
- // [compare_nil]: For kind COMPARE_NIL_IC tells what state the stub is in.
- byte compare_nil_state();
- byte compare_nil_value();
-
// [has_function_cache]: For kind STUB tells whether there is a function
// cache passed to the stub.
inline bool has_function_cache();
@@ -5326,6 +5363,9 @@ class Map: public HeapObject {
inline void set_is_access_check_needed(bool access_check_needed);
inline bool is_access_check_needed();
+ // Returns true if map has a non-empty stub code cache.
+ inline bool has_code_cache();
+
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
@@ -5450,6 +5490,7 @@ class Map: public HeapObject {
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
+ static Handle<Map> CopyDropDescriptors(Handle<Map> map);
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
DescriptorArray* descriptors,
@@ -5471,8 +5512,10 @@ class Map: public HeapObject {
int index,
TransitionFlag flag);
MUST_USE_RESULT MaybeObject* AsElementsKind(ElementsKind kind);
+
MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* CopyForObserved();
MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
@@ -5493,6 +5536,13 @@ class Map: public HeapObject {
int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
PropertyAttributes filter = NONE);
+ // Returns the number of slots allocated for the initial properties
+ // backing storage for instances of this map.
+ int InitialPropertiesLength() {
+ return pre_allocated_property_fields() + unused_property_fields() -
+ inobject_properties();
+ }
+
// Casting.
static inline Map* cast(Object* obj);
@@ -6129,11 +6179,6 @@ class SharedFunctionInfo: public HeapObject {
inline int ast_node_count();
inline void set_ast_node_count(int count);
- // A counter used to determine when to stress the deoptimizer with a
- // deopt.
- inline int stress_deopt_counter();
- inline void set_stress_deopt_counter(int counter);
-
inline int profiler_ticks();
// Inline cache age is used to infer whether the function survived a context
@@ -6325,10 +6370,9 @@ class SharedFunctionInfo: public HeapObject {
kFunctionTokenPositionOffset + kPointerSize;
static const int kOptCountOffset = kCompilerHintsOffset + kPointerSize;
static const int kCountersOffset = kOptCountOffset + kPointerSize;
- static const int kStressDeoptCounterOffset = kCountersOffset + kPointerSize;
// Total size.
- static const int kSize = kStressDeoptCounterOffset + kPointerSize;
+ static const int kSize = kCountersOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
@@ -6362,10 +6406,9 @@ class SharedFunctionInfo: public HeapObject {
static const int kOptCountOffset = kCompilerHintsOffset + kIntSize;
static const int kCountersOffset = kOptCountOffset + kIntSize;
- static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize;
// Total size.
- static const int kSize = kStressDeoptCounterOffset + kIntSize;
+ static const int kSize = kCountersOffset + kIntSize;
#endif
@@ -6667,8 +6710,10 @@ class JSFunction: public JSObject {
inline bool has_instance_prototype();
inline Object* prototype();
inline Object* instance_prototype();
- MUST_USE_RESULT MaybeObject* SetInstancePrototype(Object* value);
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value);
+ static void SetPrototype(Handle<JSFunction> function,
+ Handle<Object> value);
+ static void SetInstancePrototype(Handle<JSFunction> function,
+ Handle<Object> value);
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
@@ -6819,12 +6864,8 @@ class GlobalObject: public JSObject {
}
// Ensure that the global object has a cell for the given property name.
- static Handle<PropertyCell> EnsurePropertyCell(
- Handle<GlobalObject> global,
- Handle<Name> name);
- // TODO(kmillikin): This function can be eliminated once the stub cache is
- // fully handlified (and the static helper can be written directly).
- MUST_USE_RESULT MaybeObject* EnsurePropertyCell(Name* name);
+ static Handle<PropertyCell> EnsurePropertyCell(Handle<GlobalObject> global,
+ Handle<Name> name);
// Casting.
static inline GlobalObject* cast(Object* obj);
@@ -7457,28 +7498,76 @@ enum AllocationSiteMode {
};
-class AllocationSiteInfo: public Struct {
+class AllocationSite: public Struct {
public:
- DECL_ACCESSORS(payload, Object)
+ static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
- static inline AllocationSiteInfo* cast(Object* obj);
+ DECL_ACCESSORS(transition_info, Object)
+ DECL_ACCESSORS(weak_next, Object)
- DECLARE_PRINTER(AllocationSiteInfo)
- DECLARE_VERIFIER(AllocationSiteInfo)
+ void Initialize() {
+ SetElementsKind(GetInitialFastElementsKind());
+ }
- // Returns NULL if no AllocationSiteInfo is available for object.
- static AllocationSiteInfo* FindForJSObject(JSObject* object);
+ ElementsKind GetElementsKind() {
+ ASSERT(!IsLiteralSite());
+ return static_cast<ElementsKind>(Smi::cast(transition_info())->value());
+ }
+
+ void SetElementsKind(ElementsKind kind) {
+ set_transition_info(Smi::FromInt(static_cast<int>(kind)));
+ }
+
+ bool IsLiteralSite() {
+ // If transition_info is a smi, then it represents an ElementsKind
+ // for a constructed array. Otherwise, it must be a boilerplate
+ // for an array literal.
+ return transition_info()->IsJSArray();
+ }
+
+ DECLARE_PRINTER(AllocationSite)
+ DECLARE_VERIFIER(AllocationSite)
+
+ static inline AllocationSite* cast(Object* obj);
static inline AllocationSiteMode GetMode(
ElementsKind boilerplate_elements_kind);
static inline AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
- static const int kPayloadOffset = HeapObject::kHeaderSize;
- static const int kSize = kPayloadOffset + kPointerSize;
- static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+ static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
+ static const int kWeakNextOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kSize = kWeakNextOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
+ kTransitionInfoOffset + kPointerSize,
+ kSize> BodyDescriptor;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
+};
+
+
+class AllocationMemento: public Struct {
+ public:
+ static const int kAllocationSiteOffset = HeapObject::kHeaderSize;
+ static const int kSize = kAllocationSiteOffset + kPointerSize;
+
+ DECL_ACCESSORS(allocation_site, Object)
+
+ bool IsValid() { return allocation_site()->IsAllocationSite(); }
+ AllocationSite* GetAllocationSite() {
+ ASSERT(IsValid());
+ return AllocationSite::cast(allocation_site());
+ }
+
+ DECLARE_PRINTER(AllocationMemento)
+ DECLARE_VERIFIER(AllocationMemento)
+
+ // Returns NULL if no AllocationMemento is available for object.
+ static AllocationMemento* FindForJSObject(JSObject* object);
+ static inline AllocationMemento* cast(Object* obj);
- bool GetElementsKindPayload(ElementsKind* kind);
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSiteInfo);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationMemento);
};
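transition_info is deliberately overloaded: a Smi payload packs an ElementsKind for a constructed array, while a JSArray payload is the boilerplate for an array literal, and IsLiteralSite disambiguates purely by the payload's type. The same dual-use field sketched as a tagged union (TransitionInfo and Boilerplate are illustrative, not V8's Smi/JSArray tagging):

    struct Boilerplate { int elements_kind; };

    // One field, two meanings, told apart by a tag, just as
    // AllocationSite::transition_info is told apart by IsJSArray().
    struct TransitionInfo {
      bool is_literal_site;
      union {
        int elements_kind;         // constructed array: packed kind
        Boilerplate* boilerplate;  // array literal: boilerplate object
      };
    };

    int GetElementsKind(const TransitionInfo& info) {
      return info.is_literal_site ? info.boilerplate->elements_kind
                                  : info.elements_kind;
    }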
@@ -8586,6 +8675,14 @@ class PropertyCell: public Cell {
// property.
DECL_ACCESSORS(dependent_code, DependentCode)
+ // Sets the value of the cell and updates the type field to be the union
+ // of the cell's current type and the value's type. If the write changes
+ // the type of the cell's contents, code that depends on the cell will be
+ // deoptimized.
+ MUST_USE_RESULT MaybeObject* SetValueInferType(
+ Object* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
// Casting.
static inline PropertyCell* cast(Object* obj);
@@ -8613,6 +8710,9 @@ class PropertyCell: public Cell {
void AddDependentCode(Handle<Code> code);
+ static Type* UpdateType(Handle<PropertyCell> cell,
+ Handle<Object> value);
+
private:
DECL_ACCESSORS(type_raw, Object)
DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
@@ -8665,13 +8765,6 @@ class JSProxy: public JSReceiver {
StrictModeFlag strict_mode,
bool* done);
- MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
- Name* name,
- DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
- uint32_t index,
- DeleteMode mode);
-
MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
JSReceiver* receiver,
Name* name);
@@ -8715,6 +8808,15 @@ class JSProxy: public JSReceiver {
kSize> BodyDescriptor;
private:
+ friend class JSReceiver;
+
+ static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> object,
+ Handle<Name> name,
+ DeleteMode mode);
+ static Handle<Object> DeleteElementWithHandler(Handle<JSProxy> object,
+ uint32_t index,
+ DeleteMode mode);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -8794,8 +8896,8 @@ class JSMap: public JSObject {
};
-// The JSWeakMap describes EcmaScript Harmony weak maps
-class JSWeakMap: public JSObject {
+// Base class for both JSWeakMap and JSWeakSet.
+class JSWeakCollection: public JSObject {
public:
// [table]: the backing hash table mapping keys to values.
DECL_ACCESSORS(table, Object)
@@ -8803,6 +8905,18 @@ class JSWeakMap: public JSObject {
// [next]: linked list of encountered weak maps during GC.
DECL_ACCESSORS(next, Object)
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kNextOffset = kTableOffset + kPointerSize;
+ static const int kSize = kNextOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
+};
+
+
+// The JSWeakMap describes ECMAScript Harmony weak maps.
+class JSWeakMap: public JSWeakCollection {
+ public:
// Casting.
static inline JSWeakMap* cast(Object* obj);
@@ -8810,15 +8924,26 @@ class JSWeakMap: public JSObject {
DECLARE_PRINTER(JSWeakMap)
DECLARE_VERIFIER(JSWeakMap)
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kNextOffset = kTableOffset + kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
};
+// The JSWeakSet describes ECMAScript Harmony weak sets.
+class JSWeakSet: public JSWeakCollection {
+ public:
+ // Casting.
+ static inline JSWeakSet* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSWeakSet)
+ DECLARE_VERIFIER(JSWeakSet)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakSet);
+};
+
+
class JSArrayBuffer: public JSObject {
public:
// [backing_store]: backing memory for this array
@@ -8921,6 +9046,9 @@ class JSTypedArray: public JSArrayBufferView {
static const int kLengthOffset = kViewSize + kPointerSize;
static const int kSize = kLengthOffset + kPointerSize;
+ static const int kSizeWithInternalFields =
+ kSize + v8::ArrayBufferView::kInternalFieldCount * kPointerSize;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
};
@@ -8940,6 +9068,9 @@ class JSDataView: public JSArrayBufferView {
static const int kSize = kViewSize;
+ static const int kSizeWithInternalFields =
+ kSize + v8::ArrayBufferView::kInternalFieldCount * kPointerSize;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
};
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index b320299748..df568ef1bb 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -371,6 +371,7 @@ const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
return result;
}
+
Scanner::Location ScriptDataImpl::MessageLocation() {
int beg_pos = Read(PreparseDataConstants::kMessageStartPos);
int end_pos = Read(PreparseDataConstants::kMessageEndPos);
@@ -562,6 +563,7 @@ Parser::Parser(CompilationInfo* info)
set_allow_lazy(false); // Must be explicitly enabled.
set_allow_generators(FLAG_harmony_generators);
set_allow_for_of(FLAG_harmony_iteration);
+ set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
}
@@ -1562,24 +1564,18 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// For global const variables we bind the proxy to a variable.
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(declaration_scope,
- name,
- mode,
- true,
- kind,
- kNeedsInitialization);
+ var = new(zone()) Variable(
+ declaration_scope, name, mode, true, kind,
+ kNeedsInitialization, proxy->interface());
} else if (declaration_scope->is_eval_scope() &&
declaration_scope->is_classic_mode()) {
// For variable declarations in a non-strict eval scope the proxy is bound
// to a lookup variable to force a dynamic declaration using the
// DeclareContextSlot runtime function.
Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(declaration_scope,
- name,
- mode,
- true,
- kind,
- declaration->initialization());
+ var = new(zone()) Variable(
+ declaration_scope, name, mode, true, kind,
+ declaration->initialization(), proxy->interface());
var->AllocateTo(Variable::LOOKUP, -1);
resolve = true;
}
@@ -3578,7 +3574,8 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
ASSERT(scanner().is_literal_ascii());
double value = StringToDouble(isolate()->unicode_cache(),
scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTALS);
+ ALLOW_HEX | ALLOW_OCTAL |
+ ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
result = factory()->NewNumberLiteral(value);
break;
}
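The widened flag set separates four literal forms: 0x/0X hex, explicit 0o/0O octal, 0b/0B binary, and the legacy leading-zero implicit octal. A sketch of the corresponding prefix dispatch (integer-only and illustrative; the real StringToDouble also handles decimals, signs, and whitespace):

    #include <cstdlib>

    enum {
      ALLOW_HEX = 1, ALLOW_OCTAL = 2,
      ALLOW_IMPLICIT_OCTAL = 4, ALLOW_BINARY = 8
    };

    // Returns -1 when the literal form is not permitted by |flags|.
    long ParseIntegerLiteral(const char* s, int flags) {
      if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'))
        return (flags & ALLOW_HEX) ? strtol(s + 2, NULL, 16) : -1;
      if (s[0] == '0' && (s[1] == 'o' || s[1] == 'O'))
        return (flags & ALLOW_OCTAL) ? strtol(s + 2, NULL, 8) : -1;
      if (s[0] == '0' && (s[1] == 'b' || s[1] == 'B'))
        return (flags & ALLOW_BINARY) ? strtol(s + 2, NULL, 2) : -1;
      if (s[0] == '0' && s[1] != '\0')
        return (flags & ALLOW_IMPLICIT_OCTAL) ? strtol(s + 1, NULL, 8) : -1;
      return strtol(s, NULL, 10);
    }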
@@ -3769,6 +3766,7 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
return isolate()->factory()->uninitialized_value();
}
+
// Validation per 11.1.5 Object Initialiser
class ObjectLiteralPropertyChecker {
public:
@@ -4030,7 +4028,8 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
ASSERT(scanner().is_literal_ascii());
double value = StringToDouble(isolate()->unicode_cache(),
scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTALS);
+ ALLOW_HEX | ALLOW_OCTAL |
+ ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
key = factory()->NewNumberLiteral(value);
break;
}
@@ -4585,6 +4584,8 @@ preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
reusable_preparser_->set_allow_lazy(true);
reusable_preparser_->set_allow_generators(allow_generators());
reusable_preparser_->set_allow_for_of(allow_for_of());
+ reusable_preparser_->set_allow_harmony_numeric_literals(
+ allow_harmony_numeric_literals());
}
preparser::PreParser::PreParseResult result =
reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
@@ -4962,6 +4963,7 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
}
+
// ----------------------------------------------------------------------------
// Regular expressions
@@ -5032,6 +5034,7 @@ bool RegExpParser::simple() {
return simple_;
}
+
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
failed_ = true;
*error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
@@ -5852,6 +5855,7 @@ ScriptDataImpl* PreParserApi::PreParse(Utf16CharacterStream* source) {
preparser.set_allow_generators(FLAG_harmony_generators);
preparser.set_allow_for_of(FLAG_harmony_iteration);
preparser.set_allow_harmony_scoping(FLAG_harmony_scoping);
+ preparser.set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
scanner.Initialize(source);
preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
if (result == preparser::PreParser::kPreParseStackOverflow) {
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index c3a7edfd9c..68a74b78a9 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -438,6 +438,9 @@ class Parser BASE_EMBEDDED {
bool allow_harmony_scoping() { return scanner().HarmonyScoping(); }
bool allow_generators() const { return allow_generators_; }
bool allow_for_of() const { return allow_for_of_; }
+ bool allow_harmony_numeric_literals() {
+ return scanner().HarmonyNumericLiterals();
+ }
void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
@@ -447,6 +450,9 @@ class Parser BASE_EMBEDDED {
}
void set_allow_generators(bool allow) { allow_generators_ = allow; }
void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
+ void set_allow_harmony_numeric_literals(bool allow) {
+ scanner().SetHarmonyNumericLiterals(allow);
+ }
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index bda9f923fd..51321c7b33 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -67,6 +67,7 @@ void OS::PostSetUp() {
POSIXPostSetUp();
}
+
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // Nothing special about Cygwin.
}
@@ -573,57 +574,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
}
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class CygwinMutex : public Mutex {
- public:
- CygwinMutex() {
- pthread_mutexattr_t attrs;
- memset(&attrs, 0, sizeof(attrs));
-
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- }
-
- virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new CygwinMutex();
-}
-
-
class CygwinSemaphore : public Semaphore {
public:
explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index e2c2c42de5..c771cd3be0 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -196,27 +196,7 @@ void OS::DebugBreak() {
void OS::DumpBacktrace() {
- void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
- char** symbols = backtrace_symbols(trace, size);
- fprintf(stderr, "\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- fprintf(stderr, "(empty)\n");
- } else if (symbols == NULL) {
- fprintf(stderr, "(no symbols)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- fprintf(stderr, "%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
- fprintf(stderr, "%s\n", mangled);
- } else {
- fprintf(stderr, "??\n");
- }
- }
- }
- fflush(stderr);
- free(symbols);
+ POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
}
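The per-platform backtrace boilerplate removed here collapses into one helper that takes the two libc entry points as non-type template arguments, so each POSIX port instantiates it with its own backtrace/backtrace_symbols. A sketch of that shape (the real POSIXBacktraceHelper's layout is assumed; requires a platform with <execinfo.h>):

    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    // One printing routine, parameterized over the backtrace functions,
    // instead of a copy per platform file.
    template <int (*Backtrace)(void**, int),
              char** (*BacktraceSymbols)(void* const*, int)>
    struct BacktraceHelper {
      static void Dump() {
        void* trace[100];
        int size = Backtrace(trace, 100);
        char** symbols = BacktraceSymbols(trace, size);
        fprintf(stderr, "\n==== C stack trace ====\n\n");
        for (int i = 1; i < size; ++i) {
          fprintf(stderr, "%2d: %s\n", i,
                  symbols != NULL ? symbols[i] : "??");
        }
        free(symbols);
      }
    };

    // Usage, as in the diff:
    //   BacktraceHelper<backtrace, backtrace_symbols>::Dump();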
@@ -318,30 +298,7 @@ void OS::SignalCodeMovingGC() {
int OS::StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
+ return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
}
@@ -568,56 +525,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
}
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class FreeBSDMutex : public Mutex {
- public:
- FreeBSDMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new FreeBSDMutex();
-}
-
-
class FreeBSDSemaphore : public Semaphore {
public:
explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 2c6a36c37e..613d2434b9 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -146,6 +146,9 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
case VFP3:
search_string = "vfpv3";
break;
+ case NEON:
+ search_string = "neon";
+ break;
case ARMv7:
search_string = "ARMv7";
break;
@@ -200,6 +203,36 @@ CpuImplementer OS::GetCpuImplementer() {
}
+CpuPart OS::GetCpuPart(CpuImplementer implementer) {
+ static bool use_cached_value = false;
+ static CpuPart cached_value = CPU_UNKNOWN;
+ if (use_cached_value) {
+ return cached_value;
+ }
+ if (implementer == ARM_IMPLEMENTER) {
+ if (CPUInfoContainsString("CPU part\t: 0xc0f")) {
+ cached_value = CORTEX_A15;
+ } else if (CPUInfoContainsString("CPU part\t: 0xc0c")) {
+ cached_value = CORTEX_A12;
+ } else if (CPUInfoContainsString("CPU part\t: 0xc09")) {
+ cached_value = CORTEX_A9;
+ } else if (CPUInfoContainsString("CPU part\t: 0xc08")) {
+ cached_value = CORTEX_A8;
+ } else if (CPUInfoContainsString("CPU part\t: 0xc07")) {
+ cached_value = CORTEX_A7;
+ } else if (CPUInfoContainsString("CPU part\t: 0xc05")) {
+ cached_value = CORTEX_A5;
+ } else {
+ cached_value = CPU_UNKNOWN;
+ }
+ } else {
+ cached_value = CPU_UNKNOWN;
+ }
+ use_cached_value = true;
+ return cached_value;
+}
+
+
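GetCpuPart hides a one-time /proc/cpuinfo probe behind two statics; since the CPU cannot change underneath a running process, the first answer is cached for every later call. The same cache-on-first-call shape in isolation (ProbeCpuPart is a hypothetical detector; like the original, this assumes the first call happens before any concurrent use):

    #include <cstdio>
    #include <cstring>

    enum CpuPart { CPU_UNKNOWN, CORTEX_A9, CORTEX_A15 };

    // Hypothetical probe: scan /proc/cpuinfo once for the part code.
    static CpuPart ProbeCpuPart() {
      FILE* f = fopen("/proc/cpuinfo", "r");
      if (f == NULL) return CPU_UNKNOWN;
      char line[256];
      CpuPart part = CPU_UNKNOWN;
      while (fgets(line, sizeof(line), f) != NULL) {
        if (strstr(line, "CPU part\t: 0xc0f") != NULL) part = CORTEX_A15;
        else if (strstr(line, "CPU part\t: 0xc09") != NULL) part = CORTEX_A9;
      }
      fclose(f);
      return part;
    }

    CpuPart GetCpuPartCached() {
      static bool cached = false;
      static CpuPart value = CPU_UNKNOWN;
      if (!cached) {  // only the first call does the real work
        value = ProbeCpuPart();
        cached = true;
      }
      return value;
    }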
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
// the Floating Point ABI used (PCS stands for Procedure Call Standard).
@@ -418,32 +451,9 @@ void OS::DebugBreak() {
void OS::DumpBacktrace() {
+ // backtrace is a glibc extension.
#if defined(__GLIBC__) && !defined(__UCLIBC__)
- void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
- char** symbols = backtrace_symbols(trace, size);
- fprintf(stderr, "\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- fprintf(stderr, "(empty)\n");
- } else if (symbols == NULL) {
- fprintf(stderr, "(no symbols)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- fprintf(stderr, "%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
- int status;
- size_t length;
- char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
- fprintf(stderr, "%s\n", demangled ? demangled : mangled);
- free(demangled);
- } else {
- fprintf(stderr, "??\n");
- }
- }
- }
- fflush(stderr);
- free(symbols);
+ POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
#endif
}
@@ -584,7 +594,13 @@ void OS::SignalCodeMovingGC() {
}
void* addr = mmap(OS::GetRandomMmapAddr(),
size,
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ PROT_READ,
+#else
PROT_READ | PROT_EXEC,
+#endif
MAP_PRIVATE,
fileno(f),
0);
@@ -597,33 +613,10 @@ void OS::SignalCodeMovingGC() {
int OS::StackWalk(Vector<OS::StackFrame> frames) {
// backtrace is a glibc extension.
#if defined(__GLIBC__) && !defined(__UCLIBC__)
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-#else // defined(__GLIBC__) && !defined(__UCLIBC__)
+ return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
+#else
return 0;
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
+#endif
}
@@ -730,7 +723,13 @@ void* VirtualMemory::ReserveRegion(size_t size) {
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ int prot = PROT_READ | PROT_WRITE;
+#else
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+#endif
if (MAP_FAILED == mmap(base,
size,
prot,
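
The __native_client__ blocks in this patch all make the same substitution: pages that would normally be mapped executable are mapped without PROT_EXEC, because the Native Client port runs V8 through an interpreter and never jumps into generated code. The pattern in isolation, as a hedged sketch (only the __native_client__ macro is real; the helper name is illustrative):

#include <sys/mman.h>

// Pick the protection bits for a page that will hold generated code.
// Under Native Client, PROT_EXEC is deliberately dropped because the
// code is only ever read by the interpreter, never executed directly.
static int CodePageProtection(bool is_executable) {
#if defined(__native_client__)
  (void) is_executable;  // Unused: NaCl never maps code pages executable.
  return PROT_READ | PROT_WRITE;
#else
  return PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
#endif
}
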
@@ -860,56 +859,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
}
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class LinuxMutex : public Mutex {
- public:
- LinuxMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new LinuxMutex();
-}
-
-
class LinuxSemaphore : public Semaphore {
public:
explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 21e9c7f516..097691be07 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -53,6 +53,7 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
+#include <cxxabi.h>
#undef MAP_TYPE
@@ -189,7 +190,10 @@ void OS::DebugBreak() {
void OS::DumpBacktrace() {
- // Currently unsupported.
+  // If weak link to execinfo lib has failed, i.e. because we are on 10.4, abort.
+ if (backtrace == NULL) return;
+
+ POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
}
@@ -315,34 +319,9 @@ double OS::LocalTimeOffset() {
int OS::StackWalk(Vector<StackFrame> frames) {
// If weak link to execinfo lib has failed, i.e. because we are on 10.4, abort.
- if (backtrace == NULL)
- return 0;
-
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text,
- kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
+ if (backtrace == NULL) return 0;
- return frames_count;
+ return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
}
@@ -596,6 +575,7 @@ static void InitializeTlsBaseOffset() {
Release_Store(&tls_base_offset_initialized, 1);
}
+
static void CheckFastTls(Thread::LocalStorageKey key) {
void* expected = reinterpret_cast<void*>(0x1234CAFE);
Thread::SetThreadLocal(key, expected);
@@ -651,45 +631,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
}
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class MacOSMutex : public Mutex {
- public:
- MacOSMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() { return pthread_mutex_lock(&mutex_); }
- virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new MacOSMutex();
-}
-
-
class MacOSSemaphore : public Semaphore {
public:
explicit MacOSSemaphore(int count) {
diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc
index 1b481f4b3a..dd5a3ddb32 100644
--- a/deps/v8/src/platform-nullos.cc
+++ b/deps/v8/src/platform-nullos.cc
@@ -220,6 +220,11 @@ CpuImplementer OS::GetCpuImplementer() {
}
+CpuPart OS::GetCpuPart(CpuImplementer implementer) {
+ UNIMPLEMENTED();
+}
+
+
bool OS::ArmCpuHasFeature(CpuFeature feature) {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index b722e31e0c..a40df48d81 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -607,56 +607,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
}
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class OpenBSDMutex : public Mutex {
- public:
- OpenBSDMutex() {
- pthread_mutexattr_t attrs;
- int result = pthread_mutexattr_init(&attrs);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attrs);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() {
- int result = pthread_mutex_lock(&mutex_);
- return result;
- }
-
- virtual int Unlock() {
- int result = pthread_mutex_unlock(&mutex_);
- return result;
- }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new OpenBSDMutex();
-}
-
-
class OpenBSDSemaphore : public Semaphore {
public:
explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
@@ -720,6 +670,7 @@ bool OpenBSDSemaphore::Wait(int timeout) {
}
}
+
Semaphore* OS::CreateSemaphore(int count) {
return new OpenBSDSemaphore(count);
}
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index fecee9c7f5..9d3d7695f0 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -31,6 +31,8 @@
#include "platform-posix.h"
+#include <pthread.h>
+#include <sched.h> // for sched_yield
#include <unistd.h>
#include <errno.h>
#include <time.h>
@@ -82,7 +84,13 @@ intptr_t OS::CommitPageSize() {
#ifndef __CYGWIN__
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter, so
+ // code pages don't need PROT_EXEC.
+ mprotect(address, size, PROT_READ);
+#else
mprotect(address, size, PROT_READ | PROT_EXEC);
+#endif
}
@@ -115,26 +123,11 @@ void* OS::GetRandomMmapAddr() {
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
uint32_t raw_addr = V8::RandomPrivate(isolate);
-
- raw_addr &= 0x3ffff000;
-
-# ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-# else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
+ raw_addr &= 0x3ffff000;
raw_addr += 0x20000000;
-# endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
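
After this hunk, every 32-bit POSIX target shares one mmap hint window, 0x20000000 through 0x5ffff000, instead of carrying a Solaris-specific branch. A sketch of the resulting computation, assuming the caller supplies the random 32-bit value:

#include <stdint.h>

// Map a random 32-bit value into a page-aligned hint inside the 1 GB
// window starting at 0x20000000. mmap() treats the address only as a
// hint, so colliding with an existing mapping is harmless.
void* RandomMmapHint(uint32_t random_bits) {
  uint32_t addr = random_bits & 0x3ffff000u;  // 4 KB aligned, below 1 GB.
  addr += 0x20000000u;                        // Shift into the window.
  return reinterpret_cast<void*>(static_cast<uintptr_t>(addr));
}
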
@@ -341,6 +334,7 @@ static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
+
// Initialize to library version so we can call this at any time during startup.
static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
@@ -355,7 +349,26 @@ void OS::MemMove(void* dest, const void* src, size_t size) {
(*memmove_function)(dest, src, size);
}
-#endif // V8_TARGET_ARCH_IA32
+#elif defined(V8_HOST_ARCH_ARM)
+void OS::MemCopyUint16Uint8Wrapper(uint16_t* dest,
+ const uint8_t* src,
+ size_t chars) {
+ uint16_t *limit = dest + chars;
+ while (dest < limit) {
+ *dest++ = static_cast<uint16_t>(*src++);
+ }
+}
+
+
+OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
+OS::MemCopyUint16Uint8Function OS::memcopy_uint16_uint8_function =
+ &OS::MemCopyUint16Uint8Wrapper;
+// Defined in codegen-arm.cc.
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+ OS::MemCopyUint8Function stub);
+OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
+ OS::MemCopyUint16Uint8Function stub);
+#endif
void POSIXPostSetUp() {
@@ -364,6 +377,11 @@ void POSIXPostSetUp() {
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
}
+#elif defined(V8_HOST_ARCH_ARM)
+ OS::memcopy_uint8_function =
+ CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
+ OS::memcopy_uint16_uint8_function =
+ CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper);
#endif
init_fast_sin_function();
init_fast_cos_function();
@@ -373,6 +391,7 @@ void POSIXPostSetUp() {
init_fast_sqrt_function();
}
+
// ----------------------------------------------------------------------------
// POSIX string support.
//
@@ -388,6 +407,57 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
// ----------------------------------------------------------------------------
+// POSIX thread support.
+//
+
+void Thread::YieldCPU() {
+ sched_yield();
+}
+
+
+class POSIXMutex : public Mutex {
+ public:
+ POSIXMutex() {
+ pthread_mutexattr_t attr;
+ memset(&attr, 0, sizeof(attr));
+ int result = pthread_mutexattr_init(&attr);
+ ASSERT(result == 0);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT(result == 0);
+ result = pthread_mutex_init(&mutex_, &attr);
+ ASSERT(result == 0);
+ result = pthread_mutexattr_destroy(&attr);
+ ASSERT(result == 0);
+ USE(result);
+ }
+
+ virtual ~POSIXMutex() { pthread_mutex_destroy(&mutex_); }
+
+ virtual int Lock() { return pthread_mutex_lock(&mutex_); }
+
+ virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
+
+ virtual bool TryLock() {
+ int result = pthread_mutex_trylock(&mutex_);
+ // Return false if the lock is busy and locking failed.
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT(result == 0); // Verify no other errors.
+ return true;
+ }
+
+ private:
+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+ return new POSIXMutex();
+}
+
+
+// ----------------------------------------------------------------------------
// POSIX socket support.
//
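
This hunk completes the consolidation: Thread::YieldCPU() and the recursive mutex previously duplicated across the FreeBSD, Linux, macOS, OpenBSD and Solaris files now live in one POSIX implementation. The PTHREAD_MUTEX_RECURSIVE attribute is the load-bearing detail, since V8 may re-lock a mutex it already holds on the same thread. A small self-contained sketch of that behaviour with raw pthreads (illustrative only, not part of the patch):

#include <pthread.h>
#include <cstdio>

int main() {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_t mu;
  pthread_mutex_init(&mu, &attr);
  pthread_mutexattr_destroy(&attr);  // The mutex keeps its own copy.

  // With a recursive mutex the second lock from the same thread returns 0
  // instead of deadlocking; each lock needs a matching unlock.
  pthread_mutex_lock(&mu);
  int second = pthread_mutex_lock(&mu);
  std::printf("relock returned %d\n", second);  // Prints 0.
  pthread_mutex_unlock(&mu);
  pthread_mutex_unlock(&mu);

  pthread_mutex_destroy(&mu);
  return 0;
}
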
diff --git a/deps/v8/src/platform-posix.h b/deps/v8/src/platform-posix.h
index 7a982ed2ef..bcc2b7e74e 100644
--- a/deps/v8/src/platform-posix.h
+++ b/deps/v8/src/platform-posix.h
@@ -28,12 +28,82 @@
#ifndef V8_PLATFORM_POSIX_H_
#define V8_PLATFORM_POSIX_H_
+#if !defined(ANDROID)
+#include <cxxabi.h>
+#endif
+#include <stdio.h>
+
+#include "platform.h"
+
namespace v8 {
namespace internal {
// Used by platform implementation files during OS::PostSetUp().
void POSIXPostSetUp();
+// Used by platform implementation files during OS::DumpBacktrace()
+// and OS::StackWalk().
+template<int (*backtrace)(void**, int),
+ char** (*backtrace_symbols)(void* const*, int)>
+struct POSIXBacktraceHelper {
+ static void DumpBacktrace() {
+ void* trace[100];
+ int size = backtrace(trace, ARRAY_SIZE(trace));
+ char** symbols = backtrace_symbols(trace, size);
+ fprintf(stderr, "\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ fprintf(stderr, "(empty)\n");
+ } else if (symbols == NULL) {
+ fprintf(stderr, "(no symbols)\n");
+ } else {
+ for (int i = 1; i < size; ++i) {
+ fprintf(stderr, "%2d: ", i);
+ char mangled[201];
+ if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {// NOLINT
+ char* demangled = NULL;
+#if !defined(ANDROID)
+ int status;
+ size_t length;
+ demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+#endif
+ fprintf(stderr, "%s\n", demangled != NULL ? demangled : mangled);
+ free(demangled);
+ } else {
+ fprintf(stderr, "??\n");
+ }
+ }
+ }
+ fflush(stderr);
+ free(symbols);
+ }
+
+ static int StackWalk(Vector<OS::StackFrame> frames) {
+ int frames_size = frames.length();
+ ScopedVector<void*> addresses(frames_size);
+
+ int frames_count = backtrace(addresses.start(), frames_size);
+
+ char** symbols = backtrace_symbols(addresses.start(), frames_count);
+ if (symbols == NULL) {
+ return OS::kStackWalkError;
+ }
+
+ for (int i = 0; i < frames_count; i++) {
+ frames[i].address = addresses[i];
+ // Format a text representation of the frame based on the information
+ // available.
+ OS::SNPrintF(MutableCStrVector(frames[i].text, OS::kStackWalkMaxTextLen),
+ "%s", symbols[i]);
+ // Make sure line termination is in place.
+ frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
+ }
+
+ free(symbols);
+
+ return frames_count;
+ }
+};
+
} } // namespace v8::internal
#endif // V8_PLATFORM_POSIX_H_
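
POSIXBacktraceHelper takes the two glibc-style functions as non-type template parameters, so each platform binds its own symbols at compile time: Linux and FreeBSD pass the real backtrace()/backtrace_symbols(), while macOS passes its weakly linked ones and checks them for NULL first. A hedged usage sketch on a glibc host (assumes platform-posix.h is on the include path):

#include <execinfo.h>        // backtrace()/backtrace_symbols(), a glibc extension.
#include "platform-posix.h"  // POSIXBacktraceHelper, as defined above.

// The template arguments fix the functions at compile time, so no
// function-pointer indirection survives in the generated code.
void DumpCurrentStack() {
  v8::internal::POSIXBacktraceHelper<backtrace, backtrace_symbols>
      ::DumpBacktrace();
}
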
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 4b0094fb22..3c4df665f0 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -38,7 +38,6 @@
#include <ucontext.h> // walkstack(), getcontext()
#include <dlfcn.h> // dladdr
#include <pthread.h>
-#include <sched.h> // for sched_yield
#include <semaphore.h>
#include <time.h>
#include <sys/time.h> // gettimeofday(), timeradd()
@@ -539,46 +538,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
}
-void Thread::YieldCPU() {
- sched_yield();
-}
-
-
-class SolarisMutex : public Mutex {
- public:
- SolarisMutex() {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mutex_, &attr);
- }
-
- ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
-
- int Lock() { return pthread_mutex_lock(&mutex_); }
-
- int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_;
-};
-
-
-Mutex* OS::CreateMutex() {
- return new SolarisMutex();
-}
-
-
class SolarisSemaphore : public Semaphore {
public:
explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 191376099c..80bcaf92af 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -152,6 +152,7 @@ static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
+
// Initialize to library version so we can call this at any time during startup.
static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
@@ -178,6 +179,7 @@ void init_modulo_function() {
modulo_function = CreateModuloFunction();
}
+
double modulo(double x, double y) {
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
@@ -321,6 +323,7 @@ class Time {
TimeStamp time_;
};
+
// Static variables.
bool Time::tz_initialized_ = false;
TIME_ZONE_INFORMATION Time::tzinfo_;
@@ -616,6 +619,7 @@ double OS::TimeCurrentMillis() {
return t.ToJSTime();
}
+
// Returns the tickcounter based on timeGetTime.
int64_t OS::Ticks() {
return timeGetTime() * 1000; // Convert to microseconds.
@@ -1449,6 +1453,7 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
return frames_count;
}
+
// Restore warnings to previous settings.
#pragma warning(pop)
@@ -1479,6 +1484,10 @@ double OS::nan_value() {
int OS::ActivationFrameAlignment() {
#ifdef _WIN64
return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#elif defined(__MINGW32__)
+  // With gcc 4.4 the tree vectorization optimizer can generate code
+  // that requires 16-byte alignment, such as movdqa on x86.
+ return 16;
#else
return 8; // Floating-point math runs faster with 8-byte alignment.
#endif
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 24d21cb3ae..211be39d00 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -315,6 +315,9 @@ class OS {
// Support runtime detection of Cpu implementer
static CpuImplementer GetCpuImplementer();
+  // Support runtime detection of the CPU part on ARM CPUs.
+ static CpuPart GetCpuPart(CpuImplementer implementer);
+
// Support runtime detection of VFP3 on ARM CPUs.
static bool ArmCpuHasFeature(CpuFeature feature);
@@ -343,7 +346,42 @@ class OS {
static void MemCopy(void* dest, const void* src, size_t size) {
MemMove(dest, src, size);
}
-#else // V8_TARGET_ARCH_IA32
+#elif defined(V8_HOST_ARCH_ARM)
+ typedef void (*MemCopyUint8Function)(uint8_t* dest,
+ const uint8_t* src,
+ size_t size);
+ static MemCopyUint8Function memcopy_uint8_function;
+ static void MemCopyUint8Wrapper(uint8_t* dest,
+ const uint8_t* src,
+ size_t chars) {
+ memcpy(dest, src, chars);
+ }
+ // For values < 16, the assembler function is slower than the inlined C code.
+ static const int kMinComplexMemCopy = 16;
+ static void MemCopy(void* dest, const void* src, size_t size) {
+ (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ size);
+ }
+ static void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+ }
+
+ typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest,
+ const uint8_t* src,
+ size_t size);
+ static MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
+ static void MemCopyUint16Uint8Wrapper(uint16_t* dest,
+ const uint8_t* src,
+ size_t chars);
+ // For values < 12, the assembler function is slower than the inlined C code.
+ static const int kMinComplexConvertMemCopy = 12;
+ static void MemCopyUint16Uint8(uint16_t* dest,
+ const uint8_t* src,
+ size_t size) {
+ (*memcopy_uint16_uint8_function)(dest, src, size);
+ }
+#else
// Copy memory area to disjoint memory area.
static void MemCopy(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
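
On ARM, MemCopy and the widening MemCopyUint16Uint8 now dispatch through function pointers that start out at portable C wrappers and are repointed at assembler stubs during POSIXPostSetUp(); the kMinComplexMemCopy thresholds record where the stub overtakes the plain loop. The widening copy itself is simple zero-extension, as in this sketch mirroring MemCopyUint16Uint8Wrapper:

#include <stdint.h>
#include <stddef.h>

// Widen a one-byte-per-character (Latin-1) buffer into a UTF-16 buffer.
// Each byte is zero-extended into a uint16_t; no decoding takes place.
void WidenUint8ToUint16(uint16_t* dest, const uint8_t* src, size_t chars) {
  const uint16_t* limit = dest + chars;
  while (dest < limit) {
    *dest++ = static_cast<uint16_t>(*src++);
  }
}
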
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index 287ad6698a..8e08848285 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -86,6 +86,7 @@ void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
}
}
+
// ----------------------------------------------------------------------------
// PartialParserRecorder - Record both function entries and symbols.
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 3268e3c508..36a94a3315 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -1685,6 +1685,7 @@ PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
return result;
}
+
bool PreParser::peek_any_identifier() {
i::Token::Value next = peek();
return next == i::Token::IDENTIFIER ||
@@ -1698,6 +1699,7 @@ int DuplicateFinder::AddAsciiSymbol(i::Vector<const char> key, int value) {
return AddSymbol(i::Vector<const byte>::cast(key), true, value);
}
+
int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) {
return AddSymbol(i::Vector<const byte>::cast(key), false, value);
}
@@ -1722,7 +1724,8 @@ int DuplicateFinder::AddNumber(i::Vector<const char> key, int value) {
return AddAsciiSymbol(key, value);
}
- int flags = i::ALLOW_HEX | i::ALLOW_OCTALS;
+ int flags = i::ALLOW_HEX | i::ALLOW_OCTAL | i::ALLOW_IMPLICIT_OCTAL |
+ i::ALLOW_BINARY;
double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
int length;
const char* string;
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 41907d12eb..faddecc562 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -141,6 +141,9 @@ class PreParser {
bool allow_harmony_scoping() const { return scanner_->HarmonyScoping(); }
bool allow_generators() const { return allow_generators_; }
bool allow_for_of() const { return allow_for_of_; }
+ bool allow_harmony_numeric_literals() const {
+ return scanner_->HarmonyNumericLiterals();
+ }
void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
@@ -150,6 +153,9 @@ class PreParser {
}
void set_allow_generators(bool allow) { allow_generators_ = allow; }
void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
+ void set_allow_harmony_numeric_literals(bool allow) {
+ scanner_->SetHarmonyNumericLiterals(allow);
+ }
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 20c1aec731..d92085ac32 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -45,7 +45,6 @@ const char* StringsStorage::GetFunctionName(const char* name) {
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* name,
- int security_token_id,
const char* name_prefix,
const char* resource_name,
int line_number)
@@ -57,7 +56,6 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
line_number_(line_number),
shared_id_(0),
script_id_(v8::Script::kNoScriptId),
- security_token_id_(security_token_id),
no_frame_ranges_(NULL) {
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 78b05c57e4..cc86724437 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -41,60 +41,6 @@ namespace v8 {
namespace internal {
-TokenEnumerator::TokenEnumerator()
- : token_locations_(4),
- token_removed_(4) {
-}
-
-
-TokenEnumerator::~TokenEnumerator() {
- Isolate* isolate = Isolate::Current();
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (!token_removed_[i]) {
- isolate->global_handles()->ClearWeakness(token_locations_[i]);
- isolate->global_handles()->Destroy(token_locations_[i]);
- }
- }
-}
-
-
-int TokenEnumerator::GetTokenId(Object* token) {
- Isolate* isolate = Isolate::Current();
- if (token == NULL) return TokenEnumerator::kNoSecurityToken;
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (*token_locations_[i] == token && !token_removed_[i]) return i;
- }
- Handle<Object> handle = isolate->global_handles()->Create(token);
- // handle.location() points to a memory cell holding a pointer
- // to a token object in the V8's heap.
- isolate->global_handles()->MakeWeak(handle.location(),
- this,
- TokenRemovedCallback);
- token_locations_.Add(handle.location());
- token_removed_.Add(false);
- return token_locations_.length() - 1;
-}
-
-
-void TokenEnumerator::TokenRemovedCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void* parameter) {
- reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
- Utils::OpenPersistent(handle).location());
- handle->Dispose(isolate);
-}
-
-
-void TokenEnumerator::TokenRemoved(Object** token_location) {
- for (int i = 0; i < token_locations_.length(); ++i) {
- if (token_locations_[i] == token_location && !token_removed_[i]) {
- token_removed_[i] = true;
- return;
- }
- }
-}
-
-
StringsStorage::StringsStorage()
: names_(StringsMatch) {
}
@@ -274,12 +220,11 @@ double ProfileNode::GetTotalMillis() const {
void ProfileNode::Print(int indent) {
- OS::Print("%5u %5u %*c %s%s [%d] #%d %d",
+ OS::Print("%5u %5u %*c %s%s #%d %d",
total_ticks_, self_ticks_,
indent, ' ',
entry_->name_prefix(),
entry_->name(),
- entry_->security_token_id(),
entry_->script_id(),
id());
if (entry_->resource_name()[0] != '\0')
@@ -353,58 +298,6 @@ struct NodesPair {
};
-class FilteredCloneCallback {
- public:
- FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
- : stack_(10),
- security_token_id_(security_token_id) {
- stack_.Add(NodesPair(NULL, dst_root));
- }
-
- void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) {
- if (IsTokenAcceptable(child->entry()->security_token_id(),
- parent->entry()->security_token_id())) {
- ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry());
- clone->IncreaseSelfTicks(child->self_ticks());
- stack_.Add(NodesPair(child, clone));
- } else {
- // Attribute ticks to parent node.
- stack_.last().dst->IncreaseSelfTicks(child->self_ticks());
- }
- }
-
- void AfterAllChildrenTraversed(ProfileNode* parent) { }
-
- void AfterChildTraversed(ProfileNode*, ProfileNode* child) {
- if (stack_.last().src == child) {
- stack_.RemoveLast();
- }
- }
-
- private:
- bool IsTokenAcceptable(int token, int parent_token) {
- if (token == TokenEnumerator::kNoSecurityToken
- || token == security_token_id_) return true;
- if (token == TokenEnumerator::kInheritsSecurityToken) {
- ASSERT(parent_token != TokenEnumerator::kInheritsSecurityToken);
- return parent_token == TokenEnumerator::kNoSecurityToken
- || parent_token == security_token_id_;
- }
- return false;
- }
-
- List<NodesPair> stack_;
- int security_token_id_;
-};
-
-void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
- ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
- FilteredCloneCallback cb(root_, security_token_id);
- src->TraverseDepthFirst(&cb);
- CalculateTotalTicks();
-}
-
-
void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
}
@@ -495,14 +388,6 @@ void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
}
-CpuProfile* CpuProfile::FilteredClone(int security_token_id) {
- ASSERT(security_token_id != TokenEnumerator::kNoSecurityToken);
- CpuProfile* clone = new CpuProfile(title_, uid_, false);
- clone->top_down_.FilteredClone(&top_down_, security_token_id);
- return clone;
-}
-
-
void CpuProfile::ShortPrint() {
OS::Print("top down ");
top_down_.ShortPrint();
@@ -601,10 +486,7 @@ void CodeMap::Print() {
CpuProfilesCollection::CpuProfilesCollection()
- : profiles_uids_(UidsMatch),
- current_profiles_semaphore_(OS::CreateSemaphore(1)) {
- // Create list of unabridged profiles.
- profiles_by_token_.Add(new List<CpuProfile*>());
+ : current_profiles_semaphore_(OS::CreateSemaphore(1)) {
}
@@ -612,22 +494,16 @@ static void DeleteCodeEntry(CodeEntry** entry_ptr) {
delete *entry_ptr;
}
+
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
delete *profile_ptr;
}
-static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
- if (*list_ptr != NULL) {
- (*list_ptr)->Iterate(DeleteCpuProfile);
- delete *list_ptr;
- }
-}
CpuProfilesCollection::~CpuProfilesCollection() {
delete current_profiles_semaphore_;
+ finished_profiles_.Iterate(DeleteCpuProfile);
current_profiles_.Iterate(DeleteCpuProfile);
- detached_profiles_.Iterate(DeleteCpuProfile);
- profiles_by_token_.Iterate(DeleteProfilesList);
code_entries_.Iterate(DeleteCodeEntry);
}
@@ -653,8 +529,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
}
-CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
- const char* title,
+CpuProfile* CpuProfilesCollection::StopProfiling(const char* title,
double actual_sampling_rate) {
const int title_len = StrLength(title);
CpuProfile* profile = NULL;
@@ -667,48 +542,11 @@ CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
}
current_profiles_semaphore_->Signal();
- if (profile != NULL) {
- profile->CalculateTotalTicks();
- profile->SetActualSamplingRate(actual_sampling_rate);
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- unabridged_list->Add(profile);
- HashMap::Entry* entry =
- profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
- static_cast<uint32_t>(profile->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1);
- return GetProfile(security_token_id, profile->uid());
- }
- return NULL;
-}
-
-
-CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
- unsigned uid) {
- int index = GetProfileIndex(uid);
- if (index < 0) return NULL;
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- return unabridged_list->at(index);
- }
- List<CpuProfile*>* list = GetProfilesList(security_token_id);
- if (list->at(index) == NULL) {
- (*list)[index] =
- unabridged_list->at(index)->FilteredClone(security_token_id);
- }
- return list->at(index);
-}
-
-
-int CpuProfilesCollection::GetProfileIndex(unsigned uid) {
- HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ?
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value)) : -1;
+ if (profile == NULL) return NULL;
+ profile->CalculateTotalTicks();
+ profile->SetActualSamplingRate(actual_sampling_rate);
+ finished_profiles_.Add(profile);
+ return profile;
}
@@ -724,74 +562,13 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
// Called from VM thread for a completed profile.
unsigned uid = profile->uid();
- int index = GetProfileIndex(uid);
- if (index < 0) {
- detached_profiles_.RemoveElement(profile);
- return;
- }
- profiles_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
- // Decrement all indexes above the deleted one.
- for (HashMap::Entry* p = profiles_uids_.Start();
- p != NULL;
- p = profiles_uids_.Next(p)) {
- intptr_t p_index = reinterpret_cast<intptr_t>(p->value);
- if (p_index > index) {
- p->value = reinterpret_cast<void*>(p_index - 1);
- }
- }
- for (int i = 0; i < profiles_by_token_.length(); ++i) {
- List<CpuProfile*>* list = profiles_by_token_[i];
- if (list != NULL && index < list->length()) {
- // Move all filtered clones into detached_profiles_,
- // so we can know that they are still in use.
- CpuProfile* cloned_profile = list->Remove(index);
- if (cloned_profile != NULL && cloned_profile != profile) {
- detached_profiles_.Add(cloned_profile);
- }
- }
- }
-}
-
-
-int CpuProfilesCollection::TokenToIndex(int security_token_id) {
- ASSERT(TokenEnumerator::kNoSecurityToken == -1);
- return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ...
-}
-
-
-List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
- int security_token_id) {
- const int index = TokenToIndex(security_token_id);
- const int lists_to_add = index - profiles_by_token_.length() + 1;
- if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- const int current_count = unabridged_list->length();
- if (profiles_by_token_[index] == NULL) {
- profiles_by_token_[index] = new List<CpuProfile*>(current_count);
- }
- List<CpuProfile*>* list = profiles_by_token_[index];
- const int profiles_to_add = current_count - list->length();
- if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add);
- return list;
-}
-
-
-List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
- List<CpuProfile*>* unabridged_list =
- profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
- if (security_token_id == TokenEnumerator::kNoSecurityToken) {
- return unabridged_list;
- }
- List<CpuProfile*>* list = GetProfilesList(security_token_id);
- const int current_count = unabridged_list->length();
- for (int i = 0; i < current_count; ++i) {
- if (list->at(i) == NULL) {
- (*list)[i] = unabridged_list->at(i)->FilteredClone(security_token_id);
+ for (int i = 0; i < finished_profiles_.length(); i++) {
+ if (uid == finished_profiles_[i]->uid()) {
+ finished_profiles_.Remove(i);
+ return;
}
}
- return list;
+ UNREACHABLE();
}
@@ -811,13 +588,11 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
CodeEntry* CpuProfilesCollection::NewCodeEntry(
Logger::LogEventsAndTags tag,
const char* name,
- int security_token_id,
const char* name_prefix,
const char* resource_name,
int line_number) {
CodeEntry* code_entry = new CodeEntry(tag,
name,
- security_token_id,
name_prefix,
resource_name,
line_number);
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 411cbdbab2..6b02368816 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -37,30 +37,6 @@ namespace internal {
struct OffsetRange;
-class TokenEnumerator {
- public:
- TokenEnumerator();
- ~TokenEnumerator();
- int GetTokenId(Object* token);
-
- static const int kNoSecurityToken = -1;
- static const int kInheritsSecurityToken = -2;
-
- private:
- static void TokenRemovedCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void* parameter);
- void TokenRemoved(Object** token_location);
-
- List<Object**> token_locations_;
- List<bool> token_removed_;
-
- friend class TokenEnumeratorTester;
-
- DISALLOW_COPY_AND_ASSIGN(TokenEnumerator);
-};
-
-
// Provides a storage of strings allocated in C++ heap, to hold them
// forever, even if they disappear from JS heap or external storage.
class StringsStorage {
@@ -98,7 +74,6 @@ class CodeEntry {
// CodeEntry doesn't own name strings, just references them.
INLINE(CodeEntry(Logger::LogEventsAndTags tag,
const char* name,
- int security_token_id = TokenEnumerator::kNoSecurityToken,
const char* name_prefix = CodeEntry::kEmptyNamePrefix,
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo));
@@ -113,7 +88,6 @@ class CodeEntry {
INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
INLINE(int script_id() const) { return script_id_; }
INLINE(void set_script_id(int script_id)) { script_id_ = script_id; }
- INLINE(int security_token_id() const) { return security_token_id_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
@@ -141,7 +115,6 @@ class CodeEntry {
int line_number_;
int shared_id_;
int script_id_;
- int security_token_id_;
List<OffsetRange>* no_frame_ranges_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
@@ -201,7 +174,6 @@ class ProfileTree {
ProfileNode* AddPathFromEnd(const Vector<CodeEntry*>& path);
void AddPathFromStart(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
- void FilteredClone(ProfileTree* src, int security_token_id);
double TicksToMillis(unsigned ticks) const {
return ticks * ms_to_ticks_scale_;
@@ -238,7 +210,6 @@ class CpuProfile {
void AddPath(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
void SetActualSamplingRate(double actual_sampling_rate);
- CpuProfile* FilteredClone(int security_token_id);
INLINE(const char* title() const) { return title_; }
INLINE(unsigned uid() const) { return uid_; }
@@ -315,10 +286,8 @@ class CpuProfilesCollection {
~CpuProfilesCollection();
bool StartProfiling(const char* title, unsigned uid, bool record_samples);
- CpuProfile* StopProfiling(int security_token_id,
- const char* title,
- double actual_sampling_rate);
- List<CpuProfile*>* Profiles(int security_token_id);
+ CpuProfile* StopProfiling(const char* title, double actual_sampling_rate);
+ List<CpuProfile*>* profiles() { return &finished_profiles_; }
const char* GetName(Name* name) {
return function_and_resource_names_.GetName(name);
}
@@ -331,15 +300,12 @@ class CpuProfilesCollection {
const char* GetFunctionName(const char* name) {
return function_and_resource_names_.GetFunctionName(name);
}
- CpuProfile* GetProfile(int security_token_id, unsigned uid);
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
- bool HasDetachedProfiles() { return detached_profiles_.length() > 0; }
CodeEntry* NewCodeEntry(
Logger::LogEventsAndTags tag,
const char* name,
- int security_token_id = TokenEnumerator::kNoSecurityToken,
const char* name_prefix = CodeEntry::kEmptyNamePrefix,
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo);
@@ -351,21 +317,9 @@ class CpuProfilesCollection {
static const int kMaxSimultaneousProfiles = 100;
private:
- int GetProfileIndex(unsigned uid);
- List<CpuProfile*>* GetProfilesList(int security_token_id);
- int TokenToIndex(int security_token_id);
-
- INLINE(static bool UidsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
StringsStorage function_and_resource_names_;
List<CodeEntry*> code_entries_;
- List<List<CpuProfile*>* > profiles_by_token_;
- // Mapping from profiles' uids to indexes in the second nested list
- // of profiles_by_token_.
- HashMap profiles_uids_;
- List<CpuProfile*> detached_profiles_;
+ List<CpuProfile*> finished_profiles_;
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
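
With the security-token machinery gone, the collection reduces to two plain lists: current_profiles_ for in-flight recordings and finished_profiles_ for completed ones, and RemoveProfile becomes a linear scan by uid. A stripped-down sketch of the resulting ownership model (hypothetical types mirroring the new control flow, error handling elided):

#include <cstddef>
#include <vector>

struct Profile { unsigned uid; };

// Profiles move from "current" to "finished" exactly once; deletion is a
// linear scan keyed by uid. No HashMap index, no per-token clone lists.
struct ProfileStore {
  std::vector<Profile*> current, finished;

  Profile* Stop(unsigned uid) {
    for (size_t i = 0; i < current.size(); ++i) {
      if (current[i]->uid != uid) continue;
      Profile* p = current[i];
      current.erase(current.begin() + i);
      finished.push_back(p);  // Ownership transfers to the finished list.
      return p;
    }
    return NULL;
  }

  void Remove(const Profile* p) {
    for (size_t i = 0; i < finished.size(); ++i) {
      if (finished[i]->uid == p->uid) {
        finished.erase(finished.begin() + i);
        return;
      }
    }
  }
};
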
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index b0d10e1270..ac365634cd 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -55,6 +55,8 @@ namespace v8 {
namespace internal {
class Smi;
+class Type;
+class TypeInfo;
// Type of properties.
// Order of properties is significant.
@@ -101,6 +103,10 @@ class Representation {
static Representation FromKind(Kind kind) { return Representation(kind); }
+ // TODO(rossberg): this should die eventually.
+ static Representation FromType(TypeInfo info);
+ static Representation FromType(Handle<Type> type);
+
bool Equals(const Representation& other) const {
return kind_ == other.kind_;
}
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 528c47d80d..de9be50ddc 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -192,8 +192,12 @@ function DerivedEnumerateTrap() {
var name = names[i]
if (IS_SYMBOL(name)) continue
var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name))
- if (!IS_UNDEFINED(desc) && desc.enumerable) {
- enumerableNames[count++] = names[i]
+ if (!IS_UNDEFINED(desc)) {
+ if (!desc.configurable) {
+ throw MakeTypeError("proxy_prop_not_configurable",
+ [this, "getPropertyDescriptor", name])
+ }
+ if (desc.enumerable) enumerableNames[count++] = names[i]
}
}
return enumerableNames
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index bd02a69042..ff41432b28 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -247,7 +247,7 @@ void RuntimeProfiler::OptimizeNow() {
frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
if (!FLAG_watch_ic_patching) {
// Adjust threshold each time we have processed
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index e3ee6d56c3..c36d453d03 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -467,7 +467,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
constant_properties,
should_have_fast_elements,
has_function_literal);
- if (boilerplate.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, boilerplate);
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
@@ -493,7 +493,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
constant_properties,
should_have_fast_elements,
has_function_literal);
- if (boilerplate.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, boilerplate);
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
@@ -501,6 +501,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
}
+static Handle<AllocationSite> GetLiteralAllocationSite(
+ Isolate* isolate,
+ Handle<FixedArray> literals,
+ int literals_index,
+ Handle<FixedArray> elements) {
+ // Check if boilerplate exists. If not, create it first.
+ Handle<Object> literal_site(literals->get(literals_index), isolate);
+ Handle<AllocationSite> site;
+ if (*literal_site == isolate->heap()->undefined_value()) {
+ ASSERT(*elements != isolate->heap()->empty_fixed_array());
+ Handle<Object> boilerplate =
+ Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
+ if (boilerplate.is_null()) return site;
+ site = isolate->factory()->NewAllocationSite();
+ site->set_transition_info(*boilerplate);
+ literals->set(literals_index, *site);
+ } else {
+ site = Handle<AllocationSite>::cast(literal_site);
+ }
+
+ return site;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -508,17 +532,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- ASSERT(*elements != isolate->heap()->empty_fixed_array());
- boilerplate =
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return JSObject::cast(*boilerplate)->DeepCopy(isolate);
+ Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
+ literals_index, elements);
+ RETURN_IF_EMPTY_HANDLE(isolate, site);
+
+ JSObject* boilerplate = JSObject::cast(site->transition_info());
+ return boilerplate->DeepCopy(isolate);
}
@@ -529,29 +548,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- ASSERT(*elements != isolate->heap()->empty_fixed_array());
- boilerplate =
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return Failure::Exception();
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- if (JSObject::cast(*boilerplate)->elements()->map() ==
+ Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
+ literals_index, elements);
+ RETURN_IF_EMPTY_HANDLE(isolate, site);
+
+ JSObject* boilerplate = JSObject::cast(site->transition_info());
+ if (boilerplate->elements()->map() ==
isolate->heap()->fixed_cow_array_map()) {
isolate->counters()->cow_arrays_created_runtime()->Increment();
}
- JSObject* boilerplate_object = JSObject::cast(*boilerplate);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(
- boilerplate_object->GetElementsKind());
+ AllocationSiteMode mode = AllocationSite::GetMode(
+ boilerplate->GetElementsKind());
if (mode == TRACK_ALLOCATION_SITE) {
- return isolate->heap()->CopyJSObjectWithAllocationSite(boilerplate_object);
+ return isolate->heap()->CopyJSObjectWithAllocationSite(
+ boilerplate, *site);
}
- return isolate->heap()->CopyJSObject(boilerplate_object);
+ return isolate->heap()->CopyJSObject(boilerplate);
}
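
Both array-literal runtime functions now go through GetLiteralAllocationSite(), which memoizes an AllocationSite in the literals slot instead of the boilerplate object itself, so allocation tracking survives later copies. The caching shape is ordinary create-once memoization, as in this stripped-down sketch (hypothetical names; a null result from creation propagates to the caller, exactly as RETURN_IF_EMPTY_HANDLE does above):

template <typename Site, typename Factory>
Site* GetOrCreateSite(Site** slot, Factory create) {
  if (*slot == nullptr) {                  // Stands in for the undefined check.
    Site* fresh = create();
    if (fresh == nullptr) return nullptr;  // Pending exception: bail out.
    *slot = fresh;                         // Cache for subsequent evaluations.
  }
  return *slot;
}

Subsequent evaluations of the same literal then hit the cached site, which is what lets the shallow path copy with allocation-site tracking intact.
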
@@ -796,6 +810,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset_object, 3);
CONVERT_ARG_HANDLE_CHECKED(Object, byte_length_object, 4);
+ ASSERT(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+
ExternalArrayType arrayType;
size_t elementSize;
switch (arrayId) {
@@ -1012,6 +1032,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) {
CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, byte_length, 3);
+ ASSERT(holder->GetInternalFieldCount() ==
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ holder->SetInternalField(i, Smi::FromInt(0));
+ }
+
holder->set_buffer(*buffer);
ASSERT(byte_offset->IsNumber());
ASSERT(
@@ -1190,6 +1216,59 @@ DATA_VIEW_GETTER(Float64, double, NumberFromDouble)
#undef DATA_VIEW_GETTER
+
+template <typename T>
+static T DataViewConvertValue(double value);
+
+
+template <>
+int8_t DataViewConvertValue<int8_t>(double value) {
+ return static_cast<int8_t>(DoubleToInt32(value));
+}
+
+
+template <>
+int16_t DataViewConvertValue<int16_t>(double value) {
+ return static_cast<int16_t>(DoubleToInt32(value));
+}
+
+
+template <>
+int32_t DataViewConvertValue<int32_t>(double value) {
+ return DoubleToInt32(value);
+}
+
+
+template <>
+uint8_t DataViewConvertValue<uint8_t>(double value) {
+ return static_cast<uint8_t>(DoubleToUint32(value));
+}
+
+
+template <>
+uint16_t DataViewConvertValue<uint16_t>(double value) {
+ return static_cast<uint16_t>(DoubleToUint32(value));
+}
+
+
+template <>
+uint32_t DataViewConvertValue<uint32_t>(double value) {
+ return DoubleToUint32(value);
+}
+
+
+template <>
+float DataViewConvertValue<float>(double value) {
+ return static_cast<float>(value);
+}
+
+
+template <>
+double DataViewConvertValue<double>(double value) {
+ return value;
+}
+
+
#define DATA_VIEW_SETTER(TypeName, Type) \
RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewSet##TypeName) { \
HandleScope scope(isolate); \
@@ -1198,7 +1277,7 @@ DATA_VIEW_GETTER(Float64, double, NumberFromDouble)
CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1); \
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); \
CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3); \
- Type v = static_cast<Type>(value->Number()); \
+ Type v = DataViewConvertValue<Type>(value->Number()); \
if (DataViewSetValue( \
isolate, holder, offset, is_little_endian, v)) { \
return isolate->heap()->undefined_value(); \
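
The DataViewConvertValue specializations matter because the old static_cast<Type>(value->Number()) does not match JavaScript semantics (and is undefined behaviour in C++ for NaN and out-of-range doubles); ECMAScript requires ToInt32/ToUint32 wrapping. A self-contained sketch of the wrapping conversion, where ToInt32 is a simplified stand-in for V8's DoubleToInt32 and the int8_t truncation assumes a two's-complement target:

#include <cstdint>
#include <cmath>

// NaN and infinities map to 0; finite values truncate toward zero and
// wrap modulo 2^32, matching the ECMAScript ToInt32 operation.
int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;
  double wrapped = std::fmod(std::trunc(value), 4294967296.0);  // 2^32
  if (wrapped < 0) wrapped += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}

// Narrower stores keep only the low bits, e.g. ToInt8(130.0) == -126.
int8_t ToInt8(double value) {
  return static_cast<int8_t>(ToInt32(value));
}
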
@@ -1342,69 +1421,73 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) {
}
-static JSWeakMap* WeakMapInitialize(Isolate* isolate,
- Handle<JSWeakMap> weakmap) {
- ASSERT(weakmap->map()->inobject_properties() == 0);
+static JSWeakCollection* WeakCollectionInitialize(Isolate* isolate,
+ Handle<JSWeakCollection> weak_collection) {
+ ASSERT(weak_collection->map()->inobject_properties() == 0);
Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
- weakmap->set_table(*table);
- weakmap->set_next(Smi::FromInt(0));
- return *weakmap;
+ weak_collection->set_table(*table);
+ weak_collection->set_next(Smi::FromInt(0));
+ return *weak_collection;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
- return WeakMapInitialize(isolate, weakmap);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
+ return WeakCollectionInitialize(isolate, weak_collection);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionGet) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
Handle<Object> lookup(table->Lookup(*key), isolate);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapHas) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionHas) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
Handle<Object> lookup(table->Lookup(*key), isolate);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapDelete) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(
+ weak_collection->table()));
Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
- weakmap->set_table(*new_table);
+ weak_collection->set_table(*new_table);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
Handle<Object> value(args[2], isolate);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
- weakmap->set_table(*new_table);
+ weak_collection->set_table(*new_table);
return isolate->heap()->undefined_value();
}
@@ -1430,6 +1513,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
isolate->heap()->proto_string(),
v8::ACCESS_GET)) {
isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
obj = obj->GetPrototype(isolate);
@@ -1460,7 +1544,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
Handle<Object> result = JSObject::SetPrototype(obj, prototype, true);
- if (result.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
Handle<Object> new_value(
GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
@@ -1472,7 +1556,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
return *result;
}
Handle<Object> result = JSObject::SetPrototype(obj, prototype, true);
- if (result.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -1534,28 +1618,20 @@ enum AccessCheckResult {
};
-static AccessCheckResult CheckElementAccess(
- JSObject* obj,
- uint32_t index,
- v8::AccessType access_type) {
- // TODO(1095): we should traverse hidden prototype hierachy as well.
- if (CheckGenericAccess(
- obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
- return ACCESS_ALLOWED;
- }
-
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
- return ACCESS_FORBIDDEN;
-}
-
-
static AccessCheckResult CheckPropertyAccess(
JSObject* obj,
Name* name,
v8::AccessType access_type) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
- return CheckElementAccess(obj, index, access_type);
+    // TODO(1095): we should traverse hidden prototype hierarchy as well.
+ if (CheckGenericAccess(
+ obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
+ return ACCESS_ALLOWED;
+ }
+
+ obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+ return ACCESS_FORBIDDEN;
}
LookupResult lookup(obj->GetIsolate());
@@ -1615,14 +1691,21 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
Heap* heap = isolate->heap();
// Due to some WebKit tests, we want to make sure that we do not log
// more than one access failure here.
- switch (CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS)) {
+ AccessCheckResult access_check_result =
+ CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ switch (access_check_result) {
case ACCESS_FORBIDDEN: return heap->false_value();
case ACCESS_ALLOWED: break;
case ACCESS_ABSENT: return heap->undefined_value();
}
PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
- if (attrs == ABSENT) return heap->undefined_value();
+ if (attrs == ABSENT) {
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ return heap->undefined_value();
+ }
+ ASSERT(!isolate->has_scheduled_exception());
AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
Handle<AccessorPair> accessors(raw_accessors, isolate);
@@ -1635,7 +1718,7 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
// GetProperty does access check.
Handle<Object> value = GetProperty(isolate, obj, name);
- if (value.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
elms->set(VALUE_INDEX, *value);
} else {
// Access checks are performed for both accessors separately.
@@ -1643,10 +1726,16 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) {
+ ASSERT(!isolate->has_scheduled_exception());
elms->set(GETTER_INDEX, getter);
+ } else {
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
}
if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) {
+ ASSERT(!isolate->has_scheduled_exception());
elms->set(SETTER_INDEX, setter);
+ } else {
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
}
}
@@ -1700,7 +1789,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
Handle<Object> result =
RegExpImpl::Compile(re, pattern, flags);
- if (result.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -2109,7 +2198,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
} else if (lookup.IsNormal()) {
if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
!lookup.IsReadOnly()) {
- global->SetNormalizedProperty(&lookup, *value);
+ HandleScope scope(isolate);
+ JSObject::SetNormalizedProperty(Handle<JSObject>(global), &lookup, value);
}
} else {
// Ignore re-initialization of constants that have already been
@@ -2198,7 +2288,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
}
} else if (lookup.IsNormal()) {
if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
- object->SetNormalizedProperty(&lookup, *value);
+ JSObject::SetNormalizedProperty(object, &lookup, value);
}
} else {
// We should not reach here. Any real, named property should be
@@ -2250,7 +2340,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
subject,
index,
last_match_info);
- if (result.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -2770,7 +2860,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- JSFunction* function = JSFunction::cast(frame->function());
+ JSFunction* function = frame->function();
RUNTIME_ASSERT(function->shared()->is_generator());
JSGeneratorObject* generator;
@@ -2799,8 +2889,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
JavaScriptFrameIterator stack_iterator(isolate);
JavaScriptFrame* frame = stack_iterator.frame();
- RUNTIME_ASSERT(JSFunction::cast(frame->function())->shared()->is_generator());
- ASSERT_EQ(JSFunction::cast(frame->function()), generator_object->function());
+ RUNTIME_ASSERT(frame->function()->shared()->is_generator());
+ ASSERT_EQ(frame->function(), generator_object->function());
// The caller should have saved the context and continuation already.
ASSERT_EQ(generator_object->context(), Context::cast(frame->context()));
@@ -4068,6 +4158,7 @@ static int StringMatchBackwards(Vector<const schar> subject,
return -1;
}
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -4785,10 +4876,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
bool fast = obj->HasFastProperties();
JSObject::DefineAccessor(obj, name, getter, setter, attr);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (fast) JSObject::TransformToFastProperties(obj, 0);
return isolate->heap()->undefined_value();
}
+
// Implements part of 8.12.9 DefineOwnProperty.
// There are 3 cases that lead here:
// Step 4a - define a new data property.
@@ -5070,7 +5163,9 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
return isolate->heap()->true_value();
}
- return receiver->DeleteElement(index, mode);
+ Handle<Object> result = JSReceiver::DeleteElement(receiver, index, mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
Handle<Name> name;
@@ -5085,7 +5180,9 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
}
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- return receiver->DeleteProperty(*name, mode);
+ Handle<Object> result = JSReceiver::DeleteProperty(receiver, name, mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -5188,8 +5285,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
CONVERT_SMI_ARG_CHECKED(literal_index, 4);
- Object* raw_boilerplate_object = literals->get(literal_index);
- Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+ Object* raw_literal_cell = literals->get(literal_index);
+ JSArray* boilerplate = NULL;
+ if (raw_literal_cell->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(raw_literal_cell);
+ boilerplate = JSArray::cast(site->transition_info());
+ } else {
+ boilerplate = JSArray::cast(raw_literal_cell);
+ }
+ Handle<JSArray> boilerplate_object(boilerplate);
ElementsKind elements_kind = object->GetElementsKind();
ASSERT(IsFastElementsKind(elements_kind));
// Smis should never trigger transitions.
@@ -5291,21 +5395,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
-
- CONVERT_ARG_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
- return object->DeleteProperty(key, (strict_mode == kStrictMode)
- ? JSReceiver::STRICT_DELETION
- : JSReceiver::NORMAL_DELETION);
+ JSReceiver::DeleteMode delete_mode = (strict_mode == kStrictMode)
+ ? JSReceiver::STRICT_DELETION : JSReceiver::NORMAL_DELETION;
+ Handle<Object> result = JSReceiver::DeleteProperty(object, key, delete_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
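
For context, the two deletion modes threaded through here map directly onto observable JS behavior; a d8 sketch (standard ES5 semantics, not specific to this patch):

    print(delete Object.prototype);       // false: NORMAL_DELETION in classic mode
    (function() {
      "use strict";
      try {
        delete Object.prototype;          // STRICT_DELETION
      } catch (e) {
        print(e instanceof TypeError);    // true: strict-mode delete throws
      }
    })();
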
-static Object* HasLocalPropertyImplementation(Isolate* isolate,
- Handle<JSObject> object,
- Handle<Name> key) {
+static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Name> key) {
if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
@@ -5317,6 +5422,7 @@ static Object* HasLocalPropertyImplementation(Isolate* isolate,
Handle<JSObject>::cast(proto),
key);
}
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
}
@@ -5336,8 +5442,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
// Fast case: either the key is a real named property or it is not
// an array index and there are no interceptors or hidden
// prototypes.
- if (object->HasRealNamedProperty(isolate, key))
+ if (object->HasRealNamedProperty(isolate, key)) {
+ ASSERT(!isolate->has_scheduled_exception());
return isolate->heap()->true_value();
+ } else {
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ }
Map* map = object->map();
if (!key_is_array_index &&
!map->has_named_interceptor() &&
@@ -5367,6 +5477,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
CONVERT_ARG_CHECKED(Name, key, 1);
bool result = receiver->HasProperty(key);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
}
@@ -5379,6 +5490,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
CONVERT_SMI_ARG_CHECKED(index, 1);
bool result = receiver->HasElement(index);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
}
@@ -5392,7 +5504,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
CONVERT_ARG_CHECKED(Name, key, 1);
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
- return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
+ if (att == ABSENT || (att & DONT_ENUM) != 0) {
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ return isolate->heap()->false_value();
+ }
+ ASSERT(!isolate->has_scheduled_exception());
+ return isolate->heap()->true_value();
}
@@ -5470,6 +5587,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
@@ -5489,6 +5607,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
int n;
@@ -5615,6 +5734,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
!isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -5696,9 +5816,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
// Handle special arguments properties.
if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n);
if (key->Equals(isolate->heap()->callee_string())) {
- Object* function = frame->function();
- if (function->IsJSFunction() &&
- !JSFunction::cast(function)->shared()->is_classic_mode()) {
+ JSFunction* function = frame->function();
+ if (!function->shared()->is_classic_mode()) {
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_arguments_callee", HandleVector<Object>(NULL, 0)));
}
@@ -5844,8 +5963,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
}
// Slower case.
+ int flags = ALLOW_HEX;
+ if (FLAG_harmony_numeric_literals) {
+ // The current spec draft has not updated "ToNumber Applied to the String
+ // Type", https://bugs.ecmascript.org/show_bug.cgi?id=1584
+ flags |= ALLOW_OCTAL | ALLOW_BINARY;
+ }
return isolate->heap()->NumberFromDouble(
- StringToDouble(isolate->unicode_cache(), subject, ALLOW_HEX));
+ StringToDouble(isolate->unicode_cache(), subject, flags));
}
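
The widened flag set is observable through ToNumber on strings; a d8 sketch (assuming --harmony-numeric-literals; the matching literal forms appear in the scanner changes further down):

    print(Number("0x10"));  // 16 -- ALLOW_HEX, unchanged
    print(Number("0o10"));  // 8  -- newly accepted via ALLOW_OCTAL
    print(Number("0b10"));  // 2  -- newly accepted via ALLOW_BINARY
    print(Number("010"));   // 10 -- strings never get implicit octal
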
@@ -7463,6 +7588,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
+
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
@@ -7485,6 +7611,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
return isolate->heap()->AllocateHeapNumber(result);
}
+
// Fast version of Math.pow if we know that y is not an integer and y is not
// -0.5 or 0.5. Used as slow case from full codegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
@@ -8183,7 +8310,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
JavaScriptFrame* frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
+ Handle<JSFunction> function(frame->function(), isolate);
Handle<Code> optimized_code(function->code());
RUNTIME_ASSERT((type != Deoptimizer::EAGER &&
type != Deoptimizer::SOFT) || function->IsOptimized());
@@ -8199,7 +8326,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
bool has_other_activations = false;
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
- JSFunction* other_function = JSFunction::cast(frame->function());
+ JSFunction* other_function = frame->function();
if (frame->is_optimized() && other_function->code() == function->code()) {
has_other_activations = true;
break;
@@ -8308,38 +8435,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompleteOptimization) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation && V8::UseCrankshaft()) {
- // While function is in optimization pipeline, it is marked accordingly.
- // Note that if the debugger is activated during parallel recompilation,
- // the function will be marked with the lazy-recompile builtin, which is
- // not related to parallel recompilation.
- while (function->IsMarkedForParallelRecompilation() ||
- function->IsInRecompileQueue() ||
- function->IsMarkedForInstallingRecompiledCode()) {
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- OS::Sleep(50);
- }
- }
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ ASSERT(!function->IsOptimized());
+ function->shared()->set_optimization_disabled(true);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- // The least significant bit (after untagging) indicates whether the
- // function is currently optimized, regardless of reason.
+ RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
if (!V8::UseCrankshaft()) {
return Smi::FromInt(4); // 4 == "never".
}
+ bool sync_with_compiler_thread = true;
+ if (args.length() == 2) {
+ CONVERT_ARG_HANDLE_CHECKED(String, sync, 1);
+ if (sync->IsOneByteEqualTo(STATIC_ASCII_VECTOR("no sync"))) {
+ sync_with_compiler_thread = false;
+ }
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation) {
- if (function->IsMarkedForLazyRecompilation()) {
- return Smi::FromInt(5);
+ if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
+ while (function->IsMarkedForParallelRecompilation() ||
+ function->IsInRecompileQueue() ||
+ function->IsMarkedForInstallingRecompiledCode()) {
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ OS::Sleep(50);
}
}
if (FLAG_always_opt) {
@@ -8348,6 +8473,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
return function->IsOptimized() ? Smi::FromInt(3) // 3 == "always".
: Smi::FromInt(2); // 2 == "no".
}
+ if (FLAG_deopt_every_n_times) {
+ return Smi::FromInt(6); // 6 == "maybe deopted".
+ }
return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes".
: Smi::FromInt(2); // 2 == "no".
}
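
For context, the renamed and extended test natives are driven from JS tests roughly like this (a sketch, assuming d8 with --allow-natives-syntax):

    function f(x) { return x + 1; }
    function g(x) { return x - 1; }
    %NeverOptimizeFunction(g);   // must run before g is ever optimized
    f(1); f(2);
    %OptimizeFunctionOnNextCall(f);
    f(3);
    // Return codes: 1 "yes", 2 "no", 3 "always", 4 "never", 6 "maybe deopted"
    print(%GetOptimizationStatus(f));
    // Optional second argument skips syncing with the parallel compiler thread:
    print(%GetOptimizationStatus(f, "no sync"));
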
@@ -8405,13 +8533,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
Address table_cursor = start + unoptimized->back_edge_table_offset();
uint32_t table_length = Memory::uint32_at(table_cursor);
table_cursor += kIntSize;
- uint8_t loop_depth = 0;
+ uint32_t loop_depth = 0;
for (unsigned i = 0; i < table_length; ++i) {
// Table entries are (AST id, pc offset) pairs.
uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
if (pc_offset == target_pc_offset) {
ast_id = BailoutId(static_cast<int>(Memory::uint32_at(table_cursor)));
- loop_depth = Memory::uint8_at(table_cursor + 2 * kIntSize);
+ loop_depth = Memory::uint32_at(table_cursor + 2 * kIntSize);
break;
}
table_cursor += FullCodeGenerator::kBackEdgeEntrySize;
@@ -8616,9 +8744,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
- JSObject* extension_object;
- if (args[0]->IsJSObject()) {
- extension_object = JSObject::cast(args[0]);
+ JSReceiver* extension_object;
+ if (args[0]->IsJSReceiver()) {
+ extension_object = JSReceiver::cast(args[0]);
} else {
// Convert the object to a proper JavaScript object.
MaybeObject* maybe_js_object = args[0]->ToObject();
@@ -8834,7 +8962,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
// the global object, or the subject of a with. Try to delete it
// (respecting DONT_DELETE).
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
+ Handle<Object> result = JSReceiver::DeleteProperty(object, name);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -8852,6 +8982,7 @@ struct ObjectPair {
MaybeObject* y;
};
+
static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
ObjectPair result = {x, y};
// Pointers x and y returned in rax and rdx, in AMD-x64-abi.
@@ -8918,6 +9049,9 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
&index,
&attributes,
&binding_flags);
+ if (isolate->has_pending_exception()) {
+ return MakePair(Failure::Exception(), NULL);
+ }
// If the index is non-negative, the slot has been found in a context.
if (index >= 0) {
@@ -8958,13 +9092,14 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
// object, subject of a with, or a global object. We read the named
// property from it.
if (!holder.is_null()) {
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
- ASSERT(object->HasProperty(*name));
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
+ ASSERT(object->IsJSProxy() || object->HasProperty(*name));
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
object->IsGlobalObject()
? GlobalObject::cast(*object)->global_receiver()
- : ComputeReceiverForNonGlobal(isolate, *object),
+ : object->IsJSProxy() ? static_cast<Object*>(*object)
+ : ComputeReceiverForNonGlobal(isolate, JSObject::cast(*object)),
isolate);
// No need to unhole the value here. This is taken care of by the
@@ -9017,6 +9152,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
&index,
&attributes,
&binding_flags);
+ if (isolate->has_pending_exception()) return Failure::Exception();
if (index >= 0) {
// The property was found in a context slot.
@@ -9045,11 +9181,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
// Slow case: The property is not in a context slot. It is either in a
// context extension object, a property of the subject of a with, or a
// property of the global object.
- Handle<JSObject> object;
+ Handle<JSReceiver> object;
if (!holder.is_null()) {
// The property exists on the holder.
- object = Handle<JSObject>::cast(holder);
+ object = Handle<JSReceiver>::cast(holder);
} else {
// The property was not found.
ASSERT(attributes == ABSENT);
@@ -9063,7 +9199,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
}
// In non-strict mode, the property is added to the global object.
attributes = NONE;
- object = Handle<JSObject>(isolate->context()->global_object());
+ object = Handle<JSReceiver>(isolate->context()->global_object());
}
// Set the property if it's not read only or doesn't yet exist.
@@ -9377,7 +9513,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source, context, true, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
- if (shared.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, shared);
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
context,
@@ -9414,7 +9550,8 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
language_mode,
NO_PARSE_RESTRICTION,
scope_position);
- if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, shared,
+ MakePair(Failure::Exception(), NULL));
Handle<JSFunction> compiled =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, context, NOT_TENURED);
@@ -10233,6 +10370,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
return string;
}
+
// Moves all own elements of an object, that are below a limit, to positions
// starting at zero. All undefined values are placed after non-undefined values,
// and are followed by non-existing element. Does not change the length
@@ -11020,7 +11158,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
Handle<Object> receiver(it.frame()->receiver(), isolate);
if (!receiver->IsJSObject() &&
shared->is_classic_mode() &&
- !shared->native()) {
+ !function->IsBuiltin()) {
// If the receiver is not a JSObject and the function is not a
// builtin or strict-mode we have hit an optimization where a
// value object is not converted into a wrapped JS objects. To
@@ -11030,6 +11168,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
it.Advance();
Handle<Context> calling_frames_native_context(
Context::cast(Context::cast(it.frame()->context())->native_context()));
+ ASSERT(!receiver->IsUndefined() && !receiver->IsNull());
receiver =
isolate->factory()->ToObject(receiver, calling_frames_native_context);
}
@@ -11042,19 +11181,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Create a plain JSObject which materializes the local scope for the specified
// frame.
-static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
+static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
Isolate* isolate,
- JavaScriptFrame* frame,
+ Handle<JSObject> target,
+ Handle<JSFunction> function,
FrameInspector* frame_inspector) {
- Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
- // Allocate and initialize a JSObject with all the arguments, stack locals
- // heap locals and extension properties of the debugged function.
- Handle<JSObject> local_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
Handle<Object> value(i < frame_inspector->GetParametersCount()
@@ -11065,7 +11199,7 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(isolate,
- local_scope,
+ target,
Handle<String>(scope_info->ParameterName(i)),
value,
NONE,
@@ -11078,7 +11212,7 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(isolate,
- local_scope,
+ target,
Handle<String>(scope_info->StackLocalName(i)),
Handle<Object>(frame_inspector->GetExpression(i), isolate),
NONE,
@@ -11086,45 +11220,90 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
Handle<JSObject>());
}
- if (scope_info->HasContext()) {
- // Third fill all context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->declaration_context());
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, function_context, local_scope)) {
- return Handle<JSObject>();
- }
+ return target;
+}
- // Finally copy any properties from the function context extension.
- // These will be variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsNativeContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- bool threw = false;
- Handle<FixedArray> keys =
- GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
- if (threw) return Handle<JSObject>();
-
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(isolate,
- local_scope,
- key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
+
+static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
+ Handle<JSObject> target,
+ Handle<JSFunction> function,
+ JavaScriptFrame* frame,
+ int inlined_jsframe_index) {
+ if (inlined_jsframe_index != 0 || frame->is_optimized()) {
+ // Optimized frames are not supported.
+ // TODO(yangguo): make sure all code is deoptimized when the debugger is active
+ // and assert that this cannot happen.
+ return;
+ }
+
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ // Parameters.
+ for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ HandleScope scope(isolate);
+ Handle<Object> value = GetProperty(
+ isolate, target, Handle<String>(scope_info->ParameterName(i)));
+ frame->SetParameterValue(i, *value);
+ }
+
+ // Stack locals.
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ HandleScope scope(isolate);
+ Handle<Object> value = GetProperty(
+ isolate, target, Handle<String>(scope_info->StackLocalName(i)));
+ frame->SetExpression(i, *value);
+ }
+}
+
+
+static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
+ Handle<JSObject> target,
+ Handle<JSFunction> function,
+ JavaScriptFrame* frame) {
+ HandleScope scope(isolate);
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ if (!scope_info->HasContext()) return target;
+
+ // Third fill all context locals.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->declaration_context());
+ if (!scope_info->CopyContextLocalsToScopeObject(
+ isolate, function_context, target)) {
+ return Handle<JSObject>();
+ }
+
+ // Finally copy any properties from the function context extension.
+ // These will be variables introduced by eval.
+ if (function_context->closure() == *function) {
+ if (function_context->has_extension() &&
+ !function_context->IsNativeContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+ bool threw = false;
+ Handle<FixedArray> keys =
+ GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
+ if (threw) return Handle<JSObject>();
+
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ ASSERT(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(isolate,
+ target,
+ key,
+ GetProperty(isolate, ext, key),
+ NONE,
+ kNonStrictMode),
+ Handle<JSObject>());
}
}
}
- return local_scope;
+ return target;
}
@@ -11133,9 +11312,15 @@ static Handle<JSObject> MaterializeLocalScope(
JavaScriptFrame* frame,
int inlined_jsframe_index) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- return MaterializeLocalScopeWithFrameInspector(isolate,
- frame,
- &frame_inspector);
+ Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+
+ Handle<JSObject> local_scope =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ local_scope = MaterializeStackLocalsWithFrameInspector(
+ isolate, local_scope, function, &frame_inspector);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, local_scope, Handle<JSObject>());
+
+ return MaterializeLocalContext(isolate, local_scope, function, frame);
}
@@ -11171,7 +11356,7 @@ static bool SetLocalVariableValue(Isolate* isolate,
return false;
}
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<JSFunction> function(frame->function());
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -11418,7 +11603,7 @@ class ScopeIterator {
: isolate_(isolate),
frame_(frame),
inlined_jsframe_index_(inlined_jsframe_index),
- function_(JSFunction::cast(frame->function())),
+ function_(frame->function()),
context_(Context::cast(frame->context())),
nested_scope_chain_(4),
failed_(false) {
@@ -11799,7 +11984,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
JavaScriptFrame* frame = frame_it.frame();
Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+ Handle<SharedFunctionInfo>(frame->function()->shared());
Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
int len = 0;
@@ -11850,6 +12035,7 @@ static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
return *isolate->factory()->NewJSArrayWithElements(details);
}
+
// Return an array with scope details
// args[0]: number: break id
// args[1]: number: frame index
@@ -12287,111 +12473,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
}
-static bool IsBlockOrCatchOrWithScope(ScopeIterator::ScopeType type) {
- return type == ScopeIterator::ScopeTypeBlock ||
- type == ScopeIterator::ScopeTypeCatch ||
- type == ScopeIterator::ScopeTypeWith;
-}
-
-
-// Creates a copy of the with context chain. The copy of the context chain is
-// is linked to the function context supplied.
-static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
- Handle<JSFunction> function,
- Handle<Context> base,
- JavaScriptFrame* frame,
- int inlined_jsframe_index) {
- HandleScope scope(isolate);
- List<Handle<ScopeInfo> > scope_chain;
- List<Handle<Context> > context_chain;
-
- ScopeIterator it(isolate, frame, inlined_jsframe_index);
- if (it.Failed()) return Handle<Context>::null();
-
- for ( ; IsBlockOrCatchOrWithScope(it.Type()); it.Next()) {
- ASSERT(!it.Done());
- scope_chain.Add(it.CurrentScopeInfo());
- context_chain.Add(it.CurrentContext());
- }
-
- // At the end of the chain. Return the base context to link to.
- Handle<Context> context = base;
-
- // Iteratively copy and or materialize the nested contexts.
- while (!scope_chain.is_empty()) {
- Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
- Handle<Context> current = context_chain.RemoveLast();
- ASSERT(!(scope_info->HasContext() & current.is_null()));
-
- if (scope_info->scope_type() == CATCH_SCOPE) {
- ASSERT(current->IsCatchContext());
- Handle<String> name(String::cast(current->extension()));
- Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX),
- isolate);
- context =
- isolate->factory()->NewCatchContext(function,
- context,
- name,
- thrown_object);
- } else if (scope_info->scope_type() == BLOCK_SCOPE) {
- // Materialize the contents of the block scope into a JSObject.
- ASSERT(current->IsBlockContext());
- Handle<JSObject> block_scope_object =
- MaterializeBlockScope(isolate, current);
- CHECK(!block_scope_object.is_null());
- // Allocate a new function context for the debug evaluation and set the
- // extension object.
- Handle<Context> new_context =
- isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
- function);
- new_context->set_extension(*block_scope_object);
- new_context->set_previous(*context);
- context = new_context;
- } else {
- ASSERT(scope_info->scope_type() == WITH_SCOPE);
- ASSERT(current->IsWithContext());
- Handle<JSObject> extension(JSObject::cast(current->extension()));
- context =
- isolate->factory()->NewWithContext(function, context, extension);
- }
- }
-
- return scope.CloseAndEscape(context);
-}
-
-
// Helper function to find or create the arguments object for
// Runtime_DebugEvaluate.
-static Handle<Object> GetArgumentsObject(Isolate* isolate,
- JavaScriptFrame* frame,
- FrameInspector* frame_inspector,
- Handle<ScopeInfo> scope_info,
- Handle<Context> function_context) {
- // Try to find the value of 'arguments' to pass as parameter. If it is not
- // found (that is the debugged function does not reference 'arguments' and
- // does not support eval) then create an 'arguments' object.
- int index;
- if (scope_info->StackLocalCount() > 0) {
- index = scope_info->StackSlotIndex(isolate->heap()->arguments_string());
- if (index != -1) {
- return Handle<Object>(frame->GetExpression(index), isolate);
- }
- }
-
- if (scope_info->HasHeapAllocatedLocals()) {
- VariableMode mode;
- InitializationFlag init_flag;
- index = scope_info->ContextSlotIndex(
- isolate->heap()->arguments_string(), &mode, &init_flag);
- if (index != -1) {
- return Handle<Object>(function_context->get(index), isolate);
- }
+static Handle<JSObject> MaterializeArgumentsObject(
+ Isolate* isolate,
+ Handle<JSObject> target,
+ Handle<JSFunction> function) {
+ // Do not materialize the arguments object for eval or top-level code.
+ // Skip if "arguments" is already taken.
+ if (!function->shared()->is_function() ||
+ target->HasLocalProperty(isolate->heap()->arguments_string())) {
+ return target;
}
- // FunctionGetArguments can't return a non-Object.
- return Handle<JSObject>(JSObject::cast(
- Accessors::FunctionGetArguments(frame_inspector->GetFunction(),
- NULL)->ToObjectUnchecked()), isolate);
+ // FunctionGetArguments can't throw an exception.
+ Handle<JSObject> arguments = Handle<JSObject>::cast(
+ Accessors::FunctionGetArguments(function));
+ SetProperty(isolate,
+ target,
+ isolate->factory()->arguments_string(),
+ arguments,
+ ::NONE,
+ kNonStrictMode);
+ return target;
}
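
The effect is that a debug-evaluate at a break point can always resolve "arguments" inside a real function body; a sketch (hypothetical debug session):

    function f(x, y) {
      debugger;  // evaluating "arguments.length" at this break now yields 2,
    }            // even though f never mentions "arguments" itself
    f(1, 2);
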
@@ -12414,7 +12518,7 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
CLASSIC_MODE,
NO_PARSE_RESTRICTION,
RelocInfo::kNoPosition);
- if (shared.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate, shared);
Handle<JSFunction> eval_fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -12438,24 +12542,10 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
// Evaluate a piece of JavaScript in the context of a stack frame for
-// debugging. This is done by creating a new context which in its extension
-// part has all the parameters and locals of the function on the stack frame
-// as well as a materialized arguments object. As this context replaces
-// the context of the function on the stack frame a new (empty) function
-// is created as well to be used as the closure for the context.
-// This closure as replacements for the one on the stack frame presenting
-// the same view of the values of parameters and local variables as if the
-// piece of JavaScript was evaluated at the point where the function on the
-// stack frame is currently stopped when we compile and run the (direct) eval.
-// Returns array of
-// #0: evaluate result
-// #1: local variables materizalized again as object after evaluation, contain
-// original variable values as they remained on stack
-// #2: local variables materizalized as object before evaluation (and possibly
-// modified by expression having been executed)
-// Since user expression only reaches (and modifies) copies of local variables,
-// those copies are returned to the caller to allow tracking the changes and
-// manually updating the actual variables.
+// debugging. Things that need special attention are:
+// - Parameters and stack-allocated locals need to be materialized. Altered
+// values need to be written back to the stack afterwards.
+// - The arguments object needs to be materialized.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
HandleScope scope(isolate);
@@ -12490,69 +12580,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
SaveContext savex(isolate);
isolate->set_context(*(save->context()));
- // Create the (empty) function replacing the function on the stack frame for
- // the purpose of evaluating in the context created below. It is important
- // that this function does not describe any parameters and local variables
- // in the context. If it does then this will cause problems with the lookup
- // in Context::Lookup, where context slots for parameters and local variables
- // are looked at before the extension object.
- Handle<JSFunction> go_between =
- isolate->factory()->NewFunction(isolate->factory()->empty_string(),
- isolate->factory()->undefined_value());
- go_between->set_context(function->context());
-#ifdef DEBUG
- Handle<ScopeInfo> go_between_scope_info(go_between->shared()->scope_info());
- ASSERT(go_between_scope_info->ParameterCount() == 0);
- ASSERT(go_between_scope_info->ContextLocalCount() == 0);
-#endif
+ // Evaluate on the context of the frame.
+ Handle<Context> context(Context::cast(frame->context()));
+ ASSERT(!context.is_null());
- // Materialize the content of the local scope including the arguments object.
- Handle<JSObject> local_scope = MaterializeLocalScopeWithFrameInspector(
- isolate, frame, &frame_inspector);
- RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
+ // Materialize stack locals and the arguments object.
+ Handle<JSObject> materialized =
+ isolate->factory()->NewJSObject(isolate->object_function());
- // Do not materialize the arguments object for eval or top-level code.
- if (function->shared()->is_function()) {
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context;
- Handle<ScopeInfo> scope_info(function->shared()->scope_info());
- if (scope_info->HasContext()) {
- function_context = Handle<Context>(frame_context->declaration_context());
- }
- Handle<Object> arguments = GetArgumentsObject(isolate,
- frame,
- &frame_inspector,
- scope_info,
- function_context);
- SetProperty(isolate,
- local_scope,
- isolate->factory()->arguments_string(),
- arguments,
- ::NONE,
- kNonStrictMode);
- }
-
- // Allocate a new context for the debug evaluation and set the extension
- // object build.
- Handle<Context> context = isolate->factory()->NewFunctionContext(
- Context::MIN_CONTEXT_SLOTS, go_between);
-
- // Use the materialized local scope in a with context.
- context =
- isolate->factory()->NewWithContext(go_between, context, local_scope);
-
- // Copy any with contexts present and chain them in front of this context.
- context = CopyNestedScopeContextChain(isolate,
- go_between,
- context,
- frame,
- inlined_jsframe_index);
- if (context.is_null()) {
- ASSERT(isolate->has_pending_exception());
- MaybeObject* exception = isolate->pending_exception();
- isolate->clear_pending_exception();
- return exception;
- }
+ materialized = MaterializeStackLocalsWithFrameInspector(
+ isolate, materialized, function, &frame_inspector);
+ RETURN_IF_EMPTY_HANDLE(isolate, materialized);
+
+ materialized = MaterializeArgumentsObject(isolate, materialized, function);
+ RETURN_IF_EMPTY_HANDLE(isolate, materialized);
+
+ // Add the materialized object in a with-scope to shadow the stack locals.
+ context = isolate->factory()->NewWithContext(function, context, materialized);
Handle<Object> receiver(frame->receiver(), isolate);
Object* evaluate_result_object;
@@ -12560,18 +12604,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
DebugEvaluate(isolate, context, context_extension, receiver, source);
if (!maybe_result->ToObject(&evaluate_result_object)) return maybe_result;
}
- Handle<Object> evaluate_result(evaluate_result_object, isolate);
- Handle<JSObject> local_scope_control_copy =
- MaterializeLocalScopeWithFrameInspector(isolate, frame,
- &frame_inspector);
+ Handle<Object> result(evaluate_result_object, isolate);
- Handle<FixedArray> resultArray = isolate->factory()->NewFixedArray(3);
- resultArray->set(0, *evaluate_result);
- resultArray->set(1, *local_scope_control_copy);
- resultArray->set(2, *local_scope);
+ // Write back potential changes to materialized stack locals to the stack.
+ UpdateStackLocalsFromMaterializedObject(
+ isolate, materialized, function, frame, inlined_jsframe_index);
- return *(isolate->factory()->NewJSArrayWithElements(resultArray));
+ return *result;
}
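
Under the new contract, debug-evaluate returns the evaluation result directly instead of the old three-element array, and altered stack locals are written back; a sketch (hypothetical debug session):

    function f() {
      var a = 1;
      debugger;   // evaluating "a = 42" here returns 42, and the new value is
      return a;   // written back to the stack slot, so f() now returns 42
    }
    print(f());
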
@@ -12956,6 +12996,7 @@ static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
return counter;
}
+
// For a script finds all SharedFunctionInfo's in the heap that points
// to this script. Returns JSArray of SharedFunctionInfo wrapped
// in OpaqueReferences.
@@ -13001,6 +13042,7 @@ RUNTIME_FUNCTION(MaybeObject*,
return *result;
}
+
// For a script calculates compilation information about all its functions.
// The script source is explicitly specified by the second argument.
// The source of the actual script is not used, however it is important that
@@ -13027,6 +13069,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
return result;
}
+
// Changes the source of the script to a new_source.
// If old_script_name is provided (i.e. is a String), also creates a copy of
// the script with its original source and sends notification to debugger.
@@ -13074,6 +13117,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
}
+
// Connects SharedFunctionInfo to another script.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
HandleScope scope(isolate);
@@ -13148,6 +13192,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
}
+
// Compares 2 strings line-by-line, then token-wise and returns diff in form
// of JSArray of triplets (pos1, pos1_end, pos2_end) describing list
// of diff chunks.
@@ -13696,9 +13741,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
if (obj->IsJSGlobalProxy()) {
Object* proto = obj->GetPrototype();
if (proto->IsNull()) return isolate->heap()->undefined_value();
@@ -13707,21 +13751,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
}
ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
JSObject::cast(obj)->HasFastElements()));
- if (obj->map()->is_observed() != is_observed) {
- if (is_observed && obj->IsJSObject() &&
- !JSObject::cast(obj)->HasExternalArrayElements()) {
- // Go to dictionary mode, so that we don't skip map checks.
- MaybeObject* maybe = JSObject::cast(obj)->NormalizeElements();
- if (maybe->IsFailure()) return maybe;
- ASSERT(!JSObject::cast(obj)->HasFastElements());
- }
- MaybeObject* maybe = obj->map()->Copy();
- Map* map;
- if (!maybe->To(&map)) return maybe;
- map->set_is_observed(is_observed);
- obj->set_map(map);
- }
- return isolate->heap()->undefined_value();
+ ASSERT(obj->IsJSObject());
+ return JSObject::cast(obj)->SetObserved(isolate);
}
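
For context, %SetIsObserved is reached through Object.observe, which now simply marks the object as observed; a sketch (assuming d8 with --harmony-observation; change-record details per the early observe spec):

    var obj = {};
    Object.observe(obj, function(changes) {
      print(changes[0].type + " " + changes[0].name);  // e.g. "new x"
    });
    obj.x = 1;  // observed map: the mutation is recorded and delivered
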
@@ -13750,7 +13781,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) {
isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
Handle<JSWeakMap> weakmap =
Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
- return WeakMapInitialize(isolate, weakmap);
+ return WeakCollectionInitialize(isolate, weakmap);
}
@@ -13792,19 +13823,21 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
MaybeObject* maybe_array;
if (!type_info.is_null() &&
*type_info != isolate->heap()->undefined_value() &&
- Cell::cast(*type_info)->value()->IsSmi() &&
+ Cell::cast(*type_info)->value()->IsAllocationSite() &&
can_use_type_feedback) {
- Cell* cell = Cell::cast(*type_info);
- Smi* smi = Smi::cast(cell->value());
- ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ Handle<Cell> cell = Handle<Cell>::cast(type_info);
+ Handle<AllocationSite> site = Handle<AllocationSite>(
+ AllocationSite::cast(cell->value()), isolate);
+ ASSERT(!site->IsLiteralSite());
+ ElementsKind to_kind = site->GetElementsKind();
if (holey && !IsFastHoleyElementsKind(to_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
// Update the allocation site info to reflect the advice alteration.
- cell->set_value(Smi::FromInt(to_kind));
+ site->SetElementsKind(to_kind);
}
maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
- *constructor, type_info);
+ *constructor, site);
if (!maybe_array->To(&array)) return maybe_array;
} else {
maybe_array = isolate->heap()->AllocateJSObject(*constructor);
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 70568f9fa8..a8c10d92d5 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -97,8 +97,8 @@ namespace internal {
F(RunningInSimulator, 0, 1) \
F(IsParallelRecompilationSupported, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
- F(CompleteOptimization, 1, 1) \
- F(GetOptimizationStatus, 1, 1) \
+ F(NeverOptimizeFunction, 1, 1) \
+ F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
@@ -342,16 +342,16 @@ namespace internal {
F(MapSet, 3, 1) \
F(MapGetSize, 1, 1) \
\
- /* Harmony weakmaps */ \
- F(WeakMapInitialize, 1, 1) \
- F(WeakMapGet, 2, 1) \
- F(WeakMapHas, 2, 1) \
- F(WeakMapDelete, 2, 1) \
- F(WeakMapSet, 3, 1) \
+ /* Harmony weak maps and sets */ \
+ F(WeakCollectionInitialize, 1, 1) \
+ F(WeakCollectionGet, 2, 1) \
+ F(WeakCollectionHas, 2, 1) \
+ F(WeakCollectionDelete, 2, 1) \
+ F(WeakCollectionSet, 3, 1) \
\
/* Harmony observe */ \
F(IsObserved, 1, 1) \
- F(SetIsObserved, 2, 1) \
+ F(SetIsObserved, 1, 1) \
F(SetObserverDeliveryPending, 0, 1) \
F(GetObservationState, 0, 1) \
F(ObservationWeakMapCreate, 0, 1) \
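
The rename lets WeakMap and the new WeakSet share one set of natives; a JS sketch (assuming WeakSet is exposed behind the harmony collections flag):

    var key = {};
    var wm = new WeakMap();
    wm.set(key, 1);          // %WeakCollectionSet underneath
    print(wm.get(key));      // 1, via %WeakCollectionGet
    var ws = new WeakSet();
    ws.add(key);             // also %WeakCollectionSet
    print(ws.has(key));      // true, via %WeakCollectionHas
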
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 348fd747f5..90fb36b422 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -587,7 +587,7 @@ function ToObject(x) {
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
- throw %MakeTypeError('null_to_object', []);
+ throw %MakeTypeError('undefined_or_null_to_object', []);
}
return x;
}
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 982f252807..222b3182e8 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -658,7 +658,8 @@ Sampler::Sampler(Isolate* isolate, int interval)
interval_(interval),
profiling_(false),
active_(false),
- samples_taken_(0) {
+ is_counting_samples_(false),
+ js_and_external_sample_count_(0) {
data_ = new PlatformData;
}
@@ -668,6 +669,7 @@ Sampler::~Sampler() {
delete data_;
}
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -681,12 +683,17 @@ void Sampler::Stop() {
SetActive(false);
}
+
void Sampler::SampleStack(const RegisterState& state) {
TickSample* sample = isolate_->cpu_profiler()->TickSampleEvent();
TickSample sample_obj;
if (sample == NULL) sample = &sample_obj;
sample->Init(isolate_, state);
- if (++samples_taken_ < 0) samples_taken_ = 0;
+ if (is_counting_samples_) {
+ if (sample->state == JS || sample->state == EXTERNAL) {
+ ++js_and_external_sample_count_;
+ }
+ }
Tick(sample);
}
diff --git a/deps/v8/src/sampler.h b/deps/v8/src/sampler.h
index a47a3635dd..80ccc087ca 100644
--- a/deps/v8/src/sampler.h
+++ b/deps/v8/src/sampler.h
@@ -103,8 +103,13 @@ class Sampler {
bool IsActive() const { return NoBarrier_Load(&active_); }
// Used in tests to make sure that stack sampling is performed.
- int samples_taken() const { return samples_taken_; }
- void ResetSamplesTaken() { samples_taken_ = 0; }
+ unsigned js_and_external_sample_count() const {
+ return js_and_external_sample_count_;
+ }
+ void StartCountingSamples() {
+ is_counting_samples_ = true;
+ js_and_external_sample_count_ = 0;
+ }
class PlatformData;
PlatformData* platform_data() const { return data_; }
@@ -122,7 +127,9 @@ class Sampler {
Atomic32 profiling_;
Atomic32 active_;
PlatformData* data_; // Platform specific data.
- int samples_taken_; // Counts stack samples taken.
+ bool is_counting_samples_;
+ // Counts stack samples taken in JS VM state.
+ unsigned js_and_external_sample_count_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/scanner-character-streams.cc
index 56b9f03aa5..fb503459f7 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/scanner-character-streams.cc
@@ -46,6 +46,7 @@ BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
buffer_end_ = buffer_;
}
+
BufferedUtf16CharacterStream::~BufferedUtf16CharacterStream() { }
void BufferedUtf16CharacterStream::PushBack(uc32 character) {
@@ -113,6 +114,7 @@ unsigned BufferedUtf16CharacterStream::SlowSeekForward(unsigned delta) {
return BufferSeekForward(delta);
}
+
// ----------------------------------------------------------------------------
// GenericStringUtf16CharacterStream
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index ef2dc2c647..8b7cb569bd 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -42,7 +42,8 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
octal_pos_(Location::invalid()),
harmony_scoping_(false),
- harmony_modules_(false) { }
+ harmony_modules_(false),
+ harmony_numeric_literals_(false) { }
void Scanner::Initialize(Utf16CharacterStream* source) {
@@ -719,7 +720,7 @@ void Scanner::ScanDecimalDigits() {
Token::Value Scanner::ScanNumber(bool seen_period) {
ASSERT(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
- enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
+ enum { DECIMAL, HEX, OCTAL, IMPLICIT_OCTAL, BINARY } kind = DECIMAL;
LiteralScope literal(this);
if (seen_period) {
@@ -733,7 +734,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
int start_pos = source_pos(); // For reporting octal positions.
AddLiteralCharAdvance();
- // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
+ // either 0, 0exxx, 0Exxx, 0.xxx, a hex number, a binary number or
+ // an octal number.
if (c0_ == 'x' || c0_ == 'X') {
// hex number
kind = HEX;
@@ -745,9 +747,29 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
while (IsHexDigit(c0_)) {
AddLiteralCharAdvance();
}
+ } else if (harmony_numeric_literals_ && (c0_ == 'o' || c0_ == 'O')) {
+ kind = OCTAL;
+ AddLiteralCharAdvance();
+ if (!IsOctalDigit(c0_)) {
+ // we must have at least one octal digit after 'o'/'O'
+ return Token::ILLEGAL;
+ }
+ while (IsOctalDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+ } else if (harmony_numeric_literals_ && (c0_ == 'b' || c0_ == 'B')) {
+ kind = BINARY;
+ AddLiteralCharAdvance();
+ if (!IsBinaryDigit(c0_)) {
+ // we must have at least one binary digit after 'b'/'B'
+ return Token::ILLEGAL;
+ }
+ while (IsBinaryDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
} else if ('0' <= c0_ && c0_ <= '7') {
// (possible) octal number
- kind = OCTAL;
+ kind = IMPLICIT_OCTAL;
while (true) {
if (c0_ == '8' || c0_ == '9') {
kind = DECIMAL;
@@ -776,7 +798,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// scan exponent, if any
if (c0_ == 'e' || c0_ == 'E') {
ASSERT(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
- if (kind == OCTAL) return Token::ILLEGAL; // no exponent for octals allowed
+ if (kind != DECIMAL) return Token::ILLEGAL;
// scan exponent
AddLiteralCharAdvance();
if (c0_ == '+' || c0_ == '-')
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index eb6764e80f..d7328085b7 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -408,7 +408,12 @@ class Scanner {
void SetHarmonyModules(bool modules) {
harmony_modules_ = modules;
}
-
+ bool HarmonyNumericLiterals() const {
+ return harmony_numeric_literals_;
+ }
+ void SetHarmonyNumericLiterals(bool numeric_literals) {
+ harmony_numeric_literals_ = numeric_literals;
+ }
// Returns true if there was a line terminator before the peek'ed token,
// possibly inside a multi-line comment.
@@ -557,6 +562,8 @@ class Scanner {
bool harmony_scoping_;
// Whether we scan 'module', 'import', 'export' as keywords.
bool harmony_modules_;
+ // Whether we scan 0o777 and 0b111 as numbers.
+ bool harmony_numeric_literals_;
};
} } // namespace v8::internal
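
Accepted and rejected forms under the new scanner paths; a d8 sketch (assuming --harmony-numeric-literals):

    print(0o777);   // 511  -- OCTAL
    print(0B1011);  // 11   -- BINARY, upper-case prefix also accepted
    print(0777);    // 511  -- IMPLICIT_OCTAL, unchanged in classic code
    // SyntaxErrors: 0o (needs at least one digit), 0b12 ('2' is not binary),
    // 0o7e1 (exponents remain decimal-only per the kind != DECIMAL check)
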
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 6ae7cc0691..e631332d5c 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -970,6 +970,13 @@ Variable* Scope::LookupRecursive(Handle<String> name,
BindingKind* binding_kind,
AstNodeFactory<AstNullVisitor>* factory) {
ASSERT(binding_kind != NULL);
+ if (already_resolved() && is_with_scope()) {
+ // Short-cut: if the scope is deserialized from a scope info, variable
+ // allocation is already fixed. We can simply return with dynamic lookup.
+ *binding_kind = DYNAMIC_LOOKUP;
+ return NULL;
+ }
+
// Try to find the variable in this scope.
Variable* var = LocalLookup(name);
@@ -998,6 +1005,7 @@ Variable* Scope::LookupRecursive(Handle<String> name,
}
if (is_with_scope()) {
+ ASSERT(!already_resolved());
// The current scope is a with scope, so the variable binding can not be
// statically resolved. However, note that it was necessary to do a lookup
// in the outer scope anyway, because if a binding exists in an outer scope,
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 4e51cd396b..ad56d36132 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -577,6 +577,10 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
62,
"Heap::NewSpaceAllocationLimitAddress");
+ Add(ExternalReference::allocation_sites_list_address(isolate).address(),
+ UNCLASSIFIED,
+ 63,
+ "Heap::allocation_sites_list_address()");
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
@@ -587,7 +591,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 63 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
}
}
@@ -690,6 +694,13 @@ void Deserializer::Deserialize() {
isolate_->heap()->set_array_buffers_list(
isolate_->heap()->undefined_value());
+ // The allocation site list is built during root iteration, but if no sites
+ // were encountered then it needs to be initialized to undefined.
+ if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+ isolate_->heap()->set_allocation_sites_list(
+ isolate_->heap()->undefined_value());
+ }
+
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source = isolate_->heap()->natives_source_cache()->get(i);
@@ -745,6 +756,16 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
}
+void Deserializer::RelinkAllocationSite(AllocationSite* site) {
+ if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+ site->set_weak_next(isolate_->heap()->undefined_value());
+ } else {
+ site->set_weak_next(isolate_->heap()->allocation_sites_list());
+ }
+ isolate_->heap()->set_allocation_sites_list(site);
+}
+
+
// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
@@ -754,16 +775,25 @@ void Deserializer::ReadObject(int space_number,
Object** write_back) {
int size = source_->GetInt() << kObjectAlignmentBits;
Address address = Allocate(space_number, size);
- *write_back = HeapObject::FromAddress(address);
+ HeapObject* obj = HeapObject::FromAddress(address);
+ *write_back = obj;
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
if (FLAG_log_snapshot_positions) {
LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
}
ReadChunk(current, limit, space_number, address);
+
+ // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+ // as a (weak) root. If this root is relocated correctly,
+ // RelinkAllocationSite() isn't necessary.
+ if (obj->IsAllocationSite()) {
+ RelinkAllocationSite(AllocationSite::cast(obj));
+ }
+
#ifdef DEBUG
bool is_codespace = (space_number == CODE_SPACE);
- ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
+ ASSERT(obj->IsCode() == is_codespace);
#endif
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index a6099afc23..283c1b77a1 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -347,6 +347,10 @@ class Deserializer: public SerializerDeserializer {
UNREACHABLE();
}
+ // Allocation sites are present in the snapshot, and must be linked into
+ // a list at deserialization time.
+ void RelinkAllocationSite(AllocationSite* site);
+
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
// of the object we are writing into, or NULL if we are not writing into an
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 15381eaf32..5935c4a0ea 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -914,6 +914,7 @@ void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
chunk->IncrementLiveBytes(by);
}
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -994,6 +995,7 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
return Failure::Exception();
}
+
bool PagedSpace::CanExpand() {
ASSERT(max_capacity_ % AreaSize() == 0);
@@ -1868,6 +1870,7 @@ void NewSpace::ClearHistograms() {
}
}
+
// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when --log-gc flag is set.
@@ -1961,6 +1964,7 @@ size_t NewSpace::CommittedPhysicalMemory() {
return size;
}
+
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
diff --git a/deps/v8/src/store-buffer-inl.h b/deps/v8/src/store-buffer-inl.h
index dd65cbcc9c..bb386dbacf 100644
--- a/deps/v8/src/store-buffer-inl.h
+++ b/deps/v8/src/store-buffer-inl.h
@@ -74,6 +74,14 @@ void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
}
+void StoreBuffer::ClearDeadObject(HeapObject* object) {
+ Address& map_field = Memory::Address_at(object->address());
+ if (heap_->map_space()->Contains(map_field)) {
+ map_field = NULL;
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_STORE_BUFFER_INL_H_
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index 0386280de6..9705b60489 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -364,7 +364,8 @@ void StoreBuffer::VerifyPointers(PagedSpace* space,
reinterpret_cast<PagedSpace*>(page->owner()),
page,
region_callback,
- &DummyScavengePointer);
+ &DummyScavengePointer,
+ false);
}
}
@@ -412,7 +413,10 @@ void StoreBuffer::GCEpilogue() {
void StoreBuffer::FindPointersToNewSpaceInRegion(
- Address start, Address end, ObjectSlotCallback slot_callback) {
+ Address start,
+ Address end,
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
for (Address slot_address = start;
slot_address < end;
slot_address += kPointerSize) {
@@ -420,6 +424,9 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
if (heap_->InNewSpace(*slot)) {
HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
ASSERT(object->IsHeapObject());
+ // The new space object was not promoted if it still contains a map
+ // pointer. Clear the map field now lazily.
+ if (clear_maps) ClearDeadObject(object);
slot_callback(reinterpret_cast<HeapObject**>(slot), object);
if (heap_->InNewSpace(*slot)) {
EnterDirectlyIntoStoreBuffer(slot_address);
@@ -446,7 +453,8 @@ static inline Address MapEndAlign(Address addr) {
void StoreBuffer::FindPointersToNewSpaceInMaps(
Address start,
Address end,
- ObjectSlotCallback slot_callback) {
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
ASSERT(MapStartAlign(start) == start);
ASSERT(MapEndAlign(end) == end);
@@ -460,7 +468,8 @@ void StoreBuffer::FindPointersToNewSpaceInMaps(
FindPointersToNewSpaceInRegion(pointer_fields_start,
pointer_fields_end,
- slot_callback);
+ slot_callback,
+ clear_maps);
map_address += Map::kSize;
}
}
@@ -469,7 +478,8 @@ void StoreBuffer::FindPointersToNewSpaceInMaps(
void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
Address start,
Address end,
- ObjectSlotCallback slot_callback) {
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
Address map_aligned_start = MapStartAlign(start);
Address map_aligned_end = MapEndAlign(end);
@@ -478,7 +488,8 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
FindPointersToNewSpaceInMaps(map_aligned_start,
map_aligned_end,
- slot_callback);
+ slot_callback,
+ clear_maps);
}
@@ -500,7 +511,8 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
PagedSpace* space,
Page* page,
RegionCallback region_callback,
- ObjectSlotCallback slot_callback) {
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
Address visitable_start = page->area_start();
Address end_of_page = page->area_end();
@@ -520,7 +532,8 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
// After calling this the special garbage section may have moved.
(this->*region_callback)(visitable_start,
visitable_end,
- slot_callback);
+ slot_callback,
+ clear_maps);
if (visitable_end >= space->top() && visitable_end < space->limit()) {
visitable_end = space->limit();
visitable_start = visitable_end;
@@ -551,13 +564,15 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
if (visitable_start != visitable_end) {
(this->*region_callback)(visitable_start,
visitable_end,
- slot_callback);
+ slot_callback,
+ clear_maps);
}
}
void StoreBuffer::IteratePointersInStoreBuffer(
- ObjectSlotCallback slot_callback) {
+ ObjectSlotCallback slot_callback,
+ bool clear_maps) {
Address* limit = old_top_;
old_top_ = old_start_;
{
@@ -570,6 +585,9 @@ void StoreBuffer::IteratePointersInStoreBuffer(
Object* object = *slot;
if (heap_->InFromSpace(object)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ // The new space object was not promoted if it still contains a map
+ // pointer. Clear the map field now lazily.
+ if (clear_maps) ClearDeadObject(heap_object);
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
if (heap_->InNewSpace(*slot)) {
EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
@@ -582,6 +600,18 @@ void StoreBuffer::IteratePointersInStoreBuffer(
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+ IteratePointersToNewSpace(slot_callback, false);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
+ ObjectSlotCallback slot_callback) {
+ IteratePointersToNewSpace(slot_callback, true);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
+ bool clear_maps) {
// We do not sort or remove duplicated entries from the store buffer because
  // we expect that the callback will rebuild the store buffer, thus removing
// all duplicates and pointers to old space.
@@ -590,7 +620,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
// TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from the slot address
  // because the slot can belong to a large object.
- IteratePointersInStoreBuffer(slot_callback);
+ IteratePointersInStoreBuffer(slot_callback, clear_maps);
// We are done scanning all the pointers that were in the store buffer, but
// there may be some pages marked scan_on_scavenge that have pointers to new
@@ -619,7 +649,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
ASSERT(array->IsFixedArray());
Address start = array->address();
Address end = start + array->Size();
- FindPointersToNewSpaceInRegion(start, end, slot_callback);
+ FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
} else {
Page* page = reinterpret_cast<Page*>(chunk);
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
@@ -629,7 +659,8 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
(owner == heap_->map_space() ?
&StoreBuffer::FindPointersToNewSpaceInMapsRegion :
&StoreBuffer::FindPointersToNewSpaceInRegion),
- slot_callback);
+ slot_callback,
+ clear_maps);
}
}
}
diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h
index 520cbc0162..01e7cbeb8d 100644
--- a/deps/v8/src/store-buffer.h
+++ b/deps/v8/src/store-buffer.h
@@ -43,8 +43,10 @@ class StoreBuffer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-typedef void (StoreBuffer::*RegionCallback)(
- Address start, Address end, ObjectSlotCallback slot_callback);
+typedef void (StoreBuffer::*RegionCallback)(Address start,
+ Address end,
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
@@ -83,6 +85,10 @@ class StoreBuffer {
// surviving old-to-new pointers into the store buffer to rebuild it.
void IteratePointersToNewSpace(ObjectSlotCallback callback);
+  // Same as IteratePointersToNewSpace but additionally clears maps in objects
+ // referenced from the store buffer that do not contain a forwarding pointer.
+ void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);
+
static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
@@ -164,9 +170,15 @@ class StoreBuffer {
void Uniq();
void ExemptPopularPages(int prime_sample_step, int threshold);
+  // Set the map field of the object to NULL if it contains a map.
+  inline void ClearDeadObject(HeapObject* object);
+
+ void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);
+
void FindPointersToNewSpaceInRegion(Address start,
Address end,
- ObjectSlotCallback slot_callback);
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
// For each region of pointers on a page in use from an old space call
// visit_pointer_region callback.
@@ -182,20 +194,24 @@ class StoreBuffer {
void FindPointersToNewSpaceInMaps(
Address start,
Address end,
- ObjectSlotCallback slot_callback);
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
void FindPointersToNewSpaceInMapsRegion(
Address start,
Address end,
- ObjectSlotCallback slot_callback);
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
void FindPointersToNewSpaceOnPage(
PagedSpace* space,
Page* page,
RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
+ ObjectSlotCallback slot_callback,
+ bool clear_maps);
- void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
+ void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
+ bool clear_maps);
#ifdef VERIFY_HEAP
void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
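
The store-buffer changes thread a single clear_maps flag from the two public entry points down through every RegionCallback. A compilable sketch of the same pattern, with hypothetical names (Buffer, Region, Iterate) in place of the V8 types:

#include <iostream>

class Buffer;
// Like the patched RegionCallback typedef: the flag rides along.
typedef void (Buffer::*RegionCallback)(int start, int end, bool clear_maps);

class Buffer {
 public:
  void Region(int start, int end, bool clear_maps) {
    std::cout << "region [" << start << "," << end << ") clear_maps="
              << clear_maps << "\n";
  }
  // One traversal routine serves both modes; the flag is threaded through.
  void Iterate(RegionCallback cb, bool clear_maps) {
    (this->*cb)(0, 8, clear_maps);
    (this->*cb)(8, 16, clear_maps);
  }
  // Thin public wrappers, mirroring IteratePointersToNewSpace vs.
  // IteratePointersToNewSpaceAndClearMaps in the patch.
  void IterateAll() { Iterate(&Buffer::Region, false); }
  void IterateAllAndClearMaps() { Iterate(&Buffer::Region, true); }
};

int main() {
  Buffer b;
  b.IterateAll();
  b.IterateAllAndClearMaps();
  return 0;
}
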
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 7e186871ba..cb82c16634 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -185,7 +185,8 @@ function StringMatch(regexp) {
if (IS_REGEXP(regexp)) {
// Emulate RegExp.prototype.exec's side effect in step 5, even though
// value is discarded.
- ToInteger(regexp.lastIndex);
+ var lastIndex = regexp.lastIndex;
+ TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
%_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
// lastMatchInfo is defined in regexp.js.
@@ -236,7 +237,8 @@ function StringReplace(search, replace) {
if (IS_REGEXP(search)) {
// Emulate RegExp.prototype.exec's side effect in step 5, even if
// value is discarded.
- ToInteger(search.lastIndex);
+ var lastIndex = search.lastIndex;
+ TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
%_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
if (!IS_SPEC_FUNCTION(replace)) {
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index a1774b6e1f..d332fd2bc4 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -128,6 +128,7 @@ static void TrimToMaxSignificantDigits(Vector<const char> buffer,
exponent + (buffer.length() - kMaxSignificantDecimalDigits);
}
+
// Reads digits from the buffer and converts them to a uint64.
// Reads in as many digits as fit into a uint64.
// When the string starts with "1844674407370955161" no further digit is read.
@@ -175,8 +176,7 @@ static void ReadDiyFp(Vector<const char> buffer,
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
-#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) \
- && !defined(_MSC_VER)
+#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 62ac2c873b..436cd46ce1 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -133,11 +133,11 @@ Handle<Code> StubCache::FindIC(Handle<Name> name,
}
-Handle<Code> StubCache::FindHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type) {
+Handle<Code> StubCache::FindLoadHandler(Handle<Name> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> stub_holder,
+ Code::Kind kind,
+ Code::StubType type) {
Code::ExtraICState extra_ic_state = Code::ComputeExtraICState(
receiver.is_identical_to(stub_holder) ? Code::OWN_STUB
: Code::PROTOTYPE_STUB);
@@ -151,9 +151,26 @@ Handle<Code> StubCache::FindHandler(Handle<Name> name,
}
-Handle<Code> StubCache::ComputeMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<Name> name) {
+Handle<Code> StubCache::FindStoreHandler(Handle<Name> name,
+ Handle<JSObject> receiver,
+ Code::Kind kind,
+ Code::StubType type,
+ StrictModeFlag strict_mode) {
+ Code::ExtraICState extra_ic_state = Code::ComputeExtraICState(
+ STANDARD_STORE, strict_mode);
+ ASSERT(type != Code::NORMAL);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::STUB, extra_ic_state, type, kind);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+ return Handle<Code>::null();
+}
+
+
+Handle<Code> StubCache::ComputeMonomorphicLoadIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name) {
Handle<Code> ic = FindIC(name, receiver, Code::LOAD_IC, handler->type());
if (!ic.is_null()) return ic;
@@ -166,9 +183,9 @@ Handle<Code> StubCache::ComputeMonomorphicIC(Handle<JSObject> receiver,
}
-Handle<Code> StubCache::ComputeKeyedMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<Name> name) {
+Handle<Code> StubCache::ComputeMonomorphicKeyedLoadIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name) {
Handle<Code> ic = FindIC(
name, receiver, Code::KEYED_LOAD_IC, handler->type());
if (!ic.is_null()) return ic;
@@ -182,6 +199,41 @@ Handle<Code> StubCache::ComputeKeyedMonomorphicIC(Handle<JSObject> receiver,
}
+Handle<Code> StubCache::ComputeMonomorphicStoreIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ Handle<Code> ic = FindIC(
+ name, receiver, Code::STORE_IC, handler->type(), strict_mode);
+ if (!ic.is_null()) return ic;
+
+ StoreStubCompiler ic_compiler(isolate(), strict_mode);
+ ic = ic_compiler.CompileMonomorphicIC(
+ Handle<Map>(receiver->map()), handler, name);
+
+ JSObject::UpdateMapCodeCache(receiver, name, ic);
+ return ic;
+}
+
+
+Handle<Code> StubCache::ComputeMonomorphicKeyedStoreIC(
+ Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ Handle<Code> ic = FindIC(
+ name, receiver, Code::KEYED_STORE_IC, handler->type(), strict_mode);
+ if (!ic.is_null()) return ic;
+
+ KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
+ ic = ic_compiler.CompileMonomorphicIC(
+ Handle<Map>(receiver->map()), handler, name);
+
+ JSObject::UpdateMapCodeCache(receiver, name, ic);
+ return ic;
+}
+
+
Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
Handle<JSObject> receiver) {
// If no global objects are present in the prototype chain, the load
@@ -207,7 +259,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
- Handle<Code> handler = FindHandler(
+ Handle<Code> handler = FindLoadHandler(
cache_name, receiver, receiver, Code::LOAD_IC, Code::NONEXISTENT);
if (!handler.is_null()) return handler;
@@ -232,7 +284,7 @@ Handle<Code> StubCache::ComputeLoadField(Handle<Name> name,
}
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
+ Handle<Code> stub = FindLoadHandler(
name, receiver, stub_holder, Code::LOAD_IC, Code::FIELD);
if (!stub.is_null()) return stub;
@@ -251,7 +303,7 @@ Handle<Code> StubCache::ComputeLoadCallback(
Handle<ExecutableAccessorInfo> callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
+ Handle<Code> stub = FindLoadHandler(
name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
if (!stub.is_null()) return stub;
@@ -268,7 +320,7 @@ Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name,
Handle<JSObject> holder,
Handle<JSFunction> getter) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
+ Handle<Code> stub = FindLoadHandler(
name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
if (!stub.is_null()) return stub;
@@ -285,7 +337,7 @@ Handle<Code> StubCache::ComputeLoadConstant(Handle<Name> name,
Handle<JSObject> holder,
Handle<JSFunction> value) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> handler = FindHandler(
+ Handle<Code> handler = FindLoadHandler(
name, receiver, stub_holder, Code::LOAD_IC, Code::CONSTANT_FUNCTION);
if (!handler.is_null()) return handler;
@@ -301,7 +353,7 @@ Handle<Code> StubCache::ComputeLoadInterceptor(Handle<Name> name,
Handle<JSObject> receiver,
Handle<JSObject> holder) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
+ Handle<Code> stub = FindLoadHandler(
name, receiver, stub_holder, Code::LOAD_IC, Code::INTERCEPTOR);
if (!stub.is_null()) return stub;
@@ -350,7 +402,7 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
}
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
+ Handle<Code> stub = FindLoadHandler(
name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::FIELD);
if (!stub.is_null()) return stub;
@@ -367,7 +419,7 @@ Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<Name> name,
Handle<JSObject> holder,
Handle<JSFunction> value) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> handler = FindHandler(
+ Handle<Code> handler = FindLoadHandler(
name, receiver, stub_holder, Code::KEYED_LOAD_IC,
Code::CONSTANT_FUNCTION);
if (!handler.is_null()) return handler;
@@ -383,7 +435,7 @@ Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<Name> name,
Handle<JSObject> receiver,
Handle<JSObject> holder) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
+ Handle<Code> stub = FindLoadHandler(
name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
if (!stub.is_null()) return stub;
@@ -401,7 +453,7 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
Handle<JSObject> holder,
Handle<ExecutableAccessorInfo> callback) {
Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindHandler(
+ Handle<Code> stub = FindLoadHandler(
name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
if (!stub.is_null()) return stub;
@@ -417,14 +469,14 @@ Handle<Code> StubCache::ComputeStoreField(Handle<Name> name,
Handle<JSObject> receiver,
LookupResult* lookup,
StrictModeFlag strict_mode) {
- Handle<Code> stub = FindIC(
+ Handle<Code> stub = FindStoreHandler(
name, receiver, Code::STORE_IC, Code::FIELD, strict_mode);
if (!stub.is_null()) return stub;
StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreField(receiver, lookup, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
+ JSObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
}
@@ -433,15 +485,15 @@ Handle<Code> StubCache::ComputeStoreTransition(Handle<Name> name,
LookupResult* lookup,
Handle<Map> transition,
StrictModeFlag strict_mode) {
- Handle<Code> stub = FindIC(
+ Handle<Code> stub = FindStoreHandler(
name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode);
if (!stub.is_null()) return stub;
StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
+ Handle<Code> handler =
compiler.CompileStoreTransition(receiver, lookup, transition, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ JSObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
}
@@ -499,15 +551,30 @@ Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
Handle<GlobalObject> receiver,
Handle<PropertyCell> cell,
+ Handle<Object> value,
StrictModeFlag strict_mode) {
- Handle<Code> stub = FindIC(
- name, Handle<JSObject>::cast(receiver),
- Code::STORE_IC, Code::NORMAL, strict_mode);
- if (!stub.is_null()) return stub;
+ Isolate* isolate = cell->GetIsolate();
+ Handle<Type> union_type(PropertyCell::UpdateType(cell, value), isolate);
+ bool is_constant = union_type->IsConstant();
+ StoreGlobalStub stub(strict_mode, is_constant);
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name);
+ Handle<Code> code = FindIC(
+ name, Handle<JSObject>::cast(receiver),
+ Code::STORE_IC, Code::NORMAL, stub.GetExtraICState());
+ if (!code.is_null()) return code;
+
+ if (is_constant) return stub.GetCode(isolate_);
+
+ // Replace the placeholder cell and global object map with the actual global
+ // cell and receiver map.
+ Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
+ Handle<Map> meta_map(isolate_->heap()->meta_map());
+ Handle<Object> receiver_map(receiver->map(), isolate_);
+ code = stub.GetCodeCopyFromTemplate(isolate_);
+ code->ReplaceNthObject(1, *meta_map, *receiver_map);
+ code->ReplaceNthObject(1, *cell_map, *cell);
JSObject::UpdateMapCodeCache(receiver, name, code);
+
return code;
}
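
ComputeStoreGlobal now compiles one template stub containing placeholder objects (the meta map and a placeholder property cell), copies it, and patches the nth occurrence of each placeholder via ReplaceNthObject. A toy version of that copy-and-patch step, with illustrative names only:

#include <array>
#include <cassert>
#include <cstddef>

// Clone a "template" object table and replace the nth occurrence of a
// placeholder with the real value; the template itself stays untouched,
// in the spirit of GetCodeCopyFromTemplate + ReplaceNthObject.
template <std::size_t N>
std::array<const void*, N> CopyAndPatch(std::array<const void*, N> tmpl,
                                        int nth, const void* placeholder,
                                        const void* real) {
  for (const void*& slot : tmpl) {
    if (slot == placeholder && --nth == 0) {
      slot = real;
      break;
    }
  }
  return tmpl;  // patched copy
}

int main() {
  int meta_map = 0, receiver_map = 0, placeholder_cell = 0, real_cell = 0;
  std::array<const void*, 2> code_template = {&meta_map, &placeholder_cell};
  auto code = CopyAndPatch(code_template, 1, &meta_map, &receiver_map);
  code = CopyAndPatch(code, 1, &placeholder_cell, &real_cell);
  assert(code[0] == &receiver_map && code[1] == &real_cell);
  return 0;
}
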
@@ -519,15 +586,15 @@ Handle<Code> StubCache::ComputeStoreCallback(
Handle<ExecutableAccessorInfo> callback,
StrictModeFlag strict_mode) {
ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
- Handle<Code> stub = FindIC(
+ Handle<Code> stub = FindStoreHandler(
name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
if (!stub.is_null()) return stub;
StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
- compiler.CompileStoreCallback(name, receiver, holder, callback);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler = compiler.CompileStoreCallback(
+ receiver, holder, name, callback);
+ JSObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
}
@@ -536,29 +603,29 @@ Handle<Code> StubCache::ComputeStoreViaSetter(Handle<Name> name,
Handle<JSObject> holder,
Handle<JSFunction> setter,
StrictModeFlag strict_mode) {
- Handle<Code> stub = FindIC(
+ Handle<Code> stub = FindStoreHandler(
name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
if (!stub.is_null()) return stub;
StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code =
- compiler.CompileStoreViaSetter(name, receiver, holder, setter);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler = compiler.CompileStoreViaSetter(
+ receiver, holder, name, setter);
+ JSObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
}
Handle<Code> StubCache::ComputeStoreInterceptor(Handle<Name> name,
Handle<JSObject> receiver,
StrictModeFlag strict_mode) {
- Handle<Code> stub = FindIC(
+ Handle<Code> stub = FindStoreHandler(
name, receiver, Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
if (!stub.is_null()) return stub;
StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler = compiler.CompileStoreInterceptor(receiver, name);
+ JSObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
}
@@ -566,14 +633,14 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<Name> name,
Handle<JSObject> receiver,
LookupResult* lookup,
StrictModeFlag strict_mode) {
- Handle<Code> stub = FindIC(
+ Handle<Code> stub = FindStoreHandler(
name, receiver, Code::KEYED_STORE_IC, Code::FIELD, strict_mode);
if (!stub.is_null()) return stub;
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
- Handle<Code> code = compiler.CompileStoreField(receiver, lookup, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
+ JSObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
}
@@ -583,15 +650,15 @@ Handle<Code> StubCache::ComputeKeyedStoreTransition(
LookupResult* lookup,
Handle<Map> transition,
StrictModeFlag strict_mode) {
- Handle<Code> stub = FindIC(
+ Handle<Code> stub = FindStoreHandler(
name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode);
if (!stub.is_null()) return stub;
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
- Handle<Code> code =
+ Handle<Code> handler =
compiler.CompileStoreTransition(receiver, lookup, transition, name);
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ JSObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
}
@@ -922,12 +989,8 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
if (!cached_ic.is_null()) return cached_ic;
}
- Handle<Code> ic = stub.GetCode(isolate_);
-
- // For monomorphic maps, use the code as a template, copying and replacing
- // the monomorphic map that checks the object's type.
- ic = isolate_->factory()->CopyCode(ic);
- ic->ReplaceFirstMap(*receiver_map);
+ Handle<Code> ic = stub.GetCodeCopyFromTemplate(isolate_);
+ ic->ReplaceNthObject(1, isolate_->heap()->meta_map(), *receiver_map);
if (!receiver_map->is_shared()) {
Map::UpdateCodeCache(receiver_map, name, ic);
@@ -959,10 +1022,10 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
}
-Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name) {
+Handle<Code> StubCache::ComputePolymorphicLoadIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name) {
LoadStubCompiler ic_compiler(isolate_);
Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
: Code::NORMAL;
@@ -972,6 +1035,20 @@ Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
}
+Handle<Code> StubCache::ComputePolymorphicStoreIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ StoreStubCompiler ic_compiler(isolate_, strict_mode);
+ Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
+ : Code::NORMAL;
+ Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
+ receiver_maps, handlers, name, type, PROPERTY);
+ return ic;
+}
+
+
Handle<Code> StubCache::ComputeStoreElementPolymorphic(
MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode,
@@ -1462,7 +1539,7 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
Handle<Name> name) {
return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
? GetCodeWithFlags(flags, *Handle<String>::cast(name)->ToCString())
- : GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
+ : GetCodeWithFlags(flags, NULL);
}
@@ -1479,28 +1556,42 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
#define __ ACCESS_MASM(masm())
-Register BaseLoadStubCompiler::HandlerFrontendHeader(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- // Check the prototype chain.
+Register BaseLoadStubCompiler::HandlerFrontendHeader(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss) {
return CheckPrototypes(object, object_reg, holder,
scratch1(), scratch2(), scratch3(),
name, miss, SKIP_RECEIVER);
}
-Register BaseLoadStubCompiler::HandlerFrontend(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* success) {
+// HandlerFrontend for store uses the name register. It has to be restored
+// before a miss.
+Register BaseStoreStubCompiler::HandlerFrontendHeader(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss) {
+ return CheckPrototypes(object, object_reg, holder,
+ this->name(), scratch1(), scratch2(),
+ name, miss, SKIP_RECEIVER);
+}
+
+
+Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
return reg;
}
@@ -1614,7 +1705,7 @@ void BaseLoadStubCompiler::GenerateLoadPostInterceptor(
}
-Handle<Code> BaseLoadStubCompiler::CompileMonomorphicIC(
+Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC(
Handle<Map> receiver_map,
Handle<Code> handler,
Handle<Name> name) {
@@ -1648,9 +1739,35 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name) {
- Label miss, miss_restore_name, slow;
+ Label miss, slow;
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1(), &miss);
+
+ // Check that we are allowed to write this.
+ if (object->GetPrototype()->IsJSObject()) {
+ Handle<JSObject> holder;
+ // holder == object indicates that no property was found.
+ if (lookup->holder() != *object) {
+ holder = Handle<JSObject>(lookup->holder());
+ } else {
+ // Find the top object.
+ holder = object;
+ do {
+ holder = Handle<JSObject>(JSObject::cast(holder->GetPrototype()));
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+
+ Register holder_reg =
+ HandlerFrontendHeader(object, receiver(), holder, name, &miss);
- GenerateNameCheck(name, this->name(), &miss);
+ // If no property was found, and the holder (the last object in the
+ // prototype chain) is in slow mode, we need to do a negative lookup on the
+ // holder.
+ if (lookup->holder() == *object) {
+ GenerateNegativeHolderLookup(masm(), holder, holder_reg, name, &miss);
+ }
+ }
GenerateStoreTransition(masm(),
object,
@@ -1660,19 +1777,17 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
receiver(), this->name(), value(),
scratch1(), scratch2(), scratch3(),
&miss,
- &miss_restore_name,
&slow);
// Handle store cache miss.
- GenerateRestoreName(masm(), &miss_restore_name, name);
- __ bind(&miss);
+ GenerateRestoreName(masm(), &miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
GenerateRestoreName(masm(), &slow, name);
TailCallBuiltin(masm(), SlowBuiltin(kind()));
// Return the generated code.
- return GetICCode(kind(), Code::MAP_TRANSITION, name);
+ return GetCode(kind(), Code::MAP_TRANSITION, name);
}
@@ -1681,7 +1796,7 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
Handle<Name> name) {
Label miss;
- GenerateNameCheck(name, this->name(), &miss);
+ HandlerFrontendHeader(object, receiver(), object, name, &miss);
// Generate store field code.
GenerateStoreField(masm(),
@@ -1695,32 +1810,22 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetICCode(kind(), Code::FIELD, name);
+ return GetCode(kind(), Code::FIELD, name);
}
Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<JSFunction> setter) {
- Label miss, miss_restore_name;
-
- // Check that the maps haven't changed, preserving the name register.
- __ JumpIfSmi(receiver(), &miss);
- CheckPrototypes(object, receiver(), holder,
- this->name(), scratch1(), scratch2(),
- name, &miss_restore_name);
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
GenerateStoreViaSetter(masm(), setter);
- GenerateRestoreName(masm(), &miss_restore_name, name);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -1802,12 +1907,12 @@ void KeyedStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
}
-Handle<Code> BaseLoadStubCompiler::GetICCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name,
- InlineCacheState state) {
+Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name,
+ InlineCacheState state) {
Code::Flags flags = Code::ComputeFlags(
- kind, state, Code::kNoExtraICState, type);
+ kind, state, extra_state(), type);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
@@ -1828,19 +1933,6 @@ Handle<Code> BaseLoadStubCompiler::GetCode(Code::Kind kind,
}
-Handle<Code> BaseStoreStubCompiler::GetICCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name,
- InlineCacheState state) {
- Code::Flags flags = Code::ComputeFlags(
- kind, state, extra_state(), type);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
- JitEvent(name, code);
- return code;
-}
-
-
Handle<Code> BaseStoreStubCompiler::GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name) {
@@ -1901,12 +1993,21 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
ElementsKind elements_kind = receiver_map->elements_kind();
if (!transitioned_map.is_null()) {
- cached_stub = ElementsTransitionAndStoreStub(
- elements_kind,
- transitioned_map->elements_kind(),
- is_js_array,
- strict_mode(),
- store_mode_).GetCode(isolate());
+ if (FLAG_compiled_transitions) {
+ cached_stub = ElementsTransitionAndStoreStub(
+ elements_kind,
+ transitioned_map->elements_kind(),
+ is_js_array,
+ store_mode_).GetCode(isolate());
+ } else {
+        // TODO(bmeurer): Remove this when compiled transitions are enabled.
+ cached_stub = ElementsTransitionAndStorePlatformStub(
+ elements_kind,
+ transitioned_map->elements_kind(),
+ is_js_array,
+ strict_mode(),
+ store_mode_).GetCode(isolate());
+ }
} else {
if (FLAG_compiled_keyed_stores &&
(receiver_map->has_fast_elements() ||
@@ -2045,6 +2146,7 @@ CallOptimization::CallOptimization(LookupResult* lookup) {
}
}
+
CallOptimization::CallOptimization(Handle<JSFunction> function) {
Initialize(function);
}
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 6d70d3477d..73a1a8a76f 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -90,19 +90,35 @@ class StubCache {
Code::StubType type,
Code::ExtraICState extra_state = Code::kNoExtraICState);
- Handle<Code> FindHandler(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type);
+ Handle<Code> FindLoadHandler(Handle<Name> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> stub_holder,
+ Code::Kind kind,
+ Code::StubType type);
- Handle<Code> ComputeMonomorphicIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<Name> name);
- Handle<Code> ComputeKeyedMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> FindStoreHandler(Handle<Name> name,
+ Handle<JSObject> receiver,
+ Code::Kind kind,
+ Code::StubType type,
+ StrictModeFlag strict_mode);
+
+ Handle<Code> ComputeMonomorphicLoadIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name);
+
+ Handle<Code> ComputeMonomorphicKeyedLoadIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name);
+
+ Handle<Code> ComputeMonomorphicStoreIC(Handle<JSObject> receiver,
Handle<Code> handler,
- Handle<Name> name);
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
+
+ Handle<Code> ComputeMonomorphicKeyedStoreIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
// Computes the right stub matching. Inserts the result in the
// cache before returning. This might compile a stub if needed.
@@ -184,6 +200,7 @@ class StubCache {
Handle<Code> ComputeStoreGlobal(Handle<Name> name,
Handle<GlobalObject> object,
Handle<PropertyCell> cell,
+ Handle<Object> value,
StrictModeFlag strict_mode);
Handle<Code> ComputeStoreCallback(Handle<Name> name,
@@ -290,10 +307,16 @@ class StubCache {
KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode);
- Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name);
+ Handle<Code> ComputePolymorphicLoadIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name);
+
+ Handle<Code> ComputePolymorphicStoreIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
@@ -529,49 +552,6 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
Label* miss_label);
- void GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* miss_restore_name,
- Label* slow);
-
- void GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
- static Builtins::Name MissBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
- case Code::STORE_IC: return Builtins::kStoreIC_Miss;
- case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss;
- case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss;
- default: UNREACHABLE();
- }
- return Builtins::kLoadIC_Miss;
- }
- static Builtins::Name SlowBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::STORE_IC: return Builtins::kStoreIC_Slow;
- case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow;
- default: UNREACHABLE();
- }
- return Builtins::kStoreIC_Slow;
- }
static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
// Generates code that verifies that the property holder has not changed
@@ -641,10 +621,77 @@ class StubCompiler BASE_EMBEDDED {
enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS };
-class BaseLoadStubCompiler: public StubCompiler {
+class BaseLoadStoreStubCompiler: public StubCompiler {
public:
- BaseLoadStubCompiler(Isolate* isolate, Register* registers)
+ BaseLoadStoreStubCompiler(Isolate* isolate, Register* registers)
: StubCompiler(isolate), registers_(registers) { }
+ virtual ~BaseLoadStoreStubCompiler() { }
+
+ Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
+ Handle<Code> handler,
+ Handle<Name> name);
+
+ Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check);
+
+ virtual void GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) { }
+
+ static Builtins::Name MissBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
+ case Code::STORE_IC: return Builtins::kStoreIC_Miss;
+ case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss;
+ case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss;
+ default: UNREACHABLE();
+ }
+ return Builtins::kLoadIC_Miss;
+ }
+
+ protected:
+ virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss) = 0;
+
+ virtual void HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) = 0;
+
+ Register HandlerFrontend(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success);
+
+ Handle<Code> GetICCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name,
+ InlineCacheState state = MONOMORPHIC);
+
+ virtual Code::ExtraICState extra_state() { return Code::kNoExtraICState; }
+ virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0;
+ virtual void JitEvent(Handle<Name> name, Handle<Code> code) = 0;
+ virtual Code::Kind kind() = 0;
+ virtual Register receiver() = 0;
+ virtual Register name() = 0;
+ virtual Register scratch1() = 0;
+ virtual Register scratch2() = 0;
+ virtual Register scratch3() = 0;
+
+ Register* registers_;
+};
+
+
+class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
+ public:
+ BaseLoadStubCompiler(Isolate* isolate, Register* registers)
+ : BaseLoadStoreStubCompiler(isolate, registers) { }
virtual ~BaseLoadStubCompiler() { }
Handle<Code> CompileLoadField(Handle<JSObject> object,
@@ -667,28 +714,17 @@ class BaseLoadStubCompiler: public StubCompiler {
Handle<JSObject> holder,
Handle<Name> name);
- Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
- Handle<Code> handler,
- Handle<Name> name);
- Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check);
-
protected:
- Register HandlerFrontendHeader(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* success);
- void HandlerFrontendFooter(Label* success, Label* miss);
+ virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss);
+
+ virtual void HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss);
- Register HandlerFrontend(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* success);
Register CallbackHandlerFrontend(Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -718,30 +754,16 @@ class BaseLoadStubCompiler: public StubCompiler {
Handle<Name> name,
LookupResult* lookup);
- Handle<Code> GetICCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name,
- InlineCacheState state = MONOMORPHIC);
-
Handle<Code> GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name);
- Register receiver() { return registers_[0]; }
- Register name() { return registers_[1]; }
- Register scratch1() { return registers_[2]; }
- Register scratch2() { return registers_[3]; }
- Register scratch3() { return registers_[4]; }
+ virtual Register receiver() { return registers_[0]; }
+ virtual Register name() { return registers_[1]; }
+ virtual Register scratch1() { return registers_[2]; }
+ virtual Register scratch2() { return registers_[3]; }
+ virtual Register scratch3() { return registers_[4]; }
Register scratch4() { return registers_[5]; }
-
- private:
- virtual Code::Kind kind() = 0;
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0;
- virtual void JitEvent(Handle<Name> name, Handle<Code> code) = 0;
- virtual void GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) { }
- Register* registers_;
};
@@ -769,8 +791,6 @@ class LoadStubCompiler: public BaseLoadStubCompiler {
Handle<Name> name,
bool is_dont_delete);
- static Register receiver() { return registers()[0]; }
-
private:
static Register* registers();
virtual Code::Kind kind() { return Code::LOAD_IC; }
@@ -795,8 +815,6 @@ class KeyedLoadStubCompiler: public BaseLoadStubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
- static Register receiver() { return registers()[0]; }
-
private:
static Register* registers();
virtual Code::Kind kind() { return Code::KEYED_LOAD_IC; }
@@ -812,14 +830,13 @@ class KeyedLoadStubCompiler: public BaseLoadStubCompiler {
};
-class BaseStoreStubCompiler: public StubCompiler {
+class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
public:
BaseStoreStubCompiler(Isolate* isolate,
StrictModeFlag strict_mode,
Register* registers)
- : StubCompiler(isolate),
- strict_mode_(strict_mode),
- registers_(registers) { }
+ : BaseLoadStoreStubCompiler(isolate, registers),
+ strict_mode_(strict_mode) { }
virtual ~BaseStoreStubCompiler() { }
@@ -832,12 +849,65 @@ class BaseStoreStubCompiler: public StubCompiler {
LookupResult* lookup,
Handle<Name> name);
- protected:
- Handle<Code> GetICCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name,
- InlineCacheState state = MONOMORPHIC);
+ void GenerateNegativeHolderLookup(MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss);
+
+ void GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow);
+
+ void GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label);
+
+ static Builtins::Name MissBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
+ case Code::STORE_IC: return Builtins::kStoreIC_Miss;
+ case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss;
+ case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss;
+ default: UNREACHABLE();
+ }
+ return Builtins::kLoadIC_Miss;
+ }
+ static Builtins::Name SlowBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::STORE_IC: return Builtins::kStoreIC_Slow;
+ case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow;
+ default: UNREACHABLE();
+ }
+ return Builtins::kStoreIC_Slow;
+ }
+ protected:
+ virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* miss);
+
+ virtual void HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss);
Handle<Code> GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name);
@@ -846,24 +916,17 @@ class BaseStoreStubCompiler: public StubCompiler {
Label* label,
Handle<Name> name);
- Register receiver() { return registers_[0]; }
- Register name() { return registers_[1]; }
+ virtual Register receiver() { return registers_[0]; }
+ virtual Register name() { return registers_[1]; }
Register value() { return registers_[2]; }
- Register scratch1() { return registers_[3]; }
- Register scratch2() { return registers_[4]; }
- Register scratch3() { return registers_[5]; }
+ virtual Register scratch1() { return registers_[3]; }
+ virtual Register scratch2() { return registers_[4]; }
+ virtual Register scratch3() { return registers_[5]; }
StrictModeFlag strict_mode() { return strict_mode_; }
virtual Code::ExtraICState extra_state() { return strict_mode_; }
private:
- virtual Code::Kind kind() = 0;
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0;
- virtual void JitEvent(Handle<Name> name, Handle<Code> code) = 0;
- virtual void GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) { }
StrictModeFlag strict_mode_;
- Register* registers_;
};
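
The header refactor folds the shared load/store machinery into BaseLoadStoreStubCompiler, exposing the register layout through virtual accessors so HandlerFrontend code in the base works for both kinds. A minimal sketch of that shape (names and indices illustrative, though the offset difference echoes the real layouts, where stores reserve registers_[2] for the value):

#include <iostream>

using Register = int;

class BaseLoadStoreStubCompiler {
 public:
  explicit BaseLoadStoreStubCompiler(Register* registers)
      : registers_(registers) {}
  virtual ~BaseLoadStoreStubCompiler() = default;
  // Shared frontend logic, written once against the virtual layout.
  void HandlerFrontend() {
    std::cout << "receiver=r" << receiver() << " scratch1=r" << scratch1()
              << "\n";
  }
 protected:
  virtual Register receiver() { return registers_[0]; }
  virtual Register scratch1() = 0;
  Register* registers_;
};

class BaseLoadStubCompiler : public BaseLoadStoreStubCompiler {
 public:
  using BaseLoadStoreStubCompiler::BaseLoadStoreStubCompiler;
 protected:
  Register scratch1() override { return registers_[2]; }  // no value register
};

class BaseStoreStubCompiler : public BaseLoadStoreStubCompiler {
 public:
  using BaseLoadStoreStubCompiler::BaseLoadStoreStubCompiler;
 protected:
  Register scratch1() override { return registers_[3]; }  // r2 holds the value
};

int main() {
  Register regs[] = {0, 1, 2, 3, 4, 5};
  BaseLoadStubCompiler load(regs);
  BaseStoreStubCompiler store(regs);
  load.HandlerFrontend();   // receiver=r0 scratch1=r2
  store.HandlerFrontend();  // receiver=r0 scratch1=r3
  return 0;
}
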
@@ -873,17 +936,17 @@ class StoreStubCompiler: public BaseStoreStubCompiler {
: BaseStoreStubCompiler(isolate, strict_mode, registers()) { }
- Handle<Code> CompileStoreCallback(Handle<Name> name,
- Handle<JSObject> object,
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
static void GenerateStoreViaSetter(MacroAssembler* masm,
Handle<JSFunction> setter);
- Handle<Code> CompileStoreViaSetter(Handle<Name> name,
- Handle<JSObject> object,
+ Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<JSFunction> setter);
Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
index 6c3de3e011..6adf365689 100644
--- a/deps/v8/src/third_party/vtune/v8vtune.gyp
+++ b/deps/v8/src/third_party/vtune/v8vtune.gyp
@@ -29,7 +29,7 @@
'variables': {
'v8_code': 1,
},
- 'includes': ['../../../build/common.gypi'],
+ 'includes': ['../../../build/toolchain.gypi', '../../../build/features.gypi'],
'targets': [
{
'target_name': 'v8_vtune',
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 83eb9c45b4..b905a74716 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -141,7 +141,7 @@ bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsMap()) return false;
- if (!map_or_code->IsCode()) return true;
+ if (!map_or_code->IsCode()) return false;
Handle<Code> code = Handle<Code>::cast(map_or_code);
return code->ic_state() == UNINITIALIZED;
}
@@ -170,14 +170,14 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::StoreIsPolymorphic(TypeFeedbackId ast_id) {
+bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
bool standard_store = FLAG_compiled_keyed_stores ||
(Code::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
STANDARD_STORE);
- return code->is_keyed_store_stub() && standard_store &&
+ return code->is_keyed_store_stub() && standard_store &&
code->ic_state() == POLYMORPHIC;
}
return false;
@@ -186,13 +186,14 @@ bool TypeFeedbackOracle::StoreIsPolymorphic(TypeFeedbackId ast_id) {
bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
Handle<Object> value = GetInfo(expr->CallFeedbackId());
- return value->IsMap() || value->IsSmi() || value->IsJSFunction();
+ return value->IsMap() || value->IsAllocationSite() || value->IsJSFunction() ||
+ value->IsSmi();
}
bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- return info->IsSmi() || info->IsJSFunction();
+ return info->IsAllocationSite() || info->IsJSFunction();
}
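
The type-feedback changes replace the Smi-encoded ElementsKind previously stored for Array call sites with an AllocationSite object, so the monomorphic checks and target lookups now test IsAllocationSite. A sketch of the slot-shape idea using std::variant (an assumed model, not V8's cell representation):

#include <iostream>
#include <variant>

struct JSFunction { const char* name; };
struct AllocationSite {};
// A feedback slot is uninitialized, a concrete target, or an
// AllocationSite marker that implies the Array function.
using Feedback = std::variant<std::monostate, JSFunction*, AllocationSite*>;

bool CallNewIsMonomorphic(const Feedback& info) {
  return std::holds_alternative<JSFunction*>(info) ||
         std::holds_alternative<AllocationSite*>(info);
}

JSFunction* GetCallNewTarget(const Feedback& info, JSFunction* array_fn) {
  if (std::holds_alternative<AllocationSite*>(info)) return array_fn;
  return std::get<JSFunction*>(info);
}

int main() {
  JSFunction array{"Array"}, f{"f"};
  AllocationSite site;
  Feedback a = &site, b = &f;
  std::cout << GetCallNewTarget(a, &array)->name << "\n";  // Array
  std::cout << GetCallNewTarget(b, &array)->name << "\n";  // f
  return 0;
}
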
@@ -266,7 +267,9 @@ void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::STORE_IC);
CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
}
@@ -302,9 +305,7 @@ CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
Handle<Object> info = GetInfo(expr->CallFeedbackId());
- if (info->IsSmi()) {
- ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
- LAST_FAST_ELEMENTS_KIND);
+ if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate_->global_context()->array_function());
} else {
return Handle<JSFunction>::cast(info);
@@ -314,9 +315,7 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- if (info->IsSmi()) {
- ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
- LAST_FAST_ELEMENTS_KIND);
+ if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate_->global_context()->array_function());
} else {
return Handle<JSFunction>::cast(info);
@@ -378,12 +377,9 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
CompareIC::StubInfoToType(
stub_minor_key, left_type, right_type, combined_type, map, isolate());
} else if (code->is_compare_nil_ic_stub()) {
- CompareNilICStub::State state(code->compare_nil_state());
- *combined_type = CompareNilICStub::StateToType(isolate_, state, map);
- Handle<Type> nil_type = handle(code->compare_nil_value() == kNullValue
- ? Type::Null() : Type::Undefined(), isolate_);
- *left_type = *right_type =
- handle(Type::Union(*combined_type, nil_type), isolate_);
+ CompareNilICStub stub(code->extended_extra_ic_state());
+ *combined_type = stub.GetType(isolate_, map);
+ *left_type = *right_type = stub.GetInputType(isolate_, map);
}
}
@@ -395,8 +391,7 @@ Handle<Type> TypeFeedbackOracle::UnaryType(TypeFeedbackId id) {
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT(code->is_unary_op_stub());
- return UnaryOpIC::TypeInfoToType(
- static_cast<UnaryOpIC::TypeInfo>(code->unary_op_type()), isolate());
+ return UnaryOpStub(code->extended_extra_ic_state()).GetType(isolate());
}
@@ -555,6 +550,18 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
}
+void TypeFeedbackOracle::CollectPolymorphicStoreReceiverTypes(
+ TypeFeedbackId ast_id,
+ SmallMapList* types) {
+ Handle<Object> object = GetInfo(ast_id);
+ if (!object->IsCode()) return;
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (code->kind() == Code::STORE_IC && code->ic_state() == POLYMORPHIC) {
+ CollectPolymorphicMaps(code, types);
+ }
+}
+
+
byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
Handle<Object> object = GetInfo(id);
return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
@@ -676,6 +683,7 @@ void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
Cell* cell = cache->GetCell(i);
Object* value = cell->value();
if (value->IsSmi() ||
+ value->IsAllocationSite() ||
(value->IsJSFunction() &&
!CanRetainOtherContext(JSFunction::cast(value),
*native_context_))) {
@@ -698,4 +706,16 @@ void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
#endif
}
+
+Representation Representation::FromType(TypeInfo info) {
+ if (info.IsUninitialized()) return Representation::None();
+ // TODO(verwaest): Return Smi rather than Integer32.
+ if (info.IsSmi()) return Representation::Integer32();
+ if (info.IsInteger32()) return Representation::Integer32();
+ if (info.IsDouble()) return Representation::Double();
+ if (info.IsNumber()) return Representation::Double();
+ return Representation::Tagged();
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index a1c1f54cc1..1a7c67dfb8 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -246,7 +246,7 @@ class TypeFeedbackOracle: public ZoneObject {
bool LoadIsPolymorphic(Property* expr);
bool StoreIsUninitialized(TypeFeedbackId ast_id);
bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
- bool StoreIsPolymorphic(TypeFeedbackId ast_id);
+ bool StoreIsKeyedPolymorphic(TypeFeedbackId ast_id);
bool CallIsMonomorphic(Call* expr);
bool CallNewIsMonomorphic(CallNew* expr);
bool ObjectLiteralStoreIsMonomorphic(ObjectLiteralProperty* prop);
@@ -272,6 +272,8 @@ class TypeFeedbackOracle: public ZoneObject {
SmallMapList* types);
void CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types);
+ void CollectPolymorphicStoreReceiverTypes(TypeFeedbackId ast_id,
+ SmallMapList* types);
static bool CanRetainOtherContext(Map* map, Context* native_context);
static bool CanRetainOtherContext(JSFunction* function,
@@ -303,9 +305,9 @@ class TypeFeedbackOracle: public ZoneObject {
Maybe<int>* fixed_right_arg);
void CompareType(TypeFeedbackId id,
- Handle<Type>* left_type,
- Handle<Type>* right_type,
- Handle<Type>* combined_type);
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* combined);
Handle<Type> ClauseType(TypeFeedbackId id);
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 0d90355049..601012d58e 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -89,12 +89,11 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
if (%_IsConstructCall()) {
if (IS_ARRAYBUFFER(arg1)) {
ConstructByArrayBuffer(this, arg1, arg2, arg3);
- } else if (IS_NUMBER(arg1) || IS_STRING(arg1) || IS_BOOLEAN(arg1)) {
+ } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
+ IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
ConstructByLength(this, arg1);
- } else if (!IS_UNDEFINED(arg1)){
- ConstructByArrayLike(this, arg1);
} else {
- throw MakeTypeError("parameterless_typed_array_constr", [name]);
+ ConstructByArrayLike(this, arg1);
}
} else {
throw MakeTypeError("constructor_not_function", [name])
@@ -155,7 +154,14 @@ function TypedArraySet(obj, offset) {
var l = obj.length;
if (IS_UNDEFINED(l)) {
- throw MakeTypeError("invalid_argument");
+ if (IS_NUMBER(obj)) {
+ // For number as a first argument, throw TypeError
+ // instead of silently ignoring the call, so that
+      // the user knows they did something wrong.
+ // (Consistent with Firefox and Blink/WebKit)
+ throw MakeTypeError("invalid_argument");
+ }
+ return;
}
if (intOffset + l > this.length) {
throw MakeRangeError("typed_array_set_source_too_large");
@@ -174,6 +180,8 @@ function SetupTypedArray(arrayId, name, constructor, elementSize) {
%SetCode(constructor, fun);
%FunctionSetPrototype(constructor, new $Object());
+ %SetProperty(constructor, "BYTES_PER_ELEMENT", elementSize,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
%SetProperty(constructor.prototype,
"constructor", constructor, DONT_ENUM);
%SetProperty(constructor.prototype,
@@ -260,6 +268,9 @@ function DataViewGetInt8(offset, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.getInt8', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
return %DataViewGetInt8(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
@@ -270,6 +281,9 @@ function DataViewSetInt8(offset, value, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.setInt8', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
%DataViewSetInt8(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
@@ -281,6 +295,9 @@ function DataViewGetUint8(offset, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.getUint8', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
return %DataViewGetUint8(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
@@ -291,6 +308,9 @@ function DataViewSetUint8(offset, value, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.setUint8', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
%DataViewSetUint8(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
@@ -302,6 +322,9 @@ function DataViewGetInt16(offset, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.getInt16', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
return %DataViewGetInt16(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
@@ -312,6 +335,9 @@ function DataViewSetInt16(offset, value, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.setInt16', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
%DataViewSetInt16(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
@@ -323,6 +349,9 @@ function DataViewGetUint16(offset, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.getUint16', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
return %DataViewGetUint16(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
@@ -333,6 +362,9 @@ function DataViewSetUint16(offset, value, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.setUint16', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
%DataViewSetUint16(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
@@ -344,6 +376,9 @@ function DataViewGetInt32(offset, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.getInt32', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
return %DataViewGetInt32(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
@@ -354,6 +389,9 @@ function DataViewSetInt32(offset, value, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.setInt32', this]);
}
+ if (%_ArgumentsLength() < 2) {
+ throw MakeTypeError('invalid_argument');
+ }
%DataViewSetInt32(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
@@ -365,6 +403,9 @@ function DataViewGetUint32(offset, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.getUint32', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
return %DataViewGetUint32(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
@@ -375,6 +416,9 @@ function DataViewSetUint32(offset, value, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.setUint32', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
%DataViewSetUint32(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
@@ -386,6 +430,9 @@ function DataViewGetFloat32(offset, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.getFloat32', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
return %DataViewGetFloat32(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
@@ -396,6 +443,9 @@ function DataViewSetFloat32(offset, value, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.setFloat32', this]);
}
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
+ }
%DataViewSetFloat32(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
@@ -407,9 +457,8 @@ function DataViewGetFloat64(offset, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.getFloat64', this]);
}
- offset = TO_INTEGER(offset);
- if (offset < 0) {
- throw MakeRangeError("invalid_data_view_accessor_offset");
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
}
return %DataViewGetFloat64(this,
ToPositiveDataViewOffset(offset),
@@ -421,9 +470,8 @@ function DataViewSetFloat64(offset, value, little_endian) {
throw MakeTypeError('incompatible_method_reciever',
['DataView.setFloat64', this]);
}
- offset = TO_INTEGER(offset);
- if (offset < 0) {
- throw MakeRangeError("invalid_data_view_accessor_offset");
+ if (%_ArgumentsLength() < 1) {
+ throw MakeTypeError('invalid_argument');
}
%DataViewSetFloat64(this,
ToPositiveDataViewOffset(offset),
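For the Float64 accessors the hand-rolled offset check is dropped in favor of the same guard; negative offsets are still rejected, just inside ToPositiveDataViewOffset. Continuing the sketch above:

dv.getFloat64(-1);  // still a RangeError, via ToPositiveDataViewOffset
dv.getFloat64();    // TypeError now, matching the other accessors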
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 1275deacb7..8bf91293fb 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "types.h"
+#include "string-stream.h"
namespace v8 {
namespace internal {
@@ -179,6 +180,7 @@ int Type::LubBitset() {
case JS_SET_TYPE:
case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
if (map->is_undetectable()) return kUndetectable;
return kOtherObject;
case JS_ARRAY_TYPE:
@@ -318,6 +320,7 @@ bool Type::InUnion(Handle<Unioned> unioned, int current_size) {
return false;
}
+
// Get non-bitsets from this which are not subsumed by union, store at unioned,
// starting at index. Returns updated index.
int Type::ExtendUnion(Handle<Unioned> result, int current_size) {
@@ -476,4 +479,58 @@ Type* Type::Optional(Handle<Type> type) {
: Union(type, Undefined()->handle_via_isolate_of(*type));
}
+
+Representation Representation::FromType(Handle<Type> type) {
+ if (type->Is(Type::None())) return Representation::None();
+ if (type->Is(Type::Signed32())) return Representation::Integer32();
+ if (type->Is(Type::Number())) return Representation::Double();
+ return Representation::Tagged();
+}
+
+
+#ifdef OBJECT_PRINT
+void Type::TypePrint() {
+ TypePrint(stdout);
+ PrintF(stdout, "\n");
+ Flush(stdout);
+}
+
+
+void Type::TypePrint(FILE* out) {
+ if (is_bitset()) {
+ int val = as_bitset();
+ const char* composed_name = GetComposedName(val);
+ if (composed_name != NULL) {
+ PrintF(out, "%s", composed_name);
+ return;
+ }
+ bool first_entry = true;
+ PrintF(out, "{");
+ for (unsigned i = 0; i < sizeof(val)*8; ++i) {
+ int mask = (1 << i);
+ if ((val & mask) != 0) {
+ if (!first_entry) PrintF(out, ",");
+ first_entry = false;
+ PrintF(out, "%s", GetPrimitiveName(mask));
+ }
+ }
+ PrintF(out, "}");
+ } else if (is_constant()) {
+ PrintF(out, "Constant(%p)", static_cast<void*>(*as_constant()));
+ } else if (is_class()) {
+ PrintF(out, "Class(%p)", static_cast<void*>(*as_class()));
+ } else if (is_union()) {
+ PrintF(out, "{");
+ Handle<Unioned> unioned = as_union();
+ for (int i = 0; i < unioned->length(); ++i) {
+ Handle<Type> type_i = union_get(unioned, i);
+ if (i > 0) PrintF(out, ",");
+ type_i->TypePrint(out);
+ }
+ PrintF(out, "}");
+ }
+}
+#endif
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index a2bcda6579..b2eb60c692 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -94,39 +94,54 @@ namespace internal {
// The type representation is heap-allocated, so cannot (currently) be used in
// a parallel compilation context.
+
+#define PRIMITIVE_TYPE_LIST(V) \
+ V(None, 0) \
+ V(Null, 1 << 0) \
+ V(Undefined, 1 << 1) \
+ V(Boolean, 1 << 2) \
+ V(Smi, 1 << 3) \
+ V(OtherSigned32, 1 << 4) \
+ V(Unsigned32, 1 << 5) \
+ V(Double, 1 << 6) \
+ V(Symbol, 1 << 7) \
+ V(InternalizedString, 1 << 8) \
+ V(OtherString, 1 << 9) \
+ V(Undetectable, 1 << 10) \
+ V(Array, 1 << 11) \
+ V(Function, 1 << 12) \
+ V(RegExp, 1 << 13) \
+ V(OtherObject, 1 << 14) \
+ V(Proxy, 1 << 15) \
+ V(Internal, 1 << 16)
+
+#define COMPOSED_TYPE_LIST(V) \
+ V(Oddball, kBoolean | kNull | kUndefined) \
+ V(Signed32, kSmi | kOtherSigned32) \
+ V(Number, kSigned32 | kUnsigned32 | kDouble) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(NumberOrString, kNumber | kString) \
+ V(Object, kUndetectable | kArray | kFunction | \
+ kRegExp | kOtherObject) \
+ V(Receiver, kObject | kProxy) \
+ V(Allocated, kDouble | kName | kReceiver) \
+ V(Any, kOddball | kNumber | kAllocated | kInternal) \
+ V(Detectable, kAllocated - kUndetectable)
+
+#define TYPE_LIST(V) \
+ PRIMITIVE_TYPE_LIST(V) \
+ COMPOSED_TYPE_LIST(V)
+
+
+
class Type : public Object {
public:
- static Type* None() { return from_bitset(kNone); }
- static Type* Any() { return from_bitset(kAny); }
- static Type* Allocated() { return from_bitset(kAllocated); }
- static Type* Detectable() { return from_bitset(kDetectable); }
-
- static Type* Oddball() { return from_bitset(kOddball); }
- static Type* Boolean() { return from_bitset(kBoolean); }
- static Type* Null() { return from_bitset(kNull); }
- static Type* Undefined() { return from_bitset(kUndefined); }
-
- static Type* Number() { return from_bitset(kNumber); }
- static Type* Smi() { return from_bitset(kSmi); }
- static Type* Signed32() { return from_bitset(kSigned32); }
- static Type* Unsigned32() { return from_bitset(kUnsigned32); }
- static Type* Double() { return from_bitset(kDouble); }
- static Type* NumberOrString() { return from_bitset(kNumberOrString); }
-
- static Type* Name() { return from_bitset(kName); }
- static Type* UniqueName() { return from_bitset(kUniqueName); }
- static Type* String() { return from_bitset(kString); }
- static Type* InternalizedString() { return from_bitset(kInternalizedString); }
- static Type* Symbol() { return from_bitset(kSymbol); }
-
- static Type* Receiver() { return from_bitset(kReceiver); }
- static Type* Object() { return from_bitset(kObject); }
- static Type* Undetectable() { return from_bitset(kUndetectable); }
- static Type* Array() { return from_bitset(kArray); }
- static Type* Function() { return from_bitset(kFunction); }
- static Type* RegExp() { return from_bitset(kRegExp); }
- static Type* Proxy() { return from_bitset(kProxy); }
- static Type* Internal() { return from_bitset(kInternal); }
+ #define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+ static Type* type() { return from_bitset(k##type); }
+ TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+ #undef DEFINE_TYPE_CONSTRUCTOR
static Type* Class(Handle<Map> map) { return from_handle(map); }
static Type* Constant(Handle<HeapObject> value) {
@@ -184,6 +199,18 @@ class Type : public Object {
return Iterator<v8::internal::Object>(this->handle());
}
+ static Type* cast(v8::internal::Object* object) {
+ Type* t = static_cast<Type*>(object);
+ ASSERT(t->is_bitset() || t->is_class() ||
+ t->is_constant() || t->is_union());
+ return t;
+ }
+
+#ifdef OBJECT_PRINT
+ void TypePrint();
+ void TypePrint(FILE* out);
+#endif
+
private:
// A union is a fixed array containing types. Invariants:
// - its length is at least 2
@@ -192,37 +219,10 @@ class Type : public Object {
typedef FixedArray Unioned;
enum {
- kNull = 1 << 0,
- kUndefined = 1 << 1,
- kBoolean = 1 << 2,
- kSmi = 1 << 3,
- kOtherSigned32 = 1 << 4,
- kUnsigned32 = 1 << 5,
- kDouble = 1 << 6,
- kSymbol = 1 << 7,
- kInternalizedString = 1 << 8,
- kOtherString = 1 << 9,
- kUndetectable = 1 << 10,
- kArray = 1 << 11,
- kFunction = 1 << 12,
- kRegExp = 1 << 13,
- kOtherObject = 1 << 14,
- kProxy = 1 << 15,
- kInternal = 1 << 16,
-
- kOddball = kBoolean | kNull | kUndefined,
- kSigned32 = kSmi | kOtherSigned32,
- kNumber = kSigned32 | kUnsigned32 | kDouble,
- kString = kInternalizedString | kOtherString,
- kUniqueName = kSymbol | kInternalizedString,
- kName = kSymbol | kString,
- kNumberOrString = kNumber | kString,
- kObject = kUndetectable | kArray | kFunction | kRegExp | kOtherObject,
- kReceiver = kObject | kProxy,
- kAllocated = kDouble | kName | kReceiver,
- kAny = kOddball | kNumber | kAllocated | kInternal,
- kDetectable = kAllocated - kUndetectable,
- kNone = 0
+ #define DECLARE_TYPE(type, value) k##type = (value),
+ TYPE_LIST(DECLARE_TYPE)
+ #undef DECLARE_TYPE
+ kUnusedEOL = 0
};
bool is_bitset() { return this->IsSmi(); }
@@ -265,6 +265,64 @@ class Type : public Object {
int ExtendUnion(Handle<Unioned> unioned, int current_size);
int ExtendIntersection(
Handle<Unioned> unioned, Handle<Type> type, int current_size);
+
+ static const char* GetComposedName(int type) {
+ switch (type) {
+ #define PRINT_COMPOSED_TYPE(type, value) \
+ case k##type: \
+ return # type;
+ COMPOSED_TYPE_LIST(PRINT_COMPOSED_TYPE)
+ #undef PRINT_COMPOSED_TYPE
+ }
+ return NULL;
+ }
+
+ static const char* GetPrimitiveName(int type) {
+ switch (type) {
+ #define PRINT_PRIMITIVE_TYPE(type, value) \
+ case k##type: \
+ return # type;
+ PRIMITIVE_TYPE_LIST(PRINT_PRIMITIVE_TYPE)
+ #undef PRINT_PRIMITIVE_TYPE
+ default:
+ UNREACHABLE();
+ return "InvalidType";
+ }
+ }
+};
+
+
+// A simple struct to represent a pair of lower/upper type bounds.
+struct Bounds {
+ Handle<Type> lower;
+ Handle<Type> upper;
+
+ Bounds() {}
+ Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {}
+ Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {}
+ explicit Bounds(Handle<Type> t) : lower(t), upper(t) {}
+ Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {}
+
+ // Meet: both b1 and b2 are known to hold.
+ static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
+ return Bounds(
+ handle(Type::Union(b1.lower, b2.lower), isl),
+ handle(Type::Intersect(b1.upper, b2.upper), isl));
+ }
+
+ // Join: either b1 or b2 is known to hold.
+ static Bounds Either(Bounds b1, Bounds b2, Isolate* isl) {
+ return Bounds(
+ handle(Type::Intersect(b1.lower, b2.lower), isl),
+ handle(Type::Union(b1.upper, b2.upper), isl));
+ }
+
+ static Bounds NarrowLower(Bounds b, Handle<Type> t, Isolate* isl) {
+ return Bounds(handle(Type::Union(b.lower, t), isl), b.upper);
+ }
+ static Bounds NarrowUpper(Bounds b, Handle<Type> t, Isolate* isl) {
+ return Bounds(b.lower, handle(Type::Intersect(b.upper, t), isl));
+ }
};
} } // namespace v8::internal
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 7c116120a2..4220d2110d 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -45,13 +45,13 @@ AstTyper::AstTyper(CompilationInfo* info)
}
-#define CHECK_ALIVE(call) \
+#define RECURSE(call) \
do { \
+ ASSERT(!visitor->HasStackOverflow()); \
call; \
if (visitor->HasStackOverflow()) return; \
} while (false)
-
void AstTyper::Run(CompilationInfo* info) {
AstTyper* visitor = new(info->zone()) AstTyper(info);
Scope* scope = info->scope();
@@ -59,52 +59,48 @@ void AstTyper::Run(CompilationInfo* info) {
// Handle implicit declaration of the function name in named function
// expressions before other declarations.
if (scope->is_function_scope() && scope->function() != NULL) {
- CHECK_ALIVE(visitor->VisitVariableDeclaration(scope->function()));
+ RECURSE(visitor->VisitVariableDeclaration(scope->function()));
}
- CHECK_ALIVE(visitor->VisitDeclarations(scope->declarations()));
- CHECK_ALIVE(visitor->VisitStatements(info->function()->body()));
+ RECURSE(visitor->VisitDeclarations(scope->declarations()));
+ RECURSE(visitor->VisitStatements(info->function()->body()));
}
+#undef RECURSE
-#undef CHECK_ALIVE
-#define CHECK_ALIVE(call) \
+#define RECURSE(call) \
do { \
+ ASSERT(!HasStackOverflow()); \
call; \
if (HasStackOverflow()) return; \
} while (false)
void AstTyper::VisitStatements(ZoneList<Statement*>* stmts) {
- ASSERT(!HasStackOverflow());
for (int i = 0; i < stmts->length(); ++i) {
Statement* stmt = stmts->at(i);
- CHECK_ALIVE(Visit(stmt));
+ RECURSE(Visit(stmt));
}
}
void AstTyper::VisitBlock(Block* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(VisitStatements(stmt->statements()));
+ RECURSE(VisitStatements(stmt->statements()));
}
void AstTyper::VisitExpressionStatement(ExpressionStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->expression()));
+ RECURSE(Visit(stmt->expression()));
}
void AstTyper::VisitEmptyStatement(EmptyStatement* stmt) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitIfStatement(IfStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->condition()));
- CHECK_ALIVE(Visit(stmt->then_statement()));
- CHECK_ALIVE(Visit(stmt->else_statement()));
+ RECURSE(Visit(stmt->condition()));
+ RECURSE(Visit(stmt->then_statement()));
+ RECURSE(Visit(stmt->else_statement()));
if (!stmt->condition()->ToBooleanIsTrue() &&
!stmt->condition()->ToBooleanIsFalse()) {
@@ -114,18 +110,15 @@ void AstTyper::VisitIfStatement(IfStatement* stmt) {
void AstTyper::VisitContinueStatement(ContinueStatement* stmt) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitBreakStatement(BreakStatement* stmt) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitReturnStatement(ReturnStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->expression()));
+ RECURSE(Visit(stmt->expression()));
// TODO(rossberg): we only need this for inlining into test contexts...
stmt->expression()->RecordToBooleanTypeFeedback(oracle());
@@ -133,22 +126,20 @@ void AstTyper::VisitReturnStatement(ReturnStatement* stmt) {
void AstTyper::VisitWithStatement(WithStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(stmt->expression());
- CHECK_ALIVE(stmt->statement());
+ RECURSE(stmt->expression());
+ RECURSE(stmt->statement());
}
void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->tag()));
+ RECURSE(Visit(stmt->tag()));
ZoneList<CaseClause*>* clauses = stmt->cases();
SwitchStatement::SwitchType switch_type = stmt->switch_type();
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
if (!clause->is_default()) {
Expression* label = clause->label();
- CHECK_ALIVE(Visit(label));
+ RECURSE(Visit(label));
SwitchStatement::SwitchType label_switch_type =
label->IsSmiLiteral() ? SwitchStatement::SMI_SWITCH :
@@ -159,7 +150,7 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
else if (switch_type != label_switch_type)
switch_type = SwitchStatement::GENERIC_SWITCH;
}
- CHECK_ALIVE(VisitStatements(clause->statements()));
+ RECURSE(VisitStatements(clause->statements()));
}
if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
switch_type = SwitchStatement::GENERIC_SWITCH;
@@ -177,9 +168,8 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->body()));
- CHECK_ALIVE(Visit(stmt->cond()));
+ RECURSE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->cond()));
if (!stmt->cond()->ToBooleanIsTrue()) {
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
@@ -188,9 +178,8 @@ void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->cond()));
- CHECK_ALIVE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->cond()));
+ RECURSE(Visit(stmt->body()));
if (!stmt->cond()->ToBooleanIsTrue()) {
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
@@ -199,171 +188,178 @@ void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
void AstTyper::VisitForStatement(ForStatement* stmt) {
- ASSERT(!HasStackOverflow());
if (stmt->init() != NULL) {
- CHECK_ALIVE(Visit(stmt->init()));
+ RECURSE(Visit(stmt->init()));
}
if (stmt->cond() != NULL) {
- CHECK_ALIVE(Visit(stmt->cond()));
+ RECURSE(Visit(stmt->cond()));
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
}
- CHECK_ALIVE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->body()));
if (stmt->next() != NULL) {
- CHECK_ALIVE(Visit(stmt->next()));
+ RECURSE(Visit(stmt->next()));
}
}
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->enumerable()));
- CHECK_ALIVE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->enumerable()));
+ RECURSE(Visit(stmt->body()));
stmt->RecordTypeFeedback(oracle());
}
void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->iterable()));
- CHECK_ALIVE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->iterable()));
+ RECURSE(Visit(stmt->body()));
}
void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->try_block()));
- CHECK_ALIVE(Visit(stmt->catch_block()));
+ RECURSE(Visit(stmt->try_block()));
+ RECURSE(Visit(stmt->catch_block()));
}
void AstTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->try_block()));
- CHECK_ALIVE(Visit(stmt->finally_block()));
+ RECURSE(Visit(stmt->try_block()));
+ RECURSE(Visit(stmt->finally_block()));
}
void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitSharedFunctionInfoLiteral(SharedFunctionInfoLiteral* expr) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitConditional(Conditional* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->condition()));
- CHECK_ALIVE(Visit(expr->then_expression()));
- CHECK_ALIVE(Visit(expr->else_expression()));
+ RECURSE(Visit(expr->condition()));
+ RECURSE(Visit(expr->then_expression()));
+ RECURSE(Visit(expr->else_expression()));
expr->condition()->RecordToBooleanTypeFeedback(oracle());
+
+ NarrowType(expr, Bounds::Either(
+ expr->then_expression()->bounds(),
+ expr->else_expression()->bounds(), isolate_));
}
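Bounds::Either intersects the lower bounds and unions the upper bounds of the two arms, so the conditional's type covers both branches. Illustratively, assuming some boolean flag:

var x = flag ? 1 : "one";  // upper bound spans Number and String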
void AstTyper::VisitVariableProxy(VariableProxy* expr) {
- ASSERT(!HasStackOverflow());
+ // TODO(rossberg): typing of variables
}
void AstTyper::VisitLiteral(Literal* expr) {
- ASSERT(!HasStackOverflow());
+ Type* type = Type::Constant(expr->value(), isolate_);
+ NarrowType(expr, Bounds(type, isolate_));
}
void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
- ASSERT(!HasStackOverflow());
+ NarrowType(expr, Bounds(Type::RegExp(), isolate_));
}
void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
- ASSERT(!HasStackOverflow());
ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
for (int i = 0; i < properties->length(); ++i) {
ObjectLiteral::Property* prop = properties->at(i);
- CHECK_ALIVE(Visit(prop->value()));
+ RECURSE(Visit(prop->value()));
if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
!CompileTimeValue::IsCompileTimeValue(prop->value())) ||
prop->kind() == ObjectLiteral::Property::COMPUTED) {
- if (prop->key()->value()->IsInternalizedString() && prop->emit_store())
+ if (prop->key()->value()->IsInternalizedString() && prop->emit_store()) {
prop->RecordTypeFeedback(oracle());
+ }
}
}
+
+ NarrowType(expr, Bounds(Type::Object(), isolate_));
}
void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
- ASSERT(!HasStackOverflow());
ZoneList<Expression*>* values = expr->values();
for (int i = 0; i < values->length(); ++i) {
Expression* value = values->at(i);
- CHECK_ALIVE(Visit(value));
+ RECURSE(Visit(value));
}
+
+ NarrowType(expr, Bounds(Type::Array(), isolate_));
}
void AstTyper::VisitAssignment(Assignment* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->target()));
- CHECK_ALIVE(Visit(expr->value()));
-
// TODO(rossberg): Can we clean this up?
if (expr->is_compound()) {
- CHECK_ALIVE(Visit(expr->binary_operation()));
+ RECURSE(Visit(expr->binary_operation()));
Expression* target = expr->target();
Property* prop = target->AsProperty();
if (prop != NULL) {
prop->RecordTypeFeedback(oracle(), zone());
- if (!prop->key()->IsPropertyName()) // i.e., keyed
+ if (!prop->key()->IsPropertyName()) { // i.e., keyed
expr->RecordTypeFeedback(oracle(), zone());
+ }
}
- return;
+
+ NarrowType(expr, expr->binary_operation()->bounds());
+ } else {
+ RECURSE(Visit(expr->target()));
+ RECURSE(Visit(expr->value()));
+
+ if (expr->target()->AsProperty()) {
+ expr->RecordTypeFeedback(oracle(), zone());
+ }
+
+ NarrowType(expr, expr->value()->bounds());
}
- if (expr->target()->AsProperty())
- expr->RecordTypeFeedback(oracle(), zone());
+ // TODO(rossberg): handle target variables
}
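After the restructuring, a compound assignment takes its bounds from the embedded binary operation and a simple one from the value; variables themselves remain untyped (see the TODO). Roughly, assuming some object obj:

obj.p = "s";  // simple: bounds of the value, i.e. String
obj.p += 1;   // compound: bounds of the ADD node (NumberOrString here)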
void AstTyper::VisitYield(Yield* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->generator_object()));
- CHECK_ALIVE(Visit(expr->expression()));
+ RECURSE(Visit(expr->generator_object()));
+ RECURSE(Visit(expr->expression()));
+
+ // We don't know anything about the type.
}
void AstTyper::VisitThrow(Throw* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->exception()));
+ RECURSE(Visit(expr->exception()));
+
+ NarrowType(expr, Bounds(Type::None(), isolate_));
}
void AstTyper::VisitProperty(Property* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->obj()));
- CHECK_ALIVE(Visit(expr->key()));
+ RECURSE(Visit(expr->obj()));
+ RECURSE(Visit(expr->key()));
expr->RecordTypeFeedback(oracle(), zone());
+
+ // We don't know anything about the type.
}
void AstTyper::VisitCall(Call* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->expression()));
+ RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
- CHECK_ALIVE(Visit(arg));
+ RECURSE(Visit(arg));
}
Expression* callee = expr->expression();
@@ -374,160 +370,228 @@ void AstTyper::VisitCall(Call* expr) {
} else {
expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
}
+
+ // We don't know anything about the type.
}
void AstTyper::VisitCallNew(CallNew* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->expression()));
+ RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
- CHECK_ALIVE(Visit(arg));
+ RECURSE(Visit(arg));
}
expr->RecordTypeFeedback(oracle());
+
+ // We don't know anything about the type.
}
void AstTyper::VisitCallRuntime(CallRuntime* expr) {
- ASSERT(!HasStackOverflow());
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
- CHECK_ALIVE(Visit(arg));
+ RECURSE(Visit(arg));
}
+
+ // We don't know anything about the type.
}
void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->expression()));
+ RECURSE(Visit(expr->expression()));
// Collect type feedback.
Handle<Type> op_type = oracle()->UnaryType(expr->UnaryOperationFeedbackId());
- MergeLowerType(expr->expression(), op_type);
+ NarrowLowerType(expr->expression(), op_type);
if (expr->op() == Token::NOT) {
// TODO(rossberg): only do in test or value context.
expr->expression()->RecordToBooleanTypeFeedback(oracle());
}
+
+ switch (expr->op()) {
+ case Token::NOT:
+ case Token::DELETE:
+ NarrowType(expr, Bounds(Type::Boolean(), isolate_));
+ break;
+ case Token::VOID:
+ NarrowType(expr, Bounds(Type::Undefined(), isolate_));
+ break;
+ case Token::ADD:
+ case Token::SUB: {
+ Type* upper = *expr->expression()->bounds().upper;
+ if (!upper->Is(Type::Number())) upper = Type::Number();
+ NarrowType(expr, Bounds(Type::Smi(), upper, isolate_));
+ break;
+ }
+ case Token::BIT_NOT:
+ NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
+ break;
+ case Token::TYPEOF:
+ NarrowType(expr, Bounds(Type::InternalizedString(), isolate_));
+ break;
+ default:
+ UNREACHABLE();
+ }
}
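The new switch mirrors the JS semantics of each operator; as a rough guide:

typeof (void 0);    // "undefined"  VOID is always Undefined
typeof !0;          // "boolean"    NOT and DELETE yield booleans
~3.7;               // -4           BIT_NOT lands in Smi..Signed32
+"42";              // 42           unary ADD/SUB force a numeric upper bound
typeof (typeof x);  // "string"     TYPEOF yields an internalized string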
void AstTyper::VisitCountOperation(CountOperation* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->expression()));
+ RECURSE(Visit(expr->expression()));
expr->RecordTypeFeedback(oracle(), zone());
Property* prop = expr->expression()->AsProperty();
if (prop != NULL) {
prop->RecordTypeFeedback(oracle(), zone());
}
+
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
}
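A count operation coerces its operand, so whatever goes in, a number comes out; the Smi..Number bounds encode exactly that:

var x = "5";
x++;  // ToNumber first: x is now 6, always within Smi..Number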
void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->left()));
- CHECK_ALIVE(Visit(expr->right()));
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
// Collect type feedback.
- Handle<Type> left_type, right_type, result_type;
+ Handle<Type> type, left_type, right_type;
Maybe<int> fixed_right_arg;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- &left_type, &right_type, &result_type, &fixed_right_arg);
- MergeLowerType(expr->left(), left_type);
- MergeLowerType(expr->right(), right_type);
- expr->set_result_type(result_type);
+ &left_type, &right_type, &type, &fixed_right_arg);
+ NarrowLowerType(expr, type);
+ NarrowLowerType(expr->left(), left_type);
+ NarrowLowerType(expr->right(), right_type);
expr->set_fixed_right_arg(fixed_right_arg);
if (expr->op() == Token::OR || expr->op() == Token::AND) {
expr->left()->RecordToBooleanTypeFeedback(oracle());
}
+
+ switch (expr->op()) {
+ case Token::COMMA:
+ NarrowType(expr, expr->right()->bounds());
+ break;
+ case Token::OR:
+ case Token::AND:
+ NarrowType(expr, Bounds::Either(
+ expr->left()->bounds(), expr->right()->bounds(), isolate_));
+ break;
+ case Token::BIT_OR:
+ case Token::BIT_AND: {
+ Type* upper = Type::Union(
+ expr->left()->bounds().upper, expr->right()->bounds().upper);
+ if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
+ NarrowType(expr, Bounds(Type::Smi(), upper, isolate_));
+ break;
+ }
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SAR:
+ NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
+ break;
+ case Token::SHR:
+ NarrowType(expr, Bounds(Type::Smi(), Type::Unsigned32(), isolate_));
+ break;
+ case Token::ADD: {
+ Bounds l = expr->left()->bounds();
+ Bounds r = expr->right()->bounds();
+ Type* lower =
+ l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
+ Type::Smi() :
+ l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
+ Type::String() : Type::None();
+ Type* upper =
+ l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
+ Type::Number() :
+ l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
+ Type::String() : Type::NumberOrString();
+ NarrowType(expr, Bounds(lower, upper, isolate_));
+ break;
+ }
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ break;
+ default:
+ UNREACHABLE();
+ }
}
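The ADD case is the subtle one: two numeric operands stay within Number, a string operand forces String, and otherwise only NumberOrString is knowable; the shifts follow the 32-bit semantics. For instance:

1 + 2;     // 3                   Number + Number stays Number
"n" + 1;   // "n1"                a String operand forces String
1 + {};    // "1[object Object]"  only NumberOrString is knowable up front
-1 >> 0;   // -1                  SAR/SHL/BIT_XOR stay in Signed32
-1 >>> 0;  // 4294967295          SHR alone is typed Smi..Unsigned32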
void AstTyper::VisitCompareOperation(CompareOperation* expr) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(expr->left()));
- CHECK_ALIVE(Visit(expr->right()));
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
// Collect type feedback.
Handle<Type> left_type, right_type, combined_type;
oracle()->CompareType(expr->CompareOperationFeedbackId(),
&left_type, &right_type, &combined_type);
- MergeLowerType(expr->left(), left_type);
- MergeLowerType(expr->right(), right_type);
+ NarrowLowerType(expr->left(), left_type);
+ NarrowLowerType(expr->right(), right_type);
expr->set_combined_type(combined_type);
+
+ NarrowType(expr, Bounds(Type::Boolean(), isolate_));
}
void AstTyper::VisitThisFunction(ThisFunction* expr) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
- ASSERT(!HasStackOverflow());
for (int i = 0; i < decls->length(); ++i) {
Declaration* decl = decls->at(i);
- CHECK_ALIVE(Visit(decl));
+ RECURSE(Visit(decl));
}
}
void AstTyper::VisitVariableDeclaration(VariableDeclaration* declaration) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(declaration->fun()));
+ RECURSE(Visit(declaration->fun()));
}
void AstTyper::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(declaration->module()));
+ RECURSE(Visit(declaration->module()));
}
void AstTyper::VisitImportDeclaration(ImportDeclaration* declaration) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(declaration->module()));
+ RECURSE(Visit(declaration->module()));
}
void AstTyper::VisitExportDeclaration(ExportDeclaration* declaration) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitModuleLiteral(ModuleLiteral* module) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(module->body()));
+ RECURSE(Visit(module->body()));
}
void AstTyper::VisitModuleVariable(ModuleVariable* module) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitModulePath(ModulePath* module) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(module->module()));
+ RECURSE(Visit(module->module()));
}
void AstTyper::VisitModuleUrl(ModuleUrl* module) {
- ASSERT(!HasStackOverflow());
}
void AstTyper::VisitModuleStatement(ModuleStatement* stmt) {
- ASSERT(!HasStackOverflow());
- CHECK_ALIVE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->body()));
}
diff --git a/deps/v8/src/typing.h b/deps/v8/src/typing.h
index 2d3fac0650..ceef984365 100644
--- a/deps/v8/src/typing.h
+++ b/deps/v8/src/typing.h
@@ -34,6 +34,7 @@
#include "ast.h"
#include "compiler.h"
#include "type-info.h"
+#include "types.h"
#include "zone.h"
#include "scopes.h"
@@ -62,11 +63,11 @@ class AstTyper: public AstVisitor {
TypeFeedbackOracle* oracle() { return &oracle_; }
Zone* zone() const { return info_->zone(); }
- void MergeLowerType(Expression* e, Handle<Type> t) {
- e->set_lower_type(handle(Type::Union(e->lower_type(), t), isolate_));
+ void NarrowType(Expression* e, Bounds b) {
+ e->set_bounds(Bounds::Both(e->bounds(), b, isolate_));
}
- void MergeUpperType(Expression* e, Handle<Type> t) {
- e->set_upper_type(handle(Type::Intersect(e->upper_type(), t), isolate_));
+ void NarrowLowerType(Expression* e, Handle<Type> t) {
+ e->set_bounds(Bounds::NarrowLower(e->bounds(), t, isolate_));
}
void VisitDeclarations(ZoneList<Declaration*>* declarations);
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 04065b0479..bd32467786 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -52,14 +52,17 @@ static inline uchar TableGet(const int32_t* table, int index) {
return table[D * index];
}
+
static inline uchar GetEntry(int32_t entry) {
return entry & (kStartBit - 1);
}
+
static inline bool IsStart(int32_t entry) {
return (entry & kStartBit) != 0;
}
+
/**
* Look up a character in the unicode table using a mix of binary and
* interpolation search. For a uniformly distributed array
@@ -106,6 +109,7 @@ struct MultiCharacterSpecialCase {
uchar chars[kW];
};
+
// Look up the mapping for the given character in the specified table,
// which is of the specified length and uses the specified special case
// mapping for multi-char mappings. The next parameter is the character
@@ -456,6 +460,7 @@ bool Uppercase::Is(uchar c) {
}
}
+
// Lowercase: point.category == 'Ll'
static const uint16_t kLowercaseTable0Size = 463;
@@ -567,6 +572,7 @@ bool Lowercase::Is(uchar c) {
}
}
+
// Letter: point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl' ]
static const uint16_t kLetterTable0Size = 435;
@@ -703,6 +709,7 @@ bool Letter::Is(uchar c) {
}
}
+
// Space: point.category == 'Zs'
static const uint16_t kSpaceTable0Size = 4;
@@ -724,6 +731,7 @@ bool Space::Is(uchar c) {
}
}
+
// Number: point.category == 'Nd'
static const uint16_t kNumberTable0Size = 56;
@@ -758,6 +766,7 @@ bool Number::Is(uchar c) {
}
}
+
// WhiteSpace: 'Ws' in point.properties
static const uint16_t kWhiteSpaceTable0Size = 7;
@@ -779,6 +788,7 @@ bool WhiteSpace::Is(uchar c) {
}
}
+
// LineTerminator: 'Lt' in point.properties
static const uint16_t kLineTerminatorTable0Size = 2;
@@ -800,6 +810,7 @@ bool LineTerminator::Is(uchar c) {
}
}
+
// CombiningMark: point.category in ['Mn', 'Mc']
static const uint16_t kCombiningMarkTable0Size = 258;
@@ -871,6 +882,7 @@ bool CombiningMark::Is(uchar c) {
}
}
+
// ConnectorPunctuation: point.category == 'Pc'
static const uint16_t kConnectorPunctuationTable0Size = 1;
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index ca83e38f86..905e178fec 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -73,11 +73,12 @@ Counters::Counters(Isolate* isolate) {
count_of_FIXED_ARRAY_##name##_ = \
StatsCounter("c:" "V8.CountOf_FIXED_ARRAY-" #name); \
size_of_FIXED_ARRAY_##name##_ = \
- StatsCounter("c:" "V8.SizeOf_FIXED_ARRAY-" #name); \
+ StatsCounter("c:" "V8.SizeOf_FIXED_ARRAY-" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
}
+
void Counters::ResetHistograms() {
#define HT(name, caption) name##_.Reset();
HISTOGRAM_TIMER_LIST(HT)
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index cb67105c3f..cfec0c0c41 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -263,6 +263,7 @@ Object* V8::FillHeapNumberWithRandom(Object* heap_number,
return heap_number;
}
+
void V8::InitializeOncePerProcessImpl() {
FlagList::EnforceFlagImplications();
if (FLAG_stress_compaction) {
@@ -323,6 +324,7 @@ void V8::InitializeOncePerProcessImpl() {
Bootstrapper::InitializeOncePerProcess();
}
+
void V8::InitializeOncePerProcess() {
CallOnce(&init_once, &InitializeOncePerProcessImpl);
}
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 4932da93f9..c3f1f01f94 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -418,6 +418,17 @@ enum CpuImplementer {
};
+enum CpuPart {
+ CPU_UNKNOWN,
+ CORTEX_A15,
+ CORTEX_A12,
+ CORTEX_A9,
+ CORTEX_A8,
+ CORTEX_A7,
+ CORTEX_A5
+};
+
+
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
@@ -434,6 +445,7 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
UNALIGNED_ACCESSES = 4, // ARM
MOVW_MOVT_IMMEDIATE_LOADS = 5, // ARM
VFP32DREGS = 6, // ARM
+ NEON = 7, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 925e1982c0..2df187a572 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -388,6 +388,7 @@ bool ThreadManager::IsArchived() {
return data != NULL && data->thread_state() != NULL;
}
+
void ThreadManager::Iterate(ObjectVisitor* v) {
// Expecting no threads during serialization/deserialization
for (ThreadState* state = FirstThreadStateInUse();
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index ff9f8f2366..fd3f4a5095 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -317,6 +317,11 @@ template <typename sourcechar, typename sinkchar>
INLINE(static void CopyCharsUnsigned(sinkchar* dest,
const sourcechar* src,
int chars));
+#if defined(V8_HOST_ARCH_ARM)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+#endif
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
@@ -375,6 +380,105 @@ void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
}
+#if defined(V8_HOST_ARCH_ARM)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ case 2:
+ memcpy(dest, src, 2);
+ break;
+ case 3:
+ memcpy(dest, src, 3);
+ break;
+ case 4:
+ memcpy(dest, src, 4);
+ break;
+ case 5:
+ memcpy(dest, src, 5);
+ break;
+ case 6:
+ memcpy(dest, src, 6);
+ break;
+ case 7:
+ memcpy(dest, src, 7);
+ break;
+ case 8:
+ memcpy(dest, src, 8);
+ break;
+ case 9:
+ memcpy(dest, src, 9);
+ break;
+ case 10:
+ memcpy(dest, src, 10);
+ break;
+ case 11:
+ memcpy(dest, src, 11);
+ break;
+ case 12:
+ memcpy(dest, src, 12);
+ break;
+ case 13:
+ memcpy(dest, src, 13);
+ break;
+ case 14:
+ memcpy(dest, src, 14);
+ break;
+ case 15:
+ memcpy(dest, src, 15);
+ break;
+ default:
+ OS::MemCopy(dest, src, chars);
+ break;
+ }
+}
+
+
+void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
+ if (chars >= OS::kMinComplexConvertMemCopy) {
+ OS::MemCopyUint16Uint8(dest, src, chars);
+ } else {
+ OS::MemCopyUint16Uint8Wrapper(dest, src, chars);
+ }
+}
+
+
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ case 2:
+ memcpy(dest, src, 4);
+ break;
+ case 3:
+ memcpy(dest, src, 6);
+ break;
+ case 4:
+ memcpy(dest, src, 8);
+ break;
+ case 5:
+ memcpy(dest, src, 10);
+ break;
+ case 6:
+ memcpy(dest, src, 12);
+ break;
+ case 7:
+ memcpy(dest, src, 14);
+ break;
+ default:
+ OS::MemCopy(dest, src, chars * sizeof(*dest));
+ break;
+ }
+}
+#endif
+
+
class StringBuilder : public SimpleStringBuilder {
public:
explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 0041c67c5a..1f1ff7add7 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 20
-#define BUILD_NUMBER 2
+#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h
index 5d9c89e312..2b5d7d71f2 100644
--- a/deps/v8/src/win32-headers.h
+++ b/deps/v8/src/win32-headers.h
@@ -89,8 +89,6 @@
#undef THIS
#undef CONST
#undef NAN
-#undef TRUE
-#undef FALSE
#undef UNKNOWN
#undef NONE
#undef ANY
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 1c231a70b6..b9af527eea 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -369,7 +369,7 @@ bool RelocInfo::IsPatchedReturnSequence() {
// The 11th byte is int3 (0xCC) in the return sequence and
// REX.WB (0x48+register bit) for the call sequence.
#ifdef ENABLE_DEBUGGER_SUPPORT
- return pc_[10] != 0xCC;
+ return pc_[2 + kPointerSize] != 0xCC;
#else
return false;
#endif
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 3a3ee9cdb0..aaab839121 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -902,11 +902,13 @@ void Assembler::clc() {
emit(0xF8);
}
+
void Assembler::cld() {
EnsureSpace ensure_space(this);
emit(0xFC);
}
+
void Assembler::cdq() {
EnsureSpace ensure_space(this);
emit(0x99);
@@ -2522,6 +2524,7 @@ void Assembler::emit_farith(int b1, int b2, int i) {
emit(b2 + i);
}
+
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
@@ -2582,6 +2585,7 @@ void Assembler::movq(XMMRegister dst, XMMRegister src) {
}
}
+
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3035,10 +3039,12 @@ void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
+
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
+
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
@@ -3075,6 +3081,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
reloc_info_writer.Write(&rinfo);
}
+
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 2b44a778c7..d34e4f70d9 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -726,6 +726,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ Abort("no cases left");
}
+
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1194,7 +1195,6 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
rbx, // Result.
rcx, // Scratch 1.
rdx, // Scratch 2.
- false, // Input is known to be smi?
&not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 923384853f..e090437d55 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -61,6 +61,16 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
}
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rbx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -222,7 +232,40 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void UnaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rcx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rbx, rcx, rdx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}
@@ -256,8 +299,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in rax.
Label check_heap_number, call_builtin;
- __ SmiTest(rax);
- __ j(not_zero, &check_heap_number, Label::kNear);
+ __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
__ Ret();
__ bind(&check_heap_number);
@@ -575,324 +617,87 @@ class FloatingPointHelper : public AllStatic {
};
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double to remove sign bit, shift exponent down to least significant bits.
- // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus, Label::kNear);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so result is zero, and rcx already holds zero.
- __ j(above, &done, Label::kNear);
-
-  // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
-
-
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
-
- __ push(rax); // the operand
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(mode_));
- __ Push(Smi::FromInt(operand_type_));
-
- __ push(rcx); // Push return address.
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Register input_reg = this->source();
+ Register final_result_reg = this->destination();
+ ASSERT(is_truncating());
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
+ Label check_negative, process_64_bits, done;
+ int double_offset = offset();
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
+ // Account for return address and saved regs if input is rsp.
+ if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
+ MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+ MemOperand exponent_operand(MemOperand(input_reg,
+ double_offset + kDoubleSize / 2));
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label slow;
- GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance slow_near) {
- Label done;
- __ JumpIfNotSmi(rax, non_smi, non_smi_near);
- __ SmiNeg(rax, rax, &done, Label::kNear);
- __ jmp(slow, slow_near);
- __ bind(&done);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- __ JumpIfNotSmi(rax, non_smi, non_smi_near);
- __ SmiNot(rax, rax);
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, slow);
-
- // Operand is a float, negate its value by flipping the sign bit.
- if (mode_ == UNARY_OVERWRITE) {
- __ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
- } else {
- // Allocate a heap number before calculating the answer,
- // so we don't have an untagged double around during GC.
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rcx, rax);
- __ pop(rax);
+ Register scratch1;
+ Register scratch_candidates[3] = { rbx, rdx, rdi };
+ for (int i = 0; i < 3; i++) {
+ scratch1 = scratch_candidates[i];
+ if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
}
- __ bind(&heapnumber_allocated);
- // rcx: allocated 'empty' number
-
- // Copy the double value to the new heap number, flipping the sign.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
-
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
+ // Since we must use rcx for shifts below, use some other register (rax)
+  // to calculate the result if rcx is the requested return register.
+ Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
+  // Save rcx if it isn't the return register (we clobber it below); if it
+  // is the return register, save the temp register we use in its stead
+  // for the result.
+ Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
+ __ push(scratch1);
+ __ push(save_reg);
+
+ bool stash_exponent_copy = !input_reg.is(rsp);
+ __ movl(scratch1, mantissa_operand);
+ __ movsd(xmm0, mantissa_operand);
+ __ movl(rcx, exponent_operand);
+ if (stash_exponent_copy) __ push(rcx);
+
+ __ andl(rcx, Immediate(HeapNumber::kExponentMask));
+ __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
+ __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
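+  // result_reg now holds the unbiased exponent (biased exponent minus bias).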
+ __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits);
+
+  // Result is entirely in the lower 32 bits of the mantissa.
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ __ subl(rcx, Immediate(delta));
+ __ xorl(result_reg, result_reg);
+ __ cmpl(rcx, Immediate(31));
+ __ j(above, &done);
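+  // The shift count (exponent - kMantissaBits) is now in [0, 31]; shifting
+  // the low mantissa word left by it produces the result's low 32 bits.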
+ __ shll_cl(scratch1);
+ __ jmp(&check_negative);
+
+ __ bind(&process_64_bits);
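+  // Exponent below kMantissaBits: the magnitude is under 2^52, so the value
+  // fits in an int64 and cvttsd2siq's truncation toward zero is exact.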
+ __ cvttsd2siq(result_reg, xmm0);
+ __ jmp(&done, Label::kNear);
+
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ movl(result_reg, scratch1);
+ __ negl(result_reg);
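+  // Re-read the exponent word (stashed copy or original operand) to test
+  // the sign of the double.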
+ if (stash_exponent_copy) {
+ __ cmpl(MemOperand(rsp, 0), Immediate(0));
+ } else {
+ __ cmpl(exponent_operand, Immediate(0));
+ }
+ __ cmovl(greater, result_reg, scratch1);
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
+ // Restore registers
+ __ bind(&done);
+ if (stash_exponent_copy) {
+ __ addq(rsp, Immediate(kDoubleSize));
+ }
+ if (!final_result_reg.is(result_reg)) {
+ ASSERT(final_result_reg.is(rcx));
+ __ movl(final_result_reg, result_reg);
+ }
+ __ pop(save_reg);
+ __ pop(scratch1);
+ __ ret(0);
}
@@ -1208,8 +1013,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_left_stub);
@@ -1219,8 +1024,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &call_runtime, Label::kNear);
- StringAddStub string_add_right_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_right_stub);
@@ -1295,8 +1100,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &call_runtime);
- StringAddStub string_add_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_stub);
@@ -1778,12 +1583,18 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
__ JumpIfSmi(rax, &rax_is_smi);
__ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
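+    // The trailing 'true' selects the truncating (ToInt32-style) conversion
+    // that the IntegerConvert helper used to perform.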
+ DoubleToIStub stub1(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+
__ jmp(&done);
__ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ DoubleToIStub stub2(rdx, rdx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+  __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ JumpIfNotSmi(rax, &rax_is_object);
+
__ bind(&rax_is_smi);
__ SmiToInteger32(rcx, rax);
@@ -1818,7 +1629,9 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the rdx heap number in rcx.
- IntegerConvert(masm, r8, rdx);
+ DoubleToIStub stub1(rdx, r8, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
// Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
@@ -1838,7 +1651,10 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
+ DoubleToIStub stub2(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+
__ bind(&done);
__ movl(rax, r8);
}
@@ -2266,7 +2082,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -2295,7 +2112,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
support_wrapper_);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -2358,7 +2176,8 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -3299,7 +3118,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register result,
Register scratch1,
Register scratch2,
- bool object_is_smi,
Label* not_found) {
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
@@ -3323,39 +3141,36 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Label is_smi;
Label load_result_from_cache;
Factory* factory = masm->isolate()->factory();
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- factory->heap_number_map(),
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
+ __ JumpIfSmi(object, &is_smi);
+ __ CheckMap(object,
+ factory->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
+
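+  // Heap numbers are hashed by xoring the two 32-bit halves of the double's
+  // bit pattern.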
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+ Register index = scratch;
+ Register probe = mask;
+ __ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
__ bind(&is_smi);
__ SmiToInteger32(scratch, object);
GenerateConvertHashCodeToIndex(masm, scratch, mask);
- Register index = scratch;
// Check if the entry is the smi we are looking for.
__ cmpq(object,
FieldOperand(number_string_cache,
@@ -3394,7 +3209,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ movq(rbx, Operand(rsp, kPointerSize));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
+ GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
__ ret(1 * kPointerSize);
__ bind(&runtime);
@@ -3437,10 +3252,9 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
__ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(scratch, Immediate(kIsNotStringMask | kIsInternalizedMask));
- __ cmpb(scratch, Immediate(kInternalizedTag | kStringTag));
- __ j(not_equal, label);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, label);
}
@@ -3554,7 +3368,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label first_non_object;
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(below, &first_non_object, Label::kNear);
- // Return non-zero (eax (not rax) is not zero)
+    // Return non-zero (rax is not zero)
Label return_not_equal;
STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
@@ -3616,7 +3430,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
masm, &check_for_strings, rdx, kScratchRegister);
// We've already checked for object identity, so if both operands are
- // internalized strings they aren't equal. Register eax (not rax) already
+  // internalized strings they aren't equal. Register rax already
// holds a non-zero value, which indicates not equal, so just return.
__ ret(0);
}
@@ -3736,17 +3550,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
__ j(equal, &done);
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- __ JumpIfNotSmi(rcx, &miss);
- if (FLAG_debug_code) {
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
- __ Cmp(rcx, terminal_kind_sentinel);
- __ Assert(less_equal, "Array function sentinel is not an ElementsKind");
- }
+ // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the cell either some other function or an
+ // AllocationSite. Do a map check on the object in rcx.
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
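+  // (FieldOperand(rcx, 0) addresses the map word: HeapObject::kMapOffset is 0.)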
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &miss);
// Make sure the function is the Array() function
__ LoadArrayFunction(rcx);
@@ -3765,7 +3577,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ bind(&megamorphic);
__ Move(FieldOperand(rbx, Cell::kValueOffset),
TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ jmp(&done, Label::kNear);
+ __ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
@@ -3775,14 +3587,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ cmpq(rdi, rcx);
__ j(not_equal, &not_array_function);
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- GetInitialFastElementsKind());
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- initial_kind_sentinel);
+  // The target function is the Array constructor; create an AllocationSite
+  // if we don't already have one, and store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
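+    // Save the values still needed afterwards (rax, rdi and the type-info
+    // cell in rbx) across the stub call, which may clobber them.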
+ __ push(rax);
+ __ push(rdi);
+ __ push(rbx);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ pop(rbx);
+ __ pop(rdi);
+ __ pop(rax);
+ }
__ jmp(&done);
__ bind(&not_array_function);
@@ -3952,6 +3772,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -4694,7 +4515,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
- if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfSmi(rax, &call_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
__ j(above_equal, &call_runtime);
@@ -4703,20 +4528,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(rdx, &call_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
// Both arguments are strings.
@@ -4752,7 +4573,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  // If arguments were known to be strings, maps are not loaded to r8 and r9
// by the code above.
- if (flags_ != NO_STRING_ADD_FLAGS) {
+ if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
__ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
}
@@ -4972,7 +4793,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm, rcx);
// Build a frame
{
@@ -4987,7 +4808,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm, rcx);
// Build a frame
{
@@ -5040,7 +4861,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch1,
scratch2,
scratch3,
- false,
&not_cached);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
@@ -5328,6 +5148,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ bind(&hash_not_zero);
}
+
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -5851,14 +5672,10 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, Immediate(kIsNotStringMask | kIsInternalizedMask));
- __ cmpb(tmp1, Immediate(kInternalizedTag | kStringTag));
- __ j(not_equal, &miss, Label::kNear);
-
- __ and_(tmp2, Immediate(kIsNotStringMask | kIsInternalizedMask));
- __ cmpb(tmp2, Immediate(kInternalizedTag | kStringTag));
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ or_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
@@ -5895,7 +5712,6 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
__ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
@@ -5968,10 +5784,10 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// strings.
if (equality) {
Label do_compare;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &do_compare, Label::kNear);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ or_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -6766,18 +6582,20 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ j(zero, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry
+ // Fix kind and retry (only if we have an allocation site in the cell).
__ incl(rdx);
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &normal_sequence);
-
- // The type cell may have gone megamorphic, don't overwrite if so
- __ movq(rcx, FieldOperand(rbx, kPointerSize));
- __ JumpIfNotSmi(rcx, &normal_sequence);
+ __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &normal_sequence);
// Save the resulting elements kind in type info
__ Integer32ToSmi(rdx, rdx);
- __ movq(FieldOperand(rbx, kPointerSize), rdx);
+ __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
__ SmiToInteger32(rdx, rdx);
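+  // The transition info is kept as a smi in the AllocationSite, hence the
+  // tag before the store and the untag afterwards.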
__ bind(&normal_sequence);
@@ -6806,7 +6624,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -6879,7 +6697,17 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
- __ JumpIfNotSmi(rdx, &no_info);
+
+  // The type cell's value may be undefined.
+ __ Cmp(rdx, undefined_sentinel);
+ __ j(equal, &no_info);
+
+  // The type cell holds either an AllocationSite or a JSFunction.
+ __ Cmp(FieldOperand(rdx, 0),
+ Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
+ __ j(not_equal, &no_info);
+
+ __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index f6cfad048e..e430bf2c80 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -81,77 +81,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -205,21 +134,6 @@ class StringHelper : public AllStatic {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 1 << 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
- // Stub needs a frame before calling the runtime
- ERECT_FRAME = 1 << 3,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -308,7 +222,6 @@ class NumberToStringStub: public PlatformCodeStub {
Register result,
Register scratch1,
Register scratch2,
- bool object_is_smi,
Label* not_found);
private:
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 9643872a8c..a823bf2e6d 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -253,7 +253,7 @@ ModuloFunction CreateModuloFunction() {
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
@@ -262,9 +262,9 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- rsp[0] : return address
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
- __ j(equal, allocation_site_info_found);
+ ASSERT(allocation_memento_found != NULL);
+ __ TestJSArrayForAllocationMemento(rdx, rdi);
+ __ j(equal, allocation_memento_found);
}
// Set transitioned map.
@@ -292,7 +292,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label allocated, new_backing_store, only_change_map, done;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
+ __ TestJSArrayForAllocationMemento(rdx, rdi);
__ j(equal, fail);
}
@@ -418,7 +418,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
+ __ TestJSArrayForAllocationMemento(rdx, rdi);
__ j(equal, fail);
}
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index f2f7ed0735..d7a73d75c9 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -451,16 +451,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible
+ // Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- if (type() == EAGER || type() == SOFT) {
- __ Set(arg_reg_4, 0);
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ movq(arg_reg_4,
- Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
+ __ movq(arg_reg_4,
+ Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
__ subq(arg5, rbp);
__ neg(arg5);
@@ -503,12 +498,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(Operand(rbx, dst_offset));
}
- // Remove the bailout id from the stack.
- if (type() == EAGER || type() == SOFT) {
- __ addq(rsp, Immediate(kPointerSize));
- } else {
- __ addq(rsp, Immediate(2 * kPointerSize));
- }
+ // Remove the bailout id and return address from the stack.
+ __ addq(rsp, Immediate(2 * kPointerSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index d787775047..eefa70372e 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -49,6 +49,7 @@ enum OperandType {
BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
};
+
//------------------------------------------------------------------
// Tables
//------------------------------------------------------------------
@@ -293,6 +294,7 @@ static InstructionDesc cmov_instructions[16] = {
{"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
};
+
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
@@ -301,6 +303,7 @@ enum UnimplementedOpcodeAction {
ABORT_ON_UNIMPLEMENTED_OPCODE
};
+
// A new DisassemblerX64 object is created to disassemble each instruction.
// The object can only disassemble a single instruction.
class DisassemblerX64 {
@@ -1763,6 +1766,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
return instr_len;
}
+
//------------------------------------------------------------------------------
@@ -1822,6 +1826,7 @@ const char* NameConverter::NameInCode(byte* addr) const {
return "";
}
+
//------------------------------------------------------------------------------
Disassembler::Disassembler(const NameConverter& converter)
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 9ad7f586b6..bac4e793b2 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -3645,7 +3645,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -4353,10 +4353,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
const char* comment) {
// TODO(svenpanne): Allowing format strings in Comment would be nice here...
Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
+ UnaryOpStub stub(expr->op());
// UnaryOpStub expects the argument to be in the
// accumulator register rax.
VisitForAccumulatorValue(expr->expression());
@@ -4423,7 +4420,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ JumpIfSmi(rax, &no_conversion, Label::kNear);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ JumpIfSmi(rax, &no_conversion, Label::kNear);
+ }
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index a8de443940..f26b234fed 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -339,10 +339,10 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
// Is the string internalized? We already know it's a string so a single
// bit test is enough.
- STATIC_ASSERT(kInternalizedTag != 0);
+ STATIC_ASSERT(kNotInternalizedTag != 0);
__ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(zero, not_unique);
+ Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, not_unique);
__ bind(&unique);
}
@@ -673,8 +673,8 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -686,7 +686,7 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -702,7 +702,7 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1449,8 +1449,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::STUB, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
no_reg);
@@ -1618,8 +1619,8 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
  // Must return the modified receiver in rax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ movq(rax, rdx);
__ Ret();
@@ -1643,8 +1644,8 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
  // Must return the modified receiver in rax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ movq(rax, rdx);
__ Ret();
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index de43f86a3d..c9b808c10c 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -281,8 +281,7 @@ bool LCodeGen::GenerateBody() {
bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
@@ -298,47 +297,24 @@ bool LCodeGen::GenerateJumpTable() {
}
if (jump_table_[i].needs_frame) {
__ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
- __ call(kScratchRegister);
- }
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
- __ jmp(kScratchRegister);
- }
+ __ bind(&needs_frame);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ call(kScratchRegister);
}
} else {
- if (type == Deoptimizer::LAZY) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
return !is_aborted();
@@ -689,13 +665,8 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
if (cc == no_condition && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
@@ -950,11 +921,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1834,12 +1800,12 @@ int LCodeGen::GetNextEmittedBlock() const {
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
- int right_block = instr->FalseDestination(chunk_);
int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
+ if (right_block == left_block || cc == no_condition) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
@@ -1859,6 +1825,25 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
+void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsSmiOrInteger32() || r.IsDouble()) {
+ EmitBranch(instr, no_condition);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsTaggedNumber()) {
+ EmitBranch(instr, no_condition);
+ }
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ EmitBranch(instr, equal);
+ }
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
@@ -2027,7 +2012,7 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Condition cc = TokenToCondition(instr->op(), instr->is_double());
@@ -2613,6 +2598,16 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
}
+void LCodeGen::DoLinkObjectInList(LLinkObjectInList* instr) {
+ Register object = ToRegister(instr->object());
+ ExternalReference sites_list_address = instr->GetReference(isolate());
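+  // Prepend the object to the list: store the current list head into the
+  // object's link field, then make the object the new head.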
+ __ Load(kScratchRegister, sites_list_address);
+ __ movq(FieldOperand(object, instr->hydrogen()->store_field().offset()),
+ kScratchRegister);
+ __ Store(sites_list_address, object);
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3871,7 +3866,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Move(rbx, instr->hydrogen()->property_cell());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
@@ -4316,7 +4311,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
+ __ TestJSArrayForAllocationMemento(object, temp);
DeoptimizeIf(equal, instr->environment());
}
@@ -4324,7 +4319,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub stub(instr->hydrogen()->flags());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -5042,94 +5037,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- Immediate(instance_size >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- Immediate(initial_map->pre_allocated_property_fields()));
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- Immediate(initial_map->unused_property_fields()));
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- Immediate(initial_map->inobject_properties()));
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ movq(FieldOperand(result, JSObject::kMapOffset), map);
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ movq(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Push(Smi::FromInt(instance_size));
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5453,38 +5360,6 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(obj);
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index c89ec1fd0e..0a430964d0 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -127,7 +127,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index aed4f36647..71db17c931 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -202,6 +202,16 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
+ } else if (destination->IsDoubleRegister()) {
+ double v = cgen_->ToDouble(constant_source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
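+        // Zero uses the cheap xorps idiom; any other bit pattern goes through
+        // the scratch GPR, since there is no 64-bit immediate move to XMM.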
+ if (int_val == 0) {
+ __ xorps(dst, dst);
+ } else {
+ __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+ __ movq(dst, kScratchRegister);
+ }
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 95a44f0384..2cec68b097 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -185,7 +185,7 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -275,6 +275,24 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
+ExternalReference LLinkObjectInList::GetReference(Isolate* isolate) {
+ switch (hydrogen()->known_list()) {
+ case HLinkObjectInList::ALLOCATION_SITE_LIST:
+ return ExternalReference::allocation_sites_list_address(isolate);
+ }
+
+ UNREACHABLE();
+ // Return a dummy value
+ return ExternalReference::isolate_address(isolate);
+}
+
+
+void LLinkObjectInList::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" offset %d", hydrogen()->store_field().offset());
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -328,7 +346,6 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -1168,6 +1185,7 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
return AssignEnvironment(DefineAsRegister(result));
}
+
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
LMathAbs* result = new(zone()) LMathAbs(input);
@@ -1598,8 +1616,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
@@ -1607,7 +1625,7 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1621,7 +1639,7 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
@@ -1911,6 +1929,18 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
+ return new(zone()) LIsNumberAndBranch(
+ UseRegisterOrConstantAtStart(instr->value()));
+}
+
+
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
@@ -2016,6 +2046,13 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLinkObjectInList(HLinkObjectInList* instr) {
+ LOperand* object = UseRegister(instr->value());
+ LLinkObjectInList* result = new(zone()) LLinkObjectInList(object);
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2326,13 +2363,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2354,14 +2384,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2535,14 +2557,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* object = UseFixed(instr->enumerable(), rax);
LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index a7530be14e..32ee0b9d31 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -50,7 +50,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -81,7 +80,7 @@ class LCodeGen;
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -92,7 +91,6 @@ class LCodeGen;
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
@@ -107,7 +105,6 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
@@ -120,9 +117,11 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
+ V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
@@ -672,9 +671,9 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -682,8 +681,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -866,6 +866,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
};
+class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LIsNumberAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+};
+
+
class LIsStringAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1018,20 +1031,6 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
@@ -1622,6 +1621,23 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
+class LLinkObjectInList: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LLinkObjectInList(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ ExternalReference GetReference(Isolate* isolate);
+
+ DECLARE_CONCRETE_INSTRUCTION(LinkObjectInList, "link-object-in-list")
+ DECLARE_HYDROGEN_ACCESSOR(LinkObjectInList)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2368,19 +2384,6 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LAllocateObject(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 1, 1> {
public:
LAllocate(LOperand* size, LOperand* temp) {
@@ -2466,20 +2469,6 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
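
For context: every Lithium instruction touched above declares itself through the same two macros. A rough sketch of what they expand to, simplified from the definitions in lithium-x64.h (the real DECLARE_CONCRETE_INSTRUCTION also declares CompileToNative and a checked cast):

    // Simplified shape of the declaration macros (illustrative only).
    #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
      virtual Opcode opcode() const { return LInstruction::k##type; } \
      virtual const char* Mnemonic() const { return mnemonic; }

    #define DECLARE_HYDROGEN_ACCESSOR(type)     \
      H##type* hydrogen() const {               \
        return H##type::cast(hydrogen_value()); \
      }

So LIsNumberAndBranch above gains opcode kIsNumberAndBranch, the mnemonic "is-number-and-branch", and a typed hydrogen() accessor returning the corresponding HIsNumberAndBranch node.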
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index e5bee67bb4..b3e15905aa 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -972,6 +972,7 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
}
+
void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
@@ -1029,6 +1030,7 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
return kScratchRegister;
}
+
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
movq(dst,
@@ -1156,6 +1158,7 @@ void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
void MacroAssembler::SmiTest(Register src) {
+ AssertSmi(src);
testq(src, src);
}
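
The new AssertSmi call is a debug-only guard: it verifies the operand really carries a Smi tag before the testq, and compiles to nothing unless debug code is emitted. A self-contained model of the check, assuming V8's x64 tagging where a Smi has a clear low bit:

    #include <cassert>
    #include <stdint.h>

    // Model of AssertSmi: in debug builds, abort if the value is not
    // Smi-tagged (low bit clear under the assumed tagging scheme).
    static void AssertSmiModel(uint64_t value, bool emit_debug_code) {
      if (emit_debug_code) {
        assert((value & 1) == 0);  // corresponds to Check(is_smi, ...)
      }
    }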
@@ -2314,11 +2317,15 @@ static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
T operand_or_register,
Label* not_unique_name,
Label::Distance distance) {
- STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
- masm->cmpb(operand_or_register, Immediate(kInternalizedTag));
- masm->j(less, not_unique_name, distance);
- masm->cmpb(operand_or_register, Immediate(SYMBOL_TYPE));
- masm->j(greater, not_unique_name, distance);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ masm->testb(operand_or_register,
+ Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ masm->j(zero, &succeed, Label::kNear);
+ masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
+ masm->j(not_equal, not_unique_name, distance);
+
+ masm->bind(&succeed);
}
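
The rewritten unique-name check leans on the instance-type bit layout asserted above: internalized strings have both kIsNotStringMask and kIsNotInternalizedMask clear, so a single testb accepts them, and the only other unique name is a symbol. The same predicate modeled in plain C++ (the mask and type values are assumptions standing in for the real constants):

    #include <stdint.h>

    static bool IsUniqueName(uint8_t instance_type) {
      const uint8_t kIsNotStringMask = 0x80;        // assumed bit positions,
      const uint8_t kIsNotInternalizedMask = 0x40;  // for illustration only
      const uint8_t kSymbolType = 0xc5;             // hypothetical SYMBOL_TYPE
      // testb + j(zero, &succeed): internalized string => unique name.
      if ((instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0)
        return true;
      // cmpb + j(not_equal, not_unique_name): otherwise only SYMBOL_TYPE.
      return instance_type == kSymbolType;
    }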
@@ -3897,52 +3904,8 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
lea(result_end, Operand(element_count, element_size, header_size));
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch, flags);
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
- incq(result);
- }
+ Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
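
After this hunk the element_count overload of Allocate is a thin wrapper: it folds element_count * element_size + header_size into result_end with a single lea and defers the FLAG_inline_new bailout, limit check, and top update to the register-sized overload, so that logic exists in one place only. Condensed from the diff, the whole body becomes:

    // Condensed view of the refactor (names as in the diff): result_end
    // doubles as the object_size input of the single-size overload.
    void MacroAssembler::Allocate(int header_size, ScaleFactor element_size,
                                  Register element_count, Register result,
                                  Register result_end, Register scratch,
                                  Label* gc_required, AllocationFlags flags) {
      ASSERT((flags & SIZE_IN_WORDS) == 0);
      lea(result_end, Operand(element_count, element_size, header_size));
      Allocate(result_end, result, result_end, scratch, gc_required, flags);
    }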
@@ -3952,7 +3915,7 @@ void MacroAssembler::Allocate(Register object_size,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3971,6 +3934,13 @@ void MacroAssembler::Allocate(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+ testq(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, "Allocation is not double aligned");
+ }
+
// Calculate new top and bail out if new space is exhausted.
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
@@ -3986,13 +3956,6 @@ void MacroAssembler::Allocate(Register object_size,
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
- }
-
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
addq(result, Immediate(kHeapObjectTag));
@@ -4710,25 +4673,25 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
j(not_equal, &next);
}
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg) {
- Label no_info_available;
+ Label no_memento_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
movq(kScratchRegister, new_space_start);
cmpq(scratch_reg, kScratchRegister);
- j(less, &no_info_available);
+ j(less, &no_memento_available);
cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
- j(greater, &no_info_available);
- CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
- Heap::kAllocationSiteInfoMapRootIndex);
- bind(&no_info_available);
+ j(greater, &no_memento_available);
+ CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ Heap::kAllocationMementoMapRootIndex);
+ bind(&no_memento_available);
}
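
TestJSArrayForAllocationMemento only reports a memento when the word right after the array lies inside new space, between new_space_start and the current allocation top; anything outside that window cannot be a freshly allocated memento. The range test restated as a standalone predicate (a model, not the emitted code):

    #include <stdint.h>

    // candidate = receiver + JSArray::kSize + AllocationMemento::kSize
    //             - kHeapObjectTag, as computed by the lea above.
    static bool MementoCandidateInNewSpace(uintptr_t candidate,
                                           uintptr_t new_space_start,
                                           uintptr_t new_space_top) {
      // j(less, &no_memento_available) / j(greater, &no_memento_available)
      return candidate >= new_space_start && candidate <= new_space_top;
    }

Only when this holds does the final CompareRoot compare the map word against kAllocationMementoMapRootIndex, leaving the equal flag set for the caller.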
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 124153b52d..9d5d2a31c5 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -1354,14 +1354,14 @@ class MacroAssembler: public Assembler {
void CheckEnumCache(Register null_value,
Label* call_runtime);
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, condition flags are set to equal
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg);
private:
// Order general registers are pushed by Pushad.
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 4b3ee400f3..a903ea11d6 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -757,86 +757,53 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* miss_restore_name,
- Label* slow) {
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
+void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// the store is successful.
+void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- // Ensure no transitions to deprecated maps are followed.
- __ CheckMapDeprecated(transition, scratch1, miss_label);
-
- // Check that we are allowed to write this.
- if (object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- // holder == object indicates that no property was found.
- if (lookup->holder() != *object) {
- holder = lookup->holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- Register holder_reg = CheckPrototypes(
- object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
- // If no property was found, and the holder (the last object in the
- // prototype chain) is in slow mode, we need to do a negative lookup on the
- // holder.
- if (lookup->holder() == *object) {
- if (holder->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(
- masm,
- Handle<GlobalObject>(GlobalObject::cast(holder)),
- name,
- scratch1,
- miss_restore_name);
- } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
- }
- }
- }
-
- Register storage_reg = name_reg;
-
if (details.type() == CONSTANT_FUNCTION) {
Handle<HeapObject> constant(
HeapObject::cast(descriptors->GetValue(descriptor)));
__ LoadHeapObject(scratch1, constant);
__ cmpq(value_reg, scratch1);
- __ j(not_equal, miss_restore_name);
+ __ j(not_equal, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_restore_name);
+ __ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_restore_name);
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow);
@@ -848,7 +815,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_restore_name, DONT_DO_SMI_CHECK);
+ miss_label, DONT_DO_SMI_CHECK);
__ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
@@ -918,14 +885,11 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ movq(storage_reg, value_reg);
}
__ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs,
+ receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
} else {
@@ -941,14 +905,11 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ movq(storage_reg, value_reg);
}
__ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs,
+ scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -961,24 +922,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
+void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1107,6 +1059,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ Move(scratch1, Handle<Map>(object->map()));
+
Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1212,7 +1168,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
@@ -1222,6 +1179,17 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
}
+void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
+
+
Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
@@ -1268,7 +1236,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
return reg;
}
@@ -1289,7 +1257,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
}
@@ -1641,11 +1609,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
- Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
- Handle<Cell> kind_feedback_cell =
- isolate()->factory()->NewCell(kind);
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
__ movq(rax, Immediate(argc));
- __ Move(rbx, kind_feedback_cell);
+ __ Move(rbx, site_feedback_cell);
__ Move(rdi, function);
ArrayConstructorStub stub(isolate());
@@ -2674,23 +2642,18 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(receiver(), &miss);
- CheckPrototypes(object, receiver(), holder,
- scratch1(), scratch2(), scratch3(), name, &miss);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
__ pop(scratch1()); // remove the return address
__ push(receiver());
__ Push(callback); // callback info
- __ push(this->name());
+ __ Push(name);
__ push(value());
__ push(scratch1()); // restore return address
@@ -2699,12 +2662,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetICCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
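
CompileStoreCallback now delegates its receiver checks to HandlerFrontend, so the hand-rolled JumpIfSmi/CheckPrototypes sequence and the per-stub miss label disappear, and the stub is registered via GetCode rather than GetICCode. What remains is marshalling four arguments for the runtime call; a toy model of the return-address shuffle used above, with stand-in values and the stack modeled as a vector:

    #include <cassert>
    #include <stdint.h>
    #include <vector>

    int main() {
      const uint64_t kRet = 1, kRecv = 2, kCallback = 3, kName = 4, kValue = 5;
      std::vector<uint64_t> stack = {kRet};               // back() = stack top
      uint64_t scratch = stack.back(); stack.pop_back();  // pop(scratch1())
      stack.push_back(kRecv);      // push(receiver())
      stack.push_back(kCallback);  // Push(callback)
      stack.push_back(kName);      // Push(name)
      stack.push_back(kValue);     // push(value())
      stack.push_back(scratch);    // push(scratch1()): return address on top
      assert(stack.back() == kRet);  // TailCallExternalReference(..., 4, 1)
      return 0;
    }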
@@ -2758,20 +2717,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ pop(scratch1()); // remove the return address
__ push(receiver());
__ push(this->name());
@@ -2784,12 +2729,8 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetICCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
@@ -3000,7 +2941,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- HandlerFrontendFooter(&success, &miss);
+ HandlerFrontendFooter(name, &success, &miss);
__ bind(&success);
Counters* counters = isolate()->counters();
@@ -3013,7 +2954,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
}
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
CodeHandleList* handlers,
Handle<Name> name,
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index 2a0a0e2846..9ee00edcba 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -92,18 +92,15 @@ void Zone::DeleteAll() {
#endif
// Find a segment with a suitable size to keep around.
- Segment* keep = segment_head_;
- while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
- keep = keep->next();
- }
-
+ Segment* keep = NULL;
// Traverse the chained list of segments, zapping (in debug mode)
// and freeing every segment except the one we wish to keep.
for (Segment* current = segment_head_; current != NULL; ) {
Segment* next = current->next();
- if (current == keep) {
+ if (keep == NULL && current->size() <= kMaximumKeptSegmentSize) {
// Unlink the segment we wish to keep from the list.
- current->clear_next();
+ keep = current;
+ keep->clear_next();
} else {
int size = current->size();
#ifdef DEBUG
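
The Zone::DeleteAll change collapses two list walks into one: instead of pre-scanning for a segment no larger than kMaximumKeptSegmentSize and then matching it by pointer, the single traversal now keeps the first sufficiently small segment it meets and frees everything else. The selection logic, modeled standalone:

    #include <cstddef>
    #include <vector>

    // Returns the index of the segment that would be kept, or -1;
    // every other segment is zapped (in debug mode) and freed.
    static int SelectKeptSegment(const std::vector<size_t>& sizes,
                                 size_t max_kept_size) {
      int keep = -1;
      for (size_t i = 0; i < sizes.size(); ++i) {
        if (keep == -1 && sizes[i] <= max_kept_size)
          keep = static_cast<int>(i);  // unlink and keep this one
        // else: free sizes[i]
      }
      return keep;
    }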
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index a65de867dc..9df5c7bccc 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -30,7 +30,7 @@
'v8_code': 1,
'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
},
- 'includes': ['../../build/common.gypi'],
+ 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
'targets': [
{
'target_name': 'cctest',
@@ -54,7 +54,6 @@
'test-bignum.cc',
'test-bignum-dtoa.cc',
'test-circular-queue.cc',
- 'test-compare-nil-ic-stub.cc',
'test-compiler.cc',
'test-conversions.cc',
'test-cpu-profiler.cc',
@@ -105,12 +104,15 @@
'test-utils.cc',
'test-version.cc',
'test-weakmaps.cc',
+ 'test-weaksets.cc',
'test-weaktypedarrays.cc'
],
'conditions': [
['v8_target_arch=="ia32"', {
'sources': [
'test-assembler-ia32.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-ia32.cc',
'test-disasm-ia32.cc',
'test-log-stack-tracer.cc'
],
@@ -118,6 +120,8 @@
['v8_target_arch=="x64"', {
'sources': [
'test-assembler-x64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'
],
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index e37f3dbbef..9e8f6085e9 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -47,6 +47,7 @@ test-log/EquivalenceOfLoggingAndTraversal: PASS || FAIL
# We do not yet shrink weak maps after they have been emptied by the GC
test-weakmaps/Shrinking: FAIL
+test-weaksets/WeakSet_Shrinking: FAIL
# Deferred stack trace formatting is temporarily disabled.
test-heap/ReleaseStackTraceData: PASS || FAIL
@@ -101,6 +102,12 @@ test-threads/ThreadJoinSelf: SKIP
##############################################################################
[ $arch == nacl_ia32 || $arch == nacl_x64 ]
+# NaCl builds have problems with threaded tests since Pepper_28.
+# V8 Issue 2786
+test-api/Threading1: SKIP
+test-lockers/MultithreadedParallelIsolates: SKIP
+test-lockers/ExtensionsRegistration: SKIP
+
# These tests fail as there is no /tmp directory in Native Client.
test-log/LogAccessorCallbacks: SKIP
test-log/LogCallbacks: SKIP
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 87d8d66363..7d96ea64c2 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -258,6 +258,7 @@ static void CheckAccessorArgsCorrect(
info.GetReturnValue().Set(17);
}
+
THREADED_TEST(DirectCall) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -284,6 +285,7 @@ static void EmptyGetter(Local<String> name,
info.GetReturnValue().Set(v8::Handle<v8::Value>());
}
+
THREADED_TEST(EmptyResult) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 34432e36ac..fa671dc661 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -102,12 +102,14 @@ static void ExpectString(const char* code, const char* expected) {
CHECK_EQ(expected, *utf8);
}
+
static void ExpectInt32(const char* code, int expected) {
Local<Value> result = CompileRun(code);
CHECK(result->IsInt32());
CHECK_EQ(expected, result->Int32Value());
}
+
static void ExpectBoolean(const char* code, bool expected) {
Local<Value> result = CompileRun(code);
CHECK(result->IsBoolean());
@@ -860,10 +862,12 @@ static void handle_callback_impl(const v8::FunctionCallbackInfo<Value>& info,
info.GetReturnValue().Set(v8_num(102));
}
+
static void handle_callback(const v8::FunctionCallbackInfo<Value>& info) {
return handle_callback_impl(info, FUNCTION_ADDR(handle_callback));
}
+
static void handle_callback_2(const v8::FunctionCallbackInfo<Value>& info) {
return handle_callback_impl(info, FUNCTION_ADDR(handle_callback_2));
}
@@ -1077,6 +1081,7 @@ Handle<Value> TestFastReturnValues() {
return scope.Close(CompileRun("callback_object.callback()"));
}
+
THREADED_PROFILED_TEST(FastReturnValues) {
LocalContext env;
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -1693,6 +1698,7 @@ static void EchoNamedProperty(Local<String> name,
info.GetReturnValue().Set(name);
}
+
// Helper functions for Interceptor/Accessor interaction tests
void SimpleAccessorGetter(Local<String> name,
@@ -1755,6 +1761,7 @@ void AddInterceptor(Handle<FunctionTemplate> templ,
templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
}
+
THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
@@ -1772,6 +1779,7 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
ExpectInt32("child.accessor_age", 10);
}
+
THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
@@ -1792,6 +1800,7 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
ExpectInt32("child.accessor_age", 10);
}
+
THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
@@ -1811,6 +1820,7 @@ THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
ExpectString("parent.name", "Alice");
}
+
THREADED_TEST(SwitchFromInterceptorToAccessor) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
@@ -1828,6 +1838,7 @@ THREADED_TEST(SwitchFromInterceptorToAccessor) {
ExpectInt32("obj.accessor_age", 10000);
}
+
THREADED_TEST(SwitchFromAccessorToInterceptor) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
@@ -1845,6 +1856,7 @@ THREADED_TEST(SwitchFromAccessorToInterceptor) {
ExpectInt32("obj.interceptor_age", 9999);
}
+
THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
@@ -1864,6 +1876,7 @@ THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
ExpectInt32("child.accessor_age", 10000);
}
+
THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
@@ -1883,6 +1896,7 @@ THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
ExpectInt32("child.interceptor_age", 9999);
}
+
THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
@@ -1907,6 +1921,7 @@ THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
ExpectUndefined("Object.getOwnPropertyDescriptor(obj, 'age').value");
}
+
THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
@@ -1931,6 +1946,7 @@ THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
ExpectUndefined("Object.getOwnPropertyDescriptor(obj, 'age').value");
}
+
THREADED_TEST(SwitchFromInterceptorToProperty) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
@@ -1948,6 +1964,7 @@ THREADED_TEST(SwitchFromInterceptorToProperty) {
ExpectInt32("child.age", 10000);
}
+
THREADED_TEST(SwitchFromPropertyToInterceptor) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
@@ -1965,6 +1982,7 @@ THREADED_TEST(SwitchFromPropertyToInterceptor) {
ExpectInt32("child.interceptor_age", 9999);
}
+
THREADED_TEST(NamedPropertyHandlerGetter) {
echo_named_call_count = 0;
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -2322,6 +2340,16 @@ THREADED_TEST(GlobalObjectInternalFields) {
}
+THREADED_TEST(GlobalObjectHasRealIndexedProperty) {
+ LocalContext env;
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+
+ v8::Local<v8::Object> global = env->Global();
+ global->Set(0, v8::String::New("value"));
+ CHECK(global->HasRealIndexedProperty(0));
+}
+
+
static void CheckAlignedPointerInInternalField(Handle<v8::Object> obj,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
@@ -2404,6 +2432,7 @@ static void CheckEmbedderData(LocalContext* env,
CHECK((*env)->GetEmbedderData(index)->StrictEquals(data));
}
+
THREADED_TEST(EmbedderData) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -2553,6 +2582,14 @@ class ScopedArrayBufferContents {
const v8::ArrayBuffer::Contents contents_;
};
+template <typename T>
+static void CheckInternalFieldsAreZero(v8::Handle<T> value) {
+ CHECK_EQ(T::kInternalFieldCount, value->InternalFieldCount());
+ for (int i = 0; i < value->InternalFieldCount(); i++) {
+ CHECK_EQ(0, value->GetInternalField(i)->Int32Value());
+ }
+}
+
THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
i::FLAG_harmony_array_buffer = true;
@@ -2563,6 +2600,7 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
v8::HandleScope handle_scope(isolate);
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(1024);
+ CheckInternalFieldsAreZero(ab);
CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
CHECK(!ab->IsExternal());
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
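
CheckInternalFieldsAreZero pins down two things for a freshly created wrapper: the instance exposes exactly T::kInternalFieldCount internal fields, and each reads back as zero, so an uninitialized or stale field pointer fails the test immediately. Its contract restated over stand-in types (a model; the real helper reads v8 handles):

    #include <cassert>
    #include <vector>

    struct WrapperLike {               // stand-in for a v8 wrapper object
      static const int kInternalFieldCount = 2;
      std::vector<int> fields{0, 0};   // GetInternalField(i) results
    };

    template <typename T>
    static void CheckInternalFieldsAreZeroModel(const T& value) {
      assert(T::kInternalFieldCount == static_cast<int>(value.fields.size()));
      for (int f : value.fields) assert(f == 0);
    }

    int main() {
      WrapperLike ab;
      CheckInternalFieldsAreZeroModel(ab);
      return 0;
    }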
@@ -2607,6 +2645,7 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
"u8_a[0] = 0xAA;"
"u8_a[1] = 0xFF; u8_a.buffer");
Local<v8::ArrayBuffer> ab1 = Local<v8::ArrayBuffer>::Cast(result);
+ CheckInternalFieldsAreZero(ab1);
CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
CHECK(!ab1->IsExternal());
ScopedArrayBufferContents ab1_contents(ab1->Externalize());
@@ -2647,6 +2686,7 @@ THREADED_TEST(ArrayBuffer_External) {
i::ScopedVector<uint8_t> my_data(100);
memset(my_data.start(), 0, 100);
Local<v8::ArrayBuffer> ab3 = v8::ArrayBuffer::New(my_data.start(), 100);
+ CheckInternalFieldsAreZero(ab3);
CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
CHECK(ab3->IsExternal());
@@ -2699,6 +2739,7 @@ static Handle<TypedArray> CreateAndCheck(Handle<v8::ArrayBuffer> ab,
int byteOffset,
int length) {
v8::Handle<TypedArray> ta = TypedArray::New(ab, byteOffset, length);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
CHECK_EQ(byteOffset, static_cast<int>(ta->ByteOffset()));
CHECK_EQ(length, static_cast<int>(ta->Length()));
CHECK_EQ(length * kElementSize, static_cast<int>(ta->ByteLength()));
@@ -2736,6 +2777,7 @@ THREADED_TEST(ArrayBuffer_NeuteringApi) {
CreateAndCheck<v8::Float64Array, 8>(buffer, 8, 127);
v8::Handle<v8::DataView> dv = v8::DataView::New(buffer, 1, 1023);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
CHECK_EQ(1, static_cast<int>(dv->ByteOffset()));
CHECK_EQ(1023, static_cast<int>(dv->ByteLength()));
@@ -2754,6 +2796,7 @@ THREADED_TEST(ArrayBuffer_NeuteringApi) {
CheckDataViewIsNeutered(dv);
}
+
THREADED_TEST(ArrayBuffer_NeuteringScript) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -4386,6 +4429,7 @@ TEST(ExceptionOrder) {
fun->Call(fun, argc, a5);
}
+
void ThrowValue(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
CHECK_EQ(1, args.Length());
@@ -4608,6 +4652,7 @@ THREADED_TEST(SimplePropertyRead) {
}
}
+
THREADED_TEST(DefinePropertyOnAPIAccessor) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -4655,6 +4700,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
}
+
THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Local<ObjectTemplate> templ = ObjectTemplate::New();
@@ -6318,6 +6364,7 @@ void WhammyPropertyGetter(Local<String> name,
info.GetReturnValue().Set(whammy->getScript()->Run());
}
+
THREADED_TEST(WeakReference) {
v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
v8::Handle<v8::ObjectTemplate> templ= v8::ObjectTemplate::New();
@@ -7453,6 +7500,7 @@ static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
CHECK_EQ(" new o.foo();", *source_line);
}
+
TEST(ExceptionInNativeScript) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -10713,6 +10761,7 @@ static void FastApiCallback_SimpleSignature(
args.GetReturnValue().Set(args[0]->Int32Value() + 1);
}
+
// Helper to maximize the odds of object moving.
static void GenerateSomeGarbage() {
CompileRun(
@@ -10814,6 +10863,7 @@ static void LoadICFastApi_DirectCall_GCMoveStub(Accessor accessor) {
CHECK_EQ(31, p_getter_count);
}
+
THREADED_PROFILED_TEST(LoadICFastApi_DirectCall_GCMoveStub) {
LoadICFastApi_DirectCall_GCMoveStub(DirectGetterCallback);
}
@@ -10869,6 +10919,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
CHECK_EQ(100, interceptor_call_count);
}
+
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
int interceptor_call_count = 0;
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -10900,6 +10951,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
CHECK_EQ(100, interceptor_call_count);
}
+
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
int interceptor_call_count = 0;
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -10937,6 +10989,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
CHECK_GE(interceptor_call_count, 50);
}
+
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
int interceptor_call_count = 0;
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -10974,6 +11027,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
CHECK_GE(interceptor_call_count, 50);
}
+
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
int interceptor_call_count = 0;
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -11014,6 +11068,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
CHECK_GE(interceptor_call_count, 50);
}
+
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
int interceptor_call_count = 0;
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -11054,6 +11109,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
CHECK_GE(interceptor_call_count, 50);
}
+
THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
@@ -11078,6 +11134,7 @@ THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
}
+
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
@@ -11106,6 +11163,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
}
+
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
@@ -11139,6 +11197,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
+
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
@@ -11175,6 +11234,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
+
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
@@ -11325,6 +11385,7 @@ THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
CHECK_EQ(239, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
+
// Test the map transition before the interceptor.
THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -11427,6 +11488,7 @@ static void InterceptorICExceptionGetter(
}
}
+
// Test interceptor load/call IC where the interceptor throws an
// exception once in a while.
THREADED_TEST(InterceptorICGetterExceptions) {
@@ -11470,6 +11532,7 @@ static void InterceptorICExceptionSetter(
}
}
+
// Test interceptor store IC where the interceptor throws an exception
// once in a while.
THREADED_TEST(InterceptorICSetterExceptions) {
@@ -11633,6 +11696,7 @@ static void WebKitLike(Handle<Message> message, Handle<Value> data) {
message->GetScriptResourceName();
}
+
THREADED_TEST(ExceptionsDoNotPropagatePastTryCatch) {
LocalContext context;
HandleScope scope(context->GetIsolate());
@@ -11951,24 +12015,28 @@ TEST(Threading1) {
ApiTestFuzzer::TearDown();
}
+
TEST(Threading2) {
ApiTestFuzzer::SetUp(ApiTestFuzzer::SECOND_PART);
ApiTestFuzzer::RunAllTests();
ApiTestFuzzer::TearDown();
}
+
TEST(Threading3) {
ApiTestFuzzer::SetUp(ApiTestFuzzer::THIRD_PART);
ApiTestFuzzer::RunAllTests();
ApiTestFuzzer::TearDown();
}
+
TEST(Threading4) {
ApiTestFuzzer::SetUp(ApiTestFuzzer::FOURTH_PART);
ApiTestFuzzer::RunAllTests();
ApiTestFuzzer::TearDown();
}
+
void ApiTestFuzzer::CallTest() {
if (kLogThreading)
printf("Start test %d\n", test_number_);
@@ -12583,6 +12651,7 @@ void SetFunctionEntryHookTest::RunLoopInNewEnv(v8::Isolate* isolate) {
env->Exit();
}
+
void SetFunctionEntryHookTest::RunTest() {
// Work in a new isolate throughout.
v8::Isolate* isolate = v8::Isolate::New();
@@ -12671,13 +12740,18 @@ static bool FunctionNameIs(const char* expected,
const char* tail = event->name.str + kPreambleLen;
size_t tail_len = event->name.len - kPreambleLen;
size_t expected_len = strlen(expected);
- if (tail_len == expected_len + 1) {
- if (*tail == '*' || *tail == '~') {
- --tail_len;
- ++tail;
- } else {
- return false;
- }
+ if (tail_len > 1 && (*tail == '*' || *tail == '~')) {
+ --tail_len;
+ ++tail;
+ }
+
+ // Check for tails like 'bar :1'.
+ if (tail_len > expected_len + 2 &&
+ tail[expected_len] == ' ' &&
+ tail[expected_len + 1] == ':' &&
+ tail[expected_len + 2] &&
+ !strncmp(tail, expected, expected_len)) {
+ return true;
}
if (tail_len != expected_len)
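
The relaxed matcher first strips one optimization marker ('*' or '~', V8's log prefixes for optimized and unoptimized code) and then accepts either an exact name or a name followed by a position tail such as " :1". The acceptance rule re-implemented standalone over NUL-terminated strings (the real code works on length-delimited buffers):

    #include <cstring>

    static bool TailMatches(const char* tail, size_t tail_len,
                            const char* expected) {
      size_t expected_len = strlen(expected);
      if (tail_len > 1 && (*tail == '*' || *tail == '~')) {
        --tail_len;  // drop the optimization-status marker
        ++tail;
      }
      // Accept tails like "bar :1": name, space, colon, nonempty position.
      if (tail_len > expected_len + 2 && tail[expected_len] == ' ' &&
          tail[expected_len + 1] == ':' && tail[expected_len + 2] != '\0' &&
          strncmp(tail, expected, expected_len) == 0) {
        return true;
      }
      return tail_len == expected_len &&
             strncmp(tail, expected, expected_len) == 0;
    }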
@@ -13126,6 +13200,7 @@ THREADED_TEST(PropertyEnumeration) {
CheckOwnProperties(elms->Get(v8::Integer::New(3)), elmc4, elmv4);
}
+
THREADED_TEST(PropertyEnumeration2) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -14234,6 +14309,7 @@ static void ForceSetInterceptSetter(
info.GetReturnValue().SetUndefined();
}
+
TEST(ForceSet) {
force_set_get_count = 0;
force_set_set_count = 0;
@@ -14275,6 +14351,7 @@ TEST(ForceSet) {
CHECK_EQ(2, force_set_get_count);
}
+
TEST(ForceSetWithInterceptor) {
force_set_get_count = 0;
force_set_set_count = 0;
@@ -15734,6 +15811,7 @@ void TypedArrayTestHelper(v8::ExternalArrayType array_type,
backing_store.start(), (kElementCount+2)*sizeof(ElementType));
Local<TypedArray> ta =
TypedArray::New(ab, 2*sizeof(ElementType), kElementCount);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
CHECK_EQ(kElementCount, static_cast<int>(ta->Length()));
CHECK_EQ(2*sizeof(ElementType), static_cast<int>(ta->ByteOffset()));
CHECK_EQ(kElementCount*sizeof(ElementType),
@@ -15819,6 +15897,7 @@ THREADED_TEST(DataView) {
backing_store.start(), 2 + kSize);
Local<v8::DataView> dv =
v8::DataView::New(ab, 2, kSize);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
CHECK_EQ(2, static_cast<int>(dv->ByteOffset()));
CHECK_EQ(kSize, static_cast<int>(dv->ByteLength()));
CHECK_EQ(ab, dv->Buffer());
@@ -15838,6 +15917,7 @@ THREADED_TEST(DataView) {
"new " #View "(ab)"); \
CHECK(result->IsArrayBufferView()); \
CHECK(result->Is##View()); \
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(result.As<v8::View>()); \
}
IS_ARRAY_BUFFER_VIEW_TEST(Uint8Array)
@@ -16019,6 +16099,7 @@ static void StackTraceForUncaughtExceptionListener(
stack_trace->GetFrame(1));
}
+
TEST(CaptureStackTraceForUncaughtException) {
report_count = 0;
LocalContext env;
@@ -16326,6 +16407,7 @@ TEST(DynamicWithSourceURLInStackTrace) {
CHECK(CompileRunWithOrigin(code.start(), "url", 0, 0)->IsUndefined());
}
+
static void CreateGarbageInOldSpace() {
i::Factory* factory = i::Isolate::Current()->factory();
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -16335,6 +16417,7 @@ static void CreateGarbageInOldSpace() {
}
}
+
// Test that idle notification can be handled and eventually returns true.
TEST(IdleNotification) {
const intptr_t MB = 1024 * 1024;
@@ -16552,6 +16635,7 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
bool found_resource_[4];
};
+
TEST(VisitExternalStrings) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -16611,6 +16695,7 @@ static double DoubleToDateTime(double input) {
return (input < 0) ? -(floor(-input)) : floor(input);
}
+
// We don't have a consistent way to write 64-bit constants syntactically, so we
// split them into two 32-bit constants and combine them programmatically.
static double DoubleFromBits(uint32_t high_bits, uint32_t low_bits) {
@@ -16850,6 +16935,7 @@ THREADED_TEST(ScriptOrigin) {
CHECK_EQ(0, script_origin_g.ResourceLineOffset()->Int32Value());
}
+
THREADED_TEST(FunctionGetInferredName) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -16862,6 +16948,7 @@ THREADED_TEST(FunctionGetInferredName) {
CHECK_EQ("foo.bar.baz", *v8::String::Utf8Value(f->GetInferredName()));
}
+
THREADED_TEST(ScriptLineNumber) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -17092,18 +17179,22 @@ void PrologueCallback(v8::GCType, v8::GCCallbackFlags) {
++prologue_call_count;
}
+
void EpilogueCallback(v8::GCType, v8::GCCallbackFlags) {
++epilogue_call_count;
}
+
void PrologueCallbackSecond(v8::GCType, v8::GCCallbackFlags) {
++prologue_call_count_second;
}
+
void EpilogueCallbackSecond(v8::GCType, v8::GCCallbackFlags) {
++epilogue_call_count_second;
}
+
TEST(GCCallbacks) {
LocalContext context;
@@ -17494,6 +17585,7 @@ TEST(GCInFailedAccessCheckCallback) {
v8::V8::SetFailedAccessCheckCallbackFunction(NULL);
}
+
TEST(DefaultIsolateGetCurrent) {
CHECK(v8::Isolate::GetCurrent() != NULL);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
@@ -17501,6 +17593,7 @@ TEST(DefaultIsolateGetCurrent) {
printf("*** %s\n", "DefaultIsolateGetCurrent success");
}
+
TEST(IsolateNewDispose) {
v8::Isolate* current_isolate = v8::Isolate::GetCurrent();
v8::Isolate* isolate = v8::Isolate::New();
@@ -17516,6 +17609,7 @@ TEST(IsolateNewDispose) {
CHECK_EQ(last_message, NULL);
}
+
TEST(IsolateEnterExitDefault) {
v8::Isolate* current_isolate = v8::Isolate::GetCurrent();
CHECK(current_isolate != NULL); // Default isolate.
@@ -17534,6 +17628,7 @@ TEST(IsolateEnterExitDefault) {
ExpectString("'still working 3'", "still working 3");
}
+
TEST(DisposeDefaultIsolate) {
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
@@ -17551,6 +17646,7 @@ TEST(DisposeDefaultIsolate) {
CHECK_NE(last_message, NULL);
}
+
TEST(RunDefaultAndAnotherIsolate) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
LocalContext context;
@@ -17593,6 +17689,7 @@ TEST(RunDefaultAndAnotherIsolate) {
ExpectTrue("function f() { return bar == 371; }; f()");
}
+
TEST(DisposeIsolateWhenInUse) {
v8::Isolate* isolate = v8::Isolate::New();
CHECK(isolate);
@@ -17609,6 +17706,7 @@ TEST(DisposeIsolateWhenInUse) {
CHECK_NE(last_message, NULL);
}
+
TEST(RunTwoIsolatesOnSingleThread) {
// Run isolate 1.
v8::Isolate* isolate1 = v8::Isolate::New();
@@ -17733,6 +17831,7 @@ TEST(RunTwoIsolatesOnSingleThread) {
}
}
+
static int CalcFibonacci(v8::Isolate* isolate, int limit) {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope scope(isolate);
@@ -17768,6 +17867,7 @@ class IsolateThread : public v8::internal::Thread {
int result_;
};
+
TEST(MultipleIsolatesOnIndividualThreads) {
v8::Isolate* isolate1 = v8::Isolate::New();
v8::Isolate* isolate2 = v8::Isolate::New();
@@ -17796,6 +17896,7 @@ TEST(MultipleIsolatesOnIndividualThreads) {
isolate2->Dispose();
}
+
TEST(IsolateDifferentContexts) {
v8::Isolate* isolate = v8::Isolate::New();
Local<v8::Context> context;
@@ -17884,26 +17985,32 @@ static void InitializeTestHelper(InitDefaultIsolateThread::TestCase testCase) {
CHECK_EQ(thread.result(), true);
}
+
TEST(InitializeDefaultIsolateOnSecondaryThread1) {
InitializeTestHelper(InitDefaultIsolateThread::IgnoreOOM);
}
+
TEST(InitializeDefaultIsolateOnSecondaryThread2) {
InitializeTestHelper(InitDefaultIsolateThread::SetResourceConstraints);
}
+
TEST(InitializeDefaultIsolateOnSecondaryThread3) {
InitializeTestHelper(InitDefaultIsolateThread::SetFatalHandler);
}
+
TEST(InitializeDefaultIsolateOnSecondaryThread4) {
InitializeTestHelper(InitDefaultIsolateThread::SetCounterFunction);
}
+
TEST(InitializeDefaultIsolateOnSecondaryThread5) {
InitializeTestHelper(InitDefaultIsolateThread::SetCreateHistogramFunction);
}
+
TEST(InitializeDefaultIsolateOnSecondaryThread6) {
InitializeTestHelper(InitDefaultIsolateThread::SetAddHistogramSampleFunction);
}
@@ -19546,6 +19653,17 @@ THREADED_TEST(Regress2746) {
}
+THREADED_TEST(Regress260106) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(DummyCallHandler);
+ CompileRun("for (var i = 0; i < 128; i++) Object.prototype[i] = 0;");
+ Local<Function> function = templ->GetFunction();
+ CHECK(!function.IsEmpty());
+ CHECK(function->IsFunction());
+}
+
+
#ifndef WIN32
class ThreadInterruptTest {
public:
@@ -19608,4 +19726,180 @@ THREADED_TEST(SemaphoreInterruption) {
ThreadInterruptTest().RunTest();
}
+
+static bool NamedAccessAlwaysBlocked(Local<v8::Object> global,
+ Local<Value> name,
+ v8::AccessType type,
+ Local<Value> data) {
+ i::PrintF("Named access blocked.\n");
+ return false;
+}
+
+
+static bool IndexAccessAlwaysBlocked(Local<v8::Object> global,
+ uint32_t key,
+ v8::AccessType type,
+ Local<Value> data) {
+ i::PrintF("Indexed access blocked.\n");
+ return false;
+}
+
+
+void UnreachableCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(false);
+}
+
+
+TEST(JSONStringifyAccessCheck) {
+ v8::V8::Initialize();
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+
+ // Create an ObjectTemplate for global objects and install access
+ // check callbacks that will block access.
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->SetAccessCheckCallbacks(NamedAccessAlwaysBlocked,
+ IndexAccessAlwaysBlocked);
+
+  // Create a context and set an x property on its global object.
+ LocalContext context0(NULL, global_template);
+ v8::Handle<v8::Object> global0 = context0->Global();
+ global0->Set(v8_str("x"), v8_num(42));
+ ExpectString("JSON.stringify(this)", "{\"x\":42}");
+
+ for (int i = 0; i < 2; i++) {
+ if (i == 1) {
+ // Install a toJSON function on the second run.
+ v8::Handle<v8::FunctionTemplate> toJSON =
+ v8::FunctionTemplate::New(UnreachableCallback);
+
+ global0->Set(v8_str("toJSON"), toJSON->GetFunction());
+ }
+ // Create a context with a different security token so that the
+ // failed access check callback will be called on each access.
+ LocalContext context1(NULL, global_template);
+ context1->Global()->Set(v8_str("other"), global0);
+
+ ExpectString("JSON.stringify(other)", "{}");
+ ExpectString("JSON.stringify({ 'a' : other, 'b' : ['c'] })",
+ "{\"a\":{},\"b\":[\"c\"]}");
+ ExpectString("JSON.stringify([other, 'b', 'c'])",
+ "[{},\"b\",\"c\"]");
+
+ v8::Handle<v8::Array> array = v8::Array::New(2);
+ array->Set(0, v8_str("a"));
+ array->Set(1, v8_str("b"));
+ context1->Global()->Set(v8_str("array"), array);
+ ExpectString("JSON.stringify(array)", "[\"a\",\"b\"]");
+ array->TurnOnAccessCheck();
+ ExpectString("JSON.stringify(array)", "[]");
+ ExpectString("JSON.stringify([array])", "[[]]");
+ ExpectString("JSON.stringify({'a' : array})", "{\"a\":[]}");
+ }
+}
+
+
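
JSON.stringify consults the installed access-check callbacks for every cross-context object it visits, so with both callbacks returning false a populated object serializes as {} and an access-checked array as []. The expected outputs asserted in the test above, collected as data:

    #include <cstdio>

    // Expression evaluated in the test    -> expected JSON output
    static const char* kStringifyCases[][2] = {
      {"JSON.stringify(other)",               "{}"},
      {"JSON.stringify([other, 'b', 'c'])",   "[{},\"b\",\"c\"]"},
      {"JSON.stringify(array)  /* after TurnOnAccessCheck */", "[]"},
    };

    int main() {
      for (auto& c : kStringifyCases) std::printf("%s -> %s\n", c[0], c[1]);
      return 0;
    }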
+bool access_check_fail_thrown = false;
+bool catch_callback_called = false;
+
+
+// Failed access check callback that throws an error on each invocation.
+void FailedAccessCheckThrows(Local<v8::Object> target,
+ v8::AccessType type,
+ Local<v8::Value> data) {
+ access_check_fail_thrown = true;
+ i::PrintF("Access check failed. Error thrown.\n");
+ v8::ThrowException(v8::Exception::Error(v8_str("cross context")));
+}
+
+
+void CatcherCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ for (int i = 0; i < args.Length(); i++) {
+ i::PrintF("%s\n", *String::Utf8Value(args[i]));
+ }
+ catch_callback_called = true;
+}
+
+
+void HasOwnPropertyCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args[0]->ToObject()->HasOwnProperty(args[1]->ToString());
+}
+
+
+void CheckCorrectThrow(const char* script) {
+ // Test that the script, when wrapped into a try-catch, triggers the catch
+ // clause due to failed access check throwing an exception.
+ // The subsequent try-catch should run without any exception.
+ access_check_fail_thrown = false;
+ catch_callback_called = false;
+ i::ScopedVector<char> source(1024);
+ i::OS::SNPrintF(source, "try { %s; } catch (e) { catcher(e); }", script);
+ CompileRun(source.start());
+ CHECK(access_check_fail_thrown);
+ CHECK(catch_callback_called);
+
+ access_check_fail_thrown = false;
+ catch_callback_called = false;
+ CompileRun("try { [1, 2, 3].sort(); } catch (e) { catcher(e) };");
+ CHECK(!access_check_fail_thrown);
+ CHECK(!catch_callback_called);
+}
+
+
+TEST(AccessCheckThrows) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::V8::Initialize();
+ v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckThrows);
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+
+ // Create an ObjectTemplate for global objects and install access
+ // check callbacks that will block access.
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->SetAccessCheckCallbacks(NamedAccessAlwaysBlocked,
+ IndexAccessAlwaysBlocked);
+
+  // Create a context and set an x property on its global object.
+ LocalContext context0(NULL, global_template);
+ context0->Global()->Set(v8_str("x"), v8_num(42));
+ v8::Handle<v8::Object> global0 = context0->Global();
+
+ // Create a context with a different security token so that the
+ // failed access check callback will be called on each access.
+ LocalContext context1(NULL, global_template);
+ context1->Global()->Set(v8_str("other"), global0);
+
+ v8::Handle<v8::FunctionTemplate> catcher_fun =
+ v8::FunctionTemplate::New(CatcherCallback);
+ context1->Global()->Set(v8_str("catcher"), catcher_fun->GetFunction());
+
+ v8::Handle<v8::FunctionTemplate> has_own_property_fun =
+ v8::FunctionTemplate::New(HasOwnPropertyCallback);
+ context1->Global()->Set(v8_str("has_own_property"),
+ has_own_property_fun->GetFunction());
+
+ { v8::TryCatch try_catch;
+ access_check_fail_thrown = false;
+ CompileRun("other.x;");
+ CHECK(access_check_fail_thrown);
+ CHECK(try_catch.HasCaught());
+ }
+
+ CheckCorrectThrow("other.x");
+ CheckCorrectThrow("other[1]");
+ CheckCorrectThrow("JSON.stringify(other)");
+ CheckCorrectThrow("has_own_property(other, 'x')");
+ CheckCorrectThrow("%GetProperty(other, 'x')");
+ CheckCorrectThrow("%SetProperty(other, 'x', 'foo', 1, 0)");
+ CheckCorrectThrow("%IgnoreAttributesAndSetProperty(other, 'x', 'foo')");
+ CheckCorrectThrow("%DeleteProperty(other, 'x', 0)");
+ CheckCorrectThrow("%DeleteProperty(other, '1', 0)");
+ CheckCorrectThrow("%HasLocalProperty(other, 'x')");
+ CheckCorrectThrow("%HasProperty(other, 'x')");
+ CheckCorrectThrow("%HasElement(other, 1)");
+ CheckCorrectThrow("%IsPropertyEnumerable(other, 'x')");
+ CheckCorrectThrow("%GetPropertyNames(other)");
+ CheckCorrectThrow("%GetLocalPropertyNames(other, true)");
+ CheckCorrectThrow("%DefineOrRedefineAccessorProperty("
+ "other, 'x', null, null, 1)");
+}
+
#endif // WIN32
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 232f846be0..c79e74019e 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -619,6 +619,7 @@ TEST(7) {
TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
}
+
TEST(8) {
// Test VFP multi load/store with ia_w.
CcTest::InitializeVM();
@@ -1226,4 +1227,186 @@ TEST(14) {
CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.div_result) & 0xffffffffu);
}
+
+TEST(15) {
+ // Test the Neon instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint32_t src0;
+ uint32_t src1;
+ uint32_t src2;
+ uint32_t src3;
+ uint32_t src4;
+ uint32_t src5;
+ uint32_t src6;
+ uint32_t src7;
+ uint32_t dst0;
+ uint32_t dst1;
+ uint32_t dst2;
+ uint32_t dst3;
+ uint32_t dst4;
+ uint32_t dst5;
+ uint32_t dst6;
+ uint32_t dst7;
+ uint32_t srcA0;
+ uint32_t srcA1;
+ uint32_t dstA0;
+ uint32_t dstA1;
+ uint32_t dstA2;
+ uint32_t dstA3;
+ } T;
+ T t;
+
+  // Create a function that accepts &t, and loads, manipulates, and stores
+  // the test data (32-bit words) with NEON instructions.
+ Assembler assm(isolate, NULL, 0);
+
+
+ if (CpuFeatures::IsSupported(NEON)) {
+ CpuFeatureScope scope(&assm, NEON);
+
+ __ stm(db_w, sp, r4.bit() | lr.bit());
+    // Move 32 bytes with NEON.
+ __ add(r4, r0, Operand(OFFSET_OF(T, src0)));
+ __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
+ __ add(r4, r0, Operand(OFFSET_OF(T, dst0)));
+ __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
+
+    // Expand 8 bytes into 8 halfwords (16 bits each).
+ __ add(r4, r0, Operand(OFFSET_OF(T, srcA0)));
+ __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(r4));
+ __ vmovl(NeonU8, q0, d0);
+ __ add(r4, r0, Operand(OFFSET_OF(T, dstA0)));
+ __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(r4));
+
+ __ ldm(ia_w, sp, r4.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.src0 = 0x01020304;
+ t.src1 = 0x11121314;
+ t.src2 = 0x21222324;
+ t.src3 = 0x31323334;
+ t.src4 = 0x41424344;
+ t.src5 = 0x51525354;
+ t.src6 = 0x61626364;
+ t.src7 = 0x71727374;
+ t.dst0 = 0;
+ t.dst1 = 0;
+ t.dst2 = 0;
+ t.dst3 = 0;
+ t.dst4 = 0;
+ t.dst5 = 0;
+ t.dst6 = 0;
+ t.dst7 = 0;
+ t.srcA0 = 0x41424344;
+ t.srcA1 = 0x81828384;
+ t.dstA0 = 0;
+ t.dstA1 = 0;
+ t.dstA2 = 0;
+ t.dstA3 = 0;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(0x01020304, t.dst0);
+ CHECK_EQ(0x11121314, t.dst1);
+ CHECK_EQ(0x21222324, t.dst2);
+ CHECK_EQ(0x31323334, t.dst3);
+ CHECK_EQ(0x41424344, t.dst4);
+ CHECK_EQ(0x51525354, t.dst5);
+ CHECK_EQ(0x61626364, t.dst6);
+ CHECK_EQ(0x71727374, t.dst7);
+ CHECK_EQ(0x00430044, t.dstA0);
+ CHECK_EQ(0x00410042, t.dstA1);
+ CHECK_EQ(0x00830084, t.dstA2);
+ CHECK_EQ(0x00810082, t.dstA3);
+ }
+}
+
+
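The dstA0..dstA3 expectations in TEST(15) above fall out of vmovl(NeonU8, q0, d0): each source byte is zero-extended to a 16-bit lane, and on a little-endian store the first two lanes of 0x41424344 (bytes 0x44, 0x43) pack back into the word 0x00430044. Checked in miniature:

    #include <cassert>
    #include <stdint.h>

    int main() {
      uint32_t srcA0 = 0x41424344;  // little-endian bytes: 44 43 42 41
      uint8_t b0 = srcA0 & 0xff, b1 = (srcA0 >> 8) & 0xff;
      // vmovl NeonU8: zero-extend each byte to 16 bits, keep lane order.
      uint32_t dstA0 = static_cast<uint32_t>(b0) |
                       (static_cast<uint32_t>(b1) << 16);
      assert(dstA0 == 0x00430044);  // matches CHECK_EQ(0x00430044, t.dstA0)
      return 0;
    }
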
+TEST(16) {
+ // Test the pkh, uxtb, uxtab and uxtb16 instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint32_t src0;
+ uint32_t src1;
+ uint32_t src2;
+ uint32_t dst0;
+ uint32_t dst1;
+ uint32_t dst2;
+ uint32_t dst3;
+ uint32_t dst4;
+ } T;
+ T t;
+
+  // Create a function that accepts &t, and loads, manipulates, and stores
+  // the test words.
+ Assembler assm(isolate, NULL, 0);
+
+ __ stm(db_w, sp, r4.bit() | lr.bit());
+
+ __ mov(r4, Operand(r0));
+ __ ldr(r0, MemOperand(r4, OFFSET_OF(T, src0)));
+ __ ldr(r1, MemOperand(r4, OFFSET_OF(T, src1)));
+
+ __ pkhbt(r2, r0, Operand(r1, LSL, 8));
+ __ str(r2, MemOperand(r4, OFFSET_OF(T, dst0)));
+
+ __ pkhtb(r2, r0, Operand(r1, ASR, 8));
+ __ str(r2, MemOperand(r4, OFFSET_OF(T, dst1)));
+
+ __ uxtb16(r2, Operand(r0, ROR, 8));
+ __ str(r2, MemOperand(r4, OFFSET_OF(T, dst2)));
+
+ __ uxtb(r2, Operand(r0, ROR, 8));
+ __ str(r2, MemOperand(r4, OFFSET_OF(T, dst3)));
+
+ __ ldr(r0, MemOperand(r4, OFFSET_OF(T, src2)));
+ __ uxtab(r2, r0, Operand(r1, ROR, 8));
+ __ str(r2, MemOperand(r4, OFFSET_OF(T, dst4)));
+
+ __ ldm(ia_w, sp, r4.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.src0 = 0x01020304;
+ t.src1 = 0x11121314;
+ t.src2 = 0x11121300;
+ t.dst0 = 0;
+ t.dst1 = 0;
+ t.dst2 = 0;
+ t.dst3 = 0;
+ t.dst4 = 0;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(0x12130304, t.dst0);
+ CHECK_EQ(0x01021213, t.dst1);
+ CHECK_EQ(0x00010003, t.dst2);
+ CHECK_EQ(0x00000003, t.dst3);
+ CHECK_EQ(0x11121313, t.dst4);
+}
+
#undef __
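
The vmovl(NeonU8, q0, d0) step above zero-extends each of the eight source
bytes into a 16-bit lane, which is where the dstA0..dstA3 expectations come
from. A minimal plain-C++ sketch of that widening (hypothetical helper, not
part of the test harness):

#include <stdint.h>
#include <string.h>

// Zero-extend 8 bytes (two 32-bit words) into 8 halfwords (four words),
// in memory order, mirroring vmovl(NeonU8) on a little-endian target.
static void WidenU8ToU16(const uint32_t src[2], uint32_t dst[4]) {
  uint8_t bytes[8];
  memcpy(bytes, src, sizeof(bytes));
  uint16_t lanes[8];
  for (int i = 0; i < 8; ++i) lanes[i] = bytes[i];
  memcpy(dst, lanes, sizeof(lanes));
}

// For src = {0x41424344, 0x81828384} this yields
// dst = {0x00430044, 0x00410042, 0x00830084, 0x00810082},
// exactly the values TEST(15) checks.
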
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 880370f0f8..76eecc02e7 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -473,6 +473,95 @@ TEST(AssemblerMultiByteNop) {
}
+#ifdef __GNUC__
+#define ELEMENT_COUNT 4
+
+void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+
+ CHECK(args[0]->IsArray());
+ v8::Local<v8::Array> vec = v8::Local<v8::Array>::Cast(args[0]);
+ CHECK_EQ(ELEMENT_COUNT, vec->Length());
+
+ v8::internal::byte buffer[256];
+ Assembler assm(isolate, buffer, sizeof buffer);
+
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatureScope fscope(&assm, SSE2);
+
+ // Remove the return address from the stack to fix stack frame alignment.
+ __ pop(ecx);
+
+ // Store input vector on the stack.
+ for (int i = 0; i < ELEMENT_COUNT; ++i) {
+ __ push(Immediate(vec->Get(i)->Int32Value()));
+ }
+
+ // Read the vector into an xmm register.
+ __ pxor(xmm0, xmm0);
+ __ movdqa(xmm0, Operand(esp, 0));
+ // Create mask and store it in the return register.
+ __ movmskps(eax, xmm0);
+
+ // Remove unused data from the stack.
+ __ add(esp, Immediate(ELEMENT_COUNT * sizeof(int32_t)));
+ // Restore return address.
+ __ push(ecx);
+
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+
+ F0 f = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ int res = f();
+ args.GetReturnValue().Set(v8::Integer::New(res));
+}
+
+
+TEST(StackAlignmentForSSE2) {
+ CcTest::InitializeVM();
+ if (!CpuFeatures::IsSupported(SSE2)) return;
+
+ CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
+
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->Set(v8_str("do_sse2"), v8::FunctionTemplate::New(DoSSE2));
+
+ LocalContext env(NULL, global_template);
+ CompileRun(
+ "function foo(vec) {"
+ " return do_sse2(vec);"
+ "}");
+
+ v8::Local<v8::Object> global_object = env->Global();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
+
+ int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
+ v8::Local<v8::Array> v8_vec = v8::Array::New(ELEMENT_COUNT);
+ for (int i = 0; i < ELEMENT_COUNT; i++) {
+ v8_vec->Set(i, v8_num(vec[i]));
+ }
+
+ v8::Local<v8::Value> args[] = { v8_vec };
+ v8::Local<v8::Value> result = foo->Call(global_object, 1, args);
+
+ // The mask should be 0b1000.
+ CHECK_EQ(8, result->Int32Value());
+}
+
+#undef ELEMENT_COUNT
+#endif // __GNUC__
#undef __
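
movmskps gathers the sign bit of each of the four 32-bit lanes into the low
four bits of its destination. A reference computation in plain C++ (sketch;
the helper name is hypothetical):

#include <stdint.h>

// Bit i of the result is the sign bit of lane i, as movmskps computes it.
static int MovmskpsReference(const int32_t lanes[4]) {
  int mask = 0;
  for (int i = 0; i < 4; ++i) {
    if (lanes[i] < 0) mask |= 1 << i;
  }
  return mask;
}

// The pushes in DoSSE2 reverse the element order, so the lanes are
// { vec[3], vec[2], vec[1], vec[0] } = { 1, 1, 1, -1 }, which gives
// 0b1000 == 8, the value TEST(StackAlignmentForSSE2) expects.
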
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 669475ad8a..d5aaf4f212 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -62,6 +62,7 @@ using v8::internal::rdx;
using v8::internal::rsi;
using v8::internal::rsp;
using v8::internal::times_1;
+using v8::internal::xmm0;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
@@ -110,6 +111,7 @@ TEST(AssemblerX64ReturnOperation) {
CHECK_EQ(2, result);
}
+
TEST(AssemblerX64StackOperations) {
OS::SetUp();
// Allocate an executable page of memory.
@@ -142,6 +144,7 @@ TEST(AssemblerX64StackOperations) {
CHECK_EQ(2, result);
}
+
TEST(AssemblerX64ArithmeticOperations) {
OS::SetUp();
// Allocate an executable page of memory.
@@ -164,6 +167,7 @@ TEST(AssemblerX64ArithmeticOperations) {
CHECK_EQ(5, result);
}
+
TEST(AssemblerX64ImulOperation) {
OS::SetUp();
// Allocate an executable page of memory.
@@ -192,6 +196,7 @@ TEST(AssemblerX64ImulOperation) {
CHECK_EQ(-1, result);
}
+
TEST(AssemblerX64MemoryOperands) {
OS::SetUp();
// Allocate an executable page of memory.
@@ -226,6 +231,7 @@ TEST(AssemblerX64MemoryOperands) {
CHECK_EQ(3, result);
}
+
TEST(AssemblerX64ControlFlow) {
OS::SetUp();
// Allocate an executable page of memory.
@@ -255,6 +261,7 @@ TEST(AssemblerX64ControlFlow) {
CHECK_EQ(3, result);
}
+
TEST(AssemblerX64LoopImmediates) {
OS::SetUp();
// Allocate an executable page of memory.
@@ -429,6 +436,92 @@ TEST(AssemblerMultiByteNop) {
}
+#ifdef __GNUC__
+#define ELEMENT_COUNT 4
+
+void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::internal::byte buffer[1024];
+
+ CHECK(args[0]->IsArray());
+ v8::Local<v8::Array> vec = v8::Local<v8::Array>::Cast(args[0]);
+ CHECK_EQ(ELEMENT_COUNT, vec->Length());
+
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof(buffer));
+
+ // Remove the return address from the stack to fix stack frame alignment.
+ __ pop(rcx);
+
+ // Store input vector on the stack.
+ for (int i = 0; i < ELEMENT_COUNT; i++) {
+ __ movl(rax, Immediate(vec->Get(i)->Int32Value()));
+ __ shl(rax, Immediate(0x20));
+ __ or_(rax, Immediate(vec->Get(++i)->Int32Value()));
+ __ push(rax);
+ }
+
+ // Read the vector into an xmm register.
+ __ xorps(xmm0, xmm0);
+ __ movdqa(xmm0, Operand(rsp, 0));
+ // Create mask and store it in the return register.
+ __ movmskps(rax, xmm0);
+
+ // Remove unused data from the stack.
+ __ addq(rsp, Immediate(ELEMENT_COUNT * sizeof(int32_t)));
+ // Restore return address.
+ __ push(rcx);
+
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Code* code = Code::cast(isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ v8::internal::Handle<Code>())->ToObjectChecked());
+ CHECK(code->IsCode());
+
+ F0 f = FUNCTION_CAST<F0>(code->entry());
+ int res = f();
+ args.GetReturnValue().Set(v8::Integer::New(res));
+}
+
+
+TEST(StackAlignmentForSSE2) {
+ CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
+
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->Set(v8_str("do_sse2"), v8::FunctionTemplate::New(DoSSE2));
+
+ LocalContext env(NULL, global_template);
+ CompileRun(
+ "function foo(vec) {"
+ " return do_sse2(vec);"
+ "}");
+
+ v8::Local<v8::Object> global_object = env->Global();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
+
+ int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
+ v8::Local<v8::Array> v8_vec = v8::Array::New(ELEMENT_COUNT);
+ for (int i = 0; i < ELEMENT_COUNT; i++) {
+ v8_vec->Set(i, v8_num(vec[i]));
+ }
+
+ v8::Local<v8::Value> args[] = { v8_vec };
+ v8::Local<v8::Value> result = foo->Call(global_object, 1, args);
+
+ // The mask should be 0b1000.
+ CHECK_EQ(8, result->Int32Value());
+}
+
+#undef ELEMENT_COUNT
+#endif // __GNUC__
#undef __
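
The x64 variant pushes the input two elements at a time, packing each pair
into one quadword with shl/or_. The packing arithmetic in plain C++ (sketch;
the helper name is hypothetical):

#include <stdint.h>

// Mirrors: movl rax, first; shl rax, Immediate(0x20); or_ rax, second;
// push rax.
static uint64_t PackPair(uint32_t first, uint32_t second) {
  return (static_cast<uint64_t>(first) << 32) | second;
}

// After the push, `second` occupies the low dword at [rsp] and `first` the
// high dword, so movdqa sees the same lane order as in the ia32 test and
// the expected mask is again 0b1000.
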
diff --git a/deps/v8/test/cctest/test-circular-queue.cc b/deps/v8/test/cctest/test-circular-queue.cc
index 12b593f59f..4d7856e276 100644
--- a/deps/v8/test/cctest/test-circular-queue.cc
+++ b/deps/v8/test/cctest/test-circular-queue.cc
@@ -42,8 +42,6 @@ TEST(SamplingCircularQueue) {
3);
// Check that we are using non-reserved values.
- CHECK_NE(SamplingCircularQueue::kClear, 1);
- CHECK_NE(SamplingCircularQueue::kEnd, 1);
// Fill up the first chunk.
CHECK_EQ(NULL, scq.StartDequeue());
for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
@@ -153,8 +151,6 @@ TEST(SamplingCircularQueueMultithreading) {
scq.FlushResidualRecords();
// Check that we are using non-reserved values.
- CHECK_NE(SamplingCircularQueue::kClear, 1);
- CHECK_NE(SamplingCircularQueue::kEnd, 1);
ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore);
ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore);
ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore);
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
new file mode 100644
index 0000000000..6f8de60471
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -0,0 +1,181 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include <limits>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+using namespace v8::internal;
+
+#define __ assm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
+ assm.set_allow_stub_calls(false);
+ int offset =
+ source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
+ DoubleToIStub stub(source_reg, destination_reg, offset, true);
+ byte* start = stub.GetCode(isolate)->instruction_start();
+
+ __ push(ebx);
+ __ push(ecx);
+ __ push(edx);
+ __ push(esi);
+ __ push(edi);
+
+ if (!source_reg.is(esp)) {
+ __ lea(source_reg, MemOperand(esp, 6 * kPointerSize - offset));
+ }
+
+ int param_offset = 7 * kPointerSize;
+ // Save registers to make sure they don't get clobbered.
+ int reg_num = 0;
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
+ __ push(reg);
+ param_offset += kPointerSize;
+ }
+ }
+
+ // Re-push the double argument
+ __ push(MemOperand(esp, param_offset));
+ __ push(MemOperand(esp, param_offset));
+
+ // Call through to the actual stub
+ __ call(start, RelocInfo::EXTERNAL_REFERENCE);
+
+ __ add(esp, Immediate(kDoubleSize));
+
+ // Make sure no registers have been unexpectedly clobbered
+ for (--reg_num; reg_num >= 0; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
+ __ cmp(reg, MemOperand(esp, 0));
+ __ Assert(equal, "register was clobbered");
+ __ add(esp, Immediate(kPointerSize));
+ }
+ }
+
+ __ mov(eax, destination_reg);
+
+ __ pop(edi);
+ __ pop(esi);
+ __ pop(edx);
+ __ pop(ecx);
+ __ pop(ebx);
+
+ __ ret(kDoubleSize);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ return reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In release
+ // builds the compiler optimizes the computation away because it is all
+ // constant, but does so incorrectly, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, eax));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, ebx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, ecx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, edx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, edi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, esi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, eax));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, ebx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, ecx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, edx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, edi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, esi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, eax));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, ebx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, ecx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, edx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, edi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, esi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, eax));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, ebx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, ecx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, edx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, edi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, esi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, eax));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, ebx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, ecx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, edx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, edi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, esi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, eax));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, ebx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, ecx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, edx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, edi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, esi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, eax));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, ebx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, ecx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, edx));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, edi));
+ RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, esi));
+}
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
new file mode 100644
index 0000000000..e30c160e7a
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -0,0 +1,149 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+using namespace v8::internal;
+
+
+#define __ assm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
+ assm.set_allow_stub_calls(false);
+ int offset =
+ source_reg.is(rsp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
+ DoubleToIStub stub(source_reg, destination_reg, offset, true);
+ byte* start = stub.GetCode(isolate)->instruction_start();
+
+ __ push(rbx);
+ __ push(rcx);
+ __ push(rdx);
+ __ push(rsi);
+ __ push(rdi);
+
+ if (!source_reg.is(rsp)) {
+ __ lea(source_reg, MemOperand(rsp, -8 * kPointerSize - offset));
+ }
+
+ int param_offset = 7 * kPointerSize;
+ // Save registers to make sure they don't get clobbered.
+ int reg_num = 0;
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
+ __ push(reg);
+ param_offset += kPointerSize;
+ }
+ }
+
+ // Re-push the double argument
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(MemOperand(rsp, 0), xmm0);
+
+ // Call through to the actual stub
+ __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+
+ __ addq(rsp, Immediate(kDoubleSize));
+
+ // Make sure no registers have been unexpectedly clobbered
+ for (--reg_num; reg_num >= 0; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
+ __ cmpq(reg, MemOperand(rsp, 0));
+ __ Assert(equal, "register was clobbered");
+ __ addq(rsp, Immediate(kPointerSize));
+ }
+ }
+
+ __ movq(rax, destination_reg);
+
+ __ pop(rdi);
+ __ pop(rsi);
+ __ pop(rdx);
+ __ pop(rcx);
+ __ pop(rbx);
+
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ return reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In release
+ // builds the compiler optimizes the computation away because it is all
+ // constant, but does so incorrectly, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {rsp, rax, rbx, rcx, rdx, rsi, rdi, r8, r9};
+ Register dest_registers[] = {rax, rbx, rcx, rdx, rsi, rdi, r8, r9};
+
+ for (size_t s = 0; s < ARRAY_SIZE(source_registers); s++) {
+ for (size_t d = 0; d < ARRAY_SIZE(dest_registers); d++) {
+ RunAllTruncationTests(
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d]));
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
new file mode 100644
index 0000000000..405069626b
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -0,0 +1,130 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include <limits>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+using namespace v8::internal;
+
+
+int STDCALL ConvertDToICVersion(double d) {
+ Address double_ptr = reinterpret_cast<Address>(&d);
+ uint32_t exponent_bits = Memory::uint32_at(double_ptr + kDoubleSize / 2);
+ int32_t shifted_mask = static_cast<int32_t>(Double::kExponentMask >> 32);
+ int32_t exponent = (((exponent_bits & shifted_mask) >>
+ (Double::kPhysicalSignificandSize - 32)) -
+ HeapNumber::kExponentBias);
+ uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
+ int result = 0;
+ uint32_t max_exponent =
+ static_cast<uint32_t>(Double::kPhysicalSignificandSize);
+ if (unsigned_exponent >= max_exponent) {
+ if ((exponent - Double::kPhysicalSignificandSize) < 32) {
+ result = Memory::uint32_at(double_ptr) <<
+ (exponent - Double::kPhysicalSignificandSize);
+ }
+ } else {
+ uint64_t big_result =
+ (BitCast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
+ big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
+ result = static_cast<uint32_t>(big_result);
+ }
+ if (static_cast<int32_t>(exponent_bits) < 0) {
+ return (0 - result);
+ } else {
+ return result;
+ }
+}
+
+
+void RunOneTruncationTestWithTest(ConvertDToIFunc func,
+ double from,
+ double raw) {
+ uint64_t to = static_cast<int64_t>(raw);
+ int result = (*func)(from);
+ CHECK_EQ(static_cast<int>(to), result);
+}
+
+
+// #define NaN and Infinity so that it's possible to cut-and-paste these tests
+// directly to a .js file and run them.
+#define NaN (OS::nan_value())
+#define Infinity (std::numeric_limits<double>::infinity())
+#define RunOneTruncationTest(p1, p2) RunOneTruncationTestWithTest(func, p1, p2)
+
+void RunAllTruncationTests(ConvertDToIFunc func) {
+ RunOneTruncationTest(0, 0);
+ RunOneTruncationTest(0.5, 0);
+ RunOneTruncationTest(-0.5, 0);
+ RunOneTruncationTest(1.5, 1);
+ RunOneTruncationTest(-1.5, -1);
+ RunOneTruncationTest(5.5, 5);
+ RunOneTruncationTest(-5.0, -5);
+ RunOneTruncationTest(NaN, 0);
+ RunOneTruncationTest(Infinity, 0);
+ RunOneTruncationTest(-NaN, 0);
+ RunOneTruncationTest(-Infinity, 0);
+
+ RunOneTruncationTest(4.5036e+15, 0x1635E000);
+ RunOneTruncationTest(-4.5036e+15, -372629504);
+
+ RunOneTruncationTest(4503603922337791.0, -1);
+ RunOneTruncationTest(-4503603922337791.0, 1);
+ RunOneTruncationTest(4503601774854143.0, 2147483647);
+ RunOneTruncationTest(-4503601774854143.0, -2147483647);
+ RunOneTruncationTest(9007207844675582.0, -2);
+ RunOneTruncationTest(-9007207844675582.0, 2);
+
+ RunOneTruncationTest(2.4178527921507624e+24, -536870912);
+ RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
+ RunOneTruncationTest(2.417853945072267e+24, -536870912);
+ RunOneTruncationTest(-2.417853945072267e+24, 536870912);
+
+ RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
+ RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
+ RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
+ RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+
+ RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
+ RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
+ RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
+ RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
+}
+
+#undef NaN
+#undef Infinity
+#undef RunOneTruncationTest
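
The truncation table above follows ECMAScript ToInt32 semantics: truncate
toward zero, reduce modulo 2^32, and reinterpret the result as a signed
32-bit value. A plain-C++ reference that reproduces the table (sketch; this
is not the stub's algorithm):

#include <stdint.h>

#include <cmath>

// Truncate toward zero, fold into [0, 2^32), reinterpret as int32_t.
static int32_t ToInt32Reference(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  double m = std::fmod(std::trunc(d), 4294967296.0);  // 2^32
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

// Example: 4503603922337791.0 == 2^52 + (2^32 - 1), so the folded value is
// 0xFFFFFFFF and the result is -1, matching RunOneTruncationTest above.
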
diff --git a/deps/v8/test/cctest/test-code-stubs.h b/deps/v8/test/cctest/test-code-stubs.h
new file mode 100644
index 0000000000..eab8e63b2a
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stubs.h
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TEST_CODE_STUBS_H_
+#define V8_TEST_CODE_STUBS_H_
+
+#if V8_TARGET_ARCH_IA32
+#if __GNUC__
+#define STDCALL __attribute__((stdcall))
+#else
+#define STDCALL __stdcall
+#endif
+#else
+#define STDCALL
+#endif
+
+typedef int32_t STDCALL ConvertDToIFuncType(double input);
+typedef ConvertDToIFuncType* ConvertDToIFunc;
+
+int STDCALL ConvertDToICVersion(double d);
+
+void RunAllTruncationTests(ConvertDToIFunc func);
+
+#endif  // V8_TEST_CODE_STUBS_H_
diff --git a/deps/v8/test/cctest/test-compare-nil-ic-stub.cc b/deps/v8/test/cctest/test-compare-nil-ic-stub.cc
deleted file mode 100644
index 258ab012c7..0000000000
--- a/deps/v8/test/cctest/test-compare-nil-ic-stub.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2006-2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "v8.h"
-#include "cctest.h"
-#include "code-stubs.h"
-
-
-using namespace v8::internal;
-
-typedef CompareNilICStub::State State;
-
-TEST(StateConstructors) {
- State state;
- state.Add(CompareNilICStub::MONOMORPHIC_MAP);
- State state2(state);
- CHECK_EQ(state.ToIntegral(), state2.ToIntegral());
-}
-
-TEST(ExternalICStateParsing) {
- State state;
- state.Add(CompareNilICStub::UNDEFINED);
- CompareNilICStub stub(kUndefinedValue, state);
- CompareNilICStub stub2(stub.GetExtraICState());
- CHECK_EQ(stub.GetNilValue(), stub2.GetNilValue());
- CHECK_EQ(stub.GetState().ToIntegral(), stub2.GetState().ToIntegral());
-}
-
-TEST(SettingState) {
- State state;
- CHECK(state.IsEmpty());
- state.Add(CompareNilICStub::NULL_TYPE);
- CHECK(!state.IsEmpty());
- CHECK(state.Contains(CompareNilICStub::NULL_TYPE));
- CHECK(!state.Contains(CompareNilICStub::UNDEFINED));
- CHECK(!state.Contains(CompareNilICStub::UNDETECTABLE));
- state.Add(CompareNilICStub::UNDEFINED);
- CHECK(state.Contains(CompareNilICStub::UNDEFINED));
- CHECK(state.Contains(CompareNilICStub::NULL_TYPE));
- CHECK(!state.Contains(CompareNilICStub::UNDETECTABLE));
-}
-
-TEST(ClearState) {
- State state;
- state.Add(CompareNilICStub::NULL_TYPE);
- state.RemoveAll();
- CHECK(state.IsEmpty());
-}
-
-TEST(Generic) {
- State state;
- CHECK(State::Generic() != state);
- state.Add(CompareNilICStub::UNDEFINED);
- CHECK(state != State::Generic());
- state.Add(CompareNilICStub::NULL_TYPE);
- CHECK(state != State::Generic());
- state.Add(CompareNilICStub::UNDETECTABLE);
- CHECK(state != State::Generic());
- state.Add(CompareNilICStub::GENERIC);
- CHECK(state == State::Generic());
-}
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index cf2092e4d3..9e194eafff 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -37,12 +37,14 @@ using namespace v8::internal;
TEST(Hex) {
UnicodeCache uc;
- CHECK_EQ(0.0, StringToDouble(&uc, "0x0", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.0, StringToDouble(&uc, "0X0", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(1.0, StringToDouble(&uc, "0x1", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(16.0, StringToDouble(&uc, "0x10", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(255.0, StringToDouble(&uc, "0xff", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(175.0, StringToDouble(&uc, "0xAF", ALLOW_HEX | ALLOW_OCTALS));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0x0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0X0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(1.0, StringToDouble(&uc, "0x1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(16.0, StringToDouble(&uc, "0x10", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(255.0, StringToDouble(&uc, "0xff",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(175.0, StringToDouble(&uc, "0xAF",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
CHECK_EQ(0.0, StringToDouble(&uc, "0x0", ALLOW_HEX));
CHECK_EQ(0.0, StringToDouble(&uc, "0X0", ALLOW_HEX));
@@ -55,12 +57,32 @@ TEST(Hex) {
TEST(Octal) {
UnicodeCache uc;
- CHECK_EQ(0.0, StringToDouble(&uc, "0", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.0, StringToDouble(&uc, "00", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(1.0, StringToDouble(&uc, "01", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(7.0, StringToDouble(&uc, "07", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(8.0, StringToDouble(&uc, "010", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(63.0, StringToDouble(&uc, "077", ALLOW_HEX | ALLOW_OCTALS));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0o0", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0O0", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(1.0, StringToDouble(&uc, "0o1", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(7.0, StringToDouble(&uc, "0o7", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(8.0, StringToDouble(&uc, "0o10",
+ ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(63.0, StringToDouble(&uc, "0o77",
+ ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(0.0, StringToDouble(&uc, "0o0", ALLOW_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0O0", ALLOW_OCTAL));
+ CHECK_EQ(1.0, StringToDouble(&uc, "0o1", ALLOW_OCTAL));
+ CHECK_EQ(7.0, StringToDouble(&uc, "0o7", ALLOW_OCTAL));
+ CHECK_EQ(8.0, StringToDouble(&uc, "0o10", ALLOW_OCTAL));
+ CHECK_EQ(63.0, StringToDouble(&uc, "0o77", ALLOW_OCTAL));
+}
+
+
+TEST(ImplicitOctal) {
+ UnicodeCache uc;
+ CHECK_EQ(0.0, StringToDouble(&uc, "0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "00", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(1.0, StringToDouble(&uc, "01", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(7.0, StringToDouble(&uc, "07", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(8.0, StringToDouble(&uc, "010", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(63.0, StringToDouble(&uc, "077", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
CHECK_EQ(0.0, StringToDouble(&uc, "0", ALLOW_HEX));
CHECK_EQ(0.0, StringToDouble(&uc, "00", ALLOW_HEX));
@@ -71,26 +93,53 @@ TEST(Octal) {
const double x = 010000000000; // Power of 2, no rounding errors.
CHECK_EQ(x * x * x * x * x, StringToDouble(&uc, "01" "0000000000" "0000000000"
- "0000000000" "0000000000" "0000000000", ALLOW_OCTALS));
+ "0000000000" "0000000000" "0000000000", ALLOW_IMPLICIT_OCTAL));
}
-TEST(MalformedOctal) {
+TEST(Binary) {
UnicodeCache uc;
- CHECK_EQ(8.0, StringToDouble(&uc, "08", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(81.0, StringToDouble(&uc, "081", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(78.0, StringToDouble(&uc, "078", ALLOW_HEX | ALLOW_OCTALS));
-
- CHECK(std::isnan(StringToDouble(&uc, "07.7", ALLOW_HEX | ALLOW_OCTALS)));
- CHECK(std::isnan(StringToDouble(&uc, "07.8", ALLOW_HEX | ALLOW_OCTALS)));
- CHECK(std::isnan(StringToDouble(&uc, "07e8", ALLOW_HEX | ALLOW_OCTALS)));
- CHECK(std::isnan(StringToDouble(&uc, "07e7", ALLOW_HEX | ALLOW_OCTALS)));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0b0",
+ ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0B0",
+ ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(1.0, StringToDouble(&uc, "0b1",
+ ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(2.0, StringToDouble(&uc, "0b10",
+ ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(3.0, StringToDouble(&uc, "0b11",
+ ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(0.0, StringToDouble(&uc, "0b0", ALLOW_BINARY));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0B0", ALLOW_BINARY));
+ CHECK_EQ(1.0, StringToDouble(&uc, "0b1", ALLOW_BINARY));
+ CHECK_EQ(2.0, StringToDouble(&uc, "0b10", ALLOW_BINARY));
+ CHECK_EQ(3.0, StringToDouble(&uc, "0b11", ALLOW_BINARY));
+}
- CHECK_EQ(8.7, StringToDouble(&uc, "08.7", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(8e7, StringToDouble(&uc, "08e7", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.001, StringToDouble(&uc, "0.001", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.713, StringToDouble(&uc, "0.713", ALLOW_HEX | ALLOW_OCTALS));
+TEST(MalformedOctal) {
+ UnicodeCache uc;
+ CHECK_EQ(8.0, StringToDouble(&uc, "08", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(81.0, StringToDouble(&uc, "081", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(78.0, StringToDouble(&uc, "078", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK(std::isnan(StringToDouble(&uc, "07.7",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
+ CHECK(std::isnan(StringToDouble(&uc, "07.8",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
+ CHECK(std::isnan(StringToDouble(&uc, "07e8",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
+ CHECK(std::isnan(StringToDouble(&uc, "07e7",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
+
+ CHECK_EQ(8.7, StringToDouble(&uc, "08.7", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(8e7, StringToDouble(&uc, "08e7", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(0.001, StringToDouble(&uc, "0.001",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.713, StringToDouble(&uc, "0.713",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
CHECK_EQ(8.0, StringToDouble(&uc, "08", ALLOW_HEX));
CHECK_EQ(81.0, StringToDouble(&uc, "081", ALLOW_HEX));
@@ -112,12 +161,12 @@ TEST(MalformedOctal) {
TEST(TrailingJunk) {
UnicodeCache uc;
CHECK_EQ(8.0, StringToDouble(&uc, "8q", ALLOW_TRAILING_JUNK));
- CHECK_EQ(63.0,
- StringToDouble(&uc, "077qqq", ALLOW_OCTALS | ALLOW_TRAILING_JUNK));
- CHECK_EQ(10.0,
- StringToDouble(&uc, "10e", ALLOW_OCTALS | ALLOW_TRAILING_JUNK));
- CHECK_EQ(10.0,
- StringToDouble(&uc, "10e-", ALLOW_OCTALS | ALLOW_TRAILING_JUNK));
+ CHECK_EQ(63.0, StringToDouble(&uc, "077qqq",
+ ALLOW_IMPLICIT_OCTAL | ALLOW_TRAILING_JUNK));
+ CHECK_EQ(10.0, StringToDouble(&uc, "10e",
+ ALLOW_IMPLICIT_OCTAL | ALLOW_TRAILING_JUNK));
+ CHECK_EQ(10.0, StringToDouble(&uc, "10e-",
+ ALLOW_IMPLICIT_OCTAL | ALLOW_TRAILING_JUNK));
}
@@ -130,6 +179,7 @@ TEST(NonStrDecimalLiteral) {
CHECK_EQ(0.0, StringToDouble(&uc, " ", NO_FLAGS));
}
+
TEST(IntegerStrLiteral) {
UnicodeCache uc;
CHECK_EQ(0.0, StringToDouble(&uc, "0.0", NO_FLAGS));
@@ -143,14 +193,17 @@ TEST(IntegerStrLiteral) {
CHECK(std::isnan(StringToDouble(&uc, " - 1 ", NO_FLAGS)));
CHECK(std::isnan(StringToDouble(&uc, " + 1 ", NO_FLAGS)));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e0", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e1", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e-1", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e-100000", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e+100000", ALLOW_HEX | ALLOW_OCTALS));
- CHECK_EQ(0.0, StringToDouble(&uc, "0.", ALLOW_HEX | ALLOW_OCTALS));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0e0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0e1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0e-1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0e-100000",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0e+100000",
+ ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble(&uc, "0.", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
}
+
TEST(LongNumberStr) {
UnicodeCache uc;
CHECK_EQ(1e10, StringToDouble(&uc, "1" "0000000000", NO_FLAGS));
@@ -204,6 +257,7 @@ TEST(MaximumSignificantDigits) {
CHECK_EQ(4.4501477170144022721148e-308, StringToDouble(&uc, num, NO_FLAGS));
}
+
TEST(MinimumExponent) {
UnicodeCache uc;
// Same test but with different point-position.
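
The renamed flags separate the two octal notions exercised above:
ALLOW_OCTAL accepts the explicit ES6-style "0o" prefix, while
ALLOW_IMPLICIT_OCTAL accepts legacy leading-zero literals. A rough reference
in plain C++ (sketch only; it deliberately ignores the malformed-octal
fallback that TEST(MalformedOctal) covers):

#include <cmath>
#include <cstdlib>

// Hypothetical helper illustrating the flag split; not V8's parser.
static double OctalReference(const char* s, bool allow_octal,
                             bool allow_implicit_octal) {
  if (s[0] == '0' && (s[1] == 'o' || s[1] == 'O')) {
    if (!allow_octal) return NAN;
    return static_cast<double>(std::strtol(s + 2, NULL, 8));
  }
  if (s[0] == '0' && s[1] >= '0' && s[1] <= '7' && allow_implicit_octal) {
    return static_cast<double>(std::strtol(s, NULL, 8));
  }
  return std::strtod(s, NULL);
}

// OctalReference("0o77", true, false) == 63.0, as in TEST(Octal);
// OctalReference("077", false, true) == 63.0, as in TEST(ImplicitOctal).
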
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index e59687b215..eb72550c2c 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -27,24 +27,21 @@
//
// Tests of profiles generator and utilities.
-#define V8_DISABLE_DEPRECATIONS 1
#include "v8.h"
#include "cpu-profiler-inl.h"
#include "cctest.h"
#include "platform.h"
#include "utils.h"
#include "../include/v8-profiler.h"
-#undef V8_DISABLE_DEPRECATIONS
-
using i::CodeEntry;
using i::CpuProfile;
using i::CpuProfiler;
using i::CpuProfilesCollection;
+using i::Heap;
using i::ProfileGenerator;
using i::ProfileNode;
using i::ProfilerEventsProcessor;
using i::ScopedVector;
-using i::TokenEnumerator;
using i::Vector;
@@ -53,10 +50,11 @@ TEST(StartStop) {
ProfileGenerator generator(&profiles);
ProfilerEventsProcessor processor(&generator);
processor.Start();
- processor.Stop();
+ processor.StopSynchronously();
processor.Join();
}
+
static inline i::Address ToAddress(int n) {
return reinterpret_cast<i::Address>(n);
}
@@ -160,7 +158,7 @@ TEST(CodeEvents) {
// Enqueue a tick event to enable code events processing.
EnqueueTickSampleEvent(&processor, aaa_code->address());
- processor.Stop();
+ processor.StopSynchronously();
processor.Join();
// Check the state of profile generator.
@@ -189,6 +187,7 @@ static int CompareProfileNodes(const T* p1, const T* p2) {
return strcmp((*p1)->entry()->name(), (*p2)->entry()->name());
}
+
TEST(TickEvents) {
TestSetup test_setup;
LocalContext env;
@@ -221,10 +220,9 @@ TEST(TickEvents) {
frame2_code->instruction_end() - 1,
frame1_code->instruction_end() - 1);
- processor.Stop();
+ processor.StopSynchronously();
processor.Join();
- CpuProfile* profile =
- profiles->StopProfiling(TokenEnumerator::kNoSecurityToken, "", 1);
+ CpuProfile* profile = profiles->StopProfiling("", 1);
CHECK_NE(NULL, profile);
// Check call trees.
@@ -286,10 +284,9 @@ TEST(Issue1398) {
sample->stack[i] = code->address();
}
- processor.Stop();
+ processor.StopSynchronously();
processor.Join();
- CpuProfile* profile =
- profiles->StopProfiling(TokenEnumerator::kNoSecurityToken, "", 1);
+ CpuProfile* profile = profiles->StopProfiling("", 1);
CHECK_NE(NULL, profile);
int actual_depth = 0;
@@ -393,63 +390,6 @@ TEST(DeleteCpuProfile) {
}
-TEST(DeleteCpuProfileDifferentTokens) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
-
- CHECK_EQ(0, cpu_profiler->GetProfileCount());
- v8::Local<v8::String> name1 = v8::String::New("1");
- cpu_profiler->StartCpuProfiling(name1);
- const v8::CpuProfile* p1 = cpu_profiler->StopCpuProfiling(name1);
- CHECK_NE(NULL, p1);
- CHECK_EQ(1, cpu_profiler->GetProfileCount());
- unsigned uid1 = p1->GetUid();
- CHECK_EQ(p1, cpu_profiler->FindCpuProfile(uid1));
- v8::Local<v8::String> token1 = v8::String::New("token1");
- const v8::CpuProfile* p1_t1 = cpu_profiler->FindCpuProfile(uid1, token1);
- CHECK_NE(NULL, p1_t1);
- CHECK_NE(p1, p1_t1);
- CHECK_EQ(1, cpu_profiler->GetProfileCount());
- const_cast<v8::CpuProfile*>(p1)->Delete();
- CHECK_EQ(0, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid1));
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid1, token1));
- const_cast<v8::CpuProfile*>(p1_t1)->Delete();
- CHECK_EQ(0, cpu_profiler->GetProfileCount());
-
- v8::Local<v8::String> name2 = v8::String::New("2");
- cpu_profiler->StartCpuProfiling(name2);
- v8::Local<v8::String> token2 = v8::String::New("token2");
- const v8::CpuProfile* p2_t2 = cpu_profiler->StopCpuProfiling(name2, token2);
- CHECK_NE(NULL, p2_t2);
- CHECK_EQ(1, cpu_profiler->GetProfileCount());
- unsigned uid2 = p2_t2->GetUid();
- CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid2));
- const v8::CpuProfile* p2 = cpu_profiler->FindCpuProfile(uid2);
- CHECK_NE(p2_t2, p2);
- v8::Local<v8::String> name3 = v8::String::New("3");
- cpu_profiler->StartCpuProfiling(name3);
- const v8::CpuProfile* p3 = cpu_profiler->StopCpuProfiling(name3);
- CHECK_NE(NULL, p3);
- CHECK_EQ(2, cpu_profiler->GetProfileCount());
- unsigned uid3 = p3->GetUid();
- CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid3));
- CHECK_EQ(p3, cpu_profiler->FindCpuProfile(uid3));
- const_cast<v8::CpuProfile*>(p2_t2)->Delete();
- CHECK_EQ(1, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid2));
- CHECK_EQ(p3, cpu_profiler->FindCpuProfile(uid3));
- const_cast<v8::CpuProfile*>(p2)->Delete();
- CHECK_EQ(1, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid2));
- CHECK_EQ(p3, cpu_profiler->FindCpuProfile(uid3));
- const_cast<v8::CpuProfile*>(p3)->Delete();
- CHECK_EQ(0, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid3));
-}
-
-
TEST(GetProfilerWhenIsolateIsNotInitialized) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
CHECK(i::Isolate::Current()->IsDefaultIsolate());
@@ -470,6 +410,33 @@ TEST(GetProfilerWhenIsolateIsNotInitialized) {
}
+static const v8::CpuProfile* RunProfiler(
+ LocalContext& env, v8::Handle<v8::Function> function,
+ v8::Handle<v8::Value> argv[], int argc,
+ unsigned min_js_samples) {
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+ cpu_profiler->StartCpuProfiling(profile_name);
+
+ i::Sampler* sampler =
+ reinterpret_cast<i::Isolate*>(env->GetIsolate())->logger()->sampler();
+ sampler->StartCountingSamples();
+ do {
+ function->Call(env->Global(), argc, argv);
+ } while (sampler->js_and_external_sample_count() < min_js_samples);
+
+ const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+ CHECK_NE(NULL, profile);
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(
+ const_cast<v8::CpuProfile*>(profile))->Print();
+
+ return profile;
+}
+
+
static bool ContainsString(v8::Handle<v8::String> string,
const Vector<v8::Handle<v8::String> >& vector) {
for (int i = 0; i < vector.length(); i++) {
@@ -583,24 +550,11 @@ TEST(CollectCpuProfile) {
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("start")));
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t profiling_interval_ms = 200;
-#if defined(_WIN32) || defined(_WIN64)
- // 200ms is not enough on Windows. See
- // https://code.google.com/p/v8/issues/detail?id=2628
- profiling_interval_ms = 500;
-#endif
v8::Handle<v8::Value> args[] = { v8::Integer::New(profiling_interval_ms) };
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 200);
function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
-
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
@@ -623,6 +577,7 @@ TEST(CollectCpuProfile) {
const char* delayBranch[] = { "delay", "loop" };
CheckSimpleBranch(fooNode, delayBranch, ARRAY_SIZE(delayBranch));
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
@@ -656,23 +611,14 @@ TEST(SampleWhenFrameIsNotSetup) {
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("start")));
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t repeat_count = 100;
#if defined(USE_SIMULATOR)
// Simulators are much slower.
repeat_count = 1;
#endif
v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
- function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
-
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
@@ -694,6 +640,7 @@ TEST(SampleWhenFrameIsNotSetup) {
}
}
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
@@ -712,16 +659,15 @@ class TestApiCallbacks {
: min_duration_ms_(min_duration_ms),
is_warming_up_(false) {}
- static v8::Handle<v8::Value> Getter(v8::Local<v8::String> name,
- const v8::AccessorInfo& info) {
+ static void Getter(v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
TestApiCallbacks* data = fromInfo(info);
data->Wait();
- return v8::Int32::New(2013);
}
static void Setter(v8::Local<v8::String> name,
v8::Local<v8::Value> value,
- const v8::AccessorInfo& info) {
+ const v8::PropertyCallbackInfo<void>& info) {
TestApiCallbacks* data = fromInfo(info);
data->Wait();
}
@@ -744,13 +690,8 @@ class TestApiCallbacks {
}
}
- static TestApiCallbacks* fromInfo(const v8::AccessorInfo& info) {
- void* data = v8::External::Cast(*info.Data())->Value();
- return reinterpret_cast<TestApiCallbacks*>(data);
- }
-
- static TestApiCallbacks* fromInfo(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+ template<typename T>
+ static TestApiCallbacks* fromInfo(const T& info) {
void* data = v8::External::Cast(*info.Data())->Value();
return reinterpret_cast<TestApiCallbacks*>(data);
}
@@ -786,25 +727,17 @@ TEST(NativeAccessorUninitializedIC) {
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("start")));
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t repeat_count = 1;
v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
- function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
-
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 180);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* startNode = GetChild(root, "start");
GetChild(startNode, "get foo");
GetChild(startNode, "set foo");
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
@@ -844,25 +777,17 @@ TEST(NativeAccessorMonomorphicIC) {
accessors.set_warming_up(false);
}
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t repeat_count = 100;
v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
- function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
-
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 200);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* startNode = GetChild(root, "start");
GetChild(startNode, "get foo");
GetChild(startNode, "set foo");
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
@@ -897,24 +822,16 @@ TEST(NativeMethodUninitializedIC) {
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("start")));
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t repeat_count = 1;
v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
- function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
-
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* startNode = GetChild(root, "start");
GetChild(startNode, "fooMethod");
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
@@ -951,25 +868,17 @@ TEST(NativeMethodMonomorphicIC) {
callbacks.set_warming_up(false);
}
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t repeat_count = 100;
v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
- function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
-
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
GetChild(root, "start");
const v8::CpuProfileNode* startNode = GetChild(root, "start");
GetChild(startNode, "fooMethod");
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
@@ -996,19 +905,10 @@ TEST(BoundFunctionCall) {
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("start")));
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t duration_ms = 100;
v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
- function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
-
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
@@ -1021,6 +921,7 @@ TEST(BoundFunctionCall) {
const v8::CpuProfileNode* startNode = GetChild(root, "start");
GetChild(startNode, "foo");
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
@@ -1053,28 +954,17 @@ TEST(FunctionCallSample) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ // Collect garbage that might have been generated while installing extensions.
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+
v8::Script::Compile(v8::String::New(call_function_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("start")));
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t duration_ms = 100;
-#if defined(_WIN32) || defined(_WIN64)
- // 100ms is not enough on Windows. See
- // https://code.google.com/p/v8/issues/detail?id=2628
- duration_ms = 400;
-#endif
v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
- function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
-
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
@@ -1108,6 +998,7 @@ TEST(FunctionCallSample) {
CheckChildrenNames(unresolvedNode, names);
}
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
@@ -1145,24 +1036,11 @@ TEST(FunctionApplySample) {
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("start")));
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
-
- cpu_profiler->StartCpuProfiling(profile_name);
int32_t duration_ms = 100;
-#if defined(_WIN32) || defined(_WIN64)
- // 100ms is not enough on Windows. See
- // https://code.google.com/p/v8/issues/detail?id=2628
- duration_ms = 400;
-#endif
v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
- function->Call(env->Global(), ARRAY_SIZE(args), args);
- const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
- CHECK_NE(NULL, profile);
- // Dump collected profile to have a better diagnostic in case of failure.
- reinterpret_cast<i::CpuProfile*>(
- const_cast<v8::CpuProfile*>(profile))->Print();
+ const v8::CpuProfile* profile =
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
@@ -1200,5 +1078,6 @@ TEST(FunctionApplySample) {
}
}
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->DeleteAllCpuProfiles();
}
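
The hunks above replace the same inlined start/call/stop sequence in five tests with a single RunProfiler helper. Its definition lives in a part of this diff not shown here; judging from the call sites and the deleted lines, a sketch of what it presumably does (the final argument, 100 at every call site, is presumably a minimum sample count, which this sketch ignores):

static const v8::CpuProfile* RunProfiler(LocalContext& env,
                                         v8::Handle<v8::Function> function,
                                         v8::Handle<v8::Value> argv[],
                                         int argc,
                                         unsigned min_js_samples) {
  v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
  v8::Local<v8::String> profile_name = v8::String::New("my_profile");
  cpu_profiler->StartCpuProfiling(profile_name);
  function->Call(env->Global(), argc, argv);
  const v8::CpuProfile* profile =
      cpu_profiler->StopCpuProfiling(profile_name);
  CHECK_NE(NULL, profile);
  // Dump the collected profile for a better diagnostic in case of failure.
  reinterpret_cast<i::CpuProfile*>(
      const_cast<v8::CpuProfile*>(profile))->Print();
  return profile;
}

Note that each test now fetches the CpuProfiler again only to call DeleteAllCpuProfiles() at the end, which is why that lookup moved below the profiling run.
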
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 0152bb7a81..6336481dcc 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -107,6 +107,7 @@ static int64_t TimeFromYearMonthDay(DateCache* date_cache,
return (result + day - 1) * DateCache::kMsPerDay;
}
+
static void CheckDST(int64_t time) {
Isolate* isolate = Isolate::Current();
DateCache* date_cache = isolate->date_cache();
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 788f7af437..484eb8e3fb 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -29,6 +29,7 @@
#include <stdlib.h>
+#define V8_DISABLE_DEPRECATIONS 1
#include "v8.h"
#include "api.h"
@@ -39,6 +40,7 @@
#include "platform.h"
#include "stub-cache.h"
#include "utils.h"
+#undef V8_DISABLE_DEPRECATIONS
using ::v8::internal::EmbeddedVector;
@@ -820,6 +822,8 @@ struct EvaluateCheck {
const char* expr; // An expression to evaluate when a break point is hit.
v8::Handle<v8::Value> expected; // The expected result.
};
+
+
// Array of checks to do.
struct EvaluateCheck* checks = NULL;
// Source for the JavaScript function which can do the evaluation when a break
@@ -1393,6 +1397,7 @@ static void CallWithBreakPoints(v8::Local<v8::Object> recv,
}
}
+
// Test GC during break point processing.
TEST(GCDuringBreakPointProcessing) {
break_point_hit_count = 0;
@@ -2526,6 +2531,7 @@ static void CheckDebugEvent(const v8::Debug::EventDetails& eventDetails) {
if (eventDetails.GetEvent() == v8::Break) ++debugEventCount;
}
+
// Test that the conditional breakpoints work even if code generation from
// strings is prohibited in the debuggee context.
TEST(ConditionalBreakpointWithCodeGenerationDisallowed) {
@@ -2571,6 +2577,7 @@ static void CheckDebugEval(const v8::Debug::EventDetails& eventDetails) {
}
}
+
// Test that the evaluation of expressions when a break point is hit generates
// the correct results in case code generation from strings is disallowed in the
// debuggee context.
@@ -2625,6 +2632,7 @@ int AsciiToUtf16(const char* input_buffer, uint16_t* output_buffer) {
return i;
}
+
// Copies a 16-bit string to a C string by dropping the high byte of
// each character. Does not check for buffer overflow.
// Can be used in any thread. Requires string length as an input.
@@ -2712,6 +2720,7 @@ static void DebugProcessDebugMessagesHandler(
}
}
+
// Test that the evaluation of expressions works even from ProcessDebugMessages
// i.e. with empty stack.
TEST(DebugEvaluateWithoutStack) {
@@ -4265,6 +4274,7 @@ TEST(NoBreakWhenBootstrapping) {
CheckDebuggerUnloaded();
}
+
static void NamedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
v8::Handle<v8::Array> result = v8::Array::New(3);
result->Set(v8::Integer::New(0), v8::String::New("a"));
@@ -4709,6 +4719,7 @@ ThreadBarrier::ThreadBarrier(int num_threads)
invalid_ = false; // A barrier may only be used once. Then it is invalid.
}
+
// Do not call, due to race condition with Wait().
// Could be resolved with Pthread condition variables.
ThreadBarrier::~ThreadBarrier() {
@@ -4717,6 +4728,7 @@ ThreadBarrier::~ThreadBarrier() {
delete sem_;
}
+
void ThreadBarrier::Wait() {
lock_->Lock();
CHECK(!invalid_);
@@ -4736,6 +4748,7 @@ void ThreadBarrier::Wait() {
}
}
+
// A set containing enough barriers and semaphores for any of the tests.
class Barriers {
public:
@@ -4845,6 +4858,7 @@ int GetSourceLineFromBreakEventMessage(char *message) {
return res;
}
+
/* Test MessageQueues */
/* Tests the message queues that hold debugger commands and
* response messages to the debugger. Fills queues and makes
@@ -4876,6 +4890,7 @@ static void MessageHandler(const uint16_t* message, int length,
message_queue_barriers.semaphore_1->Wait();
}
+
void MessageQueueDebuggerThread::Run() {
const int kBufferSize = 1000;
uint16_t buffer_1[kBufferSize];
@@ -5175,6 +5190,7 @@ void V8Thread::Run() {
CompileRun(source);
}
+
void DebuggerThread::Run() {
const int kBufSize = 1000;
uint16_t buffer[kBufSize];
@@ -5209,6 +5225,7 @@ TEST(ThreadedDebugging) {
debugger_thread.Join();
}
+
/* Test RecursiveBreakpoints */
/* In this test, the debugger evaluates a function with a breakpoint, after
* hitting a breakpoint in another function. We do this with both values
@@ -5400,6 +5417,7 @@ void BreakpointsDebuggerThread::Run() {
v8::Debug::SendCommand(buffer, AsciiToUtf16(command_8, buffer));
}
+
void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
i::FLAG_debugger_auto_break = true;
@@ -5418,10 +5436,12 @@ void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
breakpoints_debugger_thread.Join();
}
+
TEST(RecursiveBreakpoints) {
TestRecursiveBreakpointsGeneric(false);
}
+
TEST(RecursiveBreakpointsGlobal) {
TestRecursiveBreakpointsGeneric(true);
}
@@ -6270,8 +6290,6 @@ TEST(ContextData) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope scope(isolate);
- v8::Debug::SetMessageHandler2(ContextCheckMessageHandler);
-
// Create two contexts.
v8::Handle<v8::Context> context_1;
v8::Handle<v8::Context> context_2;
@@ -6281,6 +6299,8 @@ TEST(ContextData) {
context_1 = v8::Context::New(isolate, NULL, global_template, global_object);
context_2 = v8::Context::New(isolate, NULL, global_template, global_object);
+ v8::Debug::SetMessageHandler2(ContextCheckMessageHandler);
+
// Default data value is undefined.
CHECK(context_1->GetEmbedderData(0)->IsUndefined());
CHECK(context_2->GetEmbedderData(0)->IsUndefined());
@@ -6438,7 +6458,8 @@ TEST(RegExpDebugBreak) {
// Common part of EvalContextData and NestedBreakEventContextData tests.
-static void ExecuteScriptForContextCheck() {
+static void ExecuteScriptForContextCheck(
+ v8::Debug::MessageHandler2 message_handler) {
// Create a context.
v8::Handle<v8::Context> context_1;
v8::Handle<v8::ObjectTemplate> global_template =
@@ -6446,6 +6467,8 @@ static void ExecuteScriptForContextCheck() {
context_1 =
v8::Context::New(v8::Isolate::GetCurrent(), NULL, global_template);
+ v8::Debug::SetMessageHandler2(message_handler);
+
// Default data value is undefined.
CHECK(context_1->GetEmbedderData(0)->IsUndefined());
@@ -6465,6 +6488,8 @@ static void ExecuteScriptForContextCheck() {
v8::Local<v8::Function> f = CompileFunction(source, "f");
f->Call(context_1->Global(), 0, NULL);
}
+
+ v8::Debug::SetMessageHandler2(NULL);
}
@@ -6474,13 +6499,11 @@ static void ExecuteScriptForContextCheck() {
// Message.GetEventContext.
TEST(EvalContextData) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Debug::SetMessageHandler2(ContextCheckMessageHandler);
- ExecuteScriptForContextCheck();
+ ExecuteScriptForContextCheck(ContextCheckMessageHandler);
// One time compile event and one time break event.
CHECK_GT(message_handler_hit_count, 2);
- v8::Debug::SetMessageHandler2(NULL);
CheckDebuggerUnloaded();
}
@@ -6539,16 +6562,14 @@ TEST(NestedBreakEventContextData) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
break_count = 0;
message_handler_hit_count = 0;
- v8::Debug::SetMessageHandler2(DebugEvalContextCheckMessageHandler);
- ExecuteScriptForContextCheck();
+ ExecuteScriptForContextCheck(DebugEvalContextCheckMessageHandler);
// One time compile event and two times break event.
CHECK_GT(message_handler_hit_count, 3);
// One break from the source and another from the evaluate request.
CHECK_EQ(break_count, 2);
- v8::Debug::SetMessageHandler2(NULL);
CheckDebuggerUnloaded();
}
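
Both context-data tests now delegate handler installation to ExecuteScriptForContextCheck, which sets the message handler only after the contexts exist and clears it before returning, presumably so events raised while the contexts are being built are not counted. The same lifetime could be expressed with a small RAII wrapper; a sketch (not part of this patch), using only the v8::Debug API seen above:

class ScopedMessageHandler {
 public:
  explicit ScopedMessageHandler(v8::Debug::MessageHandler2 handler) {
    v8::Debug::SetMessageHandler2(handler);
  }
  ~ScopedMessageHandler() {
    v8::Debug::SetMessageHandler2(NULL);  // always detach, even on early exit
  }
};
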
@@ -6873,6 +6894,7 @@ static void CountingMessageHandler(const v8::Debug::Message& message) {
counting_message_handler_counter++;
}
+
// Test that debug messages get processed when ProcessDebugMessages is called.
TEST(ProcessDebugMessages) {
DebugLocalContext env;
@@ -7112,14 +7134,15 @@ static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
CHECK_EQ(expected_callback_data, details.GetCallbackData());
}
+
+// Check that event details contain the context where the debug event occurred.
TEST(DebugEventContext) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope scope(isolate);
expected_callback_data = v8::Int32::New(2010);
+ expected_context = v8::Context::New(isolate);
v8::Debug::SetDebugEventListener2(DebugEventContextChecker,
expected_callback_data);
- expected_context = v8::Context::New(isolate);
v8::Context::Scope context_scope(expected_context);
v8::Script::Compile(v8::String::New("(function(){debugger;})();"))->Run();
expected_context.Clear();
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 85b472d30a..9d6623ea9a 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -405,6 +405,17 @@ TEST(Type3) {
"e6ff3f94 usat r3, #31, r4, lsl #31");
COMPARE(usat(r8, 0, Operand(r5, ASR, 17)),
"e6e088d5 usat r8, #0, r5, asr #17");
+
+ COMPARE(pkhbt(r3, r4, Operand(r5, LSL, 17)),
+ "e6843895 pkhbt r3, r4, r5, lsl #17");
+ COMPARE(pkhtb(r3, r4, Operand(r5, ASR, 17)),
+ "e68438d5 pkhtb r3, r4, r5, asr #17");
+ COMPARE(uxtb(r3, Operand(r4, ROR, 8)),
+ "e6ef3474 uxtb r3, r4, ror #8");
+ COMPARE(uxtab(r3, r4, Operand(r5, ROR, 8)),
+ "e6e43475 uxtab r3, r4, r5, ror #8");
+ COMPARE(uxtb16(r3, Operand(r4, ROR, 8)),
+ "e6cf3474 uxtb16 r3, r4, ror #8");
}
VERIFY_RUN();
@@ -662,6 +673,23 @@ TEST(Vfp) {
}
+TEST(Neon) {
+ SET_UP();
+
+ if (CpuFeatures::IsSupported(NEON)) {
+ CpuFeatureScope scope(&assm, NEON);
+ COMPARE(vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(r1)),
+ "f421420f vld1.8 {d4, d5, d6, d7}, [r1]");
+ COMPARE(vst1(Neon16, NeonListOperand(d17, 4), NeonMemOperand(r9)),
+ "f449124f vst1.16 {d17, d18, d19, d20}, [r9]");
+ COMPARE(vmovl(NeonU8, q4, d2),
+ "f3884a12 vmovl.u8 q4, d2");
+ }
+
+ VERIFY_RUN();
+}
+
+
TEST(LoadStore) {
SET_UP();
@@ -858,6 +886,11 @@ TEST(LoadStore) {
"e1eba7ff strd r10, [fp, #+127]!");
COMPARE(strd(ip, sp, MemOperand(sp, -127, PreIndex)),
"e16dc7ff strd ip, [sp, #-127]!");
+
+ COMPARE(pld(MemOperand(r1, 0)),
+ "f5d1f000 pld [r1]");
+ COMPARE(pld(MemOperand(r2, 128)),
+ "f5d2f080 pld [r2, #+128]");
}
VERIFY_RUN();
diff --git a/deps/v8/test/cctest/test-double.cc b/deps/v8/test/cctest/test-double.cc
index 0e50bdcc8a..2c9f0c21bb 100644
--- a/deps/v8/test/cctest/test-double.cc
+++ b/deps/v8/test/cctest/test-double.cc
@@ -50,6 +50,7 @@ TEST(Uint64Conversions) {
CHECK_EQ(1.7976931348623157e308, Double(max_double64).value());
}
+
TEST(AsDiyFp) {
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
DiyFp diy_fp = Double(ordered).AsDiyFp();
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 5973941edc..e30fcc00c9 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -127,6 +127,43 @@ static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
}
+static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
+// Check that snapshot has no unretained entries except root.
+static bool ValidateSnapshot(const v8::HeapSnapshot* snapshot, int depth = 3) {
+ i::HeapSnapshot* heap_snapshot = const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(snapshot));
+
+ i::HashMap visited(AddressesMatch);
+ i::List<i::HeapGraphEdge>& edges = heap_snapshot->edges();
+ for (int i = 0; i < edges.length(); ++i) {
+ i::HashMap::Entry* entry = visited.Lookup(
+ reinterpret_cast<void*>(edges[i].to()),
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(edges[i].to())),
+ true);
+ uint32_t ref_count = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(entry->value));
+ entry->value = reinterpret_cast<void*>(ref_count + 1);
+ }
+ uint32_t unretained_entries_count = 0;
+ i::List<i::HeapEntry>& entries = heap_snapshot->entries();
+ for (int i = 0; i < entries.length(); ++i) {
+ i::HashMap::Entry* entry = visited.Lookup(
+ reinterpret_cast<void*>(&entries[i]),
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&entries[i])),
+ false);
+ if (!entry && entries[i].id() != 1) {
+ entries[i].Print("entry with no retainer", "", depth, 0);
+ ++unretained_entries_count;
+ }
+ }
+ return unretained_entries_count == 0;
+}
+
+
TEST(HeapSnapshot) {
LocalContext env2;
v8::HandleScope scope(env2->GetIsolate());
@@ -141,6 +178,7 @@ TEST(HeapSnapshot) {
"var c2 = new C2(a2);");
const v8::HeapSnapshot* snapshot_env2 =
heap_profiler->TakeHeapSnapshot(v8_str("env2"));
+ CHECK(ValidateSnapshot(snapshot_env2));
const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
// Verify, that JS global object of env2 has '..2' properties.
@@ -176,6 +214,7 @@ TEST(HeapSnapshotObjectSizes) {
"(function() { x.a.a = x.b; })();");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("sizes"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* x =
GetProperty(global, v8::HeapGraphEdge::kProperty, "x");
@@ -204,6 +243,7 @@ TEST(BoundFunctionInSnapshot) {
"boundFunction = myFunction.bind(new AAAAA(), 20, new Number(12)); \n");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("sizes"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* f =
GetProperty(global, v8::HeapGraphEdge::kProperty, "boundFunction");
@@ -242,6 +282,7 @@ TEST(HeapSnapshotEntryChildren) {
"a = new A;");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("children"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
for (int i = 0, count = global->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = global->GetChild(i);
@@ -269,6 +310,7 @@ TEST(HeapSnapshotCodeObjects) {
"compiled(1)");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("code"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* compiled =
@@ -332,6 +374,7 @@ TEST(HeapSnapshotHeapNumbers) {
"b = 2.5; // b is HeapNumber");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("numbers"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK_EQ(NULL, GetProperty(global, v8::HeapGraphEdge::kProperty, "a"));
const v8::HeapGraphNode* b =
@@ -340,6 +383,7 @@ TEST(HeapSnapshotHeapNumbers) {
CHECK_EQ(v8::HeapGraphNode::kHeapNumber, b->GetType());
}
+
TEST(HeapSnapshotSlicedString) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -352,6 +396,7 @@ TEST(HeapSnapshotSlicedString) {
"child_string = parent_string.slice(100);");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("strings"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* parent_string =
GetProperty(global, v8::HeapGraphEdge::kProperty, "parent_string");
@@ -364,6 +409,7 @@ TEST(HeapSnapshotSlicedString) {
CHECK_EQ(parent_string, parent);
}
+
TEST(HeapSnapshotInternalReferences) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope scope(isolate);
@@ -379,6 +425,7 @@ TEST(HeapSnapshotInternalReferences) {
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("internals"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
// The first reference will not be present, because it's a Smi.
CHECK_EQ(NULL, GetProperty(global_node, v8::HeapGraphEdge::kInternal, "0"));
@@ -407,6 +454,7 @@ TEST(HeapSnapshotAddressReuse) {
" a[i] = new A();\n");
const v8::HeapSnapshot* snapshot1 =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot1"));
+ CHECK(ValidateSnapshot(snapshot1));
v8::SnapshotObjectId maxId1 = snapshot1->GetMaxSnapshotJSObjectId();
CompileRun(
@@ -416,6 +464,7 @@ TEST(HeapSnapshotAddressReuse) {
const v8::HeapSnapshot* snapshot2 =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot2"));
+ CHECK(ValidateSnapshot(snapshot2));
const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
const v8::HeapGraphNode* array_node =
@@ -449,6 +498,7 @@ TEST(HeapEntryIdsAndArrayShift) {
" a.push(new AnObject());\n");
const v8::HeapSnapshot* snapshot1 =
heap_profiler->TakeHeapSnapshot(v8_str("s1"));
+ CHECK(ValidateSnapshot(snapshot1));
CompileRun(
"for (var i = 0; i < 1; ++i)\n"
@@ -458,6 +508,7 @@ TEST(HeapEntryIdsAndArrayShift) {
const v8::HeapSnapshot* snapshot2 =
heap_profiler->TakeHeapSnapshot(v8_str("s2"));
+ CHECK(ValidateSnapshot(snapshot2));
const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
@@ -481,6 +532,7 @@ TEST(HeapEntryIdsAndArrayShift) {
CHECK_EQ_SNAPSHOT_OBJECT_ID(k1->GetId(), k2->GetId());
}
+
TEST(HeapEntryIdsAndGC) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -495,11 +547,13 @@ TEST(HeapEntryIdsAndGC) {
v8::Local<v8::String> s2_str = v8_str("s2");
const v8::HeapSnapshot* snapshot1 =
heap_profiler->TakeHeapSnapshot(s1_str);
+ CHECK(ValidateSnapshot(snapshot1));
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
const v8::HeapSnapshot* snapshot2 =
heap_profiler->TakeHeapSnapshot(s2_str);
+ CHECK(ValidateSnapshot(snapshot2));
CHECK_GT(snapshot1->GetMaxSnapshotJSObjectId(), 7000);
CHECK(snapshot1->GetMaxSnapshotJSObjectId() <=
@@ -550,6 +604,7 @@ TEST(HeapSnapshotRootPreservedAfterSorting) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("s"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* root1 = snapshot->GetRoot();
const_cast<i::HeapSnapshot*>(reinterpret_cast<const i::HeapSnapshot*>(
snapshot))->GetSortedEntriesList();
@@ -617,6 +672,8 @@ TEST(HeapSnapshotJSONSerialization) {
"var b = new B(a);");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("json"));
+ CHECK(ValidateSnapshot(snapshot));
+
TestJSONStream stream;
snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
CHECK_GT(stream.size(), 0);
@@ -713,6 +770,7 @@ TEST(HeapSnapshotJSONSerializationAborting) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("abort"));
+ CHECK(ValidateSnapshot(snapshot));
TestJSONStream stream(5);
snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
CHECK_GT(stream.size(), 0);
@@ -952,6 +1010,7 @@ TEST(HeapSnapshotGetNodeById) {
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("id"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* root = snapshot->GetRoot();
CheckChildrenIds(snapshot, root, 0, 3);
// Check a big id, which should not exist yet.
@@ -966,6 +1025,7 @@ TEST(HeapSnapshotGetSnapshotObjectId) {
CompileRun("globalObject = {};\n");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("get_snapshot_object_id"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* global_object =
GetProperty(global, v8::HeapGraphEdge::kProperty, "globalObject");
@@ -990,6 +1050,7 @@ TEST(HeapSnapshotUnknownSnapshotObjectId) {
CompileRun("globalObject = {};\n");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("unknown_object_id"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* node =
snapshot->GetNodeById(v8::HeapProfiler::kUnknownObjectId);
CHECK_EQ(NULL, node);
@@ -1017,6 +1078,7 @@ class TestActivityControl : public v8::ActivityControl {
};
}
+
TEST(TakeHeapSnapshotAborting) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1035,6 +1097,8 @@ TEST(TakeHeapSnapshotAborting) {
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("full"),
&control);
+ CHECK(ValidateSnapshot(snapshot));
+
CHECK_NE(NULL, snapshot);
CHECK_EQ(snapshots_count + 1, heap_profiler->GetSnapshotCount());
CHECK_EQ(control.total(), control.done());
@@ -1145,6 +1209,7 @@ TEST(HeapSnapshotRetainedObjectInfo) {
CHECK_EQ(0, TestRetainedObjectInfo::instances.length());
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("retained"));
+ CHECK(ValidateSnapshot(snapshot));
CHECK_EQ(3, TestRetainedObjectInfo::instances.length());
for (int i = 0; i < TestRetainedObjectInfo::instances.length(); ++i) {
@@ -1237,6 +1302,7 @@ TEST(HeapSnapshotImplicitReferences) {
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("implicit_refs"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global_object = GetGlobalObject(snapshot);
const v8::HeapGraphNode* obj0 = GetProperty(
@@ -1301,6 +1367,7 @@ TEST(DeleteHeapSnapshot) {
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
const v8::HeapSnapshot* s1 =
heap_profiler->TakeHeapSnapshot(v8_str("1"));
+
CHECK_NE(NULL, s1);
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
unsigned uid1 = s1->GetUid();
@@ -1340,6 +1407,7 @@ class NameResolver : public v8::HeapProfiler::ObjectNameResolver {
}
};
+
TEST(GlobalObjectName) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1352,6 +1420,7 @@ TEST(GlobalObjectName) {
heap_profiler->TakeHeapSnapshot(v8_str("document"),
NULL,
&name_resolver);
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK_NE(NULL, global);
CHECK_EQ("Object / Global object name" ,
@@ -1382,6 +1451,7 @@ TEST(NodesIteration) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("iteration"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK_NE(NULL, global);
// Verify that we can find this object by iteration.
@@ -1403,6 +1473,7 @@ TEST(GetHeapValue) {
CompileRun("a = { s_prop: \'value\', n_prop: 0.1 };");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("value"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK(global->GetHeapValue()->IsObject());
v8::Local<v8::Object> js_global =
@@ -1437,6 +1508,7 @@ TEST(GetHeapValueForDeletedObject) {
CompileRun("a = { p: { r: {} } };");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* obj = GetProperty(
global, v8::HeapGraphEdge::kProperty, "a");
@@ -1523,6 +1595,7 @@ TEST(FastCaseAccessors) {
"});\n");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("fastCaseAccessors"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK_NE(NULL, global);
@@ -1556,6 +1629,7 @@ TEST(SlowCaseAccessors) {
"});\n");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("slowCaseAccessors"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK_NE(NULL, global);
@@ -1584,6 +1658,7 @@ TEST(HiddenPropertiesFastCase) {
"c = new C(2012);\n");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("HiddenPropertiesFastCase1"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* c =
GetProperty(global, v8::HeapGraphEdge::kProperty, "c");
@@ -1598,6 +1673,7 @@ TEST(HiddenPropertiesFastCase) {
snapshot = heap_profiler->TakeHeapSnapshot(
v8_str("HiddenPropertiesFastCase2"));
+ CHECK(ValidateSnapshot(snapshot));
global = GetGlobalObject(snapshot);
c = GetProperty(global, v8::HeapGraphEdge::kProperty, "c");
CHECK_NE(NULL, c);
@@ -1606,6 +1682,7 @@ TEST(HiddenPropertiesFastCase) {
CHECK_NE(NULL, hidden_props);
}
+
bool HasWeakEdge(const v8::HeapGraphNode* node) {
for (int i = 0; i < node->GetChildrenCount(); ++i) {
const v8::HeapGraphEdge* handle_edge = node->GetChild(i);
@@ -1620,6 +1697,7 @@ bool HasWeakGlobalHandle() {
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("weaks"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* gc_roots = GetNode(
snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "(GC roots)");
CHECK_NE(NULL, gc_roots);
@@ -1650,26 +1728,6 @@ TEST(WeakGlobalHandle) {
}
-TEST(WeakNativeContextRefs) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
-
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("weaks"));
- const v8::HeapGraphNode* gc_roots = GetNode(
- snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "(GC roots)");
- CHECK_NE(NULL, gc_roots);
- const v8::HeapGraphNode* global_handles = GetNode(
- gc_roots, v8::HeapGraphNode::kSynthetic, "(Global handles)");
- CHECK_NE(NULL, global_handles);
- const v8::HeapGraphNode* native_context = GetNode(
- global_handles, v8::HeapGraphNode::kHidden, "system / NativeContext");
- CHECK_NE(NULL, native_context);
- CHECK(HasWeakEdge(native_context));
-}
-
-
TEST(SfiAndJsFunctionWeakRefs) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1679,14 +1737,15 @@ TEST(SfiAndJsFunctionWeakRefs) {
"fun = (function (x) { return function () { return x + 1; } })(1);");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("fun"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK_NE(NULL, global);
const v8::HeapGraphNode* fun =
GetProperty(global, v8::HeapGraphEdge::kProperty, "fun");
- CHECK(HasWeakEdge(fun));
+ CHECK(!HasWeakEdge(fun));
const v8::HeapGraphNode* shared =
GetProperty(fun, v8::HeapGraphEdge::kInternal, "shared");
- CHECK(HasWeakEdge(shared));
+ CHECK(!HasWeakEdge(shared));
}
@@ -1700,6 +1759,7 @@ TEST(NoDebugObjectInSnapshot) {
CompileRun("foo = {};");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* root = snapshot->GetRoot();
int globals_count = 0;
for (int i = 0; i < root->GetChildrenCount(); ++i) {
@@ -1725,6 +1785,7 @@ TEST(AllStrongGcRootsHaveNames) {
CompileRun("foo = {};");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* gc_roots = GetNode(
snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "(GC roots)");
CHECK_NE(NULL, gc_roots);
@@ -1747,6 +1808,7 @@ TEST(NoRefsToNonEssentialEntries) {
CompileRun("global_object = {};\n");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* global_object =
GetProperty(global, v8::HeapGraphEdge::kProperty, "global_object");
@@ -1767,6 +1829,7 @@ TEST(MapHasDescriptorsAndTransitions) {
CompileRun("obj = { a: 10 };\n");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* global_object =
GetProperty(global, v8::HeapGraphEdge::kProperty, "obj");
@@ -1805,6 +1868,8 @@ TEST(ManyLocalsInSharedContext) {
"var ok = eval(result.join('\\n'));");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK_NE(NULL, global);
const v8::HeapGraphNode* ok_object =
@@ -1826,3 +1891,72 @@ TEST(ManyLocalsInSharedContext) {
CHECK_NE(NULL, f_object);
}
}
+
+
+TEST(AllocationSitesAreVisible) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ CompileRun(
+ "fun = function () { var a = [3, 2, 1]; return a; }\n"
+ "fun();");
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ CHECK_NE(NULL, global);
+ const v8::HeapGraphNode* fun_code =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "fun");
+ CHECK_NE(NULL, fun_code);
+ const v8::HeapGraphNode* literals =
+ GetProperty(fun_code, v8::HeapGraphEdge::kInternal, "literals");
+ CHECK_NE(NULL, literals);
+ CHECK_EQ(v8::HeapGraphNode::kArray, literals->GetType());
+ CHECK_EQ(2, literals->GetChildrenCount());
+
+ // The second value in the literals array should be the boilerplate,
+ // after an AllocationSite.
+ const v8::HeapGraphEdge* prop = literals->GetChild(1);
+ const v8::HeapGraphNode* allocation_site = prop->GetToNode();
+ v8::String::Utf8Value name(allocation_site->GetName());
+ CHECK_EQ("system / AllocationSite", *name);
+ const v8::HeapGraphNode* transition_info =
+ GetProperty(allocation_site, v8::HeapGraphEdge::kInternal,
+ "transition_info");
+ CHECK_NE(NULL, transition_info);
+
+ const v8::HeapGraphNode* elements =
+ GetProperty(transition_info, v8::HeapGraphEdge::kInternal,
+ "elements");
+ CHECK_NE(NULL, elements);
+ CHECK_EQ(v8::HeapGraphNode::kArray, elements->GetType());
+ CHECK_EQ(v8::internal::FixedArray::SizeFor(3), elements->GetSelfSize());
+
+ CHECK(transition_info->GetHeapValue()->IsArray());
+ v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(
+ transition_info->GetHeapValue());
+ // Verify the array is "a" in the code above.
+ CHECK_EQ(3, array->Length());
+ CHECK_EQ(v8::Integer::New(3), array->Get(v8::Integer::New(0)));
+ CHECK_EQ(v8::Integer::New(2), array->Get(v8::Integer::New(1)));
+ CHECK_EQ(v8::Integer::New(1), array->Get(v8::Integer::New(2)));
+}
+
+
+TEST(JSFunctionHasCodeLink) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ CompileRun("function foo(x, y) { return x + y; }\n");
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* foo_func =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
+ CHECK_NE(NULL, foo_func);
+ const v8::HeapGraphNode* code =
+ GetProperty(foo_func, v8::HeapGraphEdge::kInternal, "code");
+ CHECK_NE(NULL, code);
+}
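
The ValidateSnapshot helper added at the top of this file treats the snapshot as a directed graph: one pass counts incoming edges per target entry in a pointer-keyed i::HashMap, and a second pass flags every entry other than the root (id 1) that no edge reaches, printing it for diagnosis. The same counting idea in a self-contained form, over plain STL containers rather than v8 internals:

#include <unordered_map>
#include <vector>

struct Entry { int id; };
struct Edge { Entry* to; };

// True when every entry except the root (id 1) has at least one incoming edge.
static bool AllEntriesRetained(const std::vector<Edge>& edges,
                               const std::vector<Entry>& entries) {
  std::unordered_map<const Entry*, unsigned> incoming;
  for (const Edge& e : edges) ++incoming[e.to];
  for (const Entry& entry : entries) {
    if (entry.id != 1 && incoming.count(&entry) == 0) return false;
  }
  return true;
}
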
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index be1098cc4c..d2b915690e 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -127,7 +127,7 @@ static void CheckFindCodeObject(Isolate* isolate) {
Address obj_addr = obj->address();
for (int i = 0; i < obj->Size(); i += kPointerSize) {
- Object* found = heap->FindCodeObject(obj_addr + i);
+ Object* found = isolate->FindCodeObject(obj_addr + i);
CHECK_EQ(code, found);
}
@@ -137,8 +137,8 @@ static void CheckFindCodeObject(Isolate* isolate) {
Handle<Code>())->ToObjectChecked();
CHECK(copy->IsCode());
HeapObject* obj_copy = HeapObject::cast(copy);
- Object* not_right = heap->FindCodeObject(obj_copy->address() +
- obj_copy->Size() / 2);
+ Object* not_right = isolate->FindCodeObject(obj_copy->address() +
+ obj_copy->Size() / 2);
CHECK(not_right != code);
}
@@ -661,7 +661,7 @@ TEST(ObjectProperties) {
CHECK(obj->HasLocalProperty(*first));
// delete first
- CHECK(obj->DeleteProperty(*first, JSObject::NORMAL_DELETION));
+ JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION);
CHECK(!obj->HasLocalProperty(*first));
// add first and then second
@@ -673,9 +673,9 @@ TEST(ObjectProperties) {
CHECK(obj->HasLocalProperty(*second));
// delete first and then second
- CHECK(obj->DeleteProperty(*first, JSObject::NORMAL_DELETION));
+ JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION);
CHECK(obj->HasLocalProperty(*second));
- CHECK(obj->DeleteProperty(*second, JSObject::NORMAL_DELETION));
+ JSReceiver::DeleteProperty(obj, second, JSReceiver::NORMAL_DELETION);
CHECK(!obj->HasLocalProperty(*first));
CHECK(!obj->HasLocalProperty(*second));
@@ -688,9 +688,9 @@ TEST(ObjectProperties) {
CHECK(obj->HasLocalProperty(*second));
// delete second and then first
- CHECK(obj->DeleteProperty(*second, JSObject::NORMAL_DELETION));
+ JSReceiver::DeleteProperty(obj, second, JSReceiver::NORMAL_DELETION);
CHECK(obj->HasLocalProperty(*first));
- CHECK(obj->DeleteProperty(*first, JSObject::NORMAL_DELETION));
+ JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION);
CHECK(!obj->HasLocalProperty(*first));
CHECK(!obj->HasLocalProperty(*second));
@@ -1327,6 +1327,11 @@ TEST(TestInternalWeakLists) {
for (int i = 0; i < kNumTestContexts; i++) {
ctx[i] = v8::Context::New(v8::Isolate::GetCurrent());
+ // Collect garbage that might have been created by one of the
+ // installed extensions.
+ isolate->compilation_cache()->Clear();
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+
bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
CHECK_EQ(i + 1, CountNativeContexts());
@@ -1388,6 +1393,7 @@ TEST(TestInternalWeakLists) {
}
// Force compilation cache cleanup.
+ HEAP->NotifyContextDisposed();
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
// Dispose the native contexts one by one.
@@ -1942,7 +1948,7 @@ TEST(PrototypeTransitionClearing) {
// Verify that only dead prototype transitions are cleared.
CHECK_EQ(10, baseObject->map()->NumberOfProtoTransitions());
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
const int transitions = 10 - 3;
CHECK_EQ(transitions, baseObject->map()->NumberOfProtoTransitions());
@@ -2346,6 +2352,31 @@ TEST(OptimizedAllocationArrayLiterals) {
}
+TEST(OptimizedPretenuringCallNew) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_pretenuring_call_new = true;
+ CcTest::InitializeVM();
+ if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
+ v8::HandleScope scope(CcTest::isolate());
+ HEAP->SetNewSpaceHighPromotionModeActive(true);
+
+ AlwaysAllocateScope always_allocate;
+ v8::Local<v8::Value> res = CompileRun(
+ "function g() { this.a = 0; }"
+ "function f() {"
+ " return new g();"
+ "};"
+ "f(); f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+
+ Handle<JSObject> o =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+ CHECK(HEAP->InOldPointerSpace(*o));
+}
+
+
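
The new test follows the usual Crankshaft warm-up pattern: f() is called a few times so type feedback accumulates, %OptimizeFunctionOnNextCall(f) (available because i::FLAG_allow_natives_syntax is set) marks it for optimization, and the final call runs the optimized code, whose allocation the CHECK then expects to land directly in old pointer space. The same skeleton with a hypothetical function under test:

  // Warm up to collect type feedback, then force and run optimized code.
  v8::Local<v8::Value> res = CompileRun(
      "function test() { return {a: 0}; }"   // hypothetical function
      "test(); test(); test();"              // gather type feedback
      "%OptimizeFunctionOnNextCall(test);"   // needs --allow-natives-syntax
      "test();");                            // executes optimized code
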
static int CountMapTransitions(Map* map) {
return map->transitions()->number_of_transitions();
}
@@ -3011,6 +3042,10 @@ TEST(Regress169209) {
i::FLAG_harmony_typed_arrays = false;
i::FLAG_harmony_array_buffer = false;
+ // Disable loading the i18n extension, which breaks the assumptions of this
+ // test about the heap layout.
+ i::FLAG_enable_i18n = false;
+
CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
@@ -3135,7 +3170,7 @@ TEST(Regress169928) {
array_data->set(1, Smi::FromInt(2));
AllocateAllButNBytes(HEAP->new_space(),
- JSArray::kSize + AllocationSiteInfo::kSize +
+ JSArray::kSize + AllocationMemento::kSize +
kPointerSize);
Handle<JSArray> array = factory->NewJSArrayWithElements(array_data,
@@ -3145,16 +3180,16 @@ TEST(Regress169928) {
CHECK_EQ(Smi::FromInt(2), array->length());
CHECK(array->HasFastSmiOrObjectElements());
- // We need filler the size of AllocationSiteInfo object, plus an extra
+ // We need a filler the size of an AllocationMemento object, plus an extra
// fill pointer value.
MaybeObject* maybe_object = HEAP->AllocateRaw(
- AllocationSiteInfo::kSize + kPointerSize, NEW_SPACE, OLD_POINTER_SPACE);
+ AllocationMemento::kSize + kPointerSize, NEW_SPACE, OLD_POINTER_SPACE);
Object* obj = NULL;
CHECK(maybe_object->ToObject(&obj));
Address addr_obj = reinterpret_cast<Address>(
reinterpret_cast<byte*>(obj - kHeapObjectTag));
HEAP->CreateFillerObjectAt(addr_obj,
- AllocationSiteInfo::kSize + kPointerSize);
+ AllocationMemento::kSize + kPointerSize);
// Give the array a name, making sure not to allocate strings.
v8::Handle<v8::Object> array_obj = v8::Utils::ToLocal(array);
diff --git a/deps/v8/test/cctest/test-list.cc b/deps/v8/test/cctest/test-list.cc
index 740b432f3e..a29972b583 100644
--- a/deps/v8/test/cctest/test-list.cc
+++ b/deps/v8/test/cctest/test-list.cc
@@ -51,6 +51,7 @@ class ZeroingAllocationPolicy {
}
};
+
// Check that we can add (a reference to) an element of the list
// itself.
TEST(ListAdd) {
@@ -66,6 +67,7 @@ TEST(ListAdd) {
CHECK_EQ(1, list[4]);
}
+
// Test that we can add all elements from a list to another list.
TEST(ListAddAll) {
List<int, ZeroingAllocationPolicy> list(4);
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 072bbffd24..a8e870e671 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -96,6 +96,7 @@ class KangarooThread : public v8::internal::Thread {
Persistent<v8::Context> context_;
};
+
// Migrates an isolate from one thread to another
TEST(KangarooIsolates) {
v8::Isolate* isolate = v8::Isolate::New();
@@ -114,6 +115,7 @@ TEST(KangarooIsolates) {
thread1->Join();
}
+
static void CalcFibAndCheck() {
Local<Value> v = CompileRun("function fib(n) {"
" if (n <= 2) return 1;"
@@ -192,6 +194,7 @@ class IsolateLockingThreadWithLocalContext : public JoinableThread {
v8::Isolate* isolate_;
};
+
static void StartJoinAndDeleteThreads(const i::List<JoinableThread*>& threads) {
for (int i = 0; i < threads.length(); i++) {
threads[i]->Start();
@@ -242,6 +245,7 @@ class IsolateNonlockingThread : public JoinableThread {
private:
};
+
// Run many threads each accessing its own isolate without locking
TEST(MultithreadedParallelIsolates) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
@@ -280,6 +284,7 @@ class IsolateNestedLockingThread : public JoinableThread {
v8::Isolate* isolate_;
};
+
// Run many threads with nested locks
TEST(IsolateNestedLocking) {
#if V8_TARGET_ARCH_MIPS
@@ -321,6 +326,7 @@ class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread {
v8::Isolate* isolate2_;
};
+
// Run parallel threads that lock and access different isolates in parallel
TEST(SeparateIsolatesLocksNonexclusive) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
@@ -397,6 +403,7 @@ class LockerUnlockerThread : public JoinableThread {
v8::Isolate* isolate_;
};
+
// Use unlocker inside of a Locker, multiple threads.
TEST(LockerUnlocker) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
@@ -450,6 +457,7 @@ class LockTwiceAndUnlockThread : public JoinableThread {
v8::Isolate* isolate_;
};
+
// Use Unlocker inside two Lockers.
TEST(LockTwiceAndUnlock) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
@@ -517,6 +525,7 @@ class LockAndUnlockDifferentIsolatesThread : public JoinableThread {
v8::Isolate* isolate2_;
};
+
// Lock two isolates and unlock one of them.
TEST(LockAndUnlockDifferentIsolates) {
v8::Isolate* isolate1 = v8::Isolate::New();
@@ -571,6 +580,7 @@ class LockUnlockLockThread : public JoinableThread {
v8::Persistent<v8::Context> context_;
};
+
// Locker inside an Unlocker inside a Locker.
TEST(LockUnlockLockMultithreaded) {
#if V8_TARGET_ARCH_MIPS
@@ -626,6 +636,7 @@ class LockUnlockLockDefaultIsolateThread : public JoinableThread {
v8::Persistent<v8::Context> context_;
};
+
// Locker inside an Unlocker inside a Locker for default isolate.
TEST(LockUnlockLockDefaultIsolateMultithreaded) {
#if V8_TARGET_ARCH_MIPS
@@ -696,6 +707,7 @@ class IsolateGenesisThread : public JoinableThread {
const char** extension_names_;
};
+
// Test installing extensions in separate isolates concurrently.
// http://code.google.com/p/v8/issues/detail?id=1821
TEST(ExtensionsRegistration) {
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 20f135d8f6..7c3567c140 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -186,6 +186,7 @@ static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) {
return code->contains(addr);
}
+
static bool IsAddressWithinFuncCode(const char* func_name, Address addr) {
v8::Local<v8::Value> func = CcTest::env()->Global()->Get(v8_str(func_name));
CHECK(func->IsFunction());
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 6ccf58e690..81cb001b13 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -36,6 +36,7 @@
#include "v8.h"
#include "log.h"
+#include "log-utils.h"
#include "cpu-profiler.h"
#include "natives.h"
#include "v8threads.h"
@@ -395,6 +396,7 @@ TEST(Issue23768) {
static void ObjMethod1(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
+
TEST(LogCallbacks) {
ScopedLoggerInitializer initialize_logger(false);
Logger* logger = initialize_logger.logger();
@@ -443,6 +445,7 @@ static void Prop2Getter(v8::Local<v8::String> property,
const v8::PropertyCallbackInfo<v8::Value>& info) {
}
+
TEST(LogAccessorCallbacks) {
ScopedLoggerInitializer initialize_logger(false);
Logger* logger = initialize_logger.logger();
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 3945f1bba6..a2070a5ea8 100755..100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -801,6 +801,7 @@ static void SmiAddTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
+
TEST(SmiAdd) {
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
@@ -1397,6 +1398,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
}
}
+
TEST(SmiIndex) {
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 8be72d303a..626df16617 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -310,6 +310,7 @@ static void WeakPointerCallback(v8::Isolate* isolate,
handle->Dispose(isolate);
}
+
TEST(ObjectGroups) {
FLAG_incremental_marking = false;
CcTest::InitializeVM();
@@ -560,7 +561,7 @@ TEST(BootUpMemoryUse) {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(delta, 3100 * 1024);
} else {
- CHECK_LE(delta, 3400 * 1024);
+ CHECK_LE(delta, 3450 * 1024);
}
}
}
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index 3778fed813..44ddb6fa20 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -54,6 +54,7 @@ class HarmonyIsolate {
};
}
+
TEST(PerIsolateState) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
@@ -94,6 +95,7 @@ TEST(PerIsolateState) {
CHECK_EQ(3, CompileRun("count")->Int32Value());
}
+
TEST(EndOfMicrotaskDelivery) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
@@ -107,6 +109,7 @@ TEST(EndOfMicrotaskDelivery) {
CHECK_EQ(1, CompileRun("count")->Int32Value());
}
+
TEST(DeliveryOrdering) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
@@ -138,6 +141,7 @@ TEST(DeliveryOrdering) {
CHECK_EQ(3, CompileRun("ordering[2]")->Int32Value());
}
+
TEST(DeliveryOrderingReentrant) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
@@ -169,6 +173,7 @@ TEST(DeliveryOrderingReentrant) {
CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
}
+
TEST(DeliveryOrderingDeliverChangeRecords) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
@@ -193,6 +198,7 @@ TEST(DeliveryOrderingDeliverChangeRecords) {
CHECK_EQ(2, CompileRun("ordering[3]")->Int32Value());
}
+
TEST(ObjectHashTableGrowth) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
@@ -222,6 +228,7 @@ TEST(ObjectHashTableGrowth) {
CHECK(CompileRun("ran")->BooleanValue());
}
+
TEST(GlobalObjectObservation) {
HarmonyIsolate isolate;
LocalContext context;
@@ -290,6 +297,7 @@ struct RecordExpectation {
Handle<Value> old_value;
};
+
// TODO(adamk): Use this helper elsewhere in this file.
static void ExpectRecords(Handle<Value> records,
const RecordExpectation expectations[],
@@ -360,6 +368,7 @@ TEST(APITestBasicMutation) {
EXPECT_RECORDS(CompileRun("records"), expected_records);
}
+
TEST(HiddenPrototypeObservation) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
@@ -420,20 +429,20 @@ TEST(ObservationWeakMap) {
"obj = null;");
i::Handle<i::JSObject> observation_state =
i::Isolate::Current()->factory()->observation_state();
- i::Handle<i::JSWeakMap> observerInfoMap =
+ i::Handle<i::JSWeakMap> callbackInfoMap =
i::Handle<i::JSWeakMap>::cast(
- i::GetProperty(observation_state, "observerInfoMap"));
+ i::GetProperty(observation_state, "callbackInfoMap"));
i::Handle<i::JSWeakMap> objectInfoMap =
i::Handle<i::JSWeakMap>::cast(
i::GetProperty(observation_state, "objectInfoMap"));
i::Handle<i::JSWeakMap> notifierTargetMap =
i::Handle<i::JSWeakMap>::cast(
i::GetProperty(observation_state, "notifierTargetMap"));
- CHECK_EQ(1, NumberOfElements(observerInfoMap));
+ CHECK_EQ(1, NumberOfElements(callbackInfoMap));
CHECK_EQ(1, NumberOfElements(objectInfoMap));
CHECK_EQ(1, NumberOfElements(notifierTargetMap));
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(0, NumberOfElements(observerInfoMap));
+ CHECK_EQ(0, NumberOfElements(callbackInfoMap));
CHECK_EQ(0, NumberOfElements(objectInfoMap));
CHECK_EQ(0, NumberOfElements(notifierTargetMap));
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 9879886991..999fe4c5bd 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -633,6 +633,7 @@ void TestStreamScanner(i::Utf16CharacterStream* stream,
} while (expected_tokens[i] != i::Token::ILLEGAL);
}
+
TEST(StreamScanner) {
v8::V8::Initialize();
@@ -1085,6 +1086,7 @@ enum ParserFlag {
kAllowModules,
kAllowGenerators,
kAllowForOf,
+ kAllowHarmonyNumericLiterals,
kParserFlagCount
};
@@ -1102,7 +1104,9 @@ static bool checkParserFlag(unsigned flags, ParserFlag flag) {
kAllowHarmonyScoping)); \
parser.set_allow_modules(checkParserFlag(flags, kAllowModules)); \
parser.set_allow_generators(checkParserFlag(flags, kAllowGenerators)); \
- parser.set_allow_for_of(checkParserFlag(flags, kAllowForOf));
+ parser.set_allow_for_of(checkParserFlag(flags, kAllowForOf)); \
+ parser.set_allow_harmony_numeric_literals( \
+ checkParserFlag(flags, kAllowHarmonyNumericLiterals));
void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
i::Isolate* isolate = i::Isolate::Current();
diff --git a/deps/v8/test/cctest/test-platform-tls.cc b/deps/v8/test/cctest/test-platform-tls.cc
index cc9ffebe38..31501d9ef7 100644
--- a/deps/v8/test/cctest/test-platform-tls.cc
+++ b/deps/v8/test/cctest/test-platform-tls.cc
@@ -43,6 +43,7 @@ static void* GetValue(int num) {
return reinterpret_cast<void*>(static_cast<intptr_t>(num + 1));
}
+
static void DoTest() {
for (int i = 0; i < kValueCount; i++) {
CHECK(!Thread::HasThreadLocal(keys[i]));
@@ -80,6 +81,7 @@ class TestThread : public Thread {
}
};
+
TEST(FastTLS) {
for (int i = 0; i < kValueCount; i++) {
keys[i] = Thread::CreateThreadLocalKey();
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 6c20b853c5..2d8eb201e8 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -35,3 +35,66 @@ using namespace ::v8::internal;
TEST(NumberOfCores) {
CHECK_GT(OS::NumberOfCores(), 0);
}
+
+
+#ifdef __GNUC__
+#define ASM __asm__ __volatile__
+
+#if defined(_M_X64) || defined(__x86_64__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("mov %%rsp, %0" : "=g" (sp_addr)); \
+ } while (0)
+#elif defined(_M_IX86) || defined(__i386__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("mov %%esp, %0" : "=g" (sp_addr)); \
+ } while (0)
+#elif defined(__ARMEL__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("str %%sp, %0" : "=g" (sp_addr)); \
+ } while (0)
+#elif defined(__MIPSEL__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("sw $sp, %0" : "=g" (sp_addr)); \
+ } while (0)
+#else
+#error Host architecture was not detected as supported by v8
+#endif
+
+void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ GET_STACK_POINTER();
+ args.GetReturnValue().Set(v8_num(sp_addr));
+}
+
+
+TEST(StackAlignment) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->Set(v8_str("get_stack_pointer"),
+ v8::FunctionTemplate::New(GetStackPointer));
+
+ LocalContext env(NULL, global_template);
+ CompileRun(
+ "function foo() {"
+ " return get_stack_pointer();"
+ "}");
+
+ v8::Local<v8::Object> global_object = env->Global();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
+
+ v8::Local<v8::Value> result = foo->Call(global_object, 0, NULL);
+ CHECK_EQ(0, result->Int32Value() % OS::ActivationFrameAlignment());
+}
+
+#undef GET_STACK_POINTER
+#undef ASM
+#endif // __GNUC__
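
GET_STACK_POINTER reads the stack pointer through architecture-specific GCC inline assembly, which is why the whole test is guarded by #ifdef __GNUC__. On the same toolchains a portable approximation is the frame-address builtin; a sketch (it yields the current frame base rather than the exact sp at the call, so it is not a drop-in replacement for the alignment check):

void GetFramePointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
  // __builtin_frame_address(0) is a GCC/Clang builtin; 0 means this frame.
  // Truncating to int mirrors what the test stores in sp_addr.
  int fp_addr = static_cast<int>(
      reinterpret_cast<intptr_t>(__builtin_frame_address(0)));
  args.GetReturnValue().Set(v8_num(fp_addr));
}
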
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 7472669e32..7b8278ba66 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -43,48 +43,9 @@ using i::ProfileTree;
using i::ProfileGenerator;
using i::SampleRateCalculator;
using i::TickSample;
-using i::TokenEnumerator;
using i::Vector;
-namespace v8 {
-namespace internal {
-
-class TokenEnumeratorTester {
- public:
- static i::List<bool>* token_removed(TokenEnumerator* te) {
- return &te->token_removed_;
- }
-};
-
-} } // namespace v8::internal
-
-TEST(TokenEnumerator) {
- TokenEnumerator te;
- CHECK_EQ(TokenEnumerator::kNoSecurityToken, te.GetTokenId(NULL));
- v8::HandleScope hs(v8::Isolate::GetCurrent());
- v8::Local<v8::String> token1(v8::String::New("1x"));
- CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
- CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
- v8::Local<v8::String> token2(v8::String::New("2x"));
- CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
- CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
- CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
- {
- v8::HandleScope hs(v8::Isolate::GetCurrent());
- v8::Local<v8::String> token3(v8::String::New("3x"));
- CHECK_EQ(2, te.GetTokenId(*v8::Utils::OpenHandle(*token3)));
- CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
- CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
- }
- CHECK(!i::TokenEnumeratorTester::token_removed(&te)->at(2));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK(i::TokenEnumeratorTester::token_removed(&te)->at(2));
- CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
- CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
-}
-
-
TEST(ProfileNodeFindOrAddChild) {
ProfileTree tree;
ProfileNode node(&tree, NULL);
@@ -121,8 +82,7 @@ TEST(ProfileNodeFindOrAddChildForSameFunction) {
CodeEntry entry2(i::Logger::FUNCTION_TAG, aaa);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry2));
// Now with a different security token.
- CodeEntry entry3(i::Logger::FUNCTION_TAG, aaa,
- TokenEnumerator::kNoSecurityToken + 1);
+ CodeEntry entry3(i::Logger::FUNCTION_TAG, aaa);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry3));
}
@@ -415,108 +375,11 @@ TEST(ProfileTreeCalculateTotalTicks) {
}
-TEST(ProfileTreeFilteredClone) {
- ProfileTree source_tree;
- const int token0 = 0, token1 = 1, token2 = 2;
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa", token0);
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb", token1);
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc", token0);
- CodeEntry entry4(i::Logger::FUNCTION_TAG, "ddd",
- TokenEnumerator::kInheritsSecurityToken);
-
- {
- CodeEntry* e1_e2_path[] = {&entry1, &entry2};
- Vector<CodeEntry*> e1_e2_path_vec(
- e1_e2_path, sizeof(e1_e2_path) / sizeof(e1_e2_path[0]));
- source_tree.AddPathFromStart(e1_e2_path_vec);
- CodeEntry* e2_e4_path[] = {&entry2, &entry4};
- Vector<CodeEntry*> e2_e4_path_vec(
- e2_e4_path, sizeof(e2_e4_path) / sizeof(e2_e4_path[0]));
- source_tree.AddPathFromStart(e2_e4_path_vec);
- CodeEntry* e3_e1_path[] = {&entry3, &entry1};
- Vector<CodeEntry*> e3_e1_path_vec(
- e3_e1_path, sizeof(e3_e1_path) / sizeof(e3_e1_path[0]));
- source_tree.AddPathFromStart(e3_e1_path_vec);
- CodeEntry* e3_e2_path[] = {&entry3, &entry2};
- Vector<CodeEntry*> e3_e2_path_vec(
- e3_e2_path, sizeof(e3_e2_path) / sizeof(e3_e2_path[0]));
- source_tree.AddPathFromStart(e3_e2_path_vec);
- source_tree.CalculateTotalTicks();
- // Results in -> {entry1,0,1,0} -> {entry2,1,1,1}
- // {root,0,4,-1} -> {entry2,0,1,1} -> {entry4,1,1,inherits}
- // -> {entry3,0,2,0} -> {entry1,1,1,0}
- // -> {entry2,1,1,1}
- CHECK_EQ(4, source_tree.root()->total_ticks());
- CHECK_EQ(0, source_tree.root()->self_ticks());
- }
-
- {
- ProfileTree token0_tree;
- token0_tree.FilteredClone(&source_tree, token0);
- // Should be -> {entry1,1,1,0}
- // {root,1,4,-1} -> {entry3,1,2,0} -> {entry1,1,1,0}
- // [self ticks from filtered nodes are attributed to their parents]
- CHECK_EQ(4, token0_tree.root()->total_ticks());
- CHECK_EQ(1, token0_tree.root()->self_ticks());
- ProfileTreeTestHelper token0_helper(&token0_tree);
- ProfileNode* node1 = token0_helper.Walk(&entry1);
- CHECK_NE(NULL, node1);
- CHECK_EQ(1, node1->total_ticks());
- CHECK_EQ(1, node1->self_ticks());
- CHECK_EQ(NULL, token0_helper.Walk(&entry2));
- ProfileNode* node3 = token0_helper.Walk(&entry3);
- CHECK_NE(NULL, node3);
- CHECK_EQ(2, node3->total_ticks());
- CHECK_EQ(1, node3->self_ticks());
- ProfileNode* node3_1 = token0_helper.Walk(&entry3, &entry1);
- CHECK_NE(NULL, node3_1);
- CHECK_EQ(1, node3_1->total_ticks());
- CHECK_EQ(1, node3_1->self_ticks());
- CHECK_EQ(NULL, token0_helper.Walk(&entry3, &entry2));
- }
-
- {
- ProfileTree token1_tree;
- token1_tree.FilteredClone(&source_tree, token1);
- // Should be
- // {root,1,4,-1} -> {entry2,2,3,1} -> {entry4,1,1,inherits}
- // [child nodes referring to the same entry get merged and
- // their self times summed up]
- CHECK_EQ(4, token1_tree.root()->total_ticks());
- CHECK_EQ(1, token1_tree.root()->self_ticks());
- ProfileTreeTestHelper token1_helper(&token1_tree);
- CHECK_EQ(NULL, token1_helper.Walk(&entry1));
- CHECK_EQ(NULL, token1_helper.Walk(&entry3));
- ProfileNode* node2 = token1_helper.Walk(&entry2);
- CHECK_NE(NULL, node2);
- CHECK_EQ(3, node2->total_ticks());
- CHECK_EQ(2, node2->self_ticks());
- ProfileNode* node2_4 = token1_helper.Walk(&entry2, &entry4);
- CHECK_NE(NULL, node2_4);
- CHECK_EQ(1, node2_4->total_ticks());
- CHECK_EQ(1, node2_4->self_ticks());
- }
-
- {
- ProfileTree token2_tree;
- token2_tree.FilteredClone(&source_tree, token2);
- // Should be
- // {root,4,4,-1}
- // [no nodes, all ticks get migrated into root node]
- CHECK_EQ(4, token2_tree.root()->total_ticks());
- CHECK_EQ(4, token2_tree.root()->self_ticks());
- ProfileTreeTestHelper token2_helper(&token2_tree);
- CHECK_EQ(NULL, token2_helper.Walk(&entry1));
- CHECK_EQ(NULL, token2_helper.Walk(&entry2));
- CHECK_EQ(NULL, token2_helper.Walk(&entry3));
- }
-}
-
-
static inline i::Address ToAddress(int n) {
return reinterpret_cast<i::Address>(n);
}
+
TEST(CodeMapAddCode) {
CodeMap code_map;
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
@@ -622,8 +485,7 @@ TEST(RecordTickSample) {
sample3.frames_count = 2;
generator.RecordTickSample(sample3);
- CpuProfile* profile =
- profiles.StopProfiling(TokenEnumerator::kNoSecurityToken, "", 1);
+ CpuProfile* profile = profiles.StopProfiling("", 1);
CHECK_NE(NULL, profile);
ProfileTreeTestHelper top_down_test_helper(profile->top_down());
CHECK_EQ(NULL, top_down_test_helper.Walk(entry2));
@@ -700,6 +562,7 @@ static void CheckNodeIds(ProfileNode* node, int* expectedId) {
}
}
+
TEST(SampleIds) {
TestSetup test_setup;
CpuProfilesCollection profiles;
@@ -735,8 +598,7 @@ TEST(SampleIds) {
sample3.frames_count = 2;
generator.RecordTickSample(sample3);
- CpuProfile* profile =
- profiles.StopProfiling(TokenEnumerator::kNoSecurityToken, "", 1);
+ CpuProfile* profile = profiles.StopProfiling("", 1);
int nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
CHECK_EQ(7, nodeId - 1);
@@ -765,8 +627,7 @@ TEST(NoSamples) {
sample1.frames_count = 1;
generator.RecordTickSample(sample1);
- CpuProfile* profile =
- profiles.StopProfiling(TokenEnumerator::kNoSecurityToken, "", 1);
+ CpuProfile* profile = profiles.StopProfiling("", 1);
int nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
CHECK_EQ(3, nodeId - 1);
@@ -860,7 +721,7 @@ TEST(RecordStackTraceAtStartProfiling) {
"a();\n"
"stopProfiling();");
CHECK_EQ(1, profiler->GetProfilesCount());
- CpuProfile* profile = profiler->GetProfile(NULL, 0);
+ CpuProfile* profile = profiler->GetProfile(0);
const ProfileTree* topDown = profile->top_down();
const ProfileNode* current = topDown->root();
const_cast<ProfileNode*>(current)->Print(0);
@@ -965,3 +826,59 @@ TEST(ProfileNodeScriptId) {
}
+
+
+static const char* line_number_test_source_existing_functions =
+"function foo_at_the_first_line() {\n"
+"}\n"
+"foo_at_the_first_line();\n"
+"function lazy_func_at_forth_line() {}\n";
+
+
+static const char* line_number_test_source_profile_time_functions =
+"// Empty first line\n"
+"function bar_at_the_second_line() {\n"
+" foo_at_the_first_line();\n"
+"}\n"
+"bar_at_the_second_line();\n"
+"function lazy_func_at_6th_line() {}";
+
+int GetFunctionLineNumber(LocalContext* env, const char* name) {
+ CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
+ CodeMap* code_map = profiler->generator()->code_map();
+ i::Handle<i::JSFunction> func = v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(
+ (*(*env))->Global()->Get(v8_str(name))));
+ CodeEntry* func_entry = code_map->FindEntry(func->code()->address());
+ if (!func_entry)
+ FATAL(name);
+ return func_entry->line_number();
+}
+
+
+TEST(LineNumber) {
+ i::FLAG_use_inlining = false;
+
+ CcTest::InitializeVM();
+ LocalContext env;
+ i::Isolate* isolate = i::Isolate::Current();
+ TestSetup test_setup;
+
+ i::HandleScope scope(isolate);
+
+ CompileRun(line_number_test_source_existing_functions);
+
+ CpuProfiler* profiler = isolate->cpu_profiler();
+ profiler->StartProfiling("LineNumber");
+
+ CompileRun(line_number_test_source_profile_time_functions);
+
+ profiler->processor()->StopSynchronously();
+
+ CHECK_EQ(1, GetFunctionLineNumber(&env, "foo_at_the_first_line"));
+ CHECK_EQ(0, GetFunctionLineNumber(&env, "lazy_func_at_forth_line"));
+ CHECK_EQ(2, GetFunctionLineNumber(&env, "bar_at_the_second_line"));
+ CHECK_EQ(0, GetFunctionLineNumber(&env, "lazy_func_at_6th_line"));
+
+ profiler->StopProfiling("LineNumber");
+}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index f9eed1f04f..14989ee980 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -94,6 +94,7 @@ static SmartArrayPointer<const char> Parse(const char* input) {
return output;
}
+
static bool CheckSimple(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -112,6 +113,7 @@ struct MinMaxPair {
int max_match;
};
+
static MinMaxPair CheckMinMaxMatch(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope(v8::Isolate::GetCurrent());
@@ -377,6 +379,7 @@ TEST(Parser) {
CHECK_MIN_MAX("a(?!bbb|bb)c", 2, 2);
}
+
TEST(ParserRegression) {
CHECK_PARSE_EQ("[A-Z$-][x]", "(! [A-Z $ -] [x])");
CHECK_PARSE_EQ("a{3,4*}", "(: 'a{3,' (# 0 - g '4') '}')");
@@ -659,6 +662,7 @@ TEST(DispatchTableConstruction) {
}
}
+
// Test of debug-only syntax.
#ifdef DEBUG
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 50be5011f6..b29b1dcf0c 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -322,6 +322,7 @@ TEST(TerminateLoadICException) {
v8::Script::Compile(source)->Run();
}
+
void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
CHECK(!v8::V8::IsExecutionTerminating());
@@ -346,6 +347,7 @@ void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Script::Compile(v8::String::New("function f() { fail(); } f()"))->Run();
}
+
// Test that reentry into V8 while the termination exception is still pending
// (has not yet unwound the 0-level JS frame) does not crash.
TEST(TerminateAndReenterFromThreadItself) {
@@ -365,6 +367,7 @@ TEST(TerminateAndReenterFromThreadItself) {
"f()"))->Run()->IsTrue());
}
+
void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
CHECK(!v8::V8::IsExecutionTerminating());
@@ -384,6 +387,7 @@ void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(!v8::V8::IsExecutionTerminating());
}
+
// Test that a single thread of JavaScript execution can terminate
// itself and then resume execution.
TEST(TerminateCancelTerminateFromThreadItself) {
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 3b9c1ad80b..a35a88dc26 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -175,6 +175,7 @@ class ThreadIdValidationThread : public v8::internal::Thread {
i::Semaphore* semaphore_;
};
+
TEST(ThreadIdValidation) {
const int kNThreads = 100;
i::List<ThreadIdValidationThread*> threads(kNThreads);
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 78abeba0ab..b5f65954fa 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -62,6 +62,7 @@ static void CheckEqual(Handle<Type> type1, Handle<Type> type2) {
CHECK(type2->Is(type1));
}
+
static void CheckSub(Handle<Type> type1, Handle<Type> type2) {
CHECK(type1->Is(type2));
CHECK(!type2->Is(type1));
@@ -70,6 +71,7 @@ static void CheckSub(Handle<Type> type1, Handle<Type> type2) {
}
}
+
static void CheckUnordered(Handle<Type> type1, Handle<Type> type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
@@ -78,6 +80,7 @@ static void CheckUnordered(Handle<Type> type1, Handle<Type> type2) {
}
}
+
static void CheckOverlap(Handle<Type> type1, Handle<Type> type2) {
CHECK(type1->Maybe(type2));
CHECK(type2->Maybe(type1));
@@ -86,6 +89,7 @@ static void CheckOverlap(Handle<Type> type1, Handle<Type> type2) {
}
}
+
static void CheckDisjoint(Handle<Type> type1, Handle<Type> type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
new file mode 100644
index 0000000000..707f903284
--- /dev/null
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -0,0 +1,250 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "global-handles.h"
+#include "snapshot.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+static Handle<JSWeakSet> AllocateJSWeakSet(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ Handle<Map> map = factory->NewMap(JS_WEAK_SET_TYPE, JSWeakSet::kSize);
+ Handle<JSObject> weakset_obj = factory->NewJSObjectFromMap(map);
+ Handle<JSWeakSet> weakset(JSWeakSet::cast(*weakset_obj));
+  // Do not use handles for the hash table; it would make entries strong.
+ Object* table_obj = ObjectHashTable::Allocate(heap, 1)->ToObjectChecked();
+ ObjectHashTable* table = ObjectHashTable::cast(table_obj);
+ weakset->set_table(table);
+ weakset->set_next(Smi::FromInt(0));
+ return weakset;
+}
+
+static void PutIntoWeakSet(Handle<JSWeakSet> weakset,
+ Handle<JSObject> key,
+ Handle<Object> value) {
+ Handle<ObjectHashTable> table = PutIntoObjectHashTable(
+ Handle<ObjectHashTable>(ObjectHashTable::cast(weakset->table())),
+ Handle<JSObject>(JSObject::cast(*key)),
+ value);
+ weakset->set_table(*table);
+}
+
+static int NumberOfWeakCalls = 0;
+static void WeakPointerCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* handle,
+ void* id) {
+ ASSERT(id == reinterpret_cast<void*>(1234));
+ NumberOfWeakCalls++;
+ handle->Dispose(isolate);
+}
+
+
+TEST(WeakSet_Weakness) {
+ FLAG_incremental_marking = false;
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
+ GlobalHandles* global_handles = isolate->global_handles();
+
+ // Keep global reference to the key.
+ Handle<Object> key;
+ {
+ HandleScope scope(isolate);
+ Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+ key = global_handles->Create(*object);
+ }
+ CHECK(!global_handles->IsWeak(key.location()));
+
+ // Put entry into weak set.
+ {
+ HandleScope scope(isolate);
+ PutIntoWeakSet(weakset,
+ Handle<JSObject>(JSObject::cast(*key)),
+ Handle<Smi>(Smi::FromInt(23), isolate));
+ }
+ CHECK_EQ(1, ObjectHashTable::cast(weakset->table())->NumberOfElements());
+
+ // Force a full GC.
+ heap->CollectAllGarbage(false);
+ CHECK_EQ(0, NumberOfWeakCalls);
+ CHECK_EQ(1, ObjectHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(
+ 0, ObjectHashTable::cast(weakset->table())->NumberOfDeletedElements());
+
+ // Make the global reference to the key weak.
+ {
+ HandleScope scope(isolate);
+ global_handles->MakeWeak(key.location(),
+ reinterpret_cast<void*>(1234),
+ &WeakPointerCallback);
+ }
+ CHECK(global_handles->IsWeak(key.location()));
+
+ // Force a full GC.
+ // Perform two consecutive GCs because the first one will only clear
+ // weak references whereas the second one will also clear weak sets.
+ heap->CollectAllGarbage(false);
+ CHECK_EQ(1, NumberOfWeakCalls);
+ CHECK_EQ(1, ObjectHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(
+ 0, ObjectHashTable::cast(weakset->table())->NumberOfDeletedElements());
+ heap->CollectAllGarbage(false);
+ CHECK_EQ(1, NumberOfWeakCalls);
+ CHECK_EQ(0, ObjectHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(
+ 1, ObjectHashTable::cast(weakset->table())->NumberOfDeletedElements());
+}
+
+
+TEST(WeakSet_Shrinking) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
+
+ // Check initial capacity.
+ CHECK_EQ(32, ObjectHashTable::cast(weakset->table())->Capacity());
+
+ // Fill up weak set to trigger capacity change.
+ {
+ HandleScope scope(isolate);
+ Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ for (int i = 0; i < 32; i++) {
+ Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+ PutIntoWeakSet(weakset, object, Handle<Smi>(Smi::FromInt(i), isolate));
+ }
+ }
+
+ // Check increased capacity.
+ CHECK_EQ(128, ObjectHashTable::cast(weakset->table())->Capacity());
+
+ // Force a full GC.
+ CHECK_EQ(32, ObjectHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(
+ 0, ObjectHashTable::cast(weakset->table())->NumberOfDeletedElements());
+ heap->CollectAllGarbage(false);
+ CHECK_EQ(0, ObjectHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(
+ 32, ObjectHashTable::cast(weakset->table())->NumberOfDeletedElements());
+
+ // Check shrunk capacity.
+ CHECK_EQ(32, ObjectHashTable::cast(weakset->table())->Capacity());
+}
+
+
+// Test that weak set values on an evacuation candidate which are not reachable
+// by other paths are correctly recorded in the slots buffer.
+TEST(WeakSet_Regress2060a) {
+ FLAG_always_compact = true;
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ Handle<JSFunction> function =
+ factory->NewFunction(factory->function_string(), factory->null_value());
+ Handle<JSObject> key = factory->NewJSObject(function);
+ Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
+
+  // Start a second old-space page so that the values land on an evacuation
+  // candidate.
+ Page* first_page = heap->old_pointer_space()->anchor()->next_page();
+ factory->NewFixedArray(900 * KB / kPointerSize, TENURED);
+
+ // Fill up weak set with values on an evacuation candidate.
+ {
+ HandleScope scope(isolate);
+ for (int i = 0; i < 32; i++) {
+ Handle<JSObject> object = factory->NewJSObject(function, TENURED);
+ CHECK(!heap->InNewSpace(object->address()));
+ CHECK(!first_page->Contains(object->address()));
+ PutIntoWeakSet(weakset, key, object);
+ }
+ }
+
+ // Force compacting garbage collection.
+ CHECK(FLAG_always_compact);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+}
+
+
+// Test that weak set keys on an evacuation candidate which are reachable by
+// other strong paths are correctly recorded in the slots buffer.
+TEST(WeakSet_Regress2060b) {
+ FLAG_always_compact = true;
+#ifdef VERIFY_HEAP
+ FLAG_verify_heap = true;
+#endif
+
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ Handle<JSFunction> function =
+ factory->NewFunction(factory->function_string(), factory->null_value());
+
+  // Start a second old-space page so that the keys land on an evacuation
+  // candidate.
+ Page* first_page = heap->old_pointer_space()->anchor()->next_page();
+ factory->NewFixedArray(900 * KB / kPointerSize, TENURED);
+
+ // Fill up weak set with keys on an evacuation candidate.
+ Handle<JSObject> keys[32];
+ for (int i = 0; i < 32; i++) {
+ keys[i] = factory->NewJSObject(function, TENURED);
+ CHECK(!heap->InNewSpace(keys[i]->address()));
+ CHECK(!first_page->Contains(keys[i]->address()));
+ }
+ Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
+ for (int i = 0; i < 32; i++) {
+ PutIntoWeakSet(weakset,
+ keys[i],
+ Handle<Smi>(Smi::FromInt(i), isolate));
+ }
+
+ // Force compacting garbage collection. The subsequent collections are used
+ // to verify that key references were actually updated.
+ CHECK(FLAG_always_compact);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+}
diff --git a/deps/v8/test/intl/assert.js b/deps/v8/test/intl/assert.js
new file mode 100644
index 0000000000..3180e6f96e
--- /dev/null
+++ b/deps/v8/test/intl/assert.js
@@ -0,0 +1,184 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Some methods are taken from v8/test/mjsunit/mjsunit.js
+
+/**
+ * Compares two objects for key/value equality.
+ * Returns true if they are equal, false otherwise.
+ */
+function deepObjectEquals(a, b) {
+ var aProps = Object.keys(a);
+ aProps.sort();
+ var bProps = Object.keys(b);
+ bProps.sort();
+ if (!deepEquals(aProps, bProps)) {
+ return false;
+ }
+ for (var i = 0; i < aProps.length; i++) {
+ if (!deepEquals(a[aProps[i]], b[aProps[i]])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+/**
+ * Compares two JavaScript values for type and value equality.
+ * It checks internals of arrays and objects.
+ */
+function deepEquals(a, b) {
+ if (a === b) {
+ // Check for -0.
+ if (a === 0) return (1 / a) === (1 / b);
+ return true;
+ }
+ if (typeof a != typeof b) return false;
+ if (typeof a == 'number') return isNaN(a) && isNaN(b);
+ if (typeof a !== 'object' && typeof a !== 'function') return false;
+ // Neither a nor b is primitive.
+ var objectClass = classOf(a);
+ if (objectClass !== classOf(b)) return false;
+ if (objectClass === 'RegExp') {
+ // For RegExp, just compare pattern and flags using its toString.
+ return (a.toString() === b.toString());
+ }
+ // Functions are only identical to themselves.
+ if (objectClass === 'Function') return false;
+ if (objectClass === 'Array') {
+ if (a.length != b.length) {
+ return false;
+ }
+ for (var i = 0; i < a.length; i++) {
+ if (!deepEquals(a[i], b[i])) return false;
+ }
+ return true;
+ }
+ if (objectClass == 'String' || objectClass == 'Number' ||
+ objectClass == 'Boolean' || objectClass == 'Date') {
+ if (a.valueOf() !== b.valueOf()) return false;
+ }
+ return deepObjectEquals(a, b);
+}
+
+
+/**
+ * Throws an exception, and prints the values in case of error.
+ */
+function fail(expected, found) {
+ // TODO(cira): Replace String with PrettyPrint for objects and arrays.
+ var message = 'Failure: expected <' + String(expected) + '>, found <' +
+ String(found) + '>.';
+ throw new Error(message);
+}
+
+
+/**
+ * Throws if two variables have different types or values.
+ */
+function assertEquals(expected, found) {
+ if (!deepEquals(expected, found)) {
+ fail(expected, found);
+ }
+}
+
+
+/**
+ * Throws if value is false.
+ */
+function assertTrue(value) {
+  assertEquals(true, value);
+}
+
+
+/**
+ * Throws if value is true.
+ */
+function assertFalse(value) {
+ assertEquals(false, value);
+}
+
+
+/**
+ * Throws if the code does not throw the specified exception.
+ */
+function assertThrows(code, type_opt, cause_opt) {
+ try {
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ } catch (e) {
+ if (typeof type_opt == 'function') {
+ assertInstanceof(e, type_opt);
+ }
+ if (arguments.length >= 3) {
+ assertEquals(e.type, cause_opt);
+ }
+ // Success.
+ return;
+ }
+ throw new Error("Did not throw exception");
+}
+
+
+/**
+ * Throws an exception if code throws.
+ */
+function assertDoesNotThrow(code, name_opt) {
+ try {
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ } catch (e) {
+ fail("threw an exception: ", e.message || e, name_opt);
+ }
+}
+
+
+/**
+ * Throws if obj is not of the given type.
+ */
+function assertInstanceof(obj, type) {
+  if (!(obj instanceof type)) {
+    var actualTypeName = null;
+    var actualConstructor = Object.getPrototypeOf(obj).constructor;
+    if (typeof actualConstructor == "function") {
+      actualTypeName = actualConstructor.name || String(actualConstructor);
+    }
+    throw new Error('Object <' + obj + '> is not an instance of <' +
+                    (type.name || type) + '>' +
+                    (actualTypeName ? ' but of <' + actualTypeName + '>' : ''));
+ }
+}
diff --git a/deps/v8/test/intl/break-iterator/default-locale.js b/deps/v8/test/intl/break-iterator/default-locale.js
new file mode 100644
index 0000000000..39a88574fe
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/default-locale.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Constructing BreakIterator with no locale arguments or with []
+// creates one with the default locale.
+
+var iterator = new Intl.v8BreakIterator([]);
+
+var options = iterator.resolvedOptions();
+
+// Check it's none of these first.
+assertFalse(options.locale === 'und');
+assertFalse(options.locale === '');
+assertFalse(options.locale === undefined);
+
+// Then check for equality.
+assertEquals(options.locale, getDefaultLocale());
+
+var iteratorNone = new Intl.v8BreakIterator();
+assertEquals(options.locale, iteratorNone.resolvedOptions().locale);
+
+// TODO(cira): remove support for {} to mean empty list.
+var iteratorBraket = new Intl.v8BreakIterator({});
+assertEquals(options.locale, iteratorBraket.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/break-iterator/en-break.js b/deps/v8/test/intl/break-iterator/en-break.js
new file mode 100644
index 0000000000..7268a101e7
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/en-break.js
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Segment a plain English sentence and check the results.
+
+var iterator = new Intl.v8BreakIterator(['en']);
+
+var textToSegment = 'Jack and Jill, went over hill, and got lost. Alert!';
+iterator.adoptText(textToSegment);
+
+var slices = [];
+var types = [];
+var pos = iterator.first();
+while (pos !== -1) {
+ var nextPos = iterator.next();
+ if (nextPos === -1) break;
+
+ slices.push(textToSegment.slice(pos, nextPos));
+ types.push(iterator.breakType());
+
+ pos = nextPos;
+}
+
+assertEquals('Jack', slices[0]);
+assertEquals(' ', slices[1]);
+assertEquals('and', slices[2]);
+assertEquals(' ', slices[3]);
+assertEquals('Jill', slices[4]);
+assertEquals(',', slices[5]);
+assertEquals('!', slices[slices.length - 1]);
+
+assertEquals('letter', types[0]);
+assertEquals('none', types[1]);
+assertEquals('letter', types[2]);
+assertEquals('none', types[3]);
+assertEquals('letter', types[4]);
+assertEquals('none', types[types.length - 1]);
diff --git a/deps/v8/test/intl/break-iterator/property-override.js b/deps/v8/test/intl/break-iterator/property-override.js
new file mode 100644
index 0000000000..49bd86fa6e
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/property-override.js
@@ -0,0 +1,64 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Checks for security holes introduced by Object.property overrides.
+// For example:
+// Object.defineProperty(Array.prototype, 'locale', {
+// set: function(value) {
+// throw new Error('blah');
+// },
+// configurable: true,
+// enumerable: false
+// });
+//
+// would throw in case of (JS) x.locale = 'us' or (C++) x->Set('locale', 'us').
+//
+// Update both break-iterator.js and break-iterator.cc so they have the same
+// list of properties.
+
+// First get supported properties.
+var properties = [];
+var options = Intl.v8BreakIterator().resolvedOptions();
+for (var prop in options) {
+ if (options.hasOwnProperty(prop)) {
+ properties.push(prop);
+ }
+}
+
+var expectedProperties = [
+ 'type', 'locale'
+];
+
+assertEquals(expectedProperties.length, properties.length);
+
+properties.forEach(function(prop) {
+ assertFalse(expectedProperties.indexOf(prop) === -1);
+});
+
+taintProperties(properties);
+
+var locale = Intl.v8BreakIterator().resolvedOptions().locale;
diff --git a/deps/v8/test/intl/break-iterator/protected-icu-internals.js b/deps/v8/test/intl/break-iterator/protected-icu-internals.js
new file mode 100644
index 0000000000..ad1dc54fbe
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/protected-icu-internals.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The internal object we get from native code should not be writable,
+// configurable, or enumerable. One can still change its public properties,
+// but we don't use them to do the actual work.
+
+var iterator = new Intl.v8BreakIterator([]);
+
+// Direct write should fail.
+iterator.iterator = {'zzz':'some random object'};
+
+assertFalse(iterator.iterator.hasOwnProperty('zzz'));
+
+// Try redefining the property.
+var didThrow = false;
+try {
+ Object.defineProperty(iterator, 'iterator', {value: undefined});
+} catch(e) {
+ didThrow = true;
+}
+assertTrue(didThrow);
+
+// Try deleting the property.
+assertFalse(delete iterator.iterator);
diff --git a/deps/v8/test/intl/break-iterator/resolved-options-is-method.js b/deps/v8/test/intl/break-iterator/resolved-options-is-method.js
new file mode 100644
index 0000000000..0e9e06c2f2
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/resolved-options-is-method.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that resolvedOptions is a method, not a property getter and that
+// the result is mutable.
+
+var iterator = new Intl.v8BreakIterator();
+
+var result = iterator.resolvedOptions();
+
+assertTrue(result instanceof Object);
+
+// Result should be mutable.
+result.locale = 'xx';
+
+assertEquals(result.locale, 'xx');
diff --git a/deps/v8/test/intl/break-iterator/wellformed-unsupported-locale.js b/deps/v8/test/intl/break-iterator/wellformed-unsupported-locale.js
new file mode 100644
index 0000000000..56457b4829
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/wellformed-unsupported-locale.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Passing a well-formed but unsupported locale falls back to the default.
+
+var iterator = Intl.v8BreakIterator(['xx']);
+
+assertEquals(iterator.resolvedOptions().locale, getDefaultLocale());
diff --git a/deps/v8/test/intl/break-iterator/zh-break.js b/deps/v8/test/intl/break-iterator/zh-break.js
new file mode 100644
index 0000000000..c8434b10c2
--- /dev/null
+++ b/deps/v8/test/intl/break-iterator/zh-break.js
@@ -0,0 +1,63 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Segment a plain Chinese sentence and check the results.
+
+var iterator = new Intl.v8BreakIterator(['zh']);
+
+var textToSegment = '\u56FD\u52A1\u9662\u5173\u4E8E\u300A\u571F\u5730' +
+ '\u623F\u5C4B\u7BA1\u7406\u6761\u4F8B\u300B';
+iterator.adoptText(textToSegment);
+
+var slices = [];
+var types = [];
+var pos = iterator.first();
+while (pos !== -1) {
+ var nextPos = iterator.next();
+ if (nextPos === -1) break;
+
+ slices.push(textToSegment.slice(pos, nextPos));
+ types.push(iterator.breakType());
+
+ pos = nextPos;
+}
+
+assertEquals('\u56FD\u52A1\u9662', slices[0]);
+assertEquals('\u5173\u4E8E', slices[1]);
+assertEquals('\u300A', slices[2]);
+assertEquals('\u571F\u5730', slices[3]);
+assertEquals('\u623F\u5C4B', slices[4]);
+assertEquals('\u7BA1\u7406', slices[5]);
+assertEquals('\u6761\u4F8B', slices[6]);
+assertEquals('\u300B', slices[7]);
+
+assertEquals('ideo', types[0]);
+assertEquals('ideo', types[1]);
+assertEquals('none', types[2]);
+assertEquals('ideo', types[3]);
+assertEquals('ideo', types[4]);
+assertEquals('none', types[types.length - 1]);
diff --git a/deps/v8/test/intl/collator/de-sort.js b/deps/v8/test/intl/collator/de-sort.js
new file mode 100644
index 0000000000..278b9492d3
--- /dev/null
+++ b/deps/v8/test/intl/collator/de-sort.js
@@ -0,0 +1,44 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Sort plain German text using defaults.
+
+var strings = ['März', 'Fuße', 'FUSSE', 'Fluße', 'Flusse',
+ 'flusse', 'fluße', 'flüße', 'flüsse'];
+
+var collator = Intl.Collator(['de']);
+var result = strings.sort(collator.compare);
+
+assertEquals('flusse', result[0]);
+assertEquals('Flusse', result[1]);
+assertEquals('fluße', result[2]);
+assertEquals('Fluße', result[3]);
+assertEquals('flüsse', result[4]);
+assertEquals('flüße', result[5]);
+assertEquals('FUSSE', result[6]);
+assertEquals('Fuße', result[7]);
+assertEquals('März', result[8]);
diff --git a/deps/v8/test/intl/collator/default-locale.js b/deps/v8/test/intl/collator/default-locale.js
new file mode 100644
index 0000000000..f6ffba8e1d
--- /dev/null
+++ b/deps/v8/test/intl/collator/default-locale.js
@@ -0,0 +1,52 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Constructing Collator with no locale arguments or with []
+// creates one with the default locale.
+
+var collator = new Intl.Collator([]);
+
+var options = collator.resolvedOptions();
+
+// Check it's none of these first.
+assertFalse(options.locale === 'und');
+assertFalse(options.locale === '');
+assertFalse(options.locale === undefined);
+
+// Then check for equality.
+assertEquals(options.locale, getDefaultLocale());
+
+var collatorNone = new Intl.Collator();
+assertEquals(options.locale, collatorNone.resolvedOptions().locale);
+
+// TODO(cira): remove support for {} to mean empty list.
+var collatorBraket = new Intl.Collator({});
+assertEquals(options.locale, collatorBraket.resolvedOptions().locale);
+
+var collatorWithOptions = new Intl.Collator(undefined, {usage: 'search'});
+assertEquals(getDefaultLocale() + '-u-co-search',
+ collatorWithOptions.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/collator/en-sort.js b/deps/v8/test/intl/collator/en-sort.js
new file mode 100644
index 0000000000..24adc773eb
--- /dev/null
+++ b/deps/v8/test/intl/collator/en-sort.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Sort plain English text using defaults.
+
+var strings = ['blood', 'bull', 'ascend', 'zed', 'down'];
+
+var collator = Intl.Collator(['en']);
+var result = strings.sort(collator.compare);
+
+assertEquals('ascend', result[0]);
+assertEquals('blood', result[1]);
+assertEquals('bull', result[2]);
+assertEquals('down', result[3]);
+assertEquals('zed', result[4]);
diff --git a/deps/v8/test/intl/collator/normalization.js b/deps/v8/test/intl/collator/normalization.js
new file mode 100644
index 0000000000..8238f235a8
--- /dev/null
+++ b/deps/v8/test/intl/collator/normalization.js
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Make sure normalization is always on, and the normalization flag is ignored.
+
+// We need a character with two combining marks from two different classes to
+// make ICU fail the comparison without normalization (upper and lower accent).
+// We will just switch the order of the combining characters to induce failure.
+
+// FYI, this one wouldn't work, since both accents are from the same class:
+// http://unicode.org/cldr/utility/character.jsp?a=01DF
+
+// See http://demo.icu-project.org/icu-bin/nbrowser?t=&s=1E09&uv=0 and
+// http://unicode.org/cldr/utility/character.jsp?a=1E09 for character details.
+var toCompare = ['\u0063\u0327\u0301', '\u0063\u0301\u0327'];
+
+// Try with normalization off (as an option).
+var collator = Intl.Collator([], {normalization: false});
+// If we accepted normalization parameter, this would have failed.
+assertEquals(0, collator.compare(toCompare[0], toCompare[1]));
+assertFalse(collator.resolvedOptions().hasOwnProperty('normalization'));
+
+// Try with normalization off (as Unicode extension).
+collator = Intl.Collator(['de-u-kk-false']);
+// If we accepted normalization parameter, this would have failed.
+assertEquals(0, collator.compare(toCompare[0], toCompare[1]));
+assertFalse(collator.resolvedOptions().hasOwnProperty('normalization'));
+
+// Normalization is on by default.
+collator = Intl.Collator();
+assertEquals(0, collator.compare(toCompare[0], toCompare[1]));
+assertFalse(collator.resolvedOptions().hasOwnProperty('normalization'));
diff --git a/deps/v8/test/intl/collator/property-override.js b/deps/v8/test/intl/collator/property-override.js
new file mode 100644
index 0000000000..bed4d7773d
--- /dev/null
+++ b/deps/v8/test/intl/collator/property-override.js
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Checks for security holes introduced by Object.property overrides.
+// For example:
+// Object.defineProperty(Array.prototype, 'locale', {
+// set: function(value) {
+// throw new Error('blah');
+// },
+// configurable: true,
+// enumerable: false
+// });
+//
+// would throw in case of (JS) x.locale = 'us' or (C++) x->Set('locale', 'us').
+//
+// Update both collator.js and collator.cc so they have the same list of
+// properties.
+
+// First get supported properties.
+var properties = [];
+var options = Intl.Collator().resolvedOptions();
+for (var prop in options) {
+ if (options.hasOwnProperty(prop)) {
+ properties.push(prop);
+ }
+}
+
+var expectedProperties = [
+ 'caseFirst', 'sensitivity', 'ignorePunctuation',
+ 'locale', 'numeric', 'usage', 'collation'
+];
+
+assertEquals(expectedProperties.length, properties.length);
+
+properties.forEach(function(prop) {
+ assertFalse(expectedProperties.indexOf(prop) === -1);
+});
+
+taintProperties(properties);
+
+var locale = Intl.Collator().resolvedOptions().locale;
diff --git a/deps/v8/test/intl/collator/protected-icu-internals.js b/deps/v8/test/intl/collator/protected-icu-internals.js
new file mode 100644
index 0000000000..7acd35e454
--- /dev/null
+++ b/deps/v8/test/intl/collator/protected-icu-internals.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The internal object we get from native code should not be writable,
+// configurable, or enumerable. One can still change its public properties,
+// but we don't use them to do the actual work.
+
+var collator = new Intl.Collator([]);
+
+// Direct write should fail.
+collator.collator = {'zzz':'some random object'};
+
+assertFalse(collator.collator.hasOwnProperty('zzz'));
+
+// Try redefining the property.
+var didThrow = false;
+try {
+ Object.defineProperty(collator, 'collator', {value: undefined});
+} catch(e) {
+ didThrow = true;
+}
+assertTrue(didThrow);
+
+// Try deleting the property.
+assertFalse(delete collator.collator);
diff --git a/deps/v8/test/intl/collator/resolved-options-is-method.js b/deps/v8/test/intl/collator/resolved-options-is-method.js
new file mode 100644
index 0000000000..e7c874f0af
--- /dev/null
+++ b/deps/v8/test/intl/collator/resolved-options-is-method.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that resolvedOptions is a method, not a property getter, and that
+// the result is mutable.
+
+var collator = new Intl.Collator();
+
+var result = collator.resolvedOptions();
+
+assertTrue(result instanceof Object);
+
+// Result should be mutable.
+result.locale = 'xx';
+
+assertEquals(result.locale, 'xx');
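Because resolvedOptions() is an ordinary method, each call builds a fresh object, so mutating one result cannot leak into later calls. A sketch of that consequence:

```js
var first = collator.resolvedOptions();
var second = collator.resolvedOptions();
first.locale = 'xx';
console.log(first === second);  // false: a new object per call
console.log(second.locale);     // still the real locale, e.g. 'en-US'
```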
diff --git a/deps/v8/test/intl/collator/sr-sort.js b/deps/v8/test/intl/collator/sr-sort.js
new file mode 100644
index 0000000000..53c784ce0d
--- /dev/null
+++ b/deps/v8/test/intl/collator/sr-sort.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Sort plain Serbian text using defaults.
+
+var strings = ['новине', 'ограда', 'жирафа', 'Никола', 'Андрија',
+ 'Стара Планина', 'џак', 'алав', 'ћук', 'чука'];
+
+var collator = Intl.Collator(['sr']);
+var result = strings.sort(collator.compare);
+
+assertEquals('алав', result[0]);
+assertEquals('Андрија', result[1]);
+assertEquals('жирафа', result[2]);
+assertEquals('Никола', result[3]);
+assertEquals('новине', result[4]);
+assertEquals('ограда', result[5]);
+assertEquals('Стара Планина', result[6]);
+assertEquals('ћук', result[7]);
+assertEquals('чука', result[8]);
+assertEquals('џак', result[9]);
+
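For comparison, the same ordering can be obtained through String.prototype.localeCompare, though that re-resolves the locale on every comparison, which is why the test (and performance-sensitive code) prefers a cached collator's compare function:

```js
// Equivalent but slower alternative; the resulting order should match the
// assertions above.
var viaLocaleCompare = strings.slice().sort(function(a, b) {
  return a.localeCompare(b, 'sr');
});
```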
diff --git a/deps/v8/test/intl/collator/wellformed-unsupported-locale.js b/deps/v8/test/intl/collator/wellformed-unsupported-locale.js
new file mode 100644
index 0000000000..ea143fdc63
--- /dev/null
+++ b/deps/v8/test/intl/collator/wellformed-unsupported-locale.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Passing a well-formed but unsupported locale falls back to the default.
+
+var collator = Intl.Collator(['xx']);
+
+assertEquals(collator.resolvedOptions().locale, getDefaultLocale());
diff --git a/deps/v8/test/intl/date-format/default-locale.js b/deps/v8/test/intl/date-format/default-locale.js
new file mode 100644
index 0000000000..2dcb0f8ae7
--- /dev/null
+++ b/deps/v8/test/intl/date-format/default-locale.js
@@ -0,0 +1,44 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Constructing DateTimeFormat with no locale arguments or with []
+// creates one with default locale.
+
+var dtf = new Intl.DateTimeFormat([]);
+
+var options = dtf.resolvedOptions();
+
+// First, check that it's none of these.
+assertFalse(options.locale === 'und');
+assertFalse(options.locale === '');
+assertFalse(options.locale === undefined);
+
+// Then check for equality.
+assertEquals(options.locale, getDefaultLocale());
+
+var dtfNone = new Intl.DateTimeFormat();
+assertEquals(options.locale, dtfNone.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/date-format/format-is-bound.js b/deps/v8/test/intl/date-format/format-is-bound.js
new file mode 100644
index 0000000000..b744b65b91
--- /dev/null
+++ b/deps/v8/test/intl/date-format/format-is-bound.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Create default DateTimeFormat.
+var dtf = new Intl.DateTimeFormat();
+
+// Array we want to iterate, actual dates are not important.
+var dateArray = [Date.now(), Date.now(), Date.now()];
+
+// It shouldn't throw.
+// The format() method should be properly bound to the dtf object.
+dateArray.forEach(dtf.format);
+
+// Formatting a date should work in a direct call.
+dtf.format();
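The forEach call only works because the format getter returns a function already bound to its DateTimeFormat; a plain method would lose its receiver when passed around as a callback. A minimal illustration of the same guarantee:

```js
// Detaching format from the object still formats with dtf's options
// instead of throwing on a missing receiver.
var detached = dtf.format;
console.log(detached(Date.now()));
```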
diff --git a/deps/v8/test/intl/date-format/format-test.js b/deps/v8/test/intl/date-format/format-test.js
new file mode 100644
index 0000000000..9817c97ed9
--- /dev/null
+++ b/deps/v8/test/intl/date-format/format-test.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test the format() method with a specific date and with invalid input.
+
+var dtf = new Intl.DateTimeFormat('en-US', {timeZone: 'UTC'});
+
+var someDate = dtf.format(144313200000);
+assertEquals('7/29/1974', someDate);
+
+var invalidValues = [NaN, Infinity, -Infinity];
+invalidValues.forEach(function(value) {
+ var error;
+ try {
+ dtf.format(value);
+ } catch (e) {
+ error = e;
+ }
+
+ assertTrue(error !== undefined);
+ assertEquals('RangeError', error.name);
+});
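The try/catch-and-flag pattern above could be written more compactly with a harness helper, assuming the available assertThrows accepts a function plus an expected error constructor (as V8's mjsunit flavor does); this is a sketch, not a drop-in change:

```js
invalidValues.forEach(function(value) {
  assertThrows(function() { dtf.format(value); }, RangeError);
});
```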
diff --git a/deps/v8/test/intl/date-format/parse-MMMdy.js b/deps/v8/test/intl/date-format/parse-MMMdy.js
new file mode 100644
index 0000000000..7136527810
--- /dev/null
+++ b/deps/v8/test/intl/date-format/parse-MMMdy.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Testing the v8Parse method with a date-only pattern.
+// Month is represented as a short name.
+
+var dtf = new Intl.DateTimeFormat(['en'],
+ {year: 'numeric', month: 'short',
+ day: 'numeric'});
+
+// Make sure we have the pattern we expect (it may change in the future).
+assertEquals('MMM d, y', dtf.resolved.pattern);
+
+assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
+ usePDT(String(dtf.v8Parse('May 4, 1974'))));
+
+// The input is missing the ',' that the pattern requires.
+assertEquals(undefined, dtf.v8Parse('May 4 1974'));
+
+// Extra "th" after 4 in the pattern.
+assertEquals(undefined, dtf.v8Parse('May 4th, 1974'));
+
+// The input doesn't match the pattern at all.
+assertEquals(undefined, dtf.v8Parse('5/4/1974'));
diff --git a/deps/v8/test/intl/date-format/parse-invalid-input.js b/deps/v8/test/intl/date-format/parse-invalid-input.js
new file mode 100644
index 0000000000..ab0b889ff5
--- /dev/null
+++ b/deps/v8/test/intl/date-format/parse-invalid-input.js
@@ -0,0 +1,35 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Invalid input is handled properly.
+
+var dtf = new Intl.DateTimeFormat(['en']);
+
+assertEquals(undefined, dtf.v8Parse(''));
+assertEquals(undefined, dtf.v8Parse('A'));
+assertEquals(undefined, dtf.v8Parse(5));
+assertEquals(undefined, dtf.v8Parse(new Date()));
diff --git a/deps/v8/test/intl/date-format/parse-mdy.js b/deps/v8/test/intl/date-format/parse-mdy.js
new file mode 100644
index 0000000000..e767a0b2d2
--- /dev/null
+++ b/deps/v8/test/intl/date-format/parse-mdy.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Testing v8Parse method for date only.
+
+var dtf = new Intl.DateTimeFormat(['en']);
+
+// Make sure we have the pattern we expect (it may change in the future).
+assertEquals('M/d/y', dtf.resolved.pattern);
+
+assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
+ usePDT(String(dtf.v8Parse('5/4/74'))));
+assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
+ usePDT(String(dtf.v8Parse('05/04/74'))));
+assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
+ usePDT(String(dtf.v8Parse('5/04/74'))));
+assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
+ usePDT(String(dtf.v8Parse('5/4/1974'))));
+
+// Month is numeric, so it fails on "May".
+assertEquals(undefined, dtf.v8Parse('May 4th 1974'));
+
+// The time part of the input is ignored, since the pattern doesn't include it.
+assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
+ usePDT(String(dtf.v8Parse('5/4/74 12:30:12'))));
diff --git a/deps/v8/test/intl/date-format/parse-mdyhms.js b/deps/v8/test/intl/date-format/parse-mdyhms.js
new file mode 100644
index 0000000000..74f7467f3d
--- /dev/null
+++ b/deps/v8/test/intl/date-format/parse-mdyhms.js
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Testing v8Parse method for date and time pattern.
+
+var dtf = new Intl.DateTimeFormat(['en'],
+ {year: 'numeric', month: 'numeric',
+ day: 'numeric', hour: 'numeric',
+ minute: 'numeric', second: 'numeric'});
+
+// Make sure we have the pattern we expect (it may change in the future).
+assertEquals('M/d/y h:mm:ss a', dtf.resolved.pattern);
+
+assertEquals('Sat May 04 1974 12:30:12 GMT-0007 (PDT)',
+ usePDT(String(dtf.v8Parse('5/4/74 12:30:12 pm'))));
+
+// AM/PM were not specified.
+assertEquals(undefined, dtf.v8Parse('5/4/74 12:30:12'));
+
+// Time was not specified.
+assertEquals(undefined, dtf.v8Parse('5/4/74'));
+
+// Month is numeric, so it fails on "May".
+assertEquals(undefined, dtf.v8Parse('May 4th 1974'));
+
+// Wrong date delimiter.
+assertEquals(undefined, dtf.v8Parse('5-4-74 12:30:12 am'));
diff --git a/deps/v8/test/intl/date-format/property-override.js b/deps/v8/test/intl/date-format/property-override.js
new file mode 100644
index 0000000000..a2bc2d9a30
--- /dev/null
+++ b/deps/v8/test/intl/date-format/property-override.js
@@ -0,0 +1,70 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Checks for security holes introduced by Object.prototype property overrides.
+// For example:
+// Object.defineProperty(Array.prototype, 'locale', {
+// set: function(value) {
+// throw new Error('blah');
+// },
+// configurable: true,
+// enumerable: false
+// });
+//
+// would throw in case of (JS) x.locale = 'us' or (C++) x->Set('locale', 'us').
+//
+// Update both date-format.js and date-format.cc so they have the same list of
+// properties.
+
+// First get supported properties.
+// Some of the properties are optional, so we request them.
+var properties = [];
+var options = Intl.DateTimeFormat(
+ 'en-US', {weekday: 'short', era: 'short', year: 'numeric', month: 'short',
+ day: 'numeric', hour: 'numeric', minute: 'numeric',
+ second: 'numeric', timeZoneName: 'short'}).resolvedOptions();
+for (var prop in options) {
+ if (options.hasOwnProperty(prop)) {
+ properties.push(prop);
+ }
+}
+
+var expectedProperties = [
+ 'calendar', 'day', 'era', 'hour12', 'hour', 'locale',
+ 'minute', 'month', 'numberingSystem',
+ 'second', 'timeZone', 'timeZoneName', 'weekday', 'year'
+];
+
+assertEquals(expectedProperties.length, properties.length);
+
+properties.forEach(function(prop) {
+ assertFalse(expectedProperties.indexOf(prop) === -1);
+});
+
+taintProperties(properties);
+
+var locale = Intl.DateTimeFormat().resolvedOptions().locale;
diff --git a/deps/v8/test/intl/date-format/protected-icu-internals.js b/deps/v8/test/intl/date-format/protected-icu-internals.js
new file mode 100644
index 0000000000..140f4b594d
--- /dev/null
+++ b/deps/v8/test/intl/date-format/protected-icu-internals.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The internal object we get from native code should not be writable,
+// configurable or enumerable. One can still change its public properties, but
+// we don't use them to do the actual work.
+
+var format = new Intl.DateTimeFormat([]);
+
+// Direct write should fail.
+format.formatter = {'zzz':'some random object'};
+
+assertFalse(format.formatter.hasOwnProperty('zzz'));
+
+// Try redefining the property.
+var didThrow = false;
+try {
+ Object.defineProperty(format, 'formatter', {value: undefined});
+} catch(e) {
+ didThrow = true;
+}
+assertTrue(didThrow);
+
+// Try deleting the property.
+assertFalse(delete format.formatter);
diff --git a/deps/v8/test/intl/date-format/resolved-options-is-method.js b/deps/v8/test/intl/date-format/resolved-options-is-method.js
new file mode 100644
index 0000000000..0c44778384
--- /dev/null
+++ b/deps/v8/test/intl/date-format/resolved-options-is-method.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that resolvedOptions is a method, not a property getter, and that
+// the result is mutable.
+
+var dtf = new Intl.DateTimeFormat();
+
+var result = dtf.resolvedOptions();
+
+assertTrue(result instanceof Object);
+
+// Result should be mutable.
+result.locale = 'xx';
+
+assertEquals(result.locale, 'xx');
diff --git a/deps/v8/test/intl/date-format/resolved-options.js b/deps/v8/test/intl/date-format/resolved-options.js
new file mode 100644
index 0000000000..707eb07a98
--- /dev/null
+++ b/deps/v8/test/intl/date-format/resolved-options.js
@@ -0,0 +1,107 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test if resolvedOptions() returns expected fields/values.
+
+// Default (year, month, day) formatter.
+var dtfDefault = Intl.DateTimeFormat('en-US');
+var resolved = dtfDefault.resolvedOptions();
+
+assertTrue(resolved.hasOwnProperty('locale'));
+assertEquals('en-US', resolved.locale);
+assertTrue(resolved.hasOwnProperty('numberingSystem'));
+assertEquals('latn', resolved.numberingSystem);
+assertTrue(resolved.hasOwnProperty('calendar'));
+assertEquals('gregory', resolved.calendar);
+assertTrue(resolved.hasOwnProperty('timeZone'));
+assertEquals(getDefaultTimeZone(), resolved.timeZone);
+// These are present by default.
+assertTrue(resolved.hasOwnProperty('year'));
+assertEquals('numeric', resolved.year);
+assertTrue(resolved.hasOwnProperty('month'));
+assertEquals('numeric', resolved.month);
+assertTrue(resolved.hasOwnProperty('day'));
+assertEquals('numeric', resolved.day);
+// These shouldn't be present by default.
+assertFalse(resolved.hasOwnProperty('era'));
+assertFalse(resolved.hasOwnProperty('timeZoneName'));
+assertFalse(resolved.hasOwnProperty('weekday'));
+assertFalse(resolved.hasOwnProperty('hour12'));
+assertFalse(resolved.hasOwnProperty('hour'));
+assertFalse(resolved.hasOwnProperty('minute'));
+assertFalse(resolved.hasOwnProperty('second'));
+
+// Time formatter.
+var dtfTime = Intl.DateTimeFormat(
+ 'sr-RS', {hour: 'numeric', minute: 'numeric', second: 'numeric'});
+resolved = dtfTime.resolvedOptions();
+
+assertTrue(resolved.hasOwnProperty('locale'));
+assertTrue(resolved.hasOwnProperty('numberingSystem'));
+assertTrue(resolved.hasOwnProperty('calendar'));
+assertTrue(resolved.hasOwnProperty('timeZone'));
+assertTrue(resolved.hasOwnProperty('hour12'));
+assertEquals(false, resolved.hour12);
+assertTrue(resolved.hasOwnProperty('hour'));
+assertEquals('2-digit', resolved.hour);
+assertTrue(resolved.hasOwnProperty('minute'));
+assertEquals('2-digit', resolved.minute);
+assertTrue(resolved.hasOwnProperty('second'));
+assertEquals('2-digit', resolved.second);
+// We didn't ask for these, so they shouldn't be present.
+assertFalse(resolved.hasOwnProperty('year'));
+assertFalse(resolved.hasOwnProperty('month'));
+assertFalse(resolved.hasOwnProperty('day'));
+assertFalse(resolved.hasOwnProperty('era'));
+assertFalse(resolved.hasOwnProperty('timeZoneName'));
+assertFalse(resolved.hasOwnProperty('weekday'));
+
+// Full formatter.
+var dtfFull = Intl.DateTimeFormat(
+ 'en-US', {weekday: 'short', era: 'short', year: 'numeric', month: 'short',
+ day: 'numeric', hour: 'numeric', minute: 'numeric',
+ second: 'numeric', timeZoneName: 'short', timeZone: 'UTC'});
+resolved = dtfFull.resolvedOptions();
+
+assertTrue(resolved.hasOwnProperty('locale'));
+assertTrue(resolved.hasOwnProperty('numberingSystem'));
+assertTrue(resolved.hasOwnProperty('calendar'));
+assertTrue(resolved.hasOwnProperty('timeZone'));
+assertTrue(resolved.hasOwnProperty('hour12'));
+assertEquals(true, resolved.hour12);
+assertTrue(resolved.hasOwnProperty('hour'));
+assertTrue(resolved.hasOwnProperty('minute'));
+assertTrue(resolved.hasOwnProperty('second'));
+assertTrue(resolved.hasOwnProperty('year'));
+assertTrue(resolved.hasOwnProperty('month'));
+assertTrue(resolved.hasOwnProperty('day'));
+assertTrue(resolved.hasOwnProperty('era'));
+assertEquals('short', resolved.era);
+assertTrue(resolved.hasOwnProperty('timeZoneName'));
+assertEquals('short', resolved.timeZoneName);
+assertTrue(resolved.hasOwnProperty('weekday'));
+assertEquals('short', resolved.weekday);
diff --git a/deps/v8/test/intl/date-format/timezone.js b/deps/v8/test/intl/date-format/timezone.js
new file mode 100644
index 0000000000..03e25f0574
--- /dev/null
+++ b/deps/v8/test/intl/date-format/timezone.js
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests time zone support.
+
+var df = Intl.DateTimeFormat();
+assertEquals(getDefaultTimeZone(), df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'UtC'});
+assertEquals('UTC', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'gmt'});
+assertEquals('UTC', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'America/Los_Angeles'});
+assertEquals('America/Los_Angeles', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'Europe/Belgrade'});
+assertEquals('Europe/Belgrade', df.resolvedOptions().timeZone);
+
+// Check Etc/XXX variants. They should work too.
+df = Intl.DateTimeFormat(undefined, {timeZone: 'Etc/UTC'});
+assertEquals('UTC', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'Etc/GMT'});
+assertEquals('UTC', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'euRope/beLGRade'});
+assertEquals('Europe/Belgrade', df.resolvedOptions().timeZone);
+
+// The characters ':', '+' and '-' are not allowed in time zone IDs; only '/' and '_' are.
+assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'GMT+07:00\'})');
+assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'GMT+0700\'})');
+assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'GMT-05:00\'})');
+assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'GMT-0500\'})');
+assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'Etc/GMT+0\'})');
+assertThrows('Intl.DateTimeFormat(undefined, ' +
+ '{timeZone: \'America/Los-Angeles\'})');
+
+// Throws for unsupported time zones.
+assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'Aurope/Belgrade\'})');
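The canonicalization rule these assertions exercise, collected in one loop (a sketch relying only on behavior the test itself pins down):

```js
['UtC', 'gmt', 'Etc/UTC', 'Etc/GMT'].forEach(function(id) {
  var tz = Intl.DateTimeFormat(undefined, {timeZone: id})
               .resolvedOptions().timeZone;
  console.log(id + ' -> ' + tz);  // every alias resolves to 'UTC'
});
```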
diff --git a/deps/v8/test/intl/date-format/utils.js b/deps/v8/test/intl/date-format/utils.js
new file mode 100644
index 0000000000..535de15e9a
--- /dev/null
+++ b/deps/v8/test/intl/date-format/utils.js
@@ -0,0 +1,36 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility methods for date testing.
+
+/**
+ * Returns the date string with its timezone offset and name rewritten to a
+ * fixed, fake PDT value so expectations don't depend on the host time zone.
+ */
+function usePDT(dateString) {
+ var removedTZ = dateString.replace(/(\+|-)\d{4}/, '-0007');
+ return removedTZ.replace(/\(.*?\)/, '(PDT)');
+}
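Example of the rewrite usePDT performs, on a hypothetical toString() result. The deliberately impossible '-0007' offset is what the parse-*.js expectations above compare against, so a test can't pass by accident just because it ran on a machine actually in PDT:

```js
var raw = 'Sat May 04 1974 00:00:00 GMT-0700 (Pacific Daylight Time)';
console.log(usePDT(raw));
// -> 'Sat May 04 1974 00:00:00 GMT-0007 (PDT)'
```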
diff --git a/deps/v8/test/intl/date-format/wellformed-unsupported-locale.js b/deps/v8/test/intl/date-format/wellformed-unsupported-locale.js
new file mode 100644
index 0000000000..8867ec6442
--- /dev/null
+++ b/deps/v8/test/intl/date-format/wellformed-unsupported-locale.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Passing a well-formed but unsupported locale falls back to the default.
+
+var dtf = Intl.DateTimeFormat(['xx']);
+
+assertEquals(dtf.resolvedOptions().locale, getDefaultLocale());
diff --git a/deps/v8/test/intl/general/empty-handle.js b/deps/v8/test/intl/general/empty-handle.js
new file mode 100644
index 0000000000..d61896381c
--- /dev/null
+++ b/deps/v8/test/intl/general/empty-handle.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Minimal test case for b/161999.
+// We have to check whether ObjectTemplate::NewInstance returned an empty
+// handle, which can happen if there was a stack overflow.
+// This test can take some time to fail.
+
+var didThrowRangeError = false;
+try {
+ var X = '})()';
+ function C() { X[C("asd".localeCompare("asdf") < 0)] = C("a"); }
+ var b = C(C(new Date(Number.b, "").getTime()),
+ function() {
+ if (!X.C()) {
+ }
+ }[0].b++);
+} catch (e) {
+ if (e instanceof RangeError) {
+ didThrowRangeError = true;
+ }
+}
+
+assertTrue(didThrowRangeError);
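The convoluted snippet above is essentially a fuzzer-found way to overflow the stack inside an Intl call. A more direct sketch of the same failure class, relying on V8 reporting stack exhaustion as a RangeError:

```js
function recurse() {
  'a'.localeCompare('b');  // touches the Intl machinery on every frame
  return recurse();
}
try {
  recurse();
} catch (e) {
  console.log(e instanceof RangeError);  // true: stack overflow -> RangeError
}
```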
diff --git a/deps/v8/test/intl/general/mapped-locale.js b/deps/v8/test/intl/general/mapped-locale.js
new file mode 100644
index 0000000000..17151b557f
--- /dev/null
+++ b/deps/v8/test/intl/general/mapped-locale.js
@@ -0,0 +1,52 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Make sure that zh locales map properly, i.e. don't map zh-TW to zh.
+
+var nf = Intl.NumberFormat(['zh-TW'], {localeMatcher: 'lookup'});
+assertEquals('zh-TW', nf.resolvedOptions().locale);
+
+var nf = Intl.NumberFormat(['zh-Hant-TW'], {localeMatcher: 'lookup'});
+assertEquals('zh-Hant-TW', nf.resolvedOptions().locale);
+
+var nf = Intl.NumberFormat(['zh-Hant'], {localeMatcher: 'lookup'});
+assertEquals('zh-Hant', nf.resolvedOptions().locale);
+
+nf = Intl.NumberFormat(['zh'], {localeMatcher: 'lookup'});
+assertEquals('zh', nf.resolvedOptions().locale);
+
+nf = Intl.NumberFormat(['zh-CN'], {localeMatcher: 'lookup'});
+assertEquals('zh-CN', nf.resolvedOptions().locale);
+
+nf = Intl.NumberFormat(['zh-Hans-CN'], {localeMatcher: 'lookup'});
+assertEquals('zh-Hans-CN', nf.resolvedOptions().locale);
+
+nf = Intl.NumberFormat(['zh-Hans'], {localeMatcher: 'lookup'});
+assertEquals('zh-Hans', nf.resolvedOptions().locale);
+
+nf = Intl.NumberFormat(['en-US'], {localeMatcher: 'lookup'});
+assertEquals('en-US', nf.resolvedOptions().locale);
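The 'lookup' matcher truncates the requested tag from the right (zh-Hant-TW -> zh-Hant -> zh) until it hits a supported locale, which is why the test pins each tag individually instead of assuming one canonical answer. A sketch of probing that chain directly:

```js
var supported = Intl.NumberFormat.supportedLocalesOf(
    ['zh-Hant-TW'], {localeMatcher: 'lookup'});
console.log(supported);  // ['zh-Hant-TW'] when the full chain has data
```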
diff --git a/deps/v8/test/intl/general/supported-locales-of.js b/deps/v8/test/intl/general/supported-locales-of.js
new file mode 100644
index 0000000000..016b4e9e5d
--- /dev/null
+++ b/deps/v8/test/intl/general/supported-locales-of.js
@@ -0,0 +1,43 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests supportedLocalesOf method.
+
+var undef = Intl.DateTimeFormat.supportedLocalesOf();
+assertEquals(undefined, undef[0]);
+
+var empty = Intl.DateTimeFormat.supportedLocalesOf([]);
+assertEquals(undefined, empty[0]);
+
+var strLocale = Intl.DateTimeFormat.supportedLocalesOf('sr');
+assertEquals('sr', strLocale[0]);
+
+var multiLocale =
+ Intl.DateTimeFormat.supportedLocalesOf(['sr-Thai-RS', 'de', 'zh-CN']);
+assertEquals('sr-Thai-RS', multiLocale[0]);
+assertEquals('de', multiLocale[1]);
+assertEquals('zh-CN', multiLocale[2]);
diff --git a/deps/v8/test/intl/general/v8Intl-exists.js b/deps/v8/test/intl/general/v8Intl-exists.js
new file mode 100644
index 0000000000..610767e376
--- /dev/null
+++ b/deps/v8/test/intl/general/v8Intl-exists.js
@@ -0,0 +1,36 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Make sure that v8Intl is kept as an alias of Intl for backward compatibility.
+
+assertEquals(v8Intl, Intl);
+
+// Extra checks.
+assertTrue(v8Intl.hasOwnProperty('DateTimeFormat'));
+assertTrue(v8Intl.hasOwnProperty('NumberFormat'));
+assertTrue(v8Intl.hasOwnProperty('Collator'));
+assertTrue(v8Intl.hasOwnProperty('v8BreakIterator'));
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
new file mode 100644
index 0000000000..913626b1f4
--- /dev/null
+++ b/deps/v8/test/intl/intl.status
@@ -0,0 +1,41 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+prefix intl
+
+# The following tests use getDefaultLocale() or getDefaultTimeZone().
+break-iterator/default-locale: FAIL
+break-iterator/wellformed-unsupported-locale: FAIL
+collator/default-locale: FAIL
+collator/wellformed-unsupported-locale: FAIL
+date-format/default-locale: FAIL
+date-format/resolved-options: FAIL
+date-format/timezone: FAIL
+date-format/wellformed-unsupported-locale: FAIL
+general/v8Intl-exists: FAIL
+number-format/default-locale: FAIL
+number-format/wellformed-unsupported-locale: FAIL
diff --git a/deps/v8/test/intl/number-format/check-digit-ranges.js b/deps/v8/test/intl/number-format/check-digit-ranges.js
new file mode 100644
index 0000000000..322785cbf9
--- /dev/null
+++ b/deps/v8/test/intl/number-format/check-digit-ranges.js
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Digit ranges are obeyed.
+
+// Invalid ranges
+assertThrows('Intl.NumberFormat(undefined, {minimumIntegerDigits: 0})');
+assertThrows('Intl.NumberFormat(undefined, {minimumIntegerDigits: 22})');
+assertThrows('Intl.NumberFormat(undefined, {minimumIntegerDigits: null})');
+assertThrows('Intl.NumberFormat(undefined, {minimumIntegerDigits: Infinity})');
+assertThrows('Intl.NumberFormat(undefined, {minimumIntegerDigits: -Infinity})');
+assertThrows('Intl.NumberFormat(undefined, {minimumIntegerDigits: x})');
+
+assertThrows('Intl.NumberFormat(undefined, {minimumFractionDigits: -1})');
+assertThrows('Intl.NumberFormat(undefined, {maximumFractionDigits: 21})');
+
+assertThrows('Intl.NumberFormat(undefined, {minimumSignificantDigits: 0})');
+assertThrows('Intl.NumberFormat(undefined, {maximumSignificantDigits: 22})');
+assertThrows('Intl.NumberFormat(undefined, ' +
+ '{minimumSignificantDigits: 5, maximumSignificantDigits: 2})');
+
+// Valid ranges
+assertDoesNotThrow('Intl.NumberFormat(undefined, {minimumIntegerDigits: 1})');
+assertDoesNotThrow('Intl.NumberFormat(undefined, {minimumIntegerDigits: 21})');
+
+assertDoesNotThrow('Intl.NumberFormat(undefined, {minimumFractionDigits: 0})');
+assertDoesNotThrow('Intl.NumberFormat(undefined, {minimumFractionDigits: 20})');
+
+assertDoesNotThrow('Intl.NumberFormat(undefined, ' +
+ '{minimumSignificantDigits: 1})');
+assertDoesNotThrow('Intl.NumberFormat(undefined, ' +
+ '{maximumSignificantDigits: 21})');
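+
+// The fraction-digit maximum boundary (20, given that 21 throws above)
+// should likewise be accepted; for example:
+assertDoesNotThrow('Intl.NumberFormat(undefined, ' +
+    '{maximumFractionDigits: 20})');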
diff --git a/deps/v8/test/intl/number-format/default-locale.js b/deps/v8/test/intl/number-format/default-locale.js
new file mode 100644
index 0000000000..0d5e24dd70
--- /dev/null
+++ b/deps/v8/test/intl/number-format/default-locale.js
@@ -0,0 +1,44 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Constructing NumberFormat with no locale arguments or with []
+// creates one with the default locale.
+
+var nf = new Intl.NumberFormat([]);
+
+var options = nf.resolvedOptions();
+
+// Check it's none of these first.
+assertFalse(options.locale === 'und');
+assertFalse(options.locale === '');
+assertFalse(options.locale === undefined);
+
+// Then check for equality.
+assertEquals(options.locale, getDefaultLocale());
+
+var nfNone = new Intl.NumberFormat();
+assertEquals(options.locale, nfNone.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/number-format/format-is-bound.js b/deps/v8/test/intl/number-format/format-is-bound.js
new file mode 100644
index 0000000000..b24c2ed0ca
--- /dev/null
+++ b/deps/v8/test/intl/number-format/format-is-bound.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Create default NumberFormat.
+var nf = new Intl.NumberFormat();
+
+// Array we want to iterate; the actual numbers are not important.
+var numberArray = [1, 2, 3];
+
+// This shouldn't throw: the format() method should be properly bound
+// to the nf object.
+numberArray.forEach(nf.format);
+
+// Formatting a number should work in a direct call.
+nf.format(12345);
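+
+// Since format() is bound, a detached reference should behave the same;
+// for example:
+var detachedFormat = nf.format;
+assertEquals(nf.format(12345), detachedFormat(12345));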
diff --git a/deps/v8/test/intl/number-format/parse-currency.js b/deps/v8/test/intl/number-format/parse-currency.js
new file mode 100644
index 0000000000..c87ffea0d2
--- /dev/null
+++ b/deps/v8/test/intl/number-format/parse-currency.js
@@ -0,0 +1,33 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Currency parsing is not yet supported. We need ICU49 or higher to get
+// it working.
+
+var nf = new Intl.NumberFormat(['en'], {style: 'currency', currency: 'USD'});
+
+assertEquals(undefined, nf.v8Parse('USD 123.43'));
diff --git a/deps/v8/test/intl/number-format/parse-decimal.js b/deps/v8/test/intl/number-format/parse-decimal.js
new file mode 100644
index 0000000000..ea3f8ddf30
--- /dev/null
+++ b/deps/v8/test/intl/number-format/parse-decimal.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var nf = new Intl.NumberFormat(['en'], {style: 'decimal'});
+
+assertEquals(123.43, nf.v8Parse('123.43'));
+assertEquals(123, nf.v8Parse('123'));
+assertEquals(NaN, nf.v8Parse(NaN));
+assertEquals(12323, nf.v8Parse('123,23'));
+assertEquals(12323.456, nf.v8Parse('123,23.456'));
+assertEquals(12323.456, nf.v8Parse('0000000123,23.456'));
+assertEquals(-12323.456, nf.v8Parse('-123,23.456'));
+
+// The exponent part of scientific notation gets ignored.
+assertEquals(123.456, nf.v8Parse('123.456e-3'));
diff --git a/deps/v8/test/intl/number-format/parse-invalid-input.js b/deps/v8/test/intl/number-format/parse-invalid-input.js
new file mode 100644
index 0000000000..8c84d0b87e
--- /dev/null
+++ b/deps/v8/test/intl/number-format/parse-invalid-input.js
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Invalid input is handled properly.
+
+var nf = new Intl.NumberFormat(['en']);
+
+assertEquals(undefined, nf.v8Parse(''));
+assertEquals(undefined, nf.v8Parse('A'));
+assertEquals(undefined, nf.v8Parse(new Date()));
+assertEquals(undefined, nf.v8Parse(undefined));
+assertEquals(undefined, nf.v8Parse(null));
+assertEquals(undefined, nf.v8Parse());
+assertEquals(undefined, nf.v8Parse('Text before 12345'));
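+
+// A plain object has no numeric string form, so it should fail as well:
+assertEquals(undefined, nf.v8Parse({}));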
diff --git a/deps/v8/test/intl/number-format/parse-percent.js b/deps/v8/test/intl/number-format/parse-percent.js
new file mode 100644
index 0000000000..4964da4ae7
--- /dev/null
+++ b/deps/v8/test/intl/number-format/parse-percent.js
@@ -0,0 +1,36 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var nf = new Intl.NumberFormat(['en'], {style: 'percent'});
+
+assertEquals(1.2343, nf.v8Parse('123.43%'));
+assertEquals(1.23, nf.v8Parse('123%'));
+assertEquals(NaN, nf.v8Parse(NaN));
+assertEquals(123.23, nf.v8Parse('123,23%'));
+assertEquals(123.23456, nf.v8Parse('123,23.456%'));
+assertEquals(123.23456, nf.v8Parse('0000000123,23.456%'));
+assertEquals(-123.23456, nf.v8Parse('-123,23.456%'));
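+
+// Formatting and parsing should round-trip through the same 1/100 scaling:
+assertEquals(0.5, nf.v8Parse(nf.format(0.5)));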
diff --git a/deps/v8/test/intl/number-format/property-override.js b/deps/v8/test/intl/number-format/property-override.js
new file mode 100644
index 0000000000..1fbe78e080
--- /dev/null
+++ b/deps/v8/test/intl/number-format/property-override.js
@@ -0,0 +1,78 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Checks for security holes introduced by Object.prototype overrides.
+// For example:
+// Object.defineProperty(Array.prototype, 'locale', {
+// set: function(value) {
+// throw new Error('blah');
+// },
+// configurable: true,
+// enumerable: false
+// });
+//
+// would throw in case of (JS) x.locale = 'us' or (C++) x->Set('locale', 'us').
+//
+// Update both number-format.js and number-format.cc so they have the same
+// list of properties.
+
+// First get supported properties.
+var properties = [];
+// Some properties are optional and won't show up in resolvedOptions if
+// they were not requested - currency, currencyDisplay,
+// minimumSignificantDigits and maximumSignificantDigits - so we request them.
+var options = Intl.NumberFormat(
+ undefined, {style: 'currency', currency: 'USD', currencyDisplay: 'name',
+ minimumSignificantDigits: 1, maximumSignificantDigits: 5}).
+ resolvedOptions();
+for (var prop in options) {
+ if (options.hasOwnProperty(prop)) {
+ properties.push(prop);
+ }
+}
+
+var expectedProperties = [
+ 'style', 'locale', 'numberingSystem',
+ 'currency', 'currencyDisplay', 'useGrouping',
+ 'minimumIntegerDigits', 'minimumFractionDigits',
+ 'maximumFractionDigits', 'minimumSignificantDigits',
+ 'maximumSignificantDigits'
+];
+
+assertEquals(expectedProperties.length, properties.length);
+
+properties.forEach(function(prop) {
+ assertFalse(expectedProperties.indexOf(prop) === -1);
+});
+
+taintProperties(properties);
+
+var locale = Intl.NumberFormat(undefined,
+ {currency: 'USD', currencyDisplay: 'name',
+ minimumIntegerDigits: 2,
+ numberingSystem: 'latn'}).
+ resolvedOptions().locale;
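+
+// Getting here without an exception means neither construction nor the
+// resolvedOptions() read went through the tainted setters.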
diff --git a/deps/v8/test/intl/number-format/protected-icu-internals.js b/deps/v8/test/intl/number-format/protected-icu-internals.js
new file mode 100644
index 0000000000..fc9b709c82
--- /dev/null
+++ b/deps/v8/test/intl/number-format/protected-icu-internals.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The internal object we get from native code should not be writable,
+// configurable, or enumerable. One can still change its public properties,
+// but we don't use them to do the actual work.
+
+var format = new Intl.NumberFormat([]);
+
+// Direct write should fail.
+format.formatter = {'zzz':'some random object'};
+
+assertFalse(format.formatter.hasOwnProperty('zzz'));
+
+// Try redefining the property.
+var didThrow = false;
+try {
+ Object.defineProperty(format, 'formatter', {value: undefined});
+} catch(e) {
+ didThrow = true;
+}
+assertTrue(didThrow);
+
+// Try deleting the property.
+assertFalse(delete format.formatter);
diff --git a/deps/v8/test/intl/number-format/resolved-options-is-method.js b/deps/v8/test/intl/number-format/resolved-options-is-method.js
new file mode 100644
index 0000000000..1b56716f79
--- /dev/null
+++ b/deps/v8/test/intl/number-format/resolved-options-is-method.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that resolvedOptions is a method, not a property getter, and that
+// the result is mutable.
+
+var nf = new Intl.NumberFormat();
+
+var result = nf.resolvedOptions();
+
+assertTrue(result instanceof Object);
+
+// Result should be mutable.
+result.locale = 'xx';
+
+assertEquals(result.locale, 'xx');
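+
+// Per ECMA-402, resolvedOptions() creates a fresh object on each call, so
+// the mutation above should not show up in a new result:
+assertFalse(nf.resolvedOptions().locale === 'xx');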
diff --git a/deps/v8/test/intl/number-format/wellformed-unsupported-locale.js b/deps/v8/test/intl/number-format/wellformed-unsupported-locale.js
new file mode 100644
index 0000000000..e3fe9cc087
--- /dev/null
+++ b/deps/v8/test/intl/number-format/wellformed-unsupported-locale.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Passing a well-formed but unsupported locale falls back to the default.
+
+var nf = Intl.NumberFormat(['xx']);
+
+assertEquals(nf.resolvedOptions().locale, getDefaultLocale());
diff --git a/deps/v8/test/intl/overrides/caching.js b/deps/v8/test/intl/overrides/caching.js
new file mode 100644
index 0000000000..5ff3c390e7
--- /dev/null
+++ b/deps/v8/test/intl/overrides/caching.js
@@ -0,0 +1,60 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Performance test for overridden methods. Makes sure that the default case
+// is faster (cached) than the general case.
+
+// Default, cached.
+var startTime = new Date();
+for (var i = 0; i < 1000; i++) {
+ 'a'.localeCompare('c');
+}
+var endTime = new Date();
+var cachedTime = endTime.getTime() - startTime.getTime();
+
+// Not cached.
+startTime = new Date();
+for (var i = 0; i < 1000; i++) {
+ 'a'.localeCompare('c', 'sr');
+}
+endTime = new Date();
+var nonCachedTime = endTime.getTime() - startTime.getTime();
+
+// Using the collator directly. Faster than the default, but not by much.
+var collator = Intl.Collator();
+startTime = new Date();
+for (var i = 0; i < 1000; i++) {
+  collator.compare('a', 'c');
+}
+endTime = new Date();
+var collatorTime = endTime.getTime() - startTime.getTime();
+
+// Direct collator use should be at least as fast as the cached default.
+assertTrue(collatorTime < cachedTime);
+// The non-cached path is much slower; measured at up to 12.5 times slower.
+assertTrue(cachedTime < nonCachedTime);
+
diff --git a/deps/v8/test/intl/overrides/date.js b/deps/v8/test/intl/overrides/date.js
new file mode 100644
index 0000000000..a35d63d131
--- /dev/null
+++ b/deps/v8/test/intl/overrides/date.js
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests Date.prototype.toLocaleXXXString method overrides.
+
+var date = new Date();
+
+
+// Defaults for toLocaleXXXString
+var dtfDate = new Intl.DateTimeFormat();
+var dtfTime = new Intl.DateTimeFormat(
+ [], {hour: 'numeric', minute: 'numeric', second: 'numeric'});
+var dtfAll = new Intl.DateTimeFormat(
+ [], {year: 'numeric', month: 'numeric', day: 'numeric',
+ hour: 'numeric', minute: 'numeric', second: 'numeric'});
+assertEquals(dtfAll.format(date), date.toLocaleString());
+assertEquals(dtfDate.format(date), date.toLocaleDateString());
+assertEquals(dtfTime.format(date), date.toLocaleTimeString());
+
+
+// Specify locale, default options for toLocaleXXXString
+var locale = ['sr'];
+dtfDate = new Intl.DateTimeFormat(locale);
+dtfTime = new Intl.DateTimeFormat(
+ locale, {hour: 'numeric', minute: 'numeric', second: 'numeric'});
+dtfAll = new Intl.DateTimeFormat(
+ locale, {year: 'numeric', month: 'numeric', day: 'numeric',
+ hour: 'numeric', minute: 'numeric', second: 'numeric'});
+assertEquals(dtfAll.format(date), date.toLocaleString(locale));
+assertEquals(dtfDate.format(date), date.toLocaleDateString(locale));
+assertEquals(dtfTime.format(date), date.toLocaleTimeString(locale));
+
+
+// Specify locale and options for toLocaleXXXString
+locale = ['ko'];
+var options = {year: 'numeric', month: 'long', day: 'numeric',
+ hour: 'numeric', minute: '2-digit', second: 'numeric'};
+var dtf = new Intl.DateTimeFormat(locale, options);
+assertEquals(dtf.format(date), date.toLocaleString(locale, options));
+assertEquals(dtf.format(date), date.toLocaleDateString(locale, options));
+assertEquals(dtf.format(date), date.toLocaleTimeString(locale, options));
diff --git a/deps/v8/test/intl/overrides/number.js b/deps/v8/test/intl/overrides/number.js
new file mode 100644
index 0000000000..11f41fd7ec
--- /dev/null
+++ b/deps/v8/test/intl/overrides/number.js
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests Number.prototype.toLocaleString method override.
+
+var integer = 123456790;
+var float = 1234567890.123434;
+
+
+// Defaults
+var nf = new Intl.NumberFormat();
+assertEquals(nf.format(integer), integer.toLocaleString());
+assertEquals(nf.format(float), float.toLocaleString());
+
+
+// Specify locale, default options for toLocaleString method.
+var locale = ['sr'];
+nf = new Intl.NumberFormat(locale);
+assertEquals(nf.format(integer), integer.toLocaleString(locale));
+assertEquals(nf.format(float), float.toLocaleString(locale));
+
+
+// Specify locale and options for toLocaleString method.
+locale = ['ko'];
+var options = {minimumIntegerDigits: 8, useGrouping: true,
+               minimumFractionDigits: 1, maximumFractionDigits: 2};
+nf = new Intl.NumberFormat(locale, options);
+assertEquals(nf.format(integer), integer.toLocaleString(locale, options));
+assertEquals(nf.format(float), float.toLocaleString(locale, options));
diff --git a/deps/v8/test/intl/overrides/security.js b/deps/v8/test/intl/overrides/security.js
new file mode 100644
index 0000000000..e012753fa2
--- /dev/null
+++ b/deps/v8/test/intl/overrides/security.js
@@ -0,0 +1,50 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we always use the original Intl constructors for toLocaleString calls.
+
+function throwError() {
+ throw new Error('Malicious method invoked.');
+}
+
+Intl.Collator = Intl.NumberFormat = Intl.DateTimeFormat = throwError;
+
+Intl.Collator.prototype.compare = throwError;
+Intl.NumberFormat.prototype.format = throwError;
+Intl.DateTimeFormat.prototype.format = throwError;
+
+// Make sure constructors actually throw now.
+assertThrows('new Intl.Collator()');
+assertThrows('new Intl.NumberFormat()');
+assertThrows('new Intl.DateTimeFormat()');
+
+// None of these should throw.
+assertDoesNotThrow('new Date().toLocaleString()');
+assertDoesNotThrow('new Date().toLocaleDateString()');
+assertDoesNotThrow('new Date().toLocaleTimeString()');
+assertDoesNotThrow('new Number(12345.412).toLocaleString()');
+assertDoesNotThrow('new String(\'abc\').localeCompare(\'bcd\')');
diff --git a/deps/v8/test/intl/overrides/string.js b/deps/v8/test/intl/overrides/string.js
new file mode 100644
index 0000000000..9e9da4da1d
--- /dev/null
+++ b/deps/v8/test/intl/overrides/string.js
@@ -0,0 +1,69 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests String.prototype.localeCompare method override.
+
+var testData = {
+ 'en': ['blood', 'bull', 'ascend', 'zed', 'down'],
+ 'sr': ['новине', 'ограда', 'жирафа', 'Никола', 'Андрија', 'Стара Планина',
+ 'џак', 'алав', 'ћук', 'чука'],
+ 'de': ['März', 'Fuße', 'FUSSE', 'Fluße', 'Flusse', 'flusse', 'fluße',
+ 'flüße', 'flüsse']
+};
+
+
+function testArrays(locale) {
+ var data;
+ if (locale === undefined) {
+ data = testData['en'];
+ locale = [];
+ } else {
+ data = testData[locale];
+ }
+
+ var collator = new Intl.Collator(locale, options);
+  // Sort copies so the two results are actually independent arrays.
+  var collatorResult = data.slice().sort(collator.compare);
+  var localeCompareResult = data.slice().sort(function(a, b) {
+    return a.localeCompare(b, locale, options);
+  });
+  assertEquals(collatorResult, localeCompareResult);
+}
+
+
+// Defaults
+var options = undefined;
+testArrays();
+
+
+// Specify locale, keep default options.
+options = undefined;
+Object.keys(testData).forEach(testArrays);
+
+
+// Specify locale and options.
+options = {caseFirst: 'upper'};
+Object.keys(testData).forEach(testArrays);
diff --git a/deps/v8/test/intl/overrides/webkit-tests.js b/deps/v8/test/intl/overrides/webkit-tests.js
new file mode 100644
index 0000000000..1429de6217
--- /dev/null
+++ b/deps/v8/test/intl/overrides/webkit-tests.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Makes sure we don't break affected WebKit tests.
+
+// Handles fast/js/string-prototype-properties.html
+assertThrows('String.prototype.localeCompare.call(undefined, \'1224\')');
+assertEquals(0, String.prototype.localeCompare.call(1224, '1224'));
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
new file mode 100644
index 0000000000..d25683bed2
--- /dev/null
+++ b/deps/v8/test/intl/testcfg.py
@@ -0,0 +1,72 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+
+class IntlTestSuite(testsuite.TestSuite):
+
+ def __init__(self, name, root):
+ super(IntlTestSuite, self).__init__(name, root)
+
+ def ListTests(self, context):
+ tests = []
+ for dirname, dirs, files in os.walk(self.root):
+ for dotted in [x for x in dirs if x.startswith('.')]:
+ dirs.remove(dotted)
+ dirs.sort()
+ files.sort()
+ for filename in files:
+ if (filename.endswith(".js") and filename != "assert.js" and
+ filename != "utils.js"):
+ testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
+ test = testcase.TestCase(self, testname)
+ tests.append(test)
+ return tests
+
+ def GetFlagsForTestCase(self, testcase, context):
+ flags = [] + context.mode_flags
+
+ files = []
+ files.append(os.path.join(self.root, "assert.js"))
+ files.append(os.path.join(self.root, "utils.js"))
+ files.append(os.path.join(self.root, "date-format", "utils.js"))
+ files.append(os.path.join(self.root, testcase.path + self.suffix()))
+
+ flags += files
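+    # With --isolate, the runner executes the same sources again in a
+    # second isolate, so the file list is appended once more below.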
+ if context.isolates:
+ flags.append("--isolate")
+ flags += files
+
+ return testcase.flags + flags
+
+
+def GetSuite(name, root):
+ return IntlTestSuite(name, root)
diff --git a/deps/v8/test/intl/utils.js b/deps/v8/test/intl/utils.js
new file mode 100644
index 0000000000..917359077d
--- /dev/null
+++ b/deps/v8/test/intl/utils.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * Taints properties on Object.prototype so we can find security issues.
+ */
+function taintProperties(properties) {
+ properties.forEach(function(property) {
+ Object.defineProperty(Object.prototype, property, {
+ set: function(value) {
+ throw new Error('Property ' + property + ' is compromised. ' +
+ 'Setting value: ' + value);
+ }
+ });
+ });
+}
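+
+// Example: taintProperties(['locale']) makes a plain-object write such as
+// obj.locale = 'us' throw, exposing any unguarded property assignment.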
diff --git a/deps/v8/test/mjsunit/allocation-folding.js b/deps/v8/test/mjsunit/allocation-folding.js
new file mode 100644
index 0000000000..a730bf12a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/allocation-folding.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nouse-osr
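+// f() allocates arrays on both sides of a loop that churns the new space;
+// after optimization, the checks below should still see the folded
+// allocation's contents once more garbage has been created.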
+function f() {
+ var elem1 = [1,2,3];
+  for (var i = 0; i < 100000; i++) {
+ var bar = [1];
+ }
+ var elem2 = [1,2,3];
+ return elem2;
+}
+
+f(); f(); f();
+%OptimizeFunctionOnNextCall(f);
+var result = f();
+
+for (var i = 0; i < 100000; i++) {
+ var bar = [1];
+}
+
+assertEquals(result[2], 3);
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index 86c28aac63..5f6817b6d3 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -133,9 +133,7 @@ if (support_smi_only_arrays) {
obj = fastliteralcase(get_standard_literal(), 1.5);
assertKind(elements_kind.fast_double, obj);
obj = fastliteralcase(get_standard_literal(), 2);
- // TODO(hpayer): bring the following assert back as soon as allocation
- // sites work again for fast literals
- //assertKind(elements_kind.fast_double, obj);
+ assertKind(elements_kind.fast_double, obj);
// The test below is in a loop because arrays that live
// at global scope without the chance of being recreated
@@ -175,9 +173,21 @@ if (support_smi_only_arrays) {
obj = fastliteralcase_smifast("carter");
assertKind(elements_kind.fast, obj);
obj = fastliteralcase_smifast(2);
- // TODO(hpayer): bring the following assert back as soon as allocation
- // sites work again for fast literals
- //assertKind(elements_kind.fast, obj);
+ assertKind(elements_kind.fast, obj);
+
+ // Case: make sure transitions from packed to holey are tracked
+ function fastliteralcase_smiholey(index, value) {
+ var literal = [1, 2, 3, 4];
+ literal[index] = value;
+ return literal;
+ }
+
+ obj = fastliteralcase_smiholey(5, 1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ assertHoley(obj);
+ obj = fastliteralcase_smiholey(0, 1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ assertHoley(obj);
function newarraycase_smidouble(value) {
var a = new Array();
@@ -272,6 +282,32 @@ if (support_smi_only_arrays) {
obj = newarraycase_list_smiobj(2);
assertKind(elements_kind.fast, obj);
+  // Case: array constructor calls with out-of-date feedback.
+ // The boilerplate should incorporate all feedback, but the input array
+ // should be minimally transitioned based on immediate need.
+ (function() {
+ function foo(i) {
+ // We have two cases, one for literals one for constructed arrays.
+ var a = (i == 0)
+ ? [1, 2, 3]
+ : new Array(1, 2, 3);
+ return a;
+ }
+
+ for (i = 0; i < 2; i++) {
+ a = foo(i);
+ b = foo(i);
+ b[5] = 1; // boilerplate goes holey
+ assertHoley(foo(i));
+ a[0] = 3.5; // boilerplate goes holey double
+ assertKind(elements_kind.fast_double, a);
+ assertNotHoley(a);
+ c = foo(i);
+ assertKind(elements_kind.fast_double, c);
+ assertHoley(c);
+ }
+ })();
+
function newarraycase_onearg(len, value) {
var a = new Array(len);
a[0] = value;
@@ -301,17 +337,34 @@ if (support_smi_only_arrays) {
assertTrue(new type(1,2,3) instanceof type);
}
+ function instanceof_check2(type) {
+ assertTrue(new type() instanceof type);
+ assertTrue(new type(5) instanceof type);
+ assertTrue(new type(1,2,3) instanceof type);
+ }
+
var realmBArray = Realm.eval(realmB, "Array");
instanceof_check(Array);
instanceof_check(realmBArray);
+
+  // instanceof_check2 is here because the call site goes through a sequence
+  // of states. Since instanceof_check(Array) was first called with the
+  // current context Array function, it went from (uninit->Array), then
+  // (Array->megamorphic). We'll get a different state traversal if we start
+  // with realmBArray: it'll go (uninit->realmBArray), then
+  // (realmBArray->megamorphic). Note that the "Array" state implies an
+  // AllocationSite is present, and the code is configured to use it.
+ instanceof_check2(realmBArray);
+ instanceof_check2(Array);
+
%OptimizeFunctionOnNextCall(instanceof_check);
// No de-opt will occur because HCallNewArray wasn't selected, on account of
// the call site not being monomorphic to Array.
instanceof_check(Array);
- assertTrue(2 != %GetOptimizationStatus(instanceof_check));
+ assertOptimized(instanceof_check);
instanceof_check(realmBArray);
- assertTrue(2 != %GetOptimizationStatus(instanceof_check));
+ assertOptimized(instanceof_check);
// Try to optimize again, but first clear all type feedback, and allow it
// to be monomorphic on first call. Only after crankshafting do we introduce
@@ -322,8 +375,8 @@ if (support_smi_only_arrays) {
instanceof_check(Array);
%OptimizeFunctionOnNextCall(instanceof_check);
instanceof_check(Array);
- assertTrue(2 != %GetOptimizationStatus(instanceof_check));
+ assertOptimized(instanceof_check);
instanceof_check(realmBArray);
- assertTrue(1 != %GetOptimizationStatus(instanceof_check));
+ assertUnoptimized(instanceof_check);
}
diff --git a/deps/v8/test/mjsunit/array-bounds-check-removal.js b/deps/v8/test/mjsunit/array-bounds-check-removal.js
index 8ed7901d43..7b089eed3d 100644
--- a/deps/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/deps/v8/test/mjsunit/array-bounds-check-removal.js
@@ -105,7 +105,7 @@ test_base(dictionary_map_array, 5, false);
test_base(dictionary_map_array, 6, false);
%OptimizeFunctionOnNextCall(test_base);
test_base(dictionary_map_array, -2, true);
-assertTrue(%GetOptimizationStatus(test_base) != 1);
+assertUnoptimized(test_base);
// Forget about the dictionary_map_array's map.
%ClearFunctionTypeFeedback(test_base);
@@ -116,7 +116,7 @@ test_base(a, 5, false);
test_base(a, 6, false);
%OptimizeFunctionOnNextCall(test_base);
test_base(a, 2048, true);
-assertTrue(%GetOptimizationStatus(test_base) != 1);
+assertUnoptimized(test_base);
function test_minus(base,cond) {
a[base - 1] = 1;
@@ -173,7 +173,7 @@ short_test(short_a, 50);
%OptimizeFunctionOnNextCall(short_test);
short_a.length = 10;
short_test(short_a, 0);
-assertTrue(%GetOptimizationStatus(short_test) != 1);
+assertUnoptimized(short_test);
// A test for when we would modify a phi index.
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index e29e769465..72ff12c08f 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -35,6 +35,11 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
+// Turn GC stress mode off. Needed because AllocationMementos only live
+// through one GC, so a GC that happens in certain fragile areas of the test
+// can break assumptions.
+%SetFlags("--gc-interval=-1");
+
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
@@ -115,10 +120,10 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(bar0);
b = bar0(Array);
assertKind(elements_kind.fast_double, b);
- assertTrue(2 != %GetOptimizationStatus(bar0));
+ assertOptimized(bar0);
// bar0 should deopt
b = bar0(Object);
- assertTrue(1 != %GetOptimizationStatus(bar0));
+  assertUnoptimized(bar0);
// When it's re-optimized, we should call through the full stub
bar0(Array);
%OptimizeFunctionOnNextCall(bar0);
@@ -126,7 +131,7 @@ if (support_smi_only_arrays) {
// We also lost our ability to record kind feedback, as the site
// is megamorphic now.
assertKind(elements_kind.fast_smi_only, b);
- assertTrue(2 != %GetOptimizationStatus(bar0));
+ assertOptimized(bar0);
b[0] = 3.5;
c = bar0(Array);
assertKind(elements_kind.fast_smi_only, c);
@@ -146,15 +151,15 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(bar);
a = bar(10);
assertKind(elements_kind.fast, a);
- assertTrue(2 != %GetOptimizationStatus(bar));
+ assertOptimized(bar);
// The stub bails out, but the method call should be fine.
a = bar(100000);
- assertTrue(2 != %GetOptimizationStatus(bar));
+ assertOptimized(bar);
assertKind(elements_kind.dictionary, a);
// If the argument isn't a smi, it bails out as well
a = bar("oops");
- assertTrue(2 != %GetOptimizationStatus(bar));
+ assertOptimized(bar);
assertKind(elements_kind.fast, a);
function barn(one, two, three) {
@@ -165,11 +170,11 @@ if (support_smi_only_arrays) {
barn(1, 2, 3);
%OptimizeFunctionOnNextCall(barn);
barn(1, 2, 3);
- assertTrue(2 != %GetOptimizationStatus(barn));
+ assertOptimized(barn);
a = barn(1, "oops", 3);
// The stub should bail out but the method should remain optimized.
assertKind(elements_kind.fast, a);
- assertTrue(2 != %GetOptimizationStatus(barn));
+ assertOptimized(barn);
})();
@@ -186,12 +191,12 @@ if (support_smi_only_arrays) {
b = bar();
// This only makes sense to test if we allow crankshafting
if (4 != %GetOptimizationStatus(bar)) {
- assertTrue(2 != %GetOptimizationStatus(bar));
+ assertOptimized(bar);
%DebugPrint(3);
b[0] = 3.5;
c = bar();
assertKind(elements_kind.fast_smi_only, c);
- assertTrue(2 != %GetOptimizationStatus(bar));
+ assertOptimized(bar);
}
})();
diff --git a/deps/v8/test/mjsunit/array-feedback.js b/deps/v8/test/mjsunit/array-feedback.js
index d1b3062eb0..6b1cbb3f5f 100644
--- a/deps/v8/test/mjsunit/array-feedback.js
+++ b/deps/v8/test/mjsunit/array-feedback.js
@@ -35,6 +35,11 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
+// Reset the GC stress mode to be off. Needed because AllocationMementos only
+// live for one gc, so a gc that happens in certain fragile areas of the test
+// can break assumptions.
+%SetFlags("--gc-interval=-1");
+
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
@@ -187,7 +192,7 @@ if (support_smi_only_arrays) {
b[0] = 3.5;
c = create0();
assertKind(elements_kind.fast_double, c);
- assertTrue(2 != %GetOptimizationStatus(create0));
+ assertOptimized(create0);
}
})();
diff --git a/deps/v8/test/mjsunit/array-literal-feedback.js b/deps/v8/test/mjsunit/array-literal-feedback.js
index 8cc617e93f..3378394d90 100644
--- a/deps/v8/test/mjsunit/array-literal-feedback.js
+++ b/deps/v8/test/mjsunit/array-literal-feedback.js
@@ -55,7 +55,7 @@ if (support_smi_only_arrays) {
get_literal(3);
%OptimizeFunctionOnNextCall(get_literal);
a = get_literal(3);
- assertTrue(2 != %GetOptimizationStatus(get_literal));
+ assertOptimized(get_literal);
assertTrue(%HasFastSmiElements(a));
a[0] = 3.5;
@@ -64,12 +64,12 @@ if (support_smi_only_arrays) {
b = get_literal(3);
assertTrue(%HasFastDoubleElements(b));
assertEquals([1, 2, 3], b);
- assertTrue(1 != %GetOptimizationStatus(get_literal));
+ assertUnoptimized(get_literal);
// Optimize again
get_literal(3);
%OptimizeFunctionOnNextCall(get_literal);
b = get_literal(3);
assertTrue(%HasFastDoubleElements(b));
- assertTrue(2 != %GetOptimizationStatus(get_literal));
+ assertOptimized(get_literal);
}
diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js
index d4c0c305fc..fab45ed720 100644
--- a/deps/v8/test/mjsunit/array-literal-transitions.js
+++ b/deps/v8/test/mjsunit/array-literal-transitions.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --noparallel-recompilation
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -144,11 +143,11 @@ if (support_smi_only_arrays) {
deopt_array(false);
%OptimizeFunctionOnNextCall(deopt_array);
var array = deopt_array(false);
- assertTrue(2 != %GetOptimizationStatus(deopt_array));
+ assertOptimized(deopt_array);
deopt_array(true);
- assertTrue(2 != %GetOptimizationStatus(deopt_array));
+ assertOptimized(deopt_array);
array = deopt_array(false);
- assertTrue(2 != %GetOptimizationStatus(deopt_array));
+ assertOptimized(deopt_array);
// Check that unexpected changes in the objects stored into the boilerplate
// also force a deopt.
@@ -166,13 +165,13 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(deopt_array_literal_all_smis);
array = deopt_array_literal_all_smis(5);
array = deopt_array_literal_all_smis(6);
- assertTrue(2 != %GetOptimizationStatus(deopt_array_literal_all_smis));
+ assertOptimized(deopt_array_literal_all_smis);
assertEquals(0, array[0]);
assertEquals(1, array[1]);
assertEquals(6, array[2]);
array = deopt_array_literal_all_smis(.5);
- assertTrue(1 != %GetOptimizationStatus(deopt_array_literal_all_smis));
+ assertUnoptimized(deopt_array_literal_all_smis);
assertEquals(0, array[0]);
assertEquals(1, array[1]);
assertEquals(.5, array[2]);
@@ -191,14 +190,14 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(deopt_array_literal_all_doubles);
array = deopt_array_literal_all_doubles(5);
array = deopt_array_literal_all_doubles(6);
- assertTrue(2 != %GetOptimizationStatus(deopt_array_literal_all_doubles));
+ assertOptimized(deopt_array_literal_all_doubles);
assertEquals(0.5, array[0]);
assertEquals(1, array[1]);
assertEquals(6, array[2]);
var foo = new Object();
array = deopt_array_literal_all_doubles(foo);
- assertTrue(1 != %GetOptimizationStatus(deopt_array_literal_all_doubles));
+ assertUnoptimized(deopt_array_literal_all_doubles);
assertEquals(0.5, array[0]);
assertEquals(1, array[1]);
assertEquals(foo, array[2]);
@@ -207,6 +206,6 @@ if (support_smi_only_arrays) {
(function literals_after_osr() {
var color = [0];
// Trigger OSR.
- while (%GetOptimizationStatus(literals_after_osr) == 2) {}
+ while (%GetOptimizationStatus(literals_after_osr, "no sync") == 2) {}
return [color[0]];
})();
diff --git a/deps/v8/test/mjsunit/array-natives-elements.js b/deps/v8/test/mjsunit/array-natives-elements.js
index b3a7141096..04c2f73d7e 100644
--- a/deps/v8/test/mjsunit/array-natives-elements.js
+++ b/deps/v8/test/mjsunit/array-natives-elements.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays
-// Flags: --noparallel-recompilation
// Flags: --notrack-allocation-sites
// Test element kind of objects.
diff --git a/deps/v8/test/mjsunit/assert-opt-and-deopt.js b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
index afba963fc7..bfd2f3f489 100644
--- a/deps/v8/test/mjsunit/assert-opt-and-deopt.js
+++ b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --noparallel-recompilation
+// Flags: --allow-natives-syntax
if (%IsParallelRecompilationSupported()) {
print("Parallel recompilation is turned on after all. Skipping this test.");
diff --git a/deps/v8/test/mjsunit/bugs/bug-2758.js b/deps/v8/test/mjsunit/bugs/bug-2758.js
new file mode 100644
index 0000000000..ee78844400
--- /dev/null
+++ b/deps/v8/test/mjsunit/bugs/bug-2758.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var functions = [
+ function() { var f = [].concat; f() },
+ function() { var f = [].push; f() },
+ function() { var f = [].shift; f() },
+ function() { (0, [].concat)() },
+ function() { (0, [].push)() },
+ function() { (0, [].shift)() }
+]
+
+for (var i = 0; i < 5; ++i) {
+ for (var j in functions) {
+    print(functions[j])
+    assertThrows(functions[j], TypeError)
+ }
+
+ if (i === 3) {
+    for (var j in functions)
+      %OptimizeFunctionOnNextCall(functions[j]);
+ }
+}
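
bug-2758.js checks that Array builtins extracted from their receiver throw when called bare, both before and after optimization (the i === 3 round marks them for optimization). A minimal sketch of why each variant throws; the exact message text is an assumption based on the checkExpectedMessage strings later in this diff:

// Detaching the builtin loses the receiver, so `this` is undefined inside
// the call and the builtin's internal ToObject(this) throws a TypeError,
// e.g. "... called on null or undefined".
var push = [].push;
assertThrows(function() { push(1); }, TypeError);
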
diff --git a/deps/v8/test/mjsunit/compiler/inline-arguments.js b/deps/v8/test/mjsunit/compiler/inline-arguments.js
index 75d01b5df3..1337ab237a 100644
--- a/deps/v8/test/mjsunit/compiler/inline-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/inline-arguments.js
@@ -115,9 +115,9 @@ F4(1);
})();
// Test arguments access from the inlined function.
+%NeverOptimizeFunction(uninlinable);
function uninlinable(v) {
assertEquals(0, v);
- try { } catch (e) { }
return 0;
}
diff --git a/deps/v8/test/mjsunit/compiler/minus-zero.js b/deps/v8/test/mjsunit/compiler/minus-zero.js
new file mode 100644
index 0000000000..6efceb54e3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/minus-zero.js
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function add(x, y) {
+ return x + y;
+}
+
+assertEquals(0, add(0, 0));
+assertEquals(0, add(0, 0));
+%OptimizeFunctionOnNextCall(add);
+assertEquals(-0, add(-0, -0));
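
The final assertion is only meaningful because mjsunit's assertEquals distinguishes -0 from +0. An illustrative way to make the sign explicit, since == and === treat the two zeros as equal:

// IEEE-754 signed zero: -0 + -0 is -0, and dividing by it flips the sign
// of Infinity, which === cannot detect directly.
assertEquals(-Infinity, 1 / (-0 + -0));  // -0 produced
assertEquals(Infinity, 1 / (0 + 0));     // +0 produced
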
diff --git a/deps/v8/test/mjsunit/compiler/parallel-proto-change.js b/deps/v8/test/mjsunit/compiler/parallel-proto-change.js
index 2392a37c95..25ea3b59df 100644
--- a/deps/v8/test/mjsunit/compiler/parallel-proto-change.js
+++ b/deps/v8/test/mjsunit/compiler/parallel-proto-change.js
@@ -33,10 +33,6 @@ if (!%IsParallelRecompilationSupported()) {
quit();
}
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
function f(foo) { return foo.bar(); }
var o = {};
@@ -45,11 +41,14 @@ o.__proto__ = { __proto__: { bar: function() { return 1; } } };
assertEquals(1, f(o));
assertEquals(1, f(o));
+// Mark for parallel optimization.
%OptimizeFunctionOnNextCall(f, "parallel");
-assertEquals(1, f(o)); // Trigger optimization.
-assertUnoptimized(f); // Optimization not yet done.
+// Trigger optimization in the parallel thread.
+assertEquals(1, f(o));
+// While parallel recompilation is running, optimization not yet done.
+assertUnoptimized(f, "no sync");
// Change the prototype chain during optimization to trigger map invalidation.
o.__proto__.__proto__ = { bar: function() { return 2; } };
-%CompleteOptimization(f); // Conclude optimization with...
-assertUnoptimized(f); // ... bailing out due to map dependency.
+// Optimization eventually bails out due to map dependency.
+assertUnoptimized(f, "sync");
assertEquals(2, f(o));
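
The rewritten test leans on the assertion helpers' second argument in place of the removed %CompleteOptimization call. The assumed semantics, inferred from the comments above: "no sync" peeks at the current status while the background thread may still be compiling, and "sync" blocks until that compile finishes before asserting:

assertUnoptimized(f, "no sync");  // don't wait: background compile may still run
assertUnoptimized(f, "sync");     // wait for the compile to finish, then assert
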
diff --git a/deps/v8/test/mjsunit/compiler/phi-representations.js b/deps/v8/test/mjsunit/compiler/phi-representations.js
new file mode 100644
index 0000000000..6d11bb0d8e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/phi-representations.js
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function ar() {
+ var r = undefined;
+ var f = 1;
+ while (f--) {
+ r = (typeof r === 'undefined') ? 0.1 : r;
+ };
+ return (r - r);
+}
+
+assertEquals(0, ar());
+assertEquals(0, ar());
+%OptimizeFunctionOnNextCall(ar);
+assertEquals(0, ar());
+
+function ar2() {
+ var r = undefined;
+ var f = 1;
+ while (f--) {
+ r = r === undefined ? 0.1 : r;
+ };
+ return (r - r);
+}
+
+assertEquals(0, ar2());
+assertEquals(0, ar2());
+%OptimizeFunctionOnNextCall(ar2);
+assertEquals(0, ar2());
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index 4c50e30d54..9e6b2c6306 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -34,7 +34,7 @@ function test(f) {
%OptimizeFunctionOnNextCall(f);
f();
// Assert that there has been no deopt.
- assertTrue(%GetOptimizationStatus(f) != 2);
+ assertOptimized(f);
}
test(function add() {
diff --git a/deps/v8/test/mjsunit/count-based-osr.js b/deps/v8/test/mjsunit/count-based-osr.js
index fbff91e4a2..5ce4dc5cc4 100644
--- a/deps/v8/test/mjsunit/count-based-osr.js
+++ b/deps/v8/test/mjsunit/count-based-osr.js
@@ -26,14 +26,14 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --count-based-interrupts --interrupt-budget=10 --weighted-back-edges
-// Flags: --allow-natives-syntax --noparallel-recompilation
+// Flags: --allow-natives-syntax
// Test that OSR works properly when using count-based interrupting/profiling.
function osr_this() {
var a = 1;
// Trigger OSR.
- while (%GetOptimizationStatus(osr_this) == 2) {}
+ while (%GetOptimizationStatus(osr_this, "no sync") == 2) {}
return a;
}
assertEquals(1, osr_this());
diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js
index a1b7871d60..3d72032ab8 100644
--- a/deps/v8/test/mjsunit/date.js
+++ b/deps/v8/test/mjsunit/date.js
@@ -333,11 +333,10 @@ date.getTime();
date.getTime();
%OptimizeFunctionOnNextCall(Date.prototype.getTime);
assertThrows(function() { Date.prototype.getTime.call(""); }, TypeError);
-assertTrue(%GetOptimizationStatus(Date.prototype.getTime) != 1);
+assertUnoptimized(Date.prototype.getTime);
date.getYear();
date.getYear();
%OptimizeFunctionOnNextCall(Date.prototype.getYear);
assertThrows(function() { Date.prototype.getYear.call(""); }, TypeError);
-opt_status = %GetOptimizationStatus(Date.prototype.getYear);
-assertTrue(%GetOptimizationStatus(Date.prototype.getTime) != 1);
+assertUnoptimized(Date.prototype.getYear);
diff --git a/deps/v8/test/mjsunit/debug-break-inline.js b/deps/v8/test/mjsunit/debug-break-inline.js
index 464cb73637..4418fa8d1b 100644
--- a/deps/v8/test/mjsunit/debug-break-inline.js
+++ b/deps/v8/test/mjsunit/debug-break-inline.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --noparallel-recompilation
// This test tests that deoptimization due to debug breaks works for
// inlined functions where the full-code is generated before the
diff --git a/deps/v8/test/mjsunit/debug-evaluate-closure.js b/deps/v8/test/mjsunit/debug-evaluate-closure.js
new file mode 100644
index 0000000000..778defd0ab
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-closure.js
@@ -0,0 +1,91 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+Debug = debug.Debug;
+var listened = false;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals("goo", exec_state.frame(0).evaluate("goo").value());
+ exec_state.frame(0).evaluate("goo = 'goo foo'");
+ assertEquals("bar return", exec_state.frame(0).evaluate("bar()").value());
+ assertEquals("inner bar", exec_state.frame(0).evaluate("inner").value());
+ assertEquals("outer bar", exec_state.frame(0).evaluate("outer").value());
+ assertEquals("baz inner", exec_state.frame(0).evaluate("baz").value());
+ assertEquals("baz outer", exec_state.frame(1).evaluate("baz").value());
+ exec_state.frame(0).evaluate("w = 'w foo'");
+ exec_state.frame(0).evaluate("inner = 'inner foo'");
+ exec_state.frame(0).evaluate("outer = 'outer foo'");
+ exec_state.frame(0).evaluate("baz = 'baz inner foo'");
+ exec_state.frame(1).evaluate("baz = 'baz outer foo'");
+ listened = true;
+ } catch (e) {
+ print(e);
+ print(e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+var outer = "outer";
+var baz = "baz outer";
+
+function foo() {
+ var inner = "inner";
+ var baz = "baz inner";
+ var goo = "goo";
+ var withw = { w: "w" };
+ var withv = { v: "v" };
+
+ with (withv) {
+ var bar = function bar() {
+ assertEquals("goo foo", goo);
+ inner = "inner bar";
+ outer = "outer bar";
+ v = "v bar";
+ return "bar return";
+ };
+ }
+
+ with (withw) {
+ debugger;
+ }
+
+ assertEquals("inner foo", inner);
+ assertEquals("baz inner foo", baz);
+ assertEquals("w foo", withw.w);
+ assertEquals("v bar", withv.v);
+}
+
+foo();
+assertEquals("outer foo", outer);
+assertEquals("baz outer foo", baz);
+assertTrue(listened);
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index c456e6bf57..6e673f71c0 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --expose-gc --noparallel-recompilation
-// Flags: --send-idle-notification
+// Flags: --expose-debug-as debug --expose-gc --send-idle-notification
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/deopt-minus-zero.js b/deps/v8/test/mjsunit/deopt-minus-zero.js
index ee0983127d..835494cfcf 100644
--- a/deps/v8/test/mjsunit/deopt-minus-zero.js
+++ b/deps/v8/test/mjsunit/deopt-minus-zero.js
@@ -27,17 +27,6 @@
// Flags: --allow-natives-syntax --expose-gc
-/**
- * The possible optimization states of a function. Must be in sync with the
- * return values of Runtime_GetOptimizationStatus() in runtime.cc!
- */
-var OptimizationState = {
- YES: 1,
- NO: 2,
- ALWAYS: 3,
- NEVER: 4
-};
-
function mul (a, b) {
return a * b;
}
@@ -50,7 +39,5 @@ mul(0, -1);
%OptimizeFunctionOnNextCall(mul);
mul(0, -1);
-var raw_optimized = %GetOptimizationStatus(mul);
-assertFalse(raw_optimized == OptimizationState.NO);
+assertOptimized(mul);
gc();
-
diff --git a/deps/v8/test/mjsunit/double-truncation.js b/deps/v8/test/mjsunit/double-truncation.js
new file mode 100644
index 0000000000..b43e1e6c63
--- /dev/null
+++ b/deps/v8/test/mjsunit/double-truncation.js
@@ -0,0 +1,78 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function RunOneTruncationTest(a, b) {
+ var temp = a | 0;
+ assertEquals(b, temp);
+}
+
+function RunAllTruncationTests() {
+ RunOneTruncationTest(0, 0);
+ RunOneTruncationTest(0.5, 0);
+ RunOneTruncationTest(-0.5, 0);
+ RunOneTruncationTest(1.5, 1);
+ RunOneTruncationTest(-1.5, -1);
+ RunOneTruncationTest(5.5, 5);
+ RunOneTruncationTest(-5.0, -5);
+ RunOneTruncationTest(NaN, 0);
+ RunOneTruncationTest(Infinity, 0);
+ RunOneTruncationTest(-NaN, 0);
+ RunOneTruncationTest(-Infinity, 0);
+
+ RunOneTruncationTest(4.5036e+15, 0x1635E000);
+ RunOneTruncationTest(-4.5036e+15, -372629504);
+
+ RunOneTruncationTest(4503603922337791.0, -1);
+ RunOneTruncationTest(-4503603922337791.0, 1);
+ RunOneTruncationTest(4503601774854143.0, 2147483647);
+ RunOneTruncationTest(-4503601774854143.0, -2147483647);
+ RunOneTruncationTest(9007207844675582.0, -2);
+ RunOneTruncationTest(-9007207844675582.0, 2);
+
+ RunOneTruncationTest(2.4178527921507624e+24, -536870912);
+ RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
+ RunOneTruncationTest(2.417853945072267e+24, -536870912);
+ RunOneTruncationTest(-2.417853945072267e+24, 536870912);
+
+ RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
+ RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
+ RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
+ RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+
+ RunOneTruncationTest(9.6714111686030497e+24, -2147483648);
+ RunOneTruncationTest(-9.6714111686030497e+24, -2147483648);
+ RunOneTruncationTest(9.6714157802890681e+24, -2147483648);
+ RunOneTruncationTest(-9.6714157802890681e+24, -2147483648);
+}
+
+RunAllTruncationTests();
+RunAllTruncationTests();
+%OptimizeFunctionOnNextCall(RunOneTruncationTest);
+RunAllTruncationTests();
+RunAllTruncationTests();
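
All of these expectations follow from ECMAScript ToInt32, which x | 0 applies to x: truncate toward zero, reduce modulo 2^32, reinterpret as signed. A self-contained sketch (the helper name is ours, not part of the test):

// ToInt32 sketch: x | 0 behaves like this for any double input.
function toInt32(x) {
  if (!isFinite(x)) return 0;                      // NaN, +/-Infinity -> 0
  var n = (x < 0 ? Math.ceil(x) : Math.floor(x)) % 0x100000000;
  if (n < 0) n += 0x100000000;                     // normalize into [0, 2^32)
  if (n >= 0x80000000) n -= 0x100000000;           // reinterpret as signed
  return n === 0 ? 0 : n;                          // fold -0 to +0
}
// e.g. 4503603922337791 == 2^52 + 2^32 - 1, so modulo 2^32 it is
// 0xFFFFFFFF, i.e. -1 as a signed 32-bit value -- matching the test above.
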
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index 247aa89747..442d756ae9 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -170,22 +170,22 @@ for (var i = 0; i < 3; i++) monomorphic(smi_only);
monomorphic(smi_only);
if (support_smi_only_arrays) {
+ %NeverOptimizeFunction(construct_smis);
function construct_smis() {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
var a = [0, 0, 0];
a[0] = 0; // Send the COW array map to the steak house.
assertKind(elements_kind.fast_smi_only, a);
return a;
}
+ %NeverOptimizeFunction(construct_doubles);
function construct_doubles() {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
var a = construct_smis();
a[0] = 1.5;
assertKind(elements_kind.fast_double, a);
return a;
}
+ %NeverOptimizeFunction(construct_objects);
function construct_objects() {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
var a = construct_smis();
a[0] = "one";
assertKind(elements_kind.fast, a);
@@ -193,8 +193,8 @@ if (support_smi_only_arrays) {
}
// Test crankshafted transition SMI->DOUBLE.
+ %NeverOptimizeFunction(convert_to_double);
function convert_to_double(array) {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
array[1] = 2.5;
assertKind(elements_kind.fast_double, array);
assertEquals(2.5, array[1]);
@@ -205,8 +205,8 @@ if (support_smi_only_arrays) {
smis = construct_smis();
convert_to_double(smis);
// Test crankshafted transitions SMI->FAST and DOUBLE->FAST.
+ %NeverOptimizeFunction(convert_to_fast);
function convert_to_fast(array) {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
array[1] = "two";
assertKind(elements_kind.fast, array);
assertEquals("two", array[1]);
@@ -222,8 +222,8 @@ if (support_smi_only_arrays) {
convert_to_fast(doubles);
// Test transition chain SMI->DOUBLE->FAST (crankshafted function will
// transition to FAST directly).
+ %NeverOptimizeFunction(convert_mixed);
function convert_mixed(array, value, kind) {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
array[1] = value;
assertKind(kind, array);
assertEquals(value, array[1]);
diff --git a/deps/v8/test/mjsunit/elements-transition-and-store.js b/deps/v8/test/mjsunit/elements-transition-and-store.js
new file mode 100644
index 0000000000..78ca597ba9
--- /dev/null
+++ b/deps/v8/test/mjsunit/elements-transition-and-store.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --compiled-transitions --notrack-allocation-sites
+
+function foo(a, v) {
+ a[0] = v;
+ return a;
+}
+
+for (var i = 0; i < 3; ++i) {
+ var a = Array();
+ a = foo(a, 1.5);
+ assertEquals(a[0], 1.5);
+ a = foo(a, 2);
+ assertEquals(a[0], 2);
+}
diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js
index 40b25cd582..0295318f6a 100644
--- a/deps/v8/test/mjsunit/elements-transition-hoisting.js
+++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays --noparallel-recompilation
-// Flags: --notrack-allocation-sites
+// Flags: --allow-natives-syntax --smi-only-arrays --notrack-allocation-sites
// No tracking of allocation sites because it interferes with the semantics
// the test is trying to ensure.
@@ -63,7 +62,7 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(testDoubleConversion4);
testDoubleConversion4(new Array(5));
testDoubleConversion4(new Array(5));
- assertTrue(2 != %GetOptimizationStatus(testDoubleConversion4));
+ assertOptimized(testDoubleConversion4);
%ClearFunctionTypeFeedback(testDoubleConversion4);
// Make sure that non-element related map checks that are not preceded by
@@ -89,7 +88,7 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(testExactMapHoisting);
testExactMapHoisting(new Array(5));
testExactMapHoisting(new Array(5));
- assertTrue(2 != %GetOptimizationStatus(testExactMapHoisting));
+ assertOptimized(testExactMapHoisting);
%ClearFunctionTypeFeedback(testExactMapHoisting);
// Make sure that non-element related map checks do NOT get hoisted if they
@@ -121,7 +120,7 @@ if (support_smi_only_arrays) {
testExactMapHoisting2(new Array(5));
testExactMapHoisting2(new Array(5));
// Temporarily disabled - see bug 2176.
- // assertTrue(2 != %GetOptimizationStatus(testExactMapHoisting2));
+ // assertOptimized(testExactMapHoisting2);
%ClearFunctionTypeFeedback(testExactMapHoisting2);
// Make sure that non-element related map checks do get hoisted if they use
@@ -150,7 +149,7 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(testExactMapHoisting3);
testExactMapHoisting3(new Array(5));
testExactMapHoisting3(new Array(5));
- assertTrue(2 != %GetOptimizationStatus(testExactMapHoisting3));
+ assertOptimized(testExactMapHoisting3);
%ClearFunctionTypeFeedback(testExactMapHoisting3);
function testDominatingTransitionHoisting1(a) {
@@ -177,7 +176,7 @@ if (support_smi_only_arrays) {
// TODO(verwaest) With current changes the elements transition gets hoisted
// above the access, causing a deopt. We should update the type of access
// rather than forbid hoisting the transition.
- assertTrue(2 != %GetOptimizationStatus(testDominatingTransitionHoisting1));
+ assertOptimized(testDominatingTransitionHoisting1);
%ClearFunctionTypeFeedback(testDominatingTransitionHoisting1);
*/
@@ -198,7 +197,7 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(testHoistingWithSideEffect);
testHoistingWithSideEffect(new Array(5));
testHoistingWithSideEffect(new Array(5));
- assertTrue(2 != %GetOptimizationStatus(testHoistingWithSideEffect));
+ assertOptimized(testHoistingWithSideEffect);
%ClearFunctionTypeFeedback(testHoistingWithSideEffect);
function testStraightLineDupeElinination(a,b,c,d,e,f) {
@@ -237,6 +236,6 @@ if (support_smi_only_arrays) {
%OptimizeFunctionOnNextCall(testStraightLineDupeElinination);
testStraightLineDupeElinination(new Array(5),0,0,0,0,0);
testStraightLineDupeElinination(new Array(5),0,0,0,0,0);
- assertTrue(2 != %GetOptimizationStatus(testStraightLineDupeElinination));
+ assertOptimized(testStraightLineDupeElinination);
%ClearFunctionTypeFeedback(testStraightLineDupeElinination);
}
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-9.js b/deps/v8/test/mjsunit/elide-double-hole-check-9.js
index 4d277af695..88bbc7eaaa 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-9.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-9.js
@@ -29,8 +29,8 @@
var do_set = false;
+%NeverOptimizeFunction(set_proto_elements);
function set_proto_elements() {
- try {} catch (e) {} // Don't optimize or inline
if (do_set) Array.prototype[1] = 1.5;
}
diff --git a/deps/v8/test/mjsunit/external-array-no-sse2.js b/deps/v8/test/mjsunit/external-array-no-sse2.js
index cffcab8610..11e61ba186 100644
--- a/deps/v8/test/mjsunit/external-array-no-sse2.js
+++ b/deps/v8/test/mjsunit/external-array-no-sse2.js
@@ -606,8 +606,10 @@ a61.set(a62)
assertArrayPrefix([1, 12], a61)
// Invalid source
-assertThrows(function() { a.set(0) })
-assertThrows(function() { a.set({}) })
+assertThrows(function() { a.set(0); }, TypeError);
+assertArrayPrefix([1,2,3,4,5,6], a);
+a.set({}); // does not throw
+assertArrayPrefix([1,2,3,4,5,6], a);
// Test arraybuffer.slice
diff --git a/deps/v8/test/mjsunit/external-array.js b/deps/v8/test/mjsunit/external-array.js
index deb3c8659d..3fcd544ab6 100644
--- a/deps/v8/test/mjsunit/external-array.js
+++ b/deps/v8/test/mjsunit/external-array.js
@@ -605,8 +605,10 @@ a61.set(a62)
assertArrayPrefix([1, 12], a61)
// Invalid source
-assertThrows(function() { a.set(0) })
-assertThrows(function() { a.set({}) })
+assertThrows(function() { a.set(0); }, TypeError);
+assertArrayPrefix([1,2,3,4,5,6], a);
+a.set({}); // does not throw
+assertArrayPrefix([1,2,3,4,5,6], a);
// Test arraybuffer.slice
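
The relaxed behavior for a.set({}) follows from the typed-array set algorithm: the source's length property is read and converted to an integer, {} has none, so zero elements are copied and the call succeeds. An illustrative sketch (Int32Array chosen arbitrarily as a stand-in for the test's external arrays):

var ta = new Int32Array([1, 2, 3]);
ta.set({});                    // {}.length is undefined -> 0 elements copied
assertEquals(1, ta[0]);        // contents untouched
ta.set({ length: 1, 0: 9 });   // a genuine array-like does copy
assertEquals(9, ta[0]);
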
diff --git a/deps/v8/test/mjsunit/function-call.js b/deps/v8/test/mjsunit/function-call.js
index 38be10c48b..88df353a60 100644
--- a/deps/v8/test/mjsunit/function-call.js
+++ b/deps/v8/test/mjsunit/function-call.js
@@ -151,8 +151,8 @@ var reducing_functions =
function checkExpectedMessage(e) {
assertTrue(e.message.indexOf("called on null or undefined") >= 0 ||
- e.message.indexOf("invoked on undefined or null value") >= 0 ||
- e.message.indexOf("Cannot convert null to object") >= 0);
+ e.message.indexOf("invoked on undefined or null value") >= 0 ||
+ e.message.indexOf("Cannot convert undefined or null to object") >= 0);
}
// Test that all natives using the ToObject call throw the right exception.
diff --git a/deps/v8/test/mjsunit/generated-transition-stub.js b/deps/v8/test/mjsunit/generated-transition-stub.js
index 072ce9ce1c..8b890c0bad 100644
--- a/deps/v8/test/mjsunit/generated-transition-stub.js
+++ b/deps/v8/test/mjsunit/generated-transition-stub.js
@@ -27,192 +27,196 @@
// Flags: --allow-natives-syntax --compiled_transitions
-try {} catch (e) {}
+%NeverOptimizeFunction(test);
+function test() {
-var iteration_count = 1;
+ var iteration_count = 1;
-function transition1(a, i, v) {
- a[i] = v;
-}
-
-//
-// Test PACKED SMI -> PACKED DOUBLE
-//
+ function transition1(a, i, v) {
+ a[i] = v;
+ }
-var a1 = [0, 1, 2, 3, 4];
-transition1(a1, 0, 2.5);
-var a2 = [0, 1, 2, 3, 4];
-transition1(a2, 0, 2.5);
-assertFalse(%HasFastHoleyElements(a2));
-%OptimizeFunctionOnNextCall(transition1);
-
-var a3 = [0, 1, 2, 3, 4];
-assertTrue(%HasFastSmiElements(a3));
-transition1(a3, 0, 2.5);
-assertFalse(%HasFastHoleyElements(a3));
-assertEquals(4, a3[4]);
-assertEquals(2.5, a3[0]);
-
-// Test handling of hole.
-var a4 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
-a4.length = 7;
-assertTrue(%HasFastSmiElements(a4));
-transition1(a4, 0, 2.5);
-assertFalse(%HasFastHoleyElements(a4));
-assertEquals(2.5, a4[0]);
-assertEquals(undefined, a4[8]);
-
-// Large array should deopt to runtimea
-for (j = 0; j < iteration_count; ++j) {
- a5 = new Array();
- for (i = 0; i < 0x40000; ++i) {
- a5[i] = 0;
+ //
+ // Test PACKED SMI -> PACKED DOUBLE
+ //
+
+ var a1 = [0, 1, 2, 3, 4];
+ transition1(a1, 0, 2.5);
+ var a2 = [0, 1, 2, 3, 4];
+ transition1(a2, 0, 2.5);
+ assertFalse(%HasFastHoleyElements(a2));
+ %OptimizeFunctionOnNextCall(transition1);
+
+ var a3 = [0, 1, 2, 3, 4];
+ assertTrue(%HasFastSmiElements(a3));
+ transition1(a3, 0, 2.5);
+ assertFalse(%HasFastHoleyElements(a3));
+ assertEquals(4, a3[4]);
+ assertEquals(2.5, a3[0]);
+
+ // Test handling of hole.
+ var a4 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ a4.length = 7;
+ assertTrue(%HasFastSmiElements(a4));
+ transition1(a4, 0, 2.5);
+ assertFalse(%HasFastHoleyElements(a4));
+ assertEquals(2.5, a4[0]);
+ assertEquals(undefined, a4[8]);
+
+ // Large array should deopt to runtime
+ for (j = 0; j < iteration_count; ++j) {
+ a5 = new Array();
+ for (i = 0; i < 0x40000; ++i) {
+ a5[i] = 0;
+ }
+ assertTrue(%HasFastSmiElements(a5) || %HasFastDoubleElements(a5));
+ transition1(a5, 0, 2.5);
+ assertEquals(2.5, a5[0]);
}
- assertTrue(%HasFastSmiElements(a5) || %HasFastDoubleElements(a5));
- transition1(a5, 0, 2.5);
- assertEquals(2.5, a5[0]);
-}
-//
-// Test HOLEY SMI -> HOLEY DOUBLE
-//
+ //
+ // Test HOLEY SMI -> HOLEY DOUBLE
+ //
-function transition2(a, i, v) {
- a[i] = v;
-}
+ function transition2(a, i, v) {
+ a[i] = v;
+ }
-var b1 = [0, 1, 2, , 4];
-transition2(b1, 0, 2.5);
-var b2 = [0, 1, 2, , 4];
-transition2(b2, 0, 2.5);
-assertTrue(%HasFastHoleyElements(b2));
-%OptimizeFunctionOnNextCall(transition2);
-
-var b3 = [0, 1, 2, , 4];
-assertTrue(%HasFastSmiElements(b3));
-assertTrue(%HasFastHoleyElements(b3));
-transition2(b3, 0, 2.5);
-assertTrue(%HasFastHoleyElements(b3));
-assertEquals(4, b3[4]);
-assertEquals(2.5, b3[0]);
-
-// Large array should deopt to runtime
-for (j = 0; j < iteration_count; ++j) {
- b4 = [0, ,0];
- for (i = 3; i < 0x40000; ++i) {
- b4[i] = 0;
+ var b1 = [0, 1, 2, , 4];
+ transition2(b1, 0, 2.5);
+ var b2 = [0, 1, 2, , 4];
+ transition2(b2, 0, 2.5);
+ assertTrue(%HasFastHoleyElements(b2));
+ %OptimizeFunctionOnNextCall(transition2);
+
+ var b3 = [0, 1, 2, , 4];
+ assertTrue(%HasFastSmiElements(b3));
+ assertTrue(%HasFastHoleyElements(b3));
+ transition2(b3, 0, 2.5);
+ assertTrue(%HasFastHoleyElements(b3));
+ assertEquals(4, b3[4]);
+ assertEquals(2.5, b3[0]);
+
+ // Large array should deopt to runtime
+ for (j = 0; j < iteration_count; ++j) {
+ b4 = [0, ,0];
+ for (i = 3; i < 0x40000; ++i) {
+ b4[i] = 0;
+ }
+ assertTrue(%HasFastSmiElements(b4));
+ transition2(b4, 0, 2.5);
+ assertEquals(2.5, b4[0]);
}
- assertTrue(%HasFastSmiElements(b4));
- transition2(b4, 0, 2.5);
- assertEquals(2.5, b4[0]);
-}
-//
-// Test PACKED DOUBLE -> PACKED OBJECT
-//
+ //
+ // Test PACKED DOUBLE -> PACKED OBJECT
+ //
-function transition3(a, i, v) {
- a[i] = v;
-}
+ function transition3(a, i, v) {
+ a[i] = v;
+ }
-var c1 = [0, 1, 2, 3.5, 4];
-transition3(c1, 0, new Object());
-var c2 = [0, 1, 2, 3.5, 4];
-transition3(c2, 0, new Object());
-assertTrue(%HasFastObjectElements(c2));
-assertTrue(!%HasFastHoleyElements(c2));
-%OptimizeFunctionOnNextCall(transition3);
-
-var c3 = [0, 1, 2, 3.5, 4];
-assertTrue(%HasFastDoubleElements(c3));
-assertTrue(!%HasFastHoleyElements(c3));
-transition3(c3, 0, new Array());
-assertTrue(!%HasFastHoleyElements(c3));
-assertTrue(%HasFastObjectElements(c3));
-assertEquals(4, c3[4]);
-assertEquals(0, c3[0].length);
-
-// Large array under the deopt threshold should be able to trigger GC without
-// causing crashes.
-for (j = 0; j < iteration_count; ++j) {
- c4 = [0, 2.5, 0];
- for (i = 3; i < 0xa000; ++i) {
- c4[i] = 0;
+ var c1 = [0, 1, 2, 3.5, 4];
+ transition3(c1, 0, new Object());
+ var c2 = [0, 1, 2, 3.5, 4];
+ transition3(c2, 0, new Object());
+ assertTrue(%HasFastObjectElements(c2));
+ assertTrue(!%HasFastHoleyElements(c2));
+ %OptimizeFunctionOnNextCall(transition3);
+
+ var c3 = [0, 1, 2, 3.5, 4];
+ assertTrue(%HasFastDoubleElements(c3));
+ assertTrue(!%HasFastHoleyElements(c3));
+ transition3(c3, 0, new Array());
+ assertTrue(!%HasFastHoleyElements(c3));
+ assertTrue(%HasFastObjectElements(c3));
+ assertEquals(4, c3[4]);
+ assertEquals(0, c3[0].length);
+
+ // Large array under the deopt threshold should be able to trigger GC without
+ // causing crashes.
+ for (j = 0; j < iteration_count; ++j) {
+ c4 = [0, 2.5, 0];
+ for (i = 3; i < 0xa000; ++i) {
+ c4[i] = 0;
+ }
+ assertTrue(%HasFastDoubleElements(c4));
+ assertTrue(!%HasFastHoleyElements(c4));
+ transition3(c4, 0, new Array(5));
+ assertTrue(!%HasFastHoleyElements(c4));
+ assertTrue(%HasFastObjectElements(c4));
+ assertEquals(5, c4[0].length);
}
- assertTrue(%HasFastDoubleElements(c4));
- assertTrue(!%HasFastHoleyElements(c4));
- transition3(c4, 0, new Array(5));
- assertTrue(!%HasFastHoleyElements(c4));
- assertTrue(%HasFastObjectElements(c4));
- assertEquals(5, c4[0].length);
-}
-// Large array should deopt to runtime
-for (j = 0; j < iteration_count; ++j) {
- c5 = [0, 2.5, 0];
- for (i = 3; i < 0x40000; ++i) {
- c5[i] = 0;
+ // Large array should deopt to runtime
+ for (j = 0; j < iteration_count; ++j) {
+ c5 = [0, 2.5, 0];
+ for (i = 3; i < 0x40000; ++i) {
+ c5[i] = 0;
+ }
+ assertTrue(%HasFastDoubleElements(c5));
+ assertTrue(!%HasFastHoleyElements(c5));
+ transition3(c5, 0, new Array(5));
+ assertTrue(!%HasFastHoleyElements(c5));
+ assertTrue(%HasFastObjectElements(c5));
+ assertEquals(5, c5[0].length);
}
- assertTrue(%HasFastDoubleElements(c5));
- assertTrue(!%HasFastHoleyElements(c5));
- transition3(c5, 0, new Array(5));
- assertTrue(!%HasFastHoleyElements(c5));
- assertTrue(%HasFastObjectElements(c5));
- assertEquals(5, c5[0].length);
-}
-//
-// Test HOLEY DOUBLE -> HOLEY OBJECT
-//
+ //
+ // Test HOLEY DOUBLE -> HOLEY OBJECT
+ //
-function transition4(a, i, v) {
- a[i] = v;
-}
+ function transition4(a, i, v) {
+ a[i] = v;
+ }
-var d1 = [0, 1, , 3.5, 4];
-transition4(d1, 0, new Object());
-var d2 = [0, 1, , 3.5, 4];
-transition4(d2, 0, new Object());
-assertTrue(%HasFastObjectElements(d2));
-assertTrue(%HasFastHoleyElements(d2));
-%OptimizeFunctionOnNextCall(transition4);
-
-var d3 = [0, 1, , 3.5, 4];
-assertTrue(%HasFastDoubleElements(d3));
-assertTrue(%HasFastHoleyElements(d3));
-transition4(d3, 0, new Array());
-assertTrue(%HasFastHoleyElements(d3));
-assertTrue(%HasFastObjectElements(d3));
-assertEquals(4, d3[4]);
-assertEquals(0, d3[0].length);
-
-// Large array under the deopt threshold should be able to trigger GC without
-// causing crashes.
-for (j = 0; j < iteration_count; ++j) {
- d4 = [, 2.5, ,];
- for (i = 3; i < 0xa000; ++i) {
- d4[i] = 0;
+ var d1 = [0, 1, , 3.5, 4];
+ transition4(d1, 0, new Object());
+ var d2 = [0, 1, , 3.5, 4];
+ transition4(d2, 0, new Object());
+ assertTrue(%HasFastObjectElements(d2));
+ assertTrue(%HasFastHoleyElements(d2));
+ %OptimizeFunctionOnNextCall(transition4);
+
+ var d3 = [0, 1, , 3.5, 4];
+ assertTrue(%HasFastDoubleElements(d3));
+ assertTrue(%HasFastHoleyElements(d3));
+ transition4(d3, 0, new Array());
+ assertTrue(%HasFastHoleyElements(d3));
+ assertTrue(%HasFastObjectElements(d3));
+ assertEquals(4, d3[4]);
+ assertEquals(0, d3[0].length);
+
+ // Large array under the deopt threshold should be able to trigger GC without
+ // causing crashes.
+ for (j = 0; j < iteration_count; ++j) {
+ d4 = [, 2.5, ,];
+ for (i = 3; i < 0xa000; ++i) {
+ d4[i] = 0;
+ }
+ assertTrue(%HasFastDoubleElements(d4));
+ assertTrue(%HasFastHoleyElements(d4));
+ transition4(d4, 0, new Array(5));
+ assertTrue(%HasFastHoleyElements(d4));
+ assertTrue(%HasFastObjectElements(d4));
+ assertEquals(5, d4[0].length);
+ assertEquals(undefined, d4[2]);
}
- assertTrue(%HasFastDoubleElements(d4));
- assertTrue(%HasFastHoleyElements(d4));
- transition4(d4, 0, new Array(5));
- assertTrue(%HasFastHoleyElements(d4));
- assertTrue(%HasFastObjectElements(d4));
- assertEquals(5, d4[0].length);
- assertEquals(undefined, d4[2]);
-}
-// Large array should deopt to runtime
-for (j = 0; j < iteration_count; ++j) {
- d5 = [, 2.5, ,];
- for (i = 3; i < 0x40000; ++i) {
- d5[i] = 0;
+ // Large array should deopt to runtime
+ for (j = 0; j < iteration_count; ++j) {
+ d5 = [, 2.5, ,];
+ for (i = 3; i < 0x40000; ++i) {
+ d5[i] = 0;
+ }
+ assertTrue(%HasFastDoubleElements(d5));
+ assertTrue(%HasFastHoleyElements(d5));
+ transition4(d5, 0, new Array(5));
+ assertTrue(%HasFastHoleyElements(d5));
+ assertTrue(%HasFastObjectElements(d5));
+ assertEquals(5, d5[0].length);
+ assertEquals(undefined, d5[2]);
}
- assertTrue(%HasFastDoubleElements(d5));
- assertTrue(%HasFastHoleyElements(d5));
- transition4(d5, 0, new Array(5));
- assertTrue(%HasFastHoleyElements(d5));
- assertTrue(%HasFastObjectElements(d5));
- assertEquals(5, d5[0].length);
- assertEquals(undefined, d5[2]);
+
}
+test();
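
Wrapping everything in test() marked with %NeverOptimizeFunction replaces the old try {} catch (e) {} trick that kept Crankshaft away (the same swap appears in elements-kind.js and elide-double-hole-check-9.js in this diff). The native call can precede the declaration because function declarations hoist; a minimal sketch of the two idioms:

// New: explicit marker via natives syntax (hoisting makes the order legal).
%NeverOptimizeFunction(outer);
function outer() { /* only inner functions get optimized deliberately */ }

// Old: a try/catch in the body implicitly disabled optimization of outer.
function outer_old() { try {} catch (e) {} /* ... */ }
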
diff --git a/deps/v8/test/mjsunit/harmony/array-iterator.js b/deps/v8/test/mjsunit/harmony/array-iterator.js
new file mode 100644
index 0000000000..f3a2627b57
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-iterator.js
@@ -0,0 +1,195 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-iteration --allow-natives-syntax
+
+function TestArrayPrototype() {
+ assertTrue(Array.prototype.hasOwnProperty('entries'));
+ assertTrue(Array.prototype.hasOwnProperty('values'));
+ assertTrue(Array.prototype.hasOwnProperty('keys'));
+
+ assertFalse(Array.prototype.propertyIsEnumerable('entries'));
+ assertFalse(Array.prototype.propertyIsEnumerable('values'));
+ assertFalse(Array.prototype.propertyIsEnumerable('keys'));
+}
+TestArrayPrototype();
+
+function assertIteratorResult(value, done, result) {
+ assertEquals({ value: value, done: done}, result);
+}
+
+function TestValues() {
+ var array = ['a', 'b', 'c'];
+ var iterator = array.values();
+ assertIteratorResult('a', false, iterator.next());
+ assertIteratorResult('b', false, iterator.next());
+ assertIteratorResult('c', false, iterator.next());
+ assertIteratorResult(void 0, true, iterator.next());
+
+ array.push('d');
+ assertIteratorResult(void 0, true, iterator.next());
+}
+TestValues();
+
+function TestValuesMutate() {
+ var array = ['a', 'b', 'c'];
+ var iterator = array.values();
+ assertIteratorResult('a', false, iterator.next());
+ assertIteratorResult('b', false, iterator.next());
+ assertIteratorResult('c', false, iterator.next());
+ array.push('d');
+ assertIteratorResult('d', false, iterator.next());
+ assertIteratorResult(void 0, true, iterator.next());
+}
+TestValuesMutate();
+
+function TestKeys() {
+ var array = ['a', 'b', 'c'];
+ var iterator = array.keys();
+ assertIteratorResult('0', false, iterator.next());
+ assertIteratorResult('1', false, iterator.next());
+ assertIteratorResult('2', false, iterator.next());
+ assertIteratorResult(void 0, true, iterator.next());
+
+ array.push('d');
+ assertIteratorResult(void 0, true, iterator.next());
+}
+TestKeys();
+
+function TestKeysMutate() {
+ var array = ['a', 'b', 'c'];
+ var iterator = array.keys();
+ assertIteratorResult('0', false, iterator.next());
+ assertIteratorResult('1', false, iterator.next());
+ assertIteratorResult('2', false, iterator.next());
+ array.push('d');
+ assertIteratorResult('3', false, iterator.next());
+ assertIteratorResult(void 0, true, iterator.next());
+}
+TestKeysMutate();
+
+function TestEntries() {
+ var array = ['a', 'b', 'c'];
+ var iterator = array.entries();
+ assertIteratorResult(['0', 'a'], false, iterator.next());
+ assertIteratorResult(['1', 'b'], false, iterator.next());
+ assertIteratorResult(['2', 'c'], false, iterator.next());
+ assertIteratorResult(void 0, true, iterator.next());
+
+ array.push('d');
+ assertIteratorResult(void 0, true, iterator.next());
+}
+TestEntries();
+
+function TestEntriesMutate() {
+ var array = ['a', 'b', 'c'];
+ var iterator = array.entries();
+ assertIteratorResult(['0', 'a'], false, iterator.next());
+ assertIteratorResult(['1', 'b'], false, iterator.next());
+ assertIteratorResult(['2', 'c'], false, iterator.next());
+ array.push('d');
+ assertIteratorResult(['3', 'd'], false, iterator.next());
+ assertIteratorResult(void 0, true, iterator.next());
+}
+TestEntriesMutate();
+
+function TestArrayIteratorPrototype() {
+ var array = [];
+ var iterator = array.values();
+
+ var ArrayIterator = iterator.constructor;
+ assertEquals(ArrayIterator.prototype, array.values().__proto__);
+ assertEquals(ArrayIterator.prototype, array.keys().__proto__);
+ assertEquals(ArrayIterator.prototype, array.entries().__proto__);
+
+ assertEquals(Object.prototype, ArrayIterator.prototype.__proto__);
+
+ assertEquals('Array Iterator', %_ClassOf(array.values()));
+ assertEquals('Array Iterator', %_ClassOf(array.keys()));
+ assertEquals('Array Iterator', %_ClassOf(array.entries()));
+
+ var prototypeDescriptor =
+ Object.getOwnPropertyDescriptor(ArrayIterator, 'prototype');
+ assertFalse(prototypeDescriptor.configurable);
+ assertFalse(prototypeDescriptor.enumerable);
+ assertFalse(prototypeDescriptor.writable);
+}
+TestArrayIteratorPrototype();
+
+function TestForArrayValues() {
+ var buffer = [];
+ var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+ var i = 0;
+ for (var value of array.values()) {
+ buffer[i++] = value;
+ }
+
+ assertEquals(8, buffer.length);
+
+ for (var i = 0; i < buffer.length - 1; i++) {
+ assertEquals(array[i], buffer[i]);
+ }
+ assertTrue(isNaN(buffer[buffer.length - 1]));
+}
+TestForArrayValues();
+
+function TestForArrayKeys() {
+ var buffer = [];
+ var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+ var i = 0;
+ for (var key of array.keys()) {
+ buffer[i++] = key;
+ }
+
+ assertEquals(8, buffer.length);
+
+ for (var i = 0; i < buffer.length; i++) {
+ assertEquals(String(i), buffer[i]);
+ }
+}
+TestForArrayKeys();
+
+function TestForArrayEntries() {
+ var buffer = [];
+ var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+ var i = 0;
+ for (var entry of array.entries()) {
+ buffer[i++] = entry;
+ }
+
+ assertEquals(8, buffer.length);
+
+ for (var i = 0; i < buffer.length - 1; i++) {
+ assertEquals(array[i], buffer[i][1]);
+ }
+ assertTrue(isNaN(buffer[buffer.length - 1][1]));
+
+ for (var i = 0; i < buffer.length; i++) {
+ assertEquals(String(i), buffer[i][0]);
+ }
+}
+TestForArrayEntries();
diff --git a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js b/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
index d01e5c08ab..5888fd24f5 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping --allow-natives-syntax --noparallel-recompilation
+// Flags: --harmony-scoping --allow-natives-syntax
// TODO(ES6): properly activate extended mode
"use strict";
@@ -43,7 +43,7 @@ for (var i = 0; i < functions.length; ++i) {
}
%OptimizeFunctionOnNextCall(func);
func(12);
- assertTrue(%GetOptimizationStatus(func) != 2);
+ assertOptimized(func);
}
function f1() { }
diff --git a/deps/v8/test/mjsunit/harmony/collections.js b/deps/v8/test/mjsunit/harmony/collections.js
index cf18745ae8..67f91a8ad4 100644
--- a/deps/v8/test/mjsunit/harmony/collections.js
+++ b/deps/v8/test/mjsunit/harmony/collections.js
@@ -35,6 +35,7 @@ function TestValidSetCalls(m) {
assertDoesNotThrow(function () { m.delete(new Object) });
}
TestValidSetCalls(new Set);
+TestValidSetCalls(new WeakSet);
// Test valid getter and setter calls on Maps and WeakMaps
@@ -85,6 +86,7 @@ function TestSetBehavior(set) {
}
}
TestSetBehavior(new Set);
+TestSet(new WeakSet, new Object);
// Test expected mapping behavior for Maps and WeakMaps
@@ -185,6 +187,7 @@ function TestEnumerable(func) {
TestEnumerable(Set);
TestEnumerable(Map);
TestEnumerable(WeakMap);
+TestEnumerable(WeakSet);
// Test arbitrary properties on Maps and WeakMaps
@@ -207,6 +210,7 @@ TestArbitrary(new WeakMap);
assertTrue(Set() instanceof Set);
assertTrue(Map() instanceof Map);
assertTrue(WeakMap() instanceof WeakMap);
+assertTrue(WeakSet() instanceof WeakSet);
// Test whether NaN values as keys are treated correctly.
@@ -234,6 +238,7 @@ assertTrue(s instanceof Set);
assertTrue(Set.prototype.add instanceof Function)
assertTrue(Set.prototype.has instanceof Function)
assertTrue(Set.prototype.delete instanceof Function)
+assertTrue(Set.prototype.clear instanceof Function)
// Test some common JavaScript idioms for Maps
@@ -243,6 +248,7 @@ assertTrue(Map.prototype.set instanceof Function)
assertTrue(Map.prototype.get instanceof Function)
assertTrue(Map.prototype.has instanceof Function)
assertTrue(Map.prototype.delete instanceof Function)
+assertTrue(Map.prototype.clear instanceof Function)
// Test some common JavaScript idioms for WeakMaps
@@ -252,18 +258,37 @@ assertTrue(WeakMap.prototype.set instanceof Function)
assertTrue(WeakMap.prototype.get instanceof Function)
assertTrue(WeakMap.prototype.has instanceof Function)
assertTrue(WeakMap.prototype.delete instanceof Function)
+assertTrue(WeakMap.prototype.clear instanceof Function)
-// Test class of the Set, Map and WeakMap instance and prototype.
+// Test some common JavaScript idioms for WeakSets
+var s = new WeakSet;
+assertTrue(s instanceof WeakSet);
+assertTrue(WeakSet.prototype.add instanceof Function)
+assertTrue(WeakSet.prototype.has instanceof Function)
+assertTrue(WeakSet.prototype.delete instanceof Function)
+assertTrue(WeakSet.prototype.clear instanceof Function)
+
+
+// Test class of instance and prototype.
assertEquals("Set", %_ClassOf(new Set))
assertEquals("Object", %_ClassOf(Set.prototype))
assertEquals("Map", %_ClassOf(new Map))
assertEquals("Object", %_ClassOf(Map.prototype))
assertEquals("WeakMap", %_ClassOf(new WeakMap))
assertEquals("Object", %_ClassOf(WeakMap.prototype))
+assertEquals("WeakSet", %_ClassOf(new WeakSet))
+assertEquals("Object", %_ClassOf(WeakMap.prototype))
+
+
+// Test name of constructor.
+assertEquals("Set", Set.name);
+assertEquals("Map", Map.name);
+assertEquals("WeakMap", WeakMap.name);
+assertEquals("WeakSet", WeakSet.name);
-// Test constructor property of the Set, Map and WeakMap prototype.
+// Test constructor property of the Set, Map, WeakMap and WeakSet prototype.
function TestConstructor(C) {
assertFalse(C === Object.prototype.constructor);
assertSame(C, C.prototype.constructor);
@@ -273,6 +298,21 @@ function TestConstructor(C) {
TestConstructor(Set);
TestConstructor(Map);
TestConstructor(WeakMap);
+TestConstructor(WeakSet);
+
+
+function TestDescriptor(global, C) {
+ assertEquals({
+ value: C,
+ writable: true,
+ enumerable: false,
+ configurable: true
+ }, Object.getOwnPropertyDescriptor(global, C.name));
+}
+TestDescriptor(this, Set);
+TestDescriptor(this, Map);
+TestDescriptor(this, WeakMap);
+TestDescriptor(this, WeakSet);
// Regression test for WeakMap prototype.
@@ -304,15 +344,19 @@ var alwaysBogus = [ undefined, null, true, "x", 23, {} ];
var bogusReceiversTestSet = [
{ proto: Set.prototype,
funcs: [ 'add', 'has', 'delete' ],
- receivers: alwaysBogus.concat([ new Map, new WeakMap ]),
+ receivers: alwaysBogus.concat([ new Map, new WeakMap, new WeakSet ]),
},
{ proto: Map.prototype,
funcs: [ 'get', 'set', 'has', 'delete' ],
- receivers: alwaysBogus.concat([ new Set, new WeakMap ]),
+ receivers: alwaysBogus.concat([ new Set, new WeakMap, new WeakSet ]),
},
{ proto: WeakMap.prototype,
funcs: [ 'get', 'set', 'has', 'delete' ],
- receivers: alwaysBogus.concat([ new Set, new Map ]),
+ receivers: alwaysBogus.concat([ new Set, new Map, new WeakSet ]),
+ },
+ { proto: WeakSet.prototype,
+ funcs: [ 'add', 'has', 'delete' ],
+ receivers: alwaysBogus.concat([ new Set, new Map, new WeakMap ]),
},
];
function TestBogusReceivers(testSet) {
@@ -413,3 +457,14 @@ for (var i = 9; i >= 0; i--) {
assertFalse(w.has(k));
assertEquals(undefined, w.get(k));
})();
+
+
+// Test WeakSet clear
+(function() {
+ var k = new Object();
+ var w = new WeakSet();
+ w.add(k);
+ assertTrue(w.has(k));
+ w.clear();
+ assertFalse(w.has(k));
+})();
\ No newline at end of file
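
The WeakSet additions above assert the full harmony-era surface: add/has/delete plus a clear method that was later dropped from the final ES2015 spec. A minimal sketch of the semantics being asserted, runnable on modern engines (clear left commented out for that reason):

var k = {};
var ws = new WeakSet();
ws.add(k);
console.log(ws.has(k));   // true -- membership is by object identity
ws.delete(k);
console.log(ws.has(k));   // false
// ws.add(1) would throw a TypeError: WeakSet entries must be objects.
// ws.clear() existed at this point in the harmony spec but was later removed.
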
diff --git a/deps/v8/test/mjsunit/harmony/dataview-accessors.js b/deps/v8/test/mjsunit/harmony/dataview-accessors.js
index 9dd8fe35e0..c57841c494 100644
--- a/deps/v8/test/mjsunit/harmony/dataview-accessors.js
+++ b/deps/v8/test/mjsunit/harmony/dataview-accessors.js
@@ -62,7 +62,10 @@ function getElementSize(func) {
function checkGet(func, index, expected, littleEndian) {
function doGet() {
- return view["get" + func](index, littleEndian);
+ if (littleEndian != undefined)
+ return view["get" + func](index, littleEndian);
+ else
+ return view["get" + func](index);
}
if (index >=0 && index + getElementSize(func) - 1 < view.byteLength)
assertSame(expected, doGet());
@@ -72,7 +75,10 @@ function checkGet(func, index, expected, littleEndian) {
function checkSet(func, index, value, littleEndian) {
function doSet() {
- view["set" + func](index, value, littleEndian);
+ if (littleEndian != undefined)
+ view["set" + func](index, value, littleEndian);
+ else
+ view["set" + func](index, value);
}
if (index >= 0 &&
index + getElementSize(func) - 1 < view.byteLength) {
@@ -105,39 +111,46 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
createDataView(array, 0, true, start, length);
test(isTestingGet, "Int8", 0, 0);
+ test(isTestingGet, "Int8", undefined, 0);
test(isTestingGet, "Int8", 8, -128);
test(isTestingGet, "Int8", 15, -1);
test(isTestingGet, "Uint8", 0, 0);
+ test(isTestingGet, "Uint8", undefined, 0);
test(isTestingGet, "Uint8", 8, 128);
test(isTestingGet, "Uint8", 15, 255);
// Little endian.
test(isTestingGet, "Int16", 0, 256, true);
+ test(isTestingGet, "Int16", undefined, 256, true);
test(isTestingGet, "Int16", 5, 26213, true);
test(isTestingGet, "Int16", 9, -32127, true);
test(isTestingGet, "Int16", 14, -2, true);
// Big endian.
test(isTestingGet, "Int16", 0, 1);
+ test(isTestingGet, "Int16", undefined, 1);
test(isTestingGet, "Int16", 5, 25958);
test(isTestingGet, "Int16", 9, -32382);
test(isTestingGet, "Int16", 14, -257);
// Little endian.
test(isTestingGet, "Uint16", 0, 256, true);
+ test(isTestingGet, "Uint16", undefined, 256, true);
test(isTestingGet, "Uint16", 5, 26213, true);
test(isTestingGet, "Uint16", 9, 33409, true);
test(isTestingGet, "Uint16", 14, 65534, true);
// Big endian.
test(isTestingGet, "Uint16", 0, 1);
+ test(isTestingGet, "Uint16", undefined, 1);
test(isTestingGet, "Uint16", 5, 25958);
test(isTestingGet, "Uint16", 9, 33154);
test(isTestingGet, "Uint16", 14, 65279);
// Little endian.
test(isTestingGet, "Int32", 0, 50462976, true);
+ test(isTestingGet, "Int32", undefined, 50462976, true);
test(isTestingGet, "Int32", 3, 1717920771, true);
test(isTestingGet, "Int32", 6, -2122291354, true);
test(isTestingGet, "Int32", 9, -58490239, true);
@@ -145,6 +158,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
// Big endian.
test(isTestingGet, "Int32", 0, 66051);
+ test(isTestingGet, "Int32", undefined, 66051);
test(isTestingGet, "Int32", 3, 56911206);
test(isTestingGet, "Int32", 6, 1718059137);
test(isTestingGet, "Int32", 9, -2122152964);
@@ -152,6 +166,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
// Little endian.
test(isTestingGet, "Uint32", 0, 50462976, true);
+ test(isTestingGet, "Uint32", undefined, 50462976, true);
test(isTestingGet, "Uint32", 3, 1717920771, true);
test(isTestingGet, "Uint32", 6, 2172675942, true);
test(isTestingGet, "Uint32", 9, 4236477057, true);
@@ -159,6 +174,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
// Big endian.
test(isTestingGet, "Uint32", 0, 66051);
+ test(isTestingGet, "Uint32", undefined, 66051);
test(isTestingGet, "Uint32", 3, 56911206);
test(isTestingGet, "Uint32", 6, 1718059137);
test(isTestingGet, "Uint32", 9, 2172814332);
@@ -169,6 +185,7 @@ function testFloat(isTestingGet, func, array, start, expected) {
// Little endian.
createDataView(array, 0, true, start);
test(isTestingGet, func, 0, expected, true);
+ test(isTestingGet, func, undefined, expected, true);
createDataView(array, 3, true, start);
test(isTestingGet, func, 3, expected, true);
createDataView(array, 7, true, start);
@@ -179,6 +196,7 @@ function testFloat(isTestingGet, func, array, start, expected) {
// Big endian.
createDataView(array, 0, false);
test(isTestingGet, func, 0, expected, false);
+ test(isTestingGet, func, undefined, expected, false);
createDataView(array, 3, false);
test(isTestingGet, func, 3, expected, false);
createDataView(array, 7, false);
@@ -276,18 +294,101 @@ function TestSetters() {
runFloatTestCases(false, 7);
runNegativeIndexTests(false);
-
}
TestGetters();
TestSetters();
+function CheckOutOfRangeInt8(value, expected) {
+ var view = new DataView(new ArrayBuffer(100));
+ assertSame(undefined, view.setInt8(0, value));
+ assertSame(expected, view.getInt8(0));
+ assertSame(undefined, view.setInt8(0, value, true));
+ assertSame(expected, view.getInt8(0, true));
+}
+
+function CheckOutOfRangeUint8(value, expected) {
+ var view = new DataView(new ArrayBuffer(100));
+ assertSame(undefined, view.setUint8(0, value));
+ assertSame(expected, view.getUint8(0));
+ assertSame(undefined, view.setUint8(0, value, true));
+ assertSame(expected, view.getUint8(0, true));
+}
+
+function CheckOutOfRangeInt16(value, expected) {
+ var view = new DataView(new ArrayBuffer(100));
+ assertSame(undefined, view.setInt16(0, value));
+ assertSame(expected, view.getInt16(0));
+ assertSame(undefined, view.setInt16(0, value, true));
+ assertSame(expected, view.getInt16(0, true));
+}
+
+function CheckOutOfRangeUint16(value, expected) {
+ var view = new DataView(new ArrayBuffer(100));
+ assertSame(undefined, view.setUint16(0, value));
+ assertSame(expected, view.getUint16(0));
+ assertSame(undefined, view.setUint16(0, value, true));
+ assertSame(expected, view.getUint16(0, true));
+}
+
+function CheckOutOfRangeInt32(value, expected) {
+ var view = new DataView(new ArrayBuffer(100));
+ assertSame(undefined, view.setInt32(0, value));
+ assertSame(expected, view.getInt32(0));
+ assertSame(undefined, view.setInt32(0, value, true));
+ assertSame(expected, view.getInt32(0, true));
+}
+
+function CheckOutOfRangeUint32(value, expected) {
+ var view = new DataView(new ArrayBuffer(100));
+ assertSame(undefined, view.setUint32(0, value));
+ assertSame(expected, view.getUint32(0));
+ assertSame(undefined, view.setUint32(0, value, true));
+ assertSame(expected, view.getUint32(0, true));
+}
+
+function TestOutOfRange() {
+ CheckOutOfRangeInt8(0x80, -0x80);
+ CheckOutOfRangeInt8(0x1000, 0);
+ CheckOutOfRangeInt8(-0x81, 0x7F);
+
+ CheckOutOfRangeUint8(0x100, 0);
+ CheckOutOfRangeUint8(0x1000, 0);
+ CheckOutOfRangeUint8(-0x80, 0x80);
+ CheckOutOfRangeUint8(-1, 0xFF);
+ CheckOutOfRangeUint8(-0xFF, 1);
+
+ CheckOutOfRangeInt16(0x8000, -0x8000);
+ CheckOutOfRangeInt16(0x10000, 0);
+ CheckOutOfRangeInt16(-0x8001, 0x7FFF);
+
+ CheckOutOfRangeUint16(0x10000, 0);
+ CheckOutOfRangeUint16(0x100000, 0);
+ CheckOutOfRangeUint16(-0x8000, 0x8000);
+ CheckOutOfRangeUint16(-1, 0xFFFF);
+ CheckOutOfRangeUint16(-0xFFFF, 1);
+
+ CheckOutOfRangeInt32(0x80000000, -0x80000000);
+ CheckOutOfRangeInt32(0x100000000, 0);
+ CheckOutOfRangeInt32(-0x80000001, 0x7FFFFFFF);
+
+ CheckOutOfRangeUint32(0x100000000, 0);
+ CheckOutOfRangeUint32(0x1000000000, 0);
+ CheckOutOfRangeUint32(-0x80000000, 0x80000000);
+ CheckOutOfRangeUint32(-1, 0xFFFFFFFF);
+ CheckOutOfRangeUint32(-0xFFFFFFFF, 1);
+}
+
+TestOutOfRange();
+
function TestGeneralAccessors() {
var a = new DataView(new ArrayBuffer(256));
function CheckAccessor(name) {
var f = a[name];
+ assertThrows(function() { f(); }, TypeError);
f.call(a, 0, 0); // should not throw
assertThrows(function() { f.call({}, 0, 0); }, TypeError);
+ assertThrows(function() { f.call(a); }, TypeError);
}
CheckAccessor("getUint8");
CheckAccessor("setUint8");
diff --git a/deps/v8/test/mjsunit/harmony/numeric-literals-off.js b/deps/v8/test/mjsunit/harmony/numeric-literals-off.js
new file mode 100644
index 0000000000..37204ed9d7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/numeric-literals-off.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is to ensure that we do not support 0b and 0o in Number when
+// the --harmony-numeric-literals flag is not set.
+
+
+function TestOctalLiteralUsingNumberFunction() {
+ assertEquals(NaN, Number('0o0'));
+}
+TestOctalLiteralUsingNumberFunction();
+
+
+function TestBinaryLiteralUsingNumberFunction() {
+ assertEquals(NaN, Number('0b0'));
+}
+TestBinaryLiteralUsingNumberFunction();
diff --git a/deps/v8/test/mjsunit/harmony/numeric-literals.js b/deps/v8/test/mjsunit/harmony/numeric-literals.js
new file mode 100644
index 0000000000..7300f3e47e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/numeric-literals.js
@@ -0,0 +1,87 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-numeric-literals
+
+function TestOctalLiteral() {
+ assertEquals(0, 0o0);
+ assertEquals(0, 0O0);
+ assertEquals(1, 0o1);
+ assertEquals(7, 0o7);
+ assertEquals(8, 0o10);
+ assertEquals(63, 0o77);
+}
+TestOctalLiteral();
+
+
+function TestOctalLiteralUsingNumberFunction() {
+ assertEquals(0, Number('0o0'));
+ assertEquals(0, Number('0O0'));
+ assertEquals(1, Number('0o1'));
+ assertEquals(7, Number('0o7'));
+ assertEquals(8, Number('0o10'));
+ assertEquals(63, Number('0o77'));
+}
+TestOctalLiteralUsingNumberFunction();
+
+
+function TestBinaryLiteral() {
+ assertEquals(0, 0b0);
+ assertEquals(0, 0B0);
+ assertEquals(1, 0b1);
+ assertEquals(2, 0b10);
+ assertEquals(3, 0b11);
+}
+TestBinaryLiteral();
+
+
+function TestBinaryLiteralUsingNumberFunction() {
+ assertEquals(0, Number('0b0'));
+ assertEquals(0, Number('0B0'));
+ assertEquals(1, Number('0b1'));
+ assertEquals(2, Number('0b10'));
+ assertEquals(3, Number('0b11'));
+}
+TestBinaryLiteralUsingNumberFunction();
+
+
+// parseInt should (probably) not support 0b and 0o.
+// https://bugs.ecmascript.org/show_bug.cgi?id=1585
+function TestParseIntDoesNotSupportOctalNorBinary() {
+ assertEquals(0, parseInt('0o77'));
+ assertEquals(0, parseInt('0o77', 8));
+ assertEquals(0, parseInt('0b11'));
+ assertEquals(0, parseInt('0b11', 2));
+}
+TestParseIntDoesNotSupportOctalNorBinary();
+
+
+function TestParseFloatDoesNotSupportOctalNorBinary() {
+ assertEquals(0, parseFloat('0o77'));
+ assertEquals(0, parseFloat('0b11'));
+}
+TestParseFloatDoesNotSupportOctalNorBinary();
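
Taken together with numeric-literals-off.js above, these tests pin down a three-way split: the 0o/0b prefixes are understood by the literal grammar and by Number() (string-to-number conversion), while parseInt and parseFloat stop at the first character that is invalid in their radix. A quick standalone comparison, assuming an engine with the literals enabled:

console.log(Number('0o77'));       // 63 -- ToNumber understands the prefix
console.log(Number('0b11'));       // 3
console.log(parseInt('0o77'));     // 0  -- parses "0", then stops at "o"
console.log(parseInt('77', 8));    // 63 -- the prefix must be stripped by hand
console.log(parseFloat('0b11'));   // 0  -- likewise parses "0", stops at "b"
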
diff --git a/deps/v8/test/mjsunit/harmony/object-observe.js b/deps/v8/test/mjsunit/harmony/object-observe.js
index 0434ccdcb6..103dda6567 100644
--- a/deps/v8/test/mjsunit/harmony/object-observe.js
+++ b/deps/v8/test/mjsunit/harmony/object-observe.js
@@ -637,7 +637,8 @@ Object.observe(obj1, recursiveObserver2);
Object.observe(obj2, recursiveObserver2);
++obj1.a;
Object.deliverChangeRecords(recursiveObserver2);
-assertEquals(199, recordCount);
+// TODO(verwaest): Disabled because of bug 2774.
+// assertEquals(199, recordCount);
// Observing named properties.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-for.js b/deps/v8/test/mjsunit/harmony/proxies-for.js
index 3d419c6dca..d0f2a022fd 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-for.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-for.js
@@ -62,10 +62,10 @@ TestForIn(["b", "d"], {
getPropertyNames: function() { return ["a", "b", "c", "d", "e"] },
getPropertyDescriptor: function(k) {
switch (k) {
- case "a": return {enumerable: false, value: "3"};
- case "b": return {enumerable: true, get get() {}};
- case "c": return {value: 4};
- case "d": return {get enumerable() { return true }};
+ case "a": return {enumerable: false, value: "3", configurable: true};
+ case "b": return {enumerable: true, get get() {}, configurable: true};
+ case "c": return {value: 4, configurable: true};
+ case "d": return {get enumerable() { return true }, configurable: true};
default: return undefined;
}
}
@@ -103,7 +103,7 @@ function TestForInDerived2(create, properties, handler) {
TestForInDerived(["0", "a"], {
enumerate: function() { return [0, "a"] },
getPropertyDescriptor: function(k) {
- return k == "0" || k == "a" ? {} : undefined
+ return k == "0" || k == "a" ? {configurable: true} : undefined
}
})
@@ -111,7 +111,7 @@ TestForInDerived(["null", "a"], {
enumerate: function() { return this.enumerate2() },
enumerate2: function() { return [null, "a"] },
getPropertyDescriptor: function(k) {
- return k == "null" || k == "a" ? {} : undefined
+ return k == "null" || k == "a" ? {configurable: true} : undefined
}
})
@@ -119,10 +119,10 @@ TestForInDerived(["b", "d"], {
getPropertyNames: function() { return ["a", "b", "c", "d", "e"] },
getPropertyDescriptor: function(k) {
switch (k) {
- case "a": return {enumerable: false, value: "3"};
- case "b": return {enumerable: true, get get() {}};
- case "c": return {value: 4};
- case "d": return {get enumerable() { return true }};
+ case "a": return {enumerable: false, value: "3", configurable: true};
+ case "b": return {enumerable: true, get get() {}, configurable: true};
+ case "c": return {value: 4, configurable: true};
+ case "d": return {get enumerable() { return true }, configurable: true};
default: return undefined;
}
}
diff --git a/deps/v8/test/mjsunit/harmony/proxies-with.js b/deps/v8/test/mjsunit/harmony/proxies-with.js
new file mode 100644
index 0000000000..94de25e3ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-with.js
@@ -0,0 +1,446 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies
+
+
+// Helper.
+
+function TestWithProxies(test, x, y, z) {
+ test(Proxy.create, x, y, z)
+ test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
+}
+
+
+
+// Getting.
+
+function TestWithGet(handler) {
+ TestWithProxies(TestWithGet2, handler)
+}
+
+var c = "global"
+var key = ""
+
+function TestWithGet2(create, handler) {
+ var b = "local"
+
+ var p = create(handler)
+ with (p) {
+ assertEquals("onproxy", a)
+ assertEquals("local", b)
+ assertEquals("global", c)
+ }
+
+ var o = Object.create(p, {d: {value: "own"}})
+ with (o) {
+ assertEquals("onproxy", a)
+ assertEquals("local", b)
+ assertEquals("global", c)
+ assertEquals("own", d)
+ }
+}
+
+TestWithGet({
+ get: function(r, k) { key = k; return k === "a" ? "onproxy" : undefined },
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: "onproxy", configurable: true} : undefined
+ }
+})
+
+TestWithGet({
+ get: function(r, k) { return this.get2(r, k) },
+ get2: function(r, k) { key = k; return k === "a" ? "onproxy" : undefined },
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: "onproxy", configurable: true} : undefined
+ }
+})
+
+TestWithGet({
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: "onproxy", configurable: true} : undefined
+ }
+})
+
+TestWithGet({
+ getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+ getPropertyDescriptor2: function(k) {
+ key = k;
+ return k === "a" ? {value: "onproxy", configurable: true} : undefined
+ }
+})
+
+TestWithGet({
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ?
+ {get value() { return "onproxy" }, configurable: true} : undefined
+ }
+})
+
+TestWithGet({
+ get: undefined,
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: "onproxy", configurable: true} : undefined
+ }
+})
+
+
+
+// Invoking.
+
+function TestWithGetCall(handler) {
+ TestWithProxies(TestWithGetCall2, handler)
+}
+
+var receiver = null
+var c = function() { return "global" }
+
+function TestWithGetCall2(create, handler) {
+ var b = function() { return "local" }
+
+ var p = create(handler)
+ with (p) {
+ receiver = null
+ assertEquals("onproxy", a())
+ assertSame(p, receiver)
+ assertEquals("local", b())
+ assertEquals("global", c())
+ }
+
+ var o = Object.create(p, {d: {value: function() { return "own" }}})
+ with (o) {
+ receiver = null
+ assertEquals("onproxy", a())
+ assertSame(o, receiver)
+ assertEquals("local", b())
+ assertEquals("global", c())
+ assertEquals("own", d())
+ }
+}
+
+function onproxy() { receiver = this; return "onproxy" }
+
+TestWithGetCall({
+ get: function(r, k) { key = k; return k === "a" ? onproxy : undefined },
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxy, configurable: true} : undefined
+ }
+})
+
+TestWithGetCall({
+ get: function(r, k) { return this.get2(r, k) },
+ get2: function(r, k) { key = k; return k === "a" ? onproxy : undefined },
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxy, configurable: true} : undefined
+ }
+})
+
+TestWithGetCall({
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxy, configurable: true} : undefined
+ }
+})
+
+TestWithGetCall({
+ getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+ getPropertyDescriptor2: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxy, configurable: true} : undefined
+ }
+})
+
+TestWithGetCall({
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ?
+ {get value() { return onproxy }, configurable: true} : undefined
+ }
+})
+
+TestWithGetCall({
+ get: undefined,
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxy, configurable: true} : undefined
+ }
+})
+
+
+function TestWithGetCallThrow(handler) {
+ TestWithProxies(TestWithGetCallThrow2, handler)
+}
+
+function TestWithGetCallThrow2(create, handler) {
+ var b = function() { return "local" }
+
+ var p = create(handler)
+ with (p) {
+ assertThrows(function(){ a() }, "myexn")
+ assertEquals("local", b())
+ assertEquals("global", c())
+ }
+
+ var o = Object.create(p, {d: {value: function() { return "own" }}})
+ with (o) {
+ assertThrows(function(){ a() }, "myexn")
+ assertEquals("local", b())
+ assertEquals("global", c())
+ assertEquals("own", d())
+ }
+}
+
+function onproxythrow() { throw "myexn" }
+
+TestWithGetCallThrow({
+ get: function(r, k) { key = k; return k === "a" ? onproxythrow : undefined },
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxythrow, configurable: true} : undefined
+ }
+})
+
+TestWithGetCallThrow({
+ get: function(r, k) { return this.get2(r, k) },
+ get2: function(r, k) { key = k; return k === "a" ? onproxythrow : undefined },
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxythrow, configurable: true} : undefined
+ }
+})
+
+TestWithGetCallThrow({
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxythrow, configurable: true} : undefined
+ }
+})
+
+TestWithGetCallThrow({
+ getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+ getPropertyDescriptor2: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxythrow, configurable: true} : undefined
+ }
+})
+
+TestWithGetCallThrow({
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ?
+ {get value() { return onproxythrow }, configurable: true} : undefined
+ }
+})
+
+TestWithGetCallThrow({
+ get: undefined,
+ getPropertyDescriptor: function(k) {
+ key = k;
+ return k === "a" ? {value: onproxythrow, configurable: true} : undefined
+ }
+})
+
+
+
+// Setting.
+
+var key
+var val
+
+function TestWithSet(handler, hasSetter) {
+ TestWithProxies(TestWithSet2, handler, hasSetter)
+}
+
+var c = "global"
+
+function TestWithSet2(create, handler, hasSetter) {
+ var b = "local"
+
+ var p = create(handler)
+ key = val = undefined
+ with (p) {
+ a = "set"
+ assertEquals("a", key)
+ assertEquals("set", val)
+ assertEquals("local", b)
+ assertEquals("global", c)
+ b = "local"
+ c = "global"
+ assertEquals("a", key)
+ assertEquals("set", val)
+ }
+
+ if (!hasSetter) return
+
+ var o = Object.create(p, {d: {value: "own"}})
+ key = val = undefined
+ with (o) {
+ a = "set"
+ assertEquals("a", key)
+ assertEquals("set", val)
+ assertEquals("local", b)
+ assertEquals("global", c)
+ assertEquals("own", d)
+ b = "local"
+ c = "global"
+ d = "own"
+ assertEquals("a", key)
+ assertEquals("set", val)
+ }
+}
+
+TestWithSet({
+ set: function(r, k, v) { key = k; val = v; return true },
+ getPropertyDescriptor: function(k) {
+ return k === "a" ? {writable: true, configurable: true} : undefined
+ }
+})
+
+TestWithSet({
+ set: function(r, k, v) { return this.set2(r, k, v) },
+ set2: function(r, k, v) { key = k; val = v; return true },
+ getPropertyDescriptor: function(k) {
+ return k === "a" ? {writable: true, configurable: true} : undefined
+ }
+})
+
+TestWithSet({
+ getPropertyDescriptor: function(k) {
+ return this.getOwnPropertyDescriptor(k)
+ },
+ getOwnPropertyDescriptor: function(k) {
+ return k === "a" ? {writable: true, configurable: true} : undefined
+ },
+ defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestWithSet({
+ getOwnPropertyDescriptor: function(k) {
+ return this.getPropertyDescriptor2(k)
+ },
+ getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+ getPropertyDescriptor2: function(k) {
+ return k === "a" ? {writable: true, configurable: true} : undefined
+ },
+ defineProperty: function(k, desc) { this.defineProperty2(k, desc) },
+ defineProperty2: function(k, desc) { key = k; val = desc.value }
+})
+
+TestWithSet({
+ getOwnPropertyDescriptor: function(k) {
+ return this.getPropertyDescriptor(k)
+ },
+ getPropertyDescriptor: function(k) {
+ return k === "a" ?
+ {get writable() { return true }, configurable: true} : undefined
+ },
+ defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+TestWithSet({
+ getOwnPropertyDescriptor: function(k) {
+ return this.getPropertyDescriptor(k)
+ },
+ getPropertyDescriptor: function(k) {
+ return k === "a" ?
+ {set: function(v) { key = k; val = v }, configurable: true} : undefined
+ }
+}, true)
+
+TestWithSet({
+ getOwnPropertyDescriptor: function(k) {
+ return this.getPropertyDescriptor(k)
+ },
+ getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+ getPropertyDescriptor2: function(k) {
+ return k === "a" ?
+ {set: function(v) { key = k; val = v }, configurable: true} : undefined
+ }
+}, true)
+
+TestWithSet({
+ getOwnPropertyDescriptor: function(k) { return null },
+ getPropertyDescriptor: function(k) {
+ return k === "a" ? {writable: true, configurable: true} : undefined
+ },
+ defineProperty: function(k, desc) { key = k; val = desc.value }
+})
+
+
+function TestWithSetThrow(handler, hasSetter) {
+ TestWithProxies(TestWithSetThrow2, handler, hasSetter)
+}
+
+function TestWithSetThrow2(create, handler, hasSetter) {
+ var p = create(handler)
+ assertThrows(function(){
+ with (p) {
+ a = 1
+ }
+ }, "myexn")
+
+ if (!hasSetter) return
+
+ var o = Object.create(p, {})
+ assertThrows(function(){
+ with (o) {
+ a = 1
+ }
+ }, "myexn")
+}
+
+TestWithSetThrow({
+ set: function(r, k, v) { throw "myexn" },
+ getPropertyDescriptor: function(k) {
+ return k === "a" ? {writable: true, configurable: true} : undefined
+ }
+})
+
+TestWithSetThrow({
+ getPropertyDescriptor: function(k) { throw "myexn" },
+})
+
+TestWithSetThrow({
+ getPropertyDescriptor: function(k) {
+ return k === "a" ? {writable: true, configurable: true} : undefined
+ },
+ defineProperty: function(k, desc) { throw "myexn" }
+})
+
+TestWithSetThrow({
+ getPropertyDescriptor: function(k) {
+ return k === "a" ?
+ {set: function() { throw "myexn" }, configurable: true} : undefined
+ }
+}, true)
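
The helpers above target the old harmony proxy API (Proxy.create, derived getPropertyDescriptor traps), which never shipped as-is; in the final ES2015 Proxy API, a with statement routes unqualified lookups through the has and get traps instead. A rough modern equivalent of the "Getting" case, as a sketch (with requires sloppy mode):

var p = new Proxy({}, {
  has: function(target, key) { return key === 'a'; },  // claim to have only 'a'
  get: function(target, key) { if (key === 'a') return 'onproxy'; }
});
var c = 'global';
with (p) {
  console.log(a);  // "onproxy" -- the lookup for 'a' is answered by the proxy
  console.log(c);  // "global"  -- has() said no, so the lookup falls through
}
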
diff --git a/deps/v8/test/mjsunit/harmony/typedarrays.js b/deps/v8/test/mjsunit/harmony/typedarrays.js
index e1b0e653d6..c6d130fc0c 100644
--- a/deps/v8/test/mjsunit/harmony/typedarrays.js
+++ b/deps/v8/test/mjsunit/harmony/typedarrays.js
@@ -117,31 +117,33 @@ TestArrayBufferSlice();
// Typed arrays
-function TestTypedArray(proto, elementSize, typicalElement) {
+function TestTypedArray(constr, elementSize, typicalElement) {
+ assertSame(elementSize, constr.BYTES_PER_ELEMENT);
+
var ab = new ArrayBuffer(256*elementSize);
- var a0 = new proto(30);
+ var a0 = new constr(30);
assertSame(elementSize, a0.BYTES_PER_ELEMENT);
assertSame(30, a0.length);
assertSame(30*elementSize, a0.byteLength);
assertSame(0, a0.byteOffset);
assertSame(30*elementSize, a0.buffer.byteLength);
- var aLen0 = new proto(0);
+ var aLen0 = new constr(0);
assertSame(elementSize, aLen0.BYTES_PER_ELEMENT);
assertSame(0, aLen0.length);
assertSame(0, aLen0.byteLength);
assertSame(0, aLen0.byteOffset);
assertSame(0, aLen0.buffer.byteLength);
- var aOverBufferLen0 = new proto(ab, 128*elementSize, 0);
+ var aOverBufferLen0 = new constr(ab, 128*elementSize, 0);
assertSame(ab, aOverBufferLen0.buffer);
assertSame(elementSize, aOverBufferLen0.BYTES_PER_ELEMENT);
assertSame(0, aOverBufferLen0.length);
assertSame(0, aOverBufferLen0.byteLength);
assertSame(128*elementSize, aOverBufferLen0.byteOffset);
- var a1 = new proto(ab, 128*elementSize, 128);
+ var a1 = new constr(ab, 128*elementSize, 128);
assertSame(ab, a1.buffer);
assertSame(elementSize, a1.BYTES_PER_ELEMENT);
assertSame(128, a1.length);
@@ -149,20 +151,20 @@ function TestTypedArray(proto, elementSize, typicalElement) {
assertSame(128*elementSize, a1.byteOffset);
- var a2 = new proto(ab, 64*elementSize, 128);
+ var a2 = new constr(ab, 64*elementSize, 128);
assertSame(ab, a2.buffer);
assertSame(elementSize, a2.BYTES_PER_ELEMENT);
assertSame(128, a2.length);
assertSame(128*elementSize, a2.byteLength);
assertSame(64*elementSize, a2.byteOffset);
- var a3 = new proto(ab, 192*elementSize);
+ var a3 = new constr(ab, 192*elementSize);
assertSame(ab, a3.buffer);
assertSame(64, a3.length);
assertSame(64*elementSize, a3.byteLength);
assertSame(192*elementSize, a3.byteOffset);
- var a4 = new proto(ab);
+ var a4 = new constr(ab);
assertSame(ab, a4.buffer);
assertSame(256, a4.length);
assertSame(256*elementSize, a4.byteLength);
@@ -198,31 +200,30 @@ function TestTypedArray(proto, elementSize, typicalElement) {
assertSame(typicalElement, a4[i]);
}
- var aAtTheEnd = new proto(ab, 256*elementSize);
+ var aAtTheEnd = new constr(ab, 256*elementSize);
assertSame(elementSize, aAtTheEnd.BYTES_PER_ELEMENT);
assertSame(0, aAtTheEnd.length);
assertSame(0, aAtTheEnd.byteLength);
assertSame(256*elementSize, aAtTheEnd.byteOffset);
- assertThrows(function () { new proto(ab, 257*elementSize); }, RangeError);
+ assertThrows(function () { new constr(ab, 257*elementSize); }, RangeError);
assertThrows(
- function () { new proto(ab, 128*elementSize, 192); },
+ function () { new constr(ab, 128*elementSize, 192); },
RangeError);
if (elementSize !== 1) {
- assertThrows(function() { new proto(ab, 128*elementSize - 1, 10); },
+ assertThrows(function() { new constr(ab, 128*elementSize - 1, 10); },
RangeError);
var unalignedArrayBuffer = new ArrayBuffer(10*elementSize + 1);
- var goodArray = new proto(unalignedArrayBuffer, 0, 10);
+ var goodArray = new constr(unalignedArrayBuffer, 0, 10);
assertSame(10, goodArray.length);
assertSame(10*elementSize, goodArray.byteLength);
- assertThrows(function() { new proto(unalignedArrayBuffer)}, RangeError);
- assertThrows(function() { new proto(unalignedArrayBuffer, 5*elementSize)},
+ assertThrows(function() { new constr(unalignedArrayBuffer)}, RangeError);
+ assertThrows(function() { new constr(unalignedArrayBuffer, 5*elementSize)},
RangeError);
- assertThrows(function() { new proto() }, TypeError);
}
- var aFromString = new proto("30");
+ var aFromString = new constr("30");
assertSame(elementSize, aFromString.BYTES_PER_ELEMENT);
assertSame(30, aFromString.length);
assertSame(30*elementSize, aFromString.byteLength);
@@ -233,7 +234,7 @@ function TestTypedArray(proto, elementSize, typicalElement) {
for (i = 0; i < 30; i++) {
jsArray.push(typicalElement);
}
- var aFromArray = new proto(jsArray);
+ var aFromArray = new constr(jsArray);
assertSame(elementSize, aFromArray.BYTES_PER_ELEMENT);
assertSame(30, aFromArray.length);
assertSame(30*elementSize, aFromArray.byteLength);
@@ -244,12 +245,18 @@ function TestTypedArray(proto, elementSize, typicalElement) {
}
var abLen0 = new ArrayBuffer(0);
- var aOverAbLen0 = new proto(abLen0);
+ var aOverAbLen0 = new constr(abLen0);
assertSame(abLen0, aOverAbLen0.buffer);
assertSame(elementSize, aOverAbLen0.BYTES_PER_ELEMENT);
assertSame(0, aOverAbLen0.length);
assertSame(0, aOverAbLen0.byteLength);
assertSame(0, aOverAbLen0.byteOffset);
+
+ var aNoParam = new constr();
+ assertSame(elementSize, aNoParam.BYTES_PER_ELEMENT);
+ assertSame(0, aNoParam.length);
+ assertSame(0, aNoParam.byteLength);
+ assertSame(0, aNoParam.byteOffset);
}
TestTypedArray(Uint8Array, 1, 0xFF);
@@ -448,10 +455,18 @@ function TestTypedArraySet() {
// Invalid source
var a = new Uint16Array(50);
- assertThrows(function() { a.set(0) }, TypeError);
- assertThrows(function() { a.set({}) }, TypeError);
+ var expected = [];
+ for (i = 0; i < 50; i++) {
+ a[i] = i;
+ expected.push(i);
+ }
+ a.set({});
+ assertArrayPrefix(expected, a);
assertThrows(function() { a.set.call({}) }, TypeError);
assertThrows(function() { a.set.call([]) }, TypeError);
+
+ assertThrows(function() { a.set(0); }, TypeError);
+ assertThrows(function() { a.set(0, 1); }, TypeError);
}
TestTypedArraySet();
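
Beyond the proto-to-constr rename, three behavioral changes ride along in these hunks: BYTES_PER_ELEMENT is now also asserted on the constructor itself, a no-argument constructor call now yields a zero-length array instead of throwing, and a.set({}) (a source with no usable length) becomes a silent no-op rather than a TypeError. A sketch of the first two, which also hold under final ES2015 semantics:

console.log(Uint16Array.BYTES_PER_ELEMENT);  // 2 -- a property of the constructor
var a = new Uint16Array();                   // no arguments: empty, not an error
console.log(a.length);                       // 0
console.log(a.byteLength);                   // 0
console.log(a.buffer.byteLength);            // 0 -- backed by a zero-length buffer
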
diff --git a/deps/v8/test/mjsunit/manual-parallel-recompile.js b/deps/v8/test/mjsunit/manual-parallel-recompile.js
index b502fb19ad..84bfff1a57 100644
--- a/deps/v8/test/mjsunit/manual-parallel-recompile.js
+++ b/deps/v8/test/mjsunit/manual-parallel-recompile.js
@@ -33,14 +33,6 @@ if (!%IsParallelRecompilationSupported()) {
quit();
}
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
-function assertOptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 2);
-}
-
function f(x) {
var xx = x * x;
var xxstr = xx.toString();
@@ -65,11 +57,8 @@ assertUnoptimized(g);
%OptimizeFunctionOnNextCall(g, "parallel");
f(g(2)); // Trigger optimization.
-assertUnoptimized(f); // Not yet optimized.
-assertUnoptimized(g);
-
-%CompleteOptimization(f); // Wait till optimized code is installed.
-%CompleteOptimization(g);
+assertUnoptimized(f, "no sync"); // Not yet optimized while parallel thread
+assertUnoptimized(g, "no sync"); // is running.
-assertOptimized(f); // Optimized now.
-assertOptimized(g);
+assertOptimized(f, "sync"); // Optimized once we sync with the parallel thread.
+assertOptimized(g, "sync");
diff --git a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
index 7349165854..269e96f50b 100644
--- a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
+++ b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --nouse_inlining --noparallel-recompilation
+// Flags: --allow-natives-syntax --nouse_inlining
// Test for negative zero that doesn't need bail out
@@ -38,4 +38,4 @@ test_div_no_deopt_minus_zero();
test_div_no_deopt_minus_zero();
%OptimizeFunctionOnNextCall(test_div_no_deopt_minus_zero);
test_div_no_deopt_minus_zero();
-assertTrue(2 != %GetOptimizationStatus(test_div_no_deopt_minus_zero));
+assertOptimized(test_div_no_deopt_minus_zero);
diff --git a/deps/v8/test/mjsunit/md5.js b/deps/v8/test/mjsunit/md5.js
new file mode 100644
index 0000000000..38dc802312
--- /dev/null
+++ b/deps/v8/test/mjsunit/md5.js
@@ -0,0 +1,211 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
+// Digest Algorithm, as defined in RFC 1321.
+// Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
+// Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
+// Distributed under the BSD License
+// See http://pajhome.org.uk/crypt/md5 for more info.
+//
+
+function hex_md5(s) {
+ return binl2hex(core_md5(str2binl(s), s.length * 8));
+}
+
+function core_md5(x, len) {
+ /* append padding */
+ x[len >> 5] |= 0x80 << ((len) % 32);
+ x[(((len + 64) >>> 9) << 4) + 14] = len;
+
+ var a = 1732584193;
+ var b = -271733879;
+ var c = -1732584194;
+ var d = 271733878;
+
+ for (var i = 0; i < x.length; i += 16) {
+ var olda = a;
+ var oldb = b;
+ var oldc = c;
+ var oldd = d;
+
+ a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936);
+ d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586);
+ c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819);
+ b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330);
+ a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897);
+ d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426);
+ c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341);
+ b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983);
+ a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416);
+ d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417);
+ c = md5_ff(c, d, a, b, x[i+10], 17, -42063);
+ b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162);
+ a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682);
+ d = md5_ff(d, a, b, c, x[i+13], 12, -40341101);
+ c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290);
+ b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329);
+
+ a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510);
+ d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632);
+ c = md5_gg(c, d, a, b, x[i+11], 14, 643717713);
+ b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302);
+ a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691);
+ d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083);
+ c = md5_gg(c, d, a, b, x[i+15], 14, -660478335);
+ b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848);
+ a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438);
+ d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690);
+ c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961);
+ b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501);
+ a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467);
+ d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784);
+ c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473);
+ b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734);
+
+ a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558);
+ d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463);
+ c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562);
+ b = md5_hh(b, c, d, a, x[i+14], 23, -35309556);
+ a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060);
+ d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353);
+ c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632);
+ b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640);
+ a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174);
+ d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222);
+ c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979);
+ b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189);
+ a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487);
+ d = md5_hh(d, a, b, c, x[i+12], 11, -421815835);
+ c = md5_hh(c, d, a, b, x[i+15], 16, 530742520);
+ b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651);
+
+ a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844);
+ d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415);
+ c = md5_ii(c, d, a, b, x[i+14], 15, -1416354905);
+ b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055);
+ a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571);
+ d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606);
+ c = md5_ii(c, d, a, b, x[i+10], 15, -1051523);
+ b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799);
+ a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359);
+ d = md5_ii(d, a, b, c, x[i+15], 10, -30611744);
+ c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380);
+ b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649);
+ a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070);
+ d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379);
+ c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259);
+ b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551);
+
+ a = safe_add(a, olda);
+ b = safe_add(b, oldb);
+ c = safe_add(c, oldc);
+ d = safe_add(d, oldd);
+ }
+ return Array(a, b, c, d);
+}
+
+function md5_cmn(q, a, b, x, s, t) {
+ return safe_add(bit_rol(safe_add(safe_add(a, q), safe_add(x, t)), s),b);
+}
+
+function md5_ff(a, b, c, d, x, s, t) {
+ return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
+}
+
+function md5_gg(a, b, c, d, x, s, t) {
+ return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
+}
+
+function md5_hh(a, b, c, d, x, s, t) {
+ return md5_cmn(b ^ c ^ d, a, b, x, s, t);
+}
+
+function md5_ii(a, b, c, d, x, s, t) {
+ return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
+}
+
+function safe_add(x, y) {
+ var lsw = (x & 0xFFFF) + (y & 0xFFFF);
+ var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
+ return (msw << 16) | (lsw & 0xFFFF);
+}
+
+function bit_rol(num, cnt) {
+ return (num << cnt) | (num >>> (32 - cnt));
+}
+
+function str2binl(str) {
+ var bin = Array();
+ var mask = (1 << 8) - 1;
+ for(var i = 0; i < str.length * 8; i += 8)
+ bin[i>>5] |= (str.charCodeAt(i / 8) & mask) << (i%32);
+ return bin;
+}
+
+function binl2hex(binarray) {
+ var hex_tab = "0123456789abcdef";
+ var str = "";
+ for(var i = 0; i < binarray.length * 4; i++) {
+ str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) +
+ hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF);
+ }
+ return str;
+}
+
+var plainText = "Rebellious subjects, enemies to peace,\n\
+Profaners of this neighbour-stained steel,--\n\
+Will they not hear? What, ho! you men, you beasts,\n\
+That quench the fire of your pernicious rage\n\
+With purple fountains issuing from your veins,\n\
+On pain of torture, from those bloody hands\n\
+Throw your mistemper'd weapons to the ground,\n\
+And hear the sentence of your moved prince.\n\
+Three civil brawls, bred of an airy word,\n\
+By thee, old Capulet, and Montague,\n\
+Have thrice disturb'd the quiet of our streets,\n\
+And made Verona's ancient citizens\n\
+Cast by their grave beseeming ornaments,\n\
+To wield old partisans, in hands as old,\n\
+Canker'd with peace, to part your canker'd hate:\n\
+If ever you disturb our streets again,\n\
+Your lives shall pay the forfeit of the peace.\n\
+For this time, all the rest depart away:\n\
+You Capulet; shall go along with me:\n\
+And, Montague, come you this afternoon,\n\
+To know our further pleasure in this case,\n\
+To old Free-town, our common judgment-place.\n\
+Once more, on pain of death, all men depart.\n"
+
+for (var i = 0; i < 4; ++i) {
+ plainText += plainText;
+}
+
+assertEquals(hex_md5("abc"), "900150983cd24fb0d6963f7d28e17f72");
+for (var i = 0; i < 11; ++i) {
+ assertEquals(hex_md5(plainText), "1b8719c72d5d8bfd06e096ef6c6288c5");
+}
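
safe_add above is the classic trick from early JavaScript MD5 code: it splits each addition into 16-bit halves to get 32-bit wraparound semantics. On modern engines (x + y) | 0 is equivalent, since the exact sum of two int32 values is always representable as a double before the truncation; a quick equivalence check (safe_add repeated here so the snippet is self-contained):

function safe_add(x, y) {
  var lsw = (x & 0xFFFF) + (y & 0xFFFF);
  var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
  return (msw << 16) | (lsw & 0xFFFF);
}
console.log(safe_add(0x7FFFFFFF, 1));  // -2147483648 -- wraps past INT32_MAX
console.log((0x7FFFFFFF + 1) | 0);     // -2147483648 -- same 32-bit wraparound
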
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 25d7c00432..83449cc1e6 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -99,6 +99,14 @@ var assertInstanceof;
// Assert that this code is never executed (i.e., always fails if executed).
var assertUnreachable;
+// Assert that the function code is (not) optimized. If "no sync" is passed
+// as second argument, we do not wait for the parallel optimization thread to
+// finish when polling for optimization status.
+// Only works with --allow-natives-syntax.
+var assertOptimized;
+var assertUnoptimized;
+
+
(function () { // Scope for utility functions.
function classOf(object) {
@@ -353,5 +361,26 @@ var assertUnreachable;
throw new MjsUnitAssertionError(message);
};
+
+ var OptimizationStatus;
+ try {
+ OptimizationStatus =
+ new Function("fun", "sync", "return %GetOptimizationStatus(fun, sync);");
+ } catch (e) {
+ OptimizationStatus = function() {
+ throw new Error("natives syntax not allowed");
+ }
+ }
+
+ assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) {
+ if (sync_opt === undefined) sync_opt = "";
+ assertTrue(OptimizationStatus(fun, sync_opt) != 1, name_opt);
+ }
+
+ assertOptimized = function assertOptimized(fun, sync_opt, name_opt) {
+ if (sync_opt === undefined) sync_opt = "";
+ assertTrue(OptimizationStatus(fun, sync_opt) != 2, name_opt);
+ }
+
})();
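
The status values these wrappers poll can be read off the tests themselves: %GetOptimizationStatus returns 1 for optimized, 2 for not optimized, and 4 for optimization disabled (see never-optimize.js below), so assertOptimized checks status != 2 and assertUnoptimized checks status != 1. A sketch of typical usage; this runs only under d8 with --allow-natives-syntax, and the function name here is illustrative:

function add(a, b) { return a + b; }
add(1, 2);
add(3, 4);                        // warm up so type feedback exists
%OptimizeFunctionOnNextCall(add);
add(5, 6);                        // triggers the optimizing compile
assertOptimized(add);             // passes once optimized code is installed
// The optional second argument ("sync") makes the status poll first wait for
// the parallel recompilation thread, as the parallel tests in this commit rely on.
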
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 7e8d5b9584..50a4c7090f 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -228,6 +228,10 @@ debug-liveedit-double-call: SKIP
# As noted above none of them are run in the arm.debug case.
fuzz-natives-part4: SKIP
+# NaCl builds have problems with this test since Pepper_28.
+# V8 Issue 2786
+math-exp-precision: SKIP
+
# Requires bigger stack size in the Genesis and if stack size is increased,
# the test requires too much time to run. However, the problem test covers
# should be platform-independent.
diff --git a/deps/v8/test/mjsunit/never-optimize.js b/deps/v8/test/mjsunit/never-optimize.js
new file mode 100644
index 0000000000..55b1f11981
--- /dev/null
+++ b/deps/v8/test/mjsunit/never-optimize.js
@@ -0,0 +1,63 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function o1() {
+}
+
+if (%GetOptimizationStatus(o1) != 4) {
+ // 4 == optimization disabled.
+ o1(); o1();
+ %OptimizeFunctionOnNextCall(o1);
+ o1();
+
+ // Check that the given function was optimized.
+ assertOptimized(o1);
+
+ // Test the %NeverOptimizeFunction runtime call.
+ %NeverOptimizeFunction(u1);
+ function u1() {
+ }
+
+ function u2() {
+ u1();
+ }
+
+ u1(); u1();
+ u2(); u2();
+
+ %OptimizeFunctionOnNextCall(u1);
+ %OptimizeFunctionOnNextCall(u2);
+
+ u1(); u1();
+ u2(); u2();
+
+ // 2 => not optimized.
+ assertUnoptimized(u1);
+ assertOptimized(u2);
+}
\ No newline at end of file
diff --git a/deps/v8/test/mjsunit/opt-elements-kind.js b/deps/v8/test/mjsunit/opt-elements-kind.js
index 3df1d9ba2b..83ad702c2d 100644
--- a/deps/v8/test/mjsunit/opt-elements-kind.js
+++ b/deps/v8/test/mjsunit/opt-elements-kind.js
@@ -108,24 +108,24 @@ function assertKind(expected, obj, name_opt) {
assertEquals(expected, getKind(obj), name_opt);
}
+%NeverOptimizeFunction(construct_smis);
function construct_smis() {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
var a = [0, 0, 0];
a[0] = 0; // Send the COW array map to the steak house.
assertKind(elements_kind.fast_smi_only, a);
return a;
}
+%NeverOptimizeFunction(construct_doubles);
function construct_doubles() {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
var a = construct_smis();
a[0] = 1.5;
assertKind(elements_kind.fast_double, a);
return a;
}
+%NeverOptimizeFunction(convert_mixed);
function convert_mixed(array, value, kind) {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
array[1] = value;
assertKind(kind, array);
assertEquals(value, array[1]);
diff --git a/deps/v8/test/mjsunit/osr-elements-kind.js b/deps/v8/test/mjsunit/osr-elements-kind.js
index 9b0f506b48..6d3c8176af 100644
--- a/deps/v8/test/mjsunit/osr-elements-kind.js
+++ b/deps/v8/test/mjsunit/osr-elements-kind.js
@@ -109,18 +109,19 @@ function assertKind(expected, obj, name_opt) {
}
// long-running loop forces OSR.
+%NeverOptimizeFunction(construct_smis);
+%NeverOptimizeFunction(construct_doubles);
+%NeverOptimizeFunction(convert_mixed);
for (var i = 0; i < 1000000; i++) { }
if (support_smi_only_arrays) {
function construct_smis() {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
var a = [0, 0, 0];
a[0] = 0; // Send the COW array map to the steak house.
assertKind(elements_kind.fast_smi_only, a);
return a;
}
function construct_doubles() {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
var a = construct_smis();
a[0] = 1.5;
assertKind(elements_kind.fast_double, a);
@@ -130,7 +131,6 @@ if (support_smi_only_arrays) {
// Test transition chain SMI->DOUBLE->FAST (crankshafted function will
// transition to FAST directly).
function convert_mixed(array, value, kind) {
- try {} catch (e) {} // TODO(titzer): DisableOptimization
array[1] = value;
assertKind(kind, array);
assertEquals(value, array[1]);
diff --git a/deps/v8/test/mjsunit/parallel-initial-prototype-change.js b/deps/v8/test/mjsunit/parallel-initial-prototype-change.js
index 9f698bae63..942d9abc3c 100644
--- a/deps/v8/test/mjsunit/parallel-initial-prototype-change.js
+++ b/deps/v8/test/mjsunit/parallel-initial-prototype-change.js
@@ -26,17 +26,13 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
+// Flags: --parallel-recompilation --parallel-recompilation-delay=100
if (!%IsParallelRecompilationSupported()) {
print("Parallel recompilation is disabled. Skipping this test.");
quit();
}
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
function f1(a, i) {
return a[i] + 0.5;
}
@@ -47,9 +43,12 @@ assertEquals(0.5, f1(arr, 0));
// Optimized code of f1 depends on initial object and array maps.
%OptimizeFunctionOnNextCall(f1, "parallel");
+// Trigger optimization in the background thread
assertEquals(0.5, f1(arr, 0));
-assertUnoptimized(f1); // Not yet optimized.
Object.prototype[1] = 1.5; // Invalidate current initial object map.
assertEquals(2, f1(arr, 1));
-%CompleteOptimization(f1); // Conclude optimization with...
-assertUnoptimized(f1); // ... bailing out due to map dependency.
+// Not yet optimized while background thread is running.
+assertUnoptimized(f1, "no sync");
+// Sync with background thread to conclude optimization, which bails out
+// due to map dependency.
+assertUnoptimized(f1, "sync");
diff --git a/deps/v8/test/mjsunit/parallel-invalidate-transition-map.js b/deps/v8/test/mjsunit/parallel-invalidate-transition-map.js
index 2a2276f1e2..716f63198c 100644
--- a/deps/v8/test/mjsunit/parallel-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/parallel-invalidate-transition-map.js
@@ -26,17 +26,13 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --track-fields --track-double-fields --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
+// Flags: --parallel-recompilation --parallel-recompilation-delay=100
if (!%IsParallelRecompilationSupported()) {
print("Parallel recompilation is disabled. Skipping this test.");
quit();
}
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
function new_object() {
var o = {};
o.a = 1;
@@ -53,9 +49,9 @@ add_field(new_object());
%OptimizeFunctionOnNextCall(add_field, "parallel");
var o = new_object();
-add_field(o); // Trigger optimization.
-assertUnoptimized(add_field); // Not yet optimized.
-o.c = 2.2; // Invalidate transition map.
-%CompleteOptimization(add_field); // Conclude optimization with...
-assertUnoptimized(add_field); // ... bailing out due to map dependency.
-
+// Trigger optimization in the background thread.
+add_field(o);
+// Invalidate transition map while optimization is underway.
+o.c = 2.2;
+// Sync with background thread to conclude optimization that bailed out.
+assertUnoptimized(add_field, "sync");
diff --git a/deps/v8/test/mjsunit/parallel-optimize-disabled.js b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
index b56303e08f..e19dbd095b 100644
--- a/deps/v8/test/mjsunit/parallel-optimize-disabled.js
+++ b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
@@ -48,4 +48,4 @@ f();
%OptimizeFunctionOnNextCall(g, "parallel");
f(0); // g() is disabled for optimization on inlining attempt.
// Attempt to optimize g() should not run into any assertion.
-%CompleteOptimization(g);
+assertUnoptimized(g, "sync");
diff --git a/deps/v8/test/mjsunit/regress/poly_count_operation.js b/deps/v8/test/mjsunit/regress/poly_count_operation.js
new file mode 100644
index 0000000000..a8a1ed2ebc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/poly_count_operation.js
@@ -0,0 +1,155 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var o1 = {x:1};
+var o2 = {};
+var deopt_getter = false;
+var deopt_setter = false;
+
+function f_mono(o) {
+ return 5 + o.x++;
+}
+
+var to_deopt = f_mono;
+
+var v = 1;
+var g = 0;
+var s = 0;
+
+Object.defineProperty(o2, "x",
+ {get:function() {
+ g++;
+ if (deopt_getter) {
+ deopt_getter = false;
+ %DeoptimizeFunction(to_deopt);
+ }
+ return v;
+ },
+ set:function(new_v) {
+ v = new_v;
+ s++;
+ if (deopt_setter) {
+ deopt_setter = false;
+ %DeoptimizeFunction(to_deopt);
+ }
+ }});
+
+assertEquals(6, f_mono(o2));
+assertEquals(1, g);
+assertEquals(1, s);
+assertEquals(7, f_mono(o2));
+assertEquals(2, g);
+assertEquals(2, s);
+%OptimizeFunctionOnNextCall(f_mono);
+deopt_setter = true;
+assertEquals(8, f_mono(o2));
+assertEquals(3, g);
+assertEquals(3, s);
+
+function f_poly(o) {
+ return 5 + o.x++;
+}
+
+v = 1;
+to_deopt = f_poly;
+
+f_poly(o1);
+f_poly(o1);
+assertEquals(6, f_poly(o2));
+assertEquals(4, g);
+assertEquals(4, s);
+assertEquals(7, f_poly(o2));
+assertEquals(5, g);
+assertEquals(5, s);
+%OptimizeFunctionOnNextCall(f_poly);
+deopt_setter = true;
+assertEquals(8, f_poly(o2));
+assertEquals(6, g);
+assertEquals(6, s);
+
+%OptimizeFunctionOnNextCall(f_poly);
+v = undefined;
+assertEquals(NaN, f_poly(o2));
+assertEquals(7, g);
+assertEquals(7, s);
+
+function f_pre(o) {
+ return 5 + ++o.x;
+}
+
+v = 1;
+to_deopt = f_pre;
+
+f_pre(o1);
+f_pre(o1);
+assertEquals(7, f_pre(o2));
+assertEquals(8, g);
+assertEquals(8, s);
+assertEquals(8, f_pre(o2));
+assertEquals(9, g);
+assertEquals(9, s);
+%OptimizeFunctionOnNextCall(f_pre);
+deopt_setter = true;
+assertEquals(9, f_pre(o2));
+assertEquals(10, g);
+assertEquals(10, s);
+
+%OptimizeFunctionOnNextCall(f_pre);
+v = undefined;
+assertEquals(NaN, f_pre(o2));
+assertEquals(11, g);
+assertEquals(11, s);
+
+
+function f_get(o) {
+ return 5 + o.x++;
+}
+
+v = 1;
+to_deopt = f_get;
+
+f_get(o1);
+f_get(o1);
+assertEquals(6, f_get(o2));
+assertEquals(12, g);
+assertEquals(12, s);
+assertEquals(7, f_get(o2));
+assertEquals(13, g);
+assertEquals(13, s);
+%OptimizeFunctionOnNextCall(f_get);
+deopt_getter = true;
+assertEquals(8, f_get(o2));
+assertEquals(14, g);
+assertEquals(14, s);
+
+%OptimizeFunctionOnNextCall(f_get);
+v = undefined;
+assertEquals(NaN, f_get(o2));
+assertEquals(15, g);
+assertEquals(15, s);
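The g and s counters above advance in lockstep because a count operation on an accessor property reads through the getter exactly once and writes through the setter exactly once. Informally, and ignoring ToNumber details, the postfix form desugars as:

  // What "5 + o.x++" does when x is an accessor property (sketch):
  var old = o.x;         // invokes the getter: g++
  o.x = old + 1;         // invokes the setter: s++
  var result = 5 + old;  // postfix yields the pre-increment value

so a deopt raised inside either accessor must still leave both counters incremented exactly once per call.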
diff --git a/deps/v8/test/mjsunit/regress/regress-1118.js b/deps/v8/test/mjsunit/regress/regress-1118.js
index 3e3920f3dc..4d27963779 100644
--- a/deps/v8/test/mjsunit/regress/regress-1118.js
+++ b/deps/v8/test/mjsunit/regress/regress-1118.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --noparallel-recompilation
+// Flags: --allow-natives-syntax
// An exception thrown in a function optimized by on-stack replacement (OSR)
// should be able to construct a receiver from all optimized stack frames.
@@ -52,7 +52,7 @@ function h() {
g();
} else {
// Run for a bit as long as h is unoptimized.
- while (%GetOptimizationStatus(h) == 2) {
+ while (%GetOptimizationStatus(h, "no sync") == 2) {
for (var j = 0; j < 100; j++) g();
}
g();
diff --git a/deps/v8/test/mjsunit/regress/regress-1713b.js b/deps/v8/test/mjsunit/regress/regress-1713b.js
new file mode 100644
index 0000000000..cc16bf5119
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1713b.js
@@ -0,0 +1,126 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --always-compact --expose-gc
+
+var O = { get f() { return 0; } };
+
+var CODE = [];
+
+var R = [];
+
+function Allocate4Kb(N) {
+ var arr = [];
+ do {arr.push(new Array(1024));} while (--N > 0);
+ return arr;
+}
+
+function AllocateXMb(X) {
+ return Allocate4Kb((1024 * X) / 4);
+}
+
+function Node(v, next) { this.v = v; this.next = next; }
+
+Node.prototype.execute = function (O) {
+ var n = this;
+ while (n.next !== null) n = n.next;
+ n.v(O);
+};
+
+function LongList(N, x) {
+ if (N == 0) return new Node(x, null);
+ return new Node(new Array(1024), LongList(N - 1, x));
+}
+
+var L = LongList(1024, function (O) {
+ for (var i = 0; i < 5; i++) O.f;
+});
+
+
+
+%NeverOptimizeFunction(Incremental);
+function Incremental(O, x) {
+ if (!x) {
+ return;
+ }
+ function CreateCode(i) {
+ var f = new Function("return O.f_" + i);
+ CODE.push(f);
+ f(); // compile
+ f(); // compile
+ f(); // compile
+ }
+
+ for (var i = 0; i < 1e4; i++) CreateCode(i);
+ gc();
+ gc();
+ gc();
+
+ print(">>> 1 <<<");
+
+ L.execute(O);
+
+ L = null;
+ print(">>> 2 <<<");
+ AllocateXMb(8);
+ //rint("1");
+ //llocateXMb(8);
+ //rint("1");
+ //llocateXMb(8);
+
+}
+
+function foo(O, x) {
+ Incremental(O, x);
+
+ print('f');
+
+ for (var i = 0; i < 5; i++) O.f;
+
+
+ print('g');
+
+ bar(x);
+}
+
+function bar(x) {
+ if (!x) return;
+ %DeoptimizeFunction(foo);
+ AllocateXMb(8);
+ AllocateXMb(8);
+}
+
+var O1 = {};
+var O2 = {};
+var O3 = {};
+var O4 = {f:0};
+
+foo(O1, false);
+foo(O2, false);
+foo(O3, false);
+%OptimizeFunctionOnNextCall(foo);
+foo(O4, true);
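The allocation helpers above are sized only nominally; assuming roughly four bytes per array slot, the arithmetic behind the names is:

  // new Array(1024) reserves ~1024 slots * ~4 bytes = ~4 KB, so
  // Allocate4Kb(N) allocates ~N * 4 KB and
  // AllocateXMb(X) = Allocate4Kb((1024 * X) / 4) allocates ~X MB,
  // enough garbage to keep incremental marking and code flushing busy.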
diff --git a/deps/v8/test/mjsunit/regress/regress-173361.js b/deps/v8/test/mjsunit/regress/regress-173361.js
new file mode 100644
index 0000000000..f9cfb6684c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-173361.js
@@ -0,0 +1,33 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+
+const x = 7;
+
+function f() { const y = 8; }
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-2132.js b/deps/v8/test/mjsunit/regress/regress-2132.js
index d8987a554a..9eb2dc5b07 100644
--- a/deps/v8/test/mjsunit/regress/regress-2132.js
+++ b/deps/v8/test/mjsunit/regress/regress-2132.js
@@ -35,7 +35,7 @@ mul(0, 0);
mul(0, 0);
%OptimizeFunctionOnNextCall(mul);
assertEquals(0, mul(0, -1));
-assertTrue(%GetOptimizationStatus(mul) != 2);
+assertOptimized(mul);
function div(x, y) {
return (x / y) | 0;
@@ -45,4 +45,4 @@ div(4, 2);
div(4, 2);
%OptimizeFunctionOnNextCall(div);
assertEquals(1, div(5, 3));
-assertTrue(%GetOptimizationStatus(div) != 2);
+assertOptimized(div);
diff --git a/deps/v8/test/mjsunit/regress/regress-2250.js b/deps/v8/test/mjsunit/regress/regress-2250.js
index b3b0db3fc3..9d2fd4412f 100644
--- a/deps/v8/test/mjsunit/regress/regress-2250.js
+++ b/deps/v8/test/mjsunit/regress/regress-2250.js
@@ -64,5 +64,5 @@ test();
// Second compilation should have noticed that LICM wasn't a good idea, and now
// function should no longer deopt when called.
test();
-assertTrue(2 != %GetOptimizationStatus(test));
+assertOptimized(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-2315.js b/deps/v8/test/mjsunit/regress/regress-2315.js
index a3f9182c95..28c78eae48 100644
--- a/deps/v8/test/mjsunit/regress/regress-2315.js
+++ b/deps/v8/test/mjsunit/regress/regress-2315.js
@@ -36,5 +36,4 @@ foo();
%OptimizeFunctionOnNextCall(foo);
foo();
-// Function should be optimized now.
-assertTrue(%GetOptimizationStatus(foo) != 2);
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-2339.js b/deps/v8/test/mjsunit/regress/regress-2339.js
index b16821dbad..8355446cfc 100644
--- a/deps/v8/test/mjsunit/regress/regress-2339.js
+++ b/deps/v8/test/mjsunit/regress/regress-2339.js
@@ -27,18 +27,6 @@
// Flags: --allow-natives-syntax --expose-gc
-/**
- * The possible optimization states of a function. Must be in sync with the
- * return values of Runtime_GetOptimizationStatus() in runtime.cc!
- */
-
-var OptimizationState = {
- YES: 1,
- NO: 2,
- ALWAYS: 3,
- NEVER: 4
-};
-
function simple() {
return simple_two_args(0, undefined);
}
@@ -53,7 +41,5 @@ simple();
simple();
%OptimizeFunctionOnNextCall(simple);
simple();
-var raw_optimized = %GetOptimizationStatus(simple);
-assertFalse(raw_optimized == OptimizationState.NO);
+assertOptimized(simple);
gc();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2451.js b/deps/v8/test/mjsunit/regress/regress-2451.js
index 465e4e68c2..c1749b178f 100644
--- a/deps/v8/test/mjsunit/regress/regress-2451.js
+++ b/deps/v8/test/mjsunit/regress/regress-2451.js
@@ -37,5 +37,4 @@ f();
f();
%OptimizeFunctionOnNextCall(f);
f();
-assertTrue(%GetOptimizationStatus(f) != 2);
-
+assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-252797.js b/deps/v8/test/mjsunit/regress/regress-252797.js
new file mode 100644
index 0000000000..379205f599
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-252797.js
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// The type feedback oracle had a bug when retrieving the map from an IC
+// starting with a negative lookup.
+
+// Create a holder in fast mode.
+var holder = Object.create(null, {
+ holderMethod: {value: function() {}}
+});
+assertTrue(%HasFastProperties(holder));
+
+// Create a receiver and force it into dictionary mode.
+var receiver = Object.create(holder, {
+ killMe: {value: 0, configurable: true},
+});
+delete receiver.killMe;
+assertFalse(%HasFastProperties(receiver));
+
+// The actual function to test, triggering the retrieval of the wrong map.
+function callConstantFunctionOnPrototype(obj) {
+ obj.holderMethod();
+}
+
+callConstantFunctionOnPrototype(receiver);
+callConstantFunctionOnPrototype(receiver);
+%OptimizeFunctionOnNextCall(callConstantFunctionOnPrototype);
+callConstantFunctionOnPrototype(receiver);
+
+// Make sure that the function is still optimized.
+assertOptimized(callConstantFunctionOnPrototype);
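A "negative lookup" is the case where the IC must first prove the property is absent on the receiver itself before finding it on a holder up the prototype chain. A minimal sketch of the shape involved (names hypothetical, not from this test):

  var proto = { method: function() { return 1; } };
  var obj = Object.create(proto);  // no own "method" property
  obj.method();  // IC records: miss on obj, constant function on proto

The dictionary-mode receiver built above presumably steers that "absent on receiver" check down the path where the wrong map was retrieved.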
diff --git a/deps/v8/test/mjsunit/regress/regress-2537.js b/deps/v8/test/mjsunit/regress/regress-2537.js
index c6b5af9490..1a86000619 100644
--- a/deps/v8/test/mjsunit/regress/regress-2537.js
+++ b/deps/v8/test/mjsunit/regress/regress-2537.js
@@ -31,7 +31,8 @@ var large_int = 0x40000000;
function foo(x, expected) {
assertEquals(expected, x); // This succeeds.
- x += 0; // Force int32 representation so that CompareIDAndBranch is used.
+ x += 0; // Force int32 representation so that
+ // CompareNumericAndBranch is used.
if (3 != x) {
x += 0; // Poor man's "iDef".
// Fails due to Smi-tagging without overflow check.
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index 638b71e622..3509db2d45 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -30,7 +30,7 @@
function f() {
do {
do {
- for (i = 0; i < 10000000; i++) {
+ for (var i = 0; i < 10000000; i++) {
// This should run long enough to trigger OSR.
}
} while (false);
@@ -38,7 +38,7 @@ function f() {
}
f();
-assertTrue(%GetOptimizationStatus(f) != 2);
+assertOptimized(f);
function g() {
@@ -46,7 +46,7 @@ function g() {
do {
do {
- for (i = 0; i < 1; i++) { }
+ for (var i = 0; i < 1; i++) { }
} while (false);
} while (false);
@@ -58,7 +58,7 @@ function g() {
do {
do {
do {
- for (i = 0; i < 10000000; i++) { }
+ for (var i = 0; i < 10000000; i++) { }
} while (false);
} while (false);
} while (false);
@@ -70,5 +70,4 @@ function g() {
}
g();
-assertTrue(%GetOptimizationStatus(g) != 2);
-
+assertOptimized(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-2711.js b/deps/v8/test/mjsunit/regress/regress-2711.js
new file mode 100644
index 0000000000..a58e789745
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2711.js
@@ -0,0 +1,33 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that frozen arrays don't let their length change.
+var a = Object.freeze([1]);
+a.push(2);
+assertEquals(1, a.length);
+a.push(2);
+assertEquals(1, a.length);
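For context, the silent failure the assertions rely on is standard ES5 sloppy-mode behavior for writes to a frozen object:

  var frozen = Object.freeze([1]);
  assertTrue(Object.isFrozen(frozen));
  frozen[1] = 2;                    // silently ignored outside strict mode
  assertEquals(undefined, frozen[1]);
  assertEquals(1, frozen.length);   // length is read-only as well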
diff --git a/deps/v8/test/mjsunit/regress/regress-97116b.js b/deps/v8/test/mjsunit/regress/regress-97116b.js
new file mode 100644
index 0000000000..91e7d6e0ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-97116b.js
@@ -0,0 +1,50 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+// Check that we are not flushing code for inlined functions that
+// have a pending lazy deoptimization on the stack.
+
+%NeverOptimizeFunction(deopt);
+function deopt() {
+ %DeoptimizeFunction(outer);
+ for (var i = 0; i < 10; i++) gc(); // Force code flushing.
+}
+
+function outer(should_deopt) {
+ inner(should_deopt);
+}
+
+function inner(should_deopt) {
+ if (should_deopt) deopt();
+}
+
+outer(false);
+outer(false);
+%OptimizeFunctionOnNextCall(outer);
+outer(true);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-150545.js b/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
index 68efdbf2d7..19f7e68250 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
@@ -46,7 +46,7 @@
function outer() {
inner(1,2,3);
// Trigger OSR.
- while (%GetOptimizationStatus(outer) == 2) {}
+ while (%GetOptimizationStatus(outer, "no sync") == 2) {}
}
outer();
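The "no sync" argument is load-bearing here: the loop polls from inside the function waiting to be OSR'd, so the query must sample the current state rather than block until background compilation finishes. As a general pattern (sketch only):

  function spin_until_osr(f) {
    // 2 == "not optimized"; keep running until OSR replaces the
    // active frame with optimized code.
    while (%GetOptimizationStatus(f, "no sync") == 2) { }
  }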
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-173907b.js b/deps/v8/test/mjsunit/regress/regress-crbug-173907b.js
new file mode 100644
index 0000000000..4ecfd64eaf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-173907b.js
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var X = 1.1;
+var K = 0.5;
+
+var O = 0;
+var result = new Float64Array(2);
+
+%NeverOptimizeFunction(spill);
+function spill() {
+}
+
+function buggy() {
+ var v = X;
+ var phi1 = v + K;
+ var phi2 = v - K;
+
+ spill(); // At this point initial values for phi1 and phi2 are spilled.
+
+ var xmm1 = v;
+ var xmm2 = v*v*v;
+ var xmm3 = v*v*v*v;
+ var xmm4 = v*v*v*v*v;
+ var xmm5 = v*v*v*v*v*v;
+ var xmm6 = v*v*v*v*v*v*v;
+ var xmm7 = v*v*v*v*v*v*v*v;
+ var xmm8 = v*v*v*v*v*v*v*v*v;
+
+ // All registers are blocked and phis for phi1 and phi2 are spilled because
+ // their left (incoming) value is spilled, there are no free registers,
+ // and phis themselves have only ANY-policy uses.
+
+ for (var x = 0; x < 2; x++) {
+ xmm1 += xmm1 * xmm6;
+ xmm2 += xmm1 * xmm5;
+ xmm3 += xmm1 * xmm4;
+ xmm4 += xmm1 * xmm3;
+ xmm5 += xmm1 * xmm2;
+
+ // Now swap values of phi1 and phi2 to create cycle between phis.
+ var t = phi1;
+ phi1 = phi2;
+ phi2 = t;
+ }
+
+ // Now we want to get the values of phi1 and phi2. However, we would
+ // like to do it in a way that does not produce any uses of phi1 and
+ // phi2 that have a register-beneficial policy. How? We just hide
+ // these uses behind phis.
+ result[0] = (O === 0) ? phi1 : phi2;
+ result[1] = (O !== 0) ? phi1 : phi2;
+}
+
+function test() {
+ buggy();
+ assertArrayEquals([X + K, X - K], result);
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(buggy);
+test();
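In register-allocator terms, the state the loop engineers is, following the comments above:

  // Back-edge parallel move with both phis living in stack slots:
  //   [ phi1 <- phi2 ; phi2 <- phi1 ]
  // With the xmm1..xmm8 chain occupying every register, breaking this
  // memory-to-memory cycle needs a scratch location, presumably the
  // gap-resolver path this regression test exercises.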
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-259300.js b/deps/v8/test/mjsunit/regress/regress-crbug-259300.js
new file mode 100644
index 0000000000..c57b0e6f91
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-259300.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug;
+var listened = false;
+var recursion_depth = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ recursion_depth++;
+ var disable_break = (recursion_depth > 2);
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ exec_state.frame(i).evaluate("debugger", disable_break);
+ }
+ }
+ listened = true;
+}
+
+Debug.setListener(listener);
+eval("debugger");
+Debug.setListener(null);
+assertTrue(listened);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-260345.js b/deps/v8/test/mjsunit/regress/regress-crbug-260345.js
new file mode 100644
index 0000000000..75832ab4be
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-260345.js
@@ -0,0 +1,59 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var steps = 100000;
+var undefined_values = [undefined, "go on"];
+var null_values = [null, "go on"];
+
+function get_undefined_object(i) {
+ return undefined_values[(i / steps) | 0];
+}
+
+function test_undefined() {
+ var objects = 0;
+ for (var i = 0; i < 2 * steps; i++) {
+ undefined == get_undefined_object(i) && objects++;
+ }
+ return objects;
+}
+
+assertEquals(steps, test_undefined());
+
+
+function get_null_object(i) {
+ return null_values[(i / steps) | 0];
+}
+
+function test_null() {
+ var objects = 0;
+ for (var i = 0; i < 2 * steps; i++) {
+ null == get_null_object(i) && objects++;
+ }
+ return objects;
+}
+
+assertEquals(steps, test_null());
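The expected count of steps follows from ES5 loose equality (11.9.3): undefined and null compare == only to each other, so the first steps iterations (index 0 of the table) match and the remaining "go on" values do not:

  assertTrue(undefined == null);
  assertTrue(null == undefined);
  assertFalse(undefined == 0);
  assertFalse(null == "");
  assertFalse(null == "go on");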
diff --git a/deps/v8/test/mjsunit/regress/regress-deopt-gcb.js b/deps/v8/test/mjsunit/regress/regress-deopt-gcb.js
new file mode 100644
index 0000000000..fed92b424f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-deopt-gcb.js
@@ -0,0 +1,49 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+// This tests that we can correctly handle a GC immediately after a function
+// has been deoptimized, even when we have an activation of this function on
+// the stack.
+
+// Ensure that there are code objects before the code for the opt_me function.
+(function() { var a = 10; a++; })();
+
+function opt_me() {
+ deopt();
+}
+
+// Make sure we don't inline this function.
+%NeverOptimizeFunction(deopt);
+function deopt() {
+ %DeoptimizeFunction(opt_me);
+ gc();
+}
+
+
+opt_me();
diff --git a/deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js b/deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js
new file mode 100644
index 0000000000..59094d3aeb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js
@@ -0,0 +1,82 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test deopt after generic store with effect context.
+var pro = { x : 1 };
+var a = {};
+a.__proto__ = pro;
+delete pro.x;
+
+function g(o) {
+ return 7 + (o.z = 1, 20);
+}
+
+g(a);
+g(a);
+%OptimizeFunctionOnNextCall(g);
+Object.defineProperty(pro, "z", {
+ set: function(v) { %DeoptimizeFunction(g); },
+ get: function() { return 20; }
+});
+
+assertEquals(27, g(a));
+
+// Test deopt after polymorphic-as-monomorphic store with effect context.
+
+var i = { z : 2, r : 1 };
+var j = { z : 2 };
+var p = { a : 10 };
+var pp = { a : 20, b : 1 };
+
+function bar(o, p) {
+ return 7 + (o.z = 1, p.a);
+}
+
+bar(i, p);
+bar(i, p);
+bar(j, p);
+%OptimizeFunctionOnNextCall(bar);
+assertEquals(27, bar(i, pp));
+
+// Test deopt after polymorphic store with effect context.
+
+var i = { r : 1, z : 2 };
+var j = { z : 2 };
+var p = { a : 10 };
+var pp = { a : 20, b : 1 };
+
+function bar1(o, p) {
+ return 7 + (o.z = 1, p.a);
+}
+
+bar1(i, p);
+bar1(i, p);
+bar1(j, p);
+%OptimizeFunctionOnNextCall(bar1);
+assertEquals(27, bar1(i, pp));
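The (o.z = 1, p.a) shape is what puts each store in an effect context: the comma operator evaluates its left operand purely for side effects and yields its right operand, so the store's value is never used, and a deopt raised inside a setter must not disturb the surrounding addition. In isolation:

  function touch() { return "ignored"; }  // stands in for the store
  var r = 7 + (touch(), 20);              // touch() runs for effect only
  assertEquals(27, r);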
diff --git a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
index afb3835688..6a63da2fde 100644
--- a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
+++ b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
@@ -34,19 +34,21 @@ if (!%IsParallelRecompilationSupported()) {
quit();
}
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
function test(fun) {
fun();
fun();
+ // Mark for parallel optimization.
%OptimizeFunctionOnNextCall(fun, "parallel");
- fun(); // Trigger optimization in the background.
- gc(); // Tenure cons string.
- assertUnoptimized(fun); // Compilation not complete yet.
- %CompleteOptimization(fun); // Compilation embeds tenured cons string.
- gc(); // Visit embedded cons string during mark compact.
+ // Trigger optimization in the background.
+ fun();
+ // Tenure cons string.
+ gc();
+ // In the meantime, parallel recompilation is not yet complete.
+ assertUnoptimized(fun, "no sync");
+ // Parallel recompilation eventually finishes and embeds tenured cons string.
+ assertOptimized(fun, "sync");
+ // Visit embedded cons string during mark compact.
+ gc();
}
function f() {
diff --git a/deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js b/deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js
new file mode 100644
index 0000000000..d15ed4d00a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js
@@ -0,0 +1,52 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+Debug = debug.Debug;
+var listened = false;
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Exception) {
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ print(exec_state.frame(i).receiver());
+ print(exec_state.frame(i).func().name());
+ }
+ }
+ listened = true;
+}
+
+Debug.setListener(listener);
+Debug.setBreakOnException();
+
+assertThrows(function() { delete null['foo']; });
+
+Debug.clearBreakOnException();
+Debug.setListener(null);
+
+assertTrue(listened);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js b/deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js
new file mode 100644
index 0000000000..4203ac48da
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function boom(a) {
+ return ((a | 0) * (a | 0)) | 0;
+}
+%NeverOptimizeFunction(boom_unoptimized);
+function boom_unoptimized(a) {
+ return ((a | 0) * (a | 0)) | 0;
+}
+
+boom(1, 1);
+boom(2, 2);
+
+%OptimizeFunctionOnNextCall(boom);
+var big_int = 0x5F00000F;
+var expected = boom_unoptimized(big_int);
+var actual = boom(big_int);
+assertEquals(expected, actual);
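Why 0x5F00000F is a good probe (arithmetic sketch, not part of the test): its square is about 2.5e18, far above 2^53, so the spec's rounded-double-then-ToInt32 result can differ from the low 32 bits of the exact integer product, which is what a 32-bit multiply lowered without an overflow check would produce:

  var a = 0x5F00000F;  // ~1.6e9
  // a * a overflows both int32 and the 53-bit double mantissa, so the
  // optimized multiply must stay flagged as overflowing for boom to
  // match boom_unoptimized bit-for-bit after | 0.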
diff --git a/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js b/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
index be12cc56fd..3de0217c81 100644
--- a/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
+++ b/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=300
+// Flags: --parallel-recompilation --parallel-recompilation-delay=100
if (!%IsParallelRecompilationSupported()) {
print("Parallel recompilation is disabled. Skipping this test.");
@@ -62,7 +62,8 @@ f(); // Kick off parallel recompilation.
Debug.setListener(listener); // Activate debugger.
Debug.setBreakPoint(f, 2, 0); // Force deopt.
-%CompleteOptimization(f); // Install optimized code.
+// Sync with the parallel optimization thread, but no optimized code is installed.
+assertUnoptimized(f, "sync");
f(); // Trigger break point.
assertEquals(1, listened);
diff --git a/deps/v8/test/mjsunit/tools/profviz-test.default b/deps/v8/test/mjsunit/tools/profviz-test.default
new file mode 100644
index 0000000000..04185a260c
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/profviz-test.default
@@ -0,0 +1,1566 @@
+[
+ "set yrange [0:24.5]",
+ "set xlabel \"execution time in ms\"",
+ "set xrange [2.4204999999999997:141.1669999999999]",
+ "set style fill pattern 2 bo 1",
+ "set style rect fs solid 1 noborder",
+ "set style line 1 lt 1 lw 1 lc rgb \"#000000\"",
+ "set border 15 lw 0.2",
+ "set style line 2 lt 1 lw 1 lc rgb \"#9944CC\"",
+ "set xtics out nomirror",
+ "unset key",
+ "set object 1 rect from 87.51699999999991, 7 to 87.60371656249991, 3 fc rgb \"#9944CC\"",
+ "set object 2 rect from 110.7114999999999, 7 to 110.7982165624999, 3 fc rgb \"#9944CC\"",
+ "set object 3 rect from 133.5129999999999, 7 to 133.59971656249988, 3 fc rgb \"#9944CC\"",
+ "set object 4 rect from 61.49249999999995, 7 to 61.57921656249995, 5.020618556701031 fc rgb \"#9944CC\"",
+ "set object 5 rect from 101.40849999999992, 7 to 101.49521656249992, 5.103092783505154 fc rgb \"#9944CC\"",
+ "set object 6 rect from 57.242999999999974, 7 to 57.329716562499975, 6.766323024054983 fc rgb \"#9944CC\"",
+ "set object 7 rect from 58.751499999999965, 7 to 58.838216562499966, 6.766323024054983 fc rgb \"#9944CC\"",
+ "set object 8 rect from 60.72499999999996, 7 to 60.81171656249996, 6.766323024054983 fc rgb \"#9944CC\"",
+ "set ytics out nomirror (\"execution (59.6%%)\" 12.5, \"external (0.2%%)\" 13.5, \"compile unopt (3.1%%)\" 14.5, \"recompile sync (6.7%%)\" 15.5, \"recompile async (11.6%%)\" 16.5, \"compile eval (0.0%%)\" 17.5, \"parse (10.0%%)\" 18.5, \"preparse (0.8%%)\" 19.5, \"lazy parse (2.9%%)\" 20.5, \"gc scavenge (1.7%%)\" 21.5, \"gc compaction (3.3%%)\" 22.5, \"gc context (0.0%%)\" 23.5, \"code kind color coding\" 11, \"code kind in execution\" 10, \"top 8 js stack frames\" 9, \"pause times\" 0, \"max deopt size: 9.1 kB\" 7)",
+ "set object 9 rect from 42.11000000000001, 12.83 to 42.28050000000001, 12.17 fc rgb \"#000000\"",
+ "set object 10 rect from 42.298000000000016, 12.83 to 42.30000000000002, 12.17 fc rgb \"#000000\"",
+ "set object 11 rect from 42.31450000000002, 12.83 to 42.62700000000002, 12.17 fc rgb \"#000000\"",
+ "set object 12 rect from 42.656500000000015, 12.83 to 42.66150000000002, 12.17 fc rgb \"#000000\"",
+ "set object 13 rect from 42.70600000000002, 12.83 to 42.747000000000014, 12.17 fc rgb \"#000000\"",
+ "set object 14 rect from 42.763500000000015, 12.83 to 42.76550000000001, 12.17 fc rgb \"#000000\"",
+ "set object 15 rect from 42.795000000000016, 12.83 to 42.812500000000014, 12.17 fc rgb \"#000000\"",
+ "set object 16 rect from 42.83300000000001, 12.83 to 42.844000000000015, 12.17 fc rgb \"#000000\"",
+ "set object 17 rect from 42.858500000000014, 12.83 to 42.85950000000001, 12.17 fc rgb \"#000000\"",
+ "set object 18 rect from 42.88200000000001, 12.83 to 43.60550000000001, 12.17 fc rgb \"#000000\"",
+ "set object 19 rect from 43.62000000000002, 12.83 to 43.622000000000014, 12.17 fc rgb \"#000000\"",
+ "set object 20 rect from 43.632500000000014, 12.83 to 44.796000000000014, 12.17 fc rgb \"#000000\"",
+ "set object 21 rect from 44.81150000000002, 12.83 to 44.812500000000014, 12.17 fc rgb \"#000000\"",
+ "set object 22 rect from 44.82200000000002, 12.83 to 44.84500000000001, 12.17 fc rgb \"#000000\"",
+ "set object 23 rect from 44.87150000000002, 12.83 to 44.87550000000002, 12.17 fc rgb \"#000000\"",
+ "set object 24 rect from 44.917000000000016, 12.83 to 44.996000000000016, 12.17 fc rgb \"#000000\"",
+ "set object 25 rect from 45.00850000000001, 12.83 to 45.01150000000001, 12.17 fc rgb \"#000000\"",
+ "set object 26 rect from 45.02900000000001, 12.83 to 45.04700000000001, 12.17 fc rgb \"#000000\"",
+ "set object 27 rect from 45.06450000000002, 12.83 to 45.068500000000014, 12.17 fc rgb \"#000000\"",
+ "set object 28 rect from 45.08700000000001, 12.83 to 45.09600000000001, 12.17 fc rgb \"#000000\"",
+ "set object 29 rect from 45.107500000000016, 12.83 to 45.110500000000016, 12.17 fc rgb \"#000000\"",
+ "set object 30 rect from 45.13500000000002, 12.83 to 45.14400000000002, 12.17 fc rgb \"#000000\"",
+ "set object 31 rect from 45.16150000000002, 12.83 to 45.32050000000002, 12.17 fc rgb \"#000000\"",
+ "set object 32 rect from 45.33700000000002, 12.83 to 45.34000000000002, 12.17 fc rgb \"#000000\"",
+ "set object 33 rect from 45.361500000000014, 12.83 to 45.38750000000002, 12.17 fc rgb \"#000000\"",
+ "set object 34 rect from 45.402000000000015, 12.83 to 45.405000000000015, 12.17 fc rgb \"#000000\"",
+ "set object 35 rect from 45.41750000000002, 12.83 to 45.43250000000002, 12.17 fc rgb \"#000000\"",
+ "set object 36 rect from 45.442000000000014, 12.83 to 45.49750000000001, 12.17 fc rgb \"#000000\"",
+ "set object 37 rect from 45.55900000000001, 12.83 to 45.56900000000001, 12.17 fc rgb \"#000000\"",
+ "set object 38 rect from 45.626500000000014, 12.83 to 45.66050000000001, 12.17 fc rgb \"#000000\"",
+ "set object 39 rect from 45.70300000000001, 12.83 to 45.71200000000001, 12.17 fc rgb \"#000000\"",
+ "set object 40 rect from 45.76150000000001, 12.83 to 45.79350000000001, 12.17 fc rgb \"#000000\"",
+ "set object 41 rect from 45.81700000000001, 12.83 to 45.82000000000001, 12.17 fc rgb \"#000000\"",
+ "set object 42 rect from 45.84850000000001, 12.83 to 45.86950000000001, 12.17 fc rgb \"#000000\"",
+ "set object 43 rect from 45.92300000000001, 12.83 to 45.93000000000001, 12.17 fc rgb \"#000000\"",
+ "set object 44 rect from 45.97850000000001, 12.83 to 45.99450000000001, 12.17 fc rgb \"#000000\"",
+ "set object 45 rect from 46.060500000000005, 12.83 to 46.08650000000001, 12.17 fc rgb \"#000000\"",
+ "set object 46 rect from 46.13100000000001, 12.83 to 46.18500000000001, 12.17 fc rgb \"#000000\"",
+ "set object 47 rect from 46.28150000000001, 12.83 to 46.291500000000006, 12.17 fc rgb \"#000000\"",
+ "set object 48 rect from 46.37200000000001, 12.83 to 46.550000000000004, 12.17 fc rgb \"#000000\"",
+ "set object 49 rect from 46.5915, 12.83 to 46.59550000000001, 12.17 fc rgb \"#000000\"",
+ "set object 50 rect from 46.621, 12.83 to 46.65500000000001, 12.17 fc rgb \"#000000\"",
+ "set object 51 rect from 46.691500000000005, 12.83 to 46.694500000000005, 12.17 fc rgb \"#000000\"",
+ "set object 52 rect from 46.74, 12.83 to 46.861000000000004, 12.17 fc rgb \"#000000\"",
+ "set object 53 rect from 46.8935, 12.83 to 46.8975, 12.17 fc rgb \"#000000\"",
+ "set object 54 rect from 46.9925, 12.83 to 47.039500000000004, 12.17 fc rgb \"#000000\"",
+ "set object 55 rect from 47.049, 12.83 to 47.0765, 12.17 fc rgb \"#000000\"",
+ "set object 56 rect from 47.135000000000005, 12.83 to 47.141, 12.17 fc rgb \"#000000\"",
+ "set object 57 rect from 47.3935, 12.83 to 47.4125, 12.17 fc rgb \"#000000\"",
+ "set object 58 rect from 47.465, 12.83 to 47.472, 12.17 fc rgb \"#000000\"",
+ "set object 59 rect from 47.5235, 12.83 to 49.454499999999996, 12.17 fc rgb \"#000000\"",
+ "set object 60 rect from 49.467, 12.83 to 49.469, 12.17 fc rgb \"#000000\"",
+ "set object 61 rect from 49.4955, 12.83 to 49.6855, 12.17 fc rgb \"#000000\"",
+ "set object 62 rect from 49.726, 12.83 to 49.732, 12.17 fc rgb \"#000000\"",
+ "set object 63 rect from 49.780499999999996, 12.83 to 49.799499999999995, 12.17 fc rgb \"#000000\"",
+ "set object 64 rect from 49.812999999999995, 12.83 to 49.814, 12.17 fc rgb \"#000000\"",
+ "set object 65 rect from 49.82449999999999, 12.83 to 49.851, 12.17 fc rgb \"#000000\"",
+ "set object 66 rect from 49.8685, 12.83 to 49.894499999999994, 12.17 fc rgb \"#000000\"",
+ "set object 67 rect from 49.9695, 12.83 to 50.083999999999996, 12.17 fc rgb \"#000000\"",
+ "set object 68 rect from 50.14149999999999, 12.83 to 50.147499999999994, 12.17 fc rgb \"#000000\"",
+ "set object 69 rect from 50.20799999999999, 12.83 to 50.29299999999999, 12.17 fc rgb \"#000000\"",
+ "set object 70 rect from 50.31249999999999, 12.83 to 50.314499999999995, 12.17 fc rgb \"#000000\"",
+ "set object 71 rect from 50.32899999999999, 12.83 to 50.36699999999999, 12.17 fc rgb \"#000000\"",
+ "set object 72 rect from 50.39849999999999, 12.83 to 50.40249999999999, 12.17 fc rgb \"#000000\"",
+ "set object 73 rect from 50.43099999999999, 12.83 to 50.54899999999999, 12.17 fc rgb \"#000000\"",
+ "set object 74 rect from 50.62049999999999, 12.83 to 50.62949999999999, 12.17 fc rgb \"#000000\"",
+ "set object 75 rect from 51.02349999999999, 12.83 to 51.27549999999999, 12.17 fc rgb \"#000000\"",
+ "set object 76 rect from 51.29099999999999, 12.83 to 51.292999999999985, 12.17 fc rgb \"#000000\"",
+ "set object 77 rect from 51.30249999999999, 12.83 to 51.52249999999999, 12.17 fc rgb \"#000000\"",
+ "set object 78 rect from 51.56899999999999, 12.83 to 51.57499999999999, 12.17 fc rgb \"#000000\"",
+ "set object 79 rect from 51.78349999999999, 12.83 to 51.87299999999998, 12.17 fc rgb \"#000000\"",
+ "set object 80 rect from 51.89049999999999, 12.83 to 51.89349999999999, 12.17 fc rgb \"#000000\"",
+ "set object 81 rect from 51.91599999999998, 12.83 to 52.115999999999985, 12.17 fc rgb \"#000000\"",
+ "set object 82 rect from 52.13449999999999, 12.83 to 52.13749999999999, 12.17 fc rgb \"#000000\"",
+ "set object 83 rect from 52.15399999999998, 12.83 to 52.286999999999985, 12.17 fc rgb \"#000000\"",
+ "set object 84 rect from 52.300499999999985, 12.83 to 52.30249999999998, 12.17 fc rgb \"#000000\"",
+ "set object 85 rect from 52.31499999999998, 12.83 to 52.362999999999985, 12.17 fc rgb \"#000000\"",
+ "set object 86 rect from 52.404499999999985, 12.83 to 52.40949999999998, 12.17 fc rgb \"#000000\"",
+ "set object 87 rect from 52.448999999999984, 12.83 to 54.55999999999998, 12.17 fc rgb \"#000000\"",
+ "set object 88 rect from 54.951999999999984, 12.83 to 55.48599999999998, 12.17 fc rgb \"#000000\"",
+ "set object 89 rect from 55.66249999999998, 12.83 to 55.79999999999998, 12.17 fc rgb \"#000000\"",
+ "set object 90 rect from 56.198999999999984, 12.83 to 56.25149999999998, 12.17 fc rgb \"#000000\"",
+ "set object 91 rect from 56.52499999999998, 12.83 to 56.55699999999998, 12.17 fc rgb \"#000000\"",
+ "set object 92 rect from 56.634499999999974, 12.83 to 56.63999999999998, 12.17 fc rgb \"#000000\"",
+ "set object 93 rect from 56.69449999999998, 12.83 to 56.746499999999976, 12.17 fc rgb \"#000000\"",
+ "set object 94 rect from 56.845999999999975, 12.83 to 56.85849999999998, 12.17 fc rgb \"#000000\"",
+ "set object 95 rect from 56.97649999999997, 12.83 to 57.03599999999997, 12.17 fc rgb \"#000000\"",
+ "set object 96 rect from 57.205999999999975, 12.83 to 57.27249999999997, 12.17 fc rgb \"#000000\"",
+ "set object 97 rect from 57.33299999999997, 12.83 to 57.565999999999974, 12.17 fc rgb \"#000000\"",
+ "set object 98 rect from 57.64849999999997, 12.83 to 57.878499999999974, 12.17 fc rgb \"#000000\"",
+ "set object 99 rect from 57.934999999999974, 12.83 to 57.97299999999997, 12.17 fc rgb \"#000000\"",
+ "set object 100 rect from 58.07699999999997, 12.83 to 58.09149999999997, 12.17 fc rgb \"#000000\"",
+ "set object 101 rect from 58.12149999999997, 12.83 to 58.14299999999997, 12.17 fc rgb \"#000000\"",
+ "set object 102 rect from 58.17349999999997, 12.83 to 58.17499999999997, 12.17 fc rgb \"#000000\"",
+ "set object 103 rect from 58.21549999999997, 12.83 to 58.23599999999997, 12.17 fc rgb \"#000000\"",
+ "set object 104 rect from 58.275499999999965, 12.83 to 58.27599999999997, 12.17 fc rgb \"#000000\"",
+ "set object 105 rect from 58.300499999999964, 12.83 to 58.30299999999997, 12.17 fc rgb \"#000000\"",
+ "set object 106 rect from 58.316999999999965, 12.83 to 58.409499999999966, 12.17 fc rgb \"#000000\"",
+ "set object 107 rect from 58.58699999999997, 12.83 to 58.589499999999965, 12.17 fc rgb \"#000000\"",
+ "set object 108 rect from 58.65749999999996, 12.83 to 58.92499999999996, 12.17 fc rgb \"#000000\"",
+ "set object 109 rect from 59.02199999999996, 12.83 to 59.02349999999996, 12.17 fc rgb \"#000000\"",
+ "set object 110 rect from 59.042999999999964, 12.83 to 59.641499999999965, 12.17 fc rgb \"#000000\"",
+ "set object 111 rect from 59.69699999999996, 12.83 to 59.89099999999996, 12.17 fc rgb \"#000000\"",
+ "set object 112 rect from 59.93649999999996, 12.83 to 60.04699999999996, 12.17 fc rgb \"#000000\"",
+ "set object 113 rect from 60.08349999999996, 12.83 to 60.17149999999996, 12.17 fc rgb \"#000000\"",
+ "set object 114 rect from 60.54849999999996, 12.83 to 60.55099999999995, 12.17 fc rgb \"#000000\"",
+ "set object 115 rect from 60.65699999999996, 12.83 to 60.91649999999996, 12.17 fc rgb \"#000000\"",
+ "set object 116 rect from 61.253999999999955, 12.83 to 61.31249999999996, 12.17 fc rgb \"#000000\"",
+ "set object 117 rect from 61.464999999999954, 12.83 to 62.16149999999996, 12.17 fc rgb \"#000000\"",
+ "set object 118 rect from 62.548999999999964, 12.83 to 62.62699999999996, 12.17 fc rgb \"#000000\"",
+ "set object 119 rect from 63.024999999999956, 12.83 to 63.14749999999995, 12.17 fc rgb \"#000000\"",
+ "set object 120 rect from 63.41299999999995, 12.83 to 64.40899999999996, 12.17 fc rgb \"#000000\"",
+ "set object 121 rect from 64.61749999999995, 12.83 to 65.56449999999995, 12.17 fc rgb \"#000000\"",
+ "set object 122 rect from 65.61699999999995, 12.83 to 67.34249999999994, 12.17 fc rgb \"#000000\"",
+ "set object 123 rect from 67.45099999999994, 12.83 to 67.45549999999994, 12.17 fc rgb \"#000000\"",
+ "set object 124 rect from 67.48749999999995, 12.83 to 67.53599999999994, 12.17 fc rgb \"#000000\"",
+ "set object 125 rect from 67.57649999999995, 12.83 to 67.57799999999995, 12.17 fc rgb \"#000000\"",
+ "set object 126 rect from 67.59199999999996, 12.83 to 68.70599999999996, 12.17 fc rgb \"#000000\"",
+ "set object 127 rect from 68.76649999999995, 12.83 to 69.10849999999995, 12.17 fc rgb \"#000000\"",
+ "set object 128 rect from 69.49599999999995, 12.83 to 70.31749999999994, 12.17 fc rgb \"#000000\"",
+ "set object 129 rect from 70.33949999999994, 12.83 to 70.34449999999994, 12.17 fc rgb \"#000000\"",
+ "set object 130 rect from 70.35799999999995, 12.83 to 70.40899999999993, 12.17 fc rgb \"#000000\"",
+ "set object 131 rect from 70.58649999999994, 12.83 to 72.22199999999995, 12.17 fc rgb \"#000000\"",
+ "set object 132 rect from 72.28049999999995, 12.83 to 74.40699999999995, 12.17 fc rgb \"#000000\"",
+ "set object 133 rect from 74.63849999999994, 12.83 to 75.04799999999994, 12.17 fc rgb \"#000000\"",
+ "set object 134 rect from 75.20099999999994, 12.83 to 75.41849999999994, 12.17 fc rgb \"#000000\"",
+ "set object 135 rect from 75.46799999999995, 12.83 to 78.16449999999993, 12.17 fc rgb \"#000000\"",
+ "set object 136 rect from 78.23649999999994, 12.83 to 80.90399999999994, 12.17 fc rgb \"#000000\"",
+ "set object 137 rect from 80.95049999999993, 12.83 to 83.58349999999993, 12.17 fc rgb \"#000000\"",
+ "set object 138 rect from 83.63999999999993, 12.83 to 84.09549999999993, 12.17 fc rgb \"#000000\"",
+ "set object 139 rect from 84.84549999999993, 12.83 to 84.91749999999993, 12.17 fc rgb \"#000000\"",
+ "set object 140 rect from 85.13799999999992, 12.83 to 85.37849999999993, 12.17 fc rgb \"#000000\"",
+ "set object 141 rect from 86.05649999999993, 12.83 to 86.75549999999993, 12.17 fc rgb \"#000000\"",
+ "set object 142 rect from 87.27399999999992, 12.83 to 87.27549999999992, 12.17 fc rgb \"#000000\"",
+ "set object 143 rect from 87.36899999999991, 12.83 to 88.75199999999992, 12.17 fc rgb \"#000000\"",
+ "set object 144 rect from 88.82299999999992, 12.83 to 88.83949999999992, 12.17 fc rgb \"#000000\"",
+ "set object 145 rect from 89.21399999999991, 12.83 to 91.90999999999991, 12.17 fc rgb \"#000000\"",
+ "set object 146 rect from 91.96649999999993, 12.83 to 94.55599999999993, 12.17 fc rgb \"#000000\"",
+ "set object 147 rect from 94.6054999999999, 12.83 to 97.20749999999991, 12.17 fc rgb \"#000000\"",
+ "set object 148 rect from 97.26099999999992, 12.83 to 99.86649999999992, 12.17 fc rgb \"#000000\"",
+ "set object 149 rect from 99.92199999999991, 12.83 to 102.56049999999992, 12.17 fc rgb \"#000000\"",
+ "set object 150 rect from 102.61199999999991, 12.83 to 102.74149999999992, 12.17 fc rgb \"#000000\"",
+ "set object 151 rect from 102.99499999999992, 12.83 to 104.13299999999992, 12.17 fc rgb \"#000000\"",
+ "set object 152 rect from 104.4429999999999, 12.83 to 105.88099999999991, 12.17 fc rgb \"#000000\"",
+ "set object 153 rect from 105.93349999999991, 12.83 to 107.51699999999991, 12.17 fc rgb \"#000000\"",
+ "set object 154 rect from 108.09449999999991, 12.83 to 109.2659999999999, 12.17 fc rgb \"#000000\"",
+ "set object 155 rect from 109.41799999999989, 12.83 to 110.0909999999999, 12.17 fc rgb \"#000000\"",
+ "set object 156 rect from 110.4839999999999, 12.83 to 112.6029999999999, 12.17 fc rgb \"#000000\"",
+ "set object 157 rect from 112.6564999999999, 12.83 to 115.36399999999989, 12.17 fc rgb \"#000000\"",
+ "set object 158 rect from 115.4124999999999, 12.83 to 118.1434999999999, 12.17 fc rgb \"#000000\"",
+ "set object 159 rect from 118.19199999999991, 12.83 to 120.9194999999999, 12.17 fc rgb \"#000000\"",
+ "set object 160 rect from 121.0314999999999, 12.83 to 123.77499999999989, 12.17 fc rgb \"#000000\"",
+ "set object 161 rect from 123.8254999999999, 12.83 to 126.55149999999989, 12.17 fc rgb \"#000000\"",
+ "set object 162 rect from 126.59899999999989, 12.83 to 129.3344999999999, 12.17 fc rgb \"#000000\"",
+ "set object 163 rect from 129.48849999999987, 12.83 to 130.5424999999999, 12.17 fc rgb \"#000000\"",
+ "set object 164 rect from 131.1209999999999, 12.83 to 132.8659999999999, 12.17 fc rgb \"#000000\"",
+ "set object 165 rect from 132.92249999999987, 12.83 to 133.04349999999988, 12.17 fc rgb \"#000000\"",
+ "set object 166 rect from 133.4079999999999, 12.83 to 136.14449999999988, 12.17 fc rgb \"#000000\"",
+ "set object 167 rect from 136.19799999999987, 12.83 to 138.9289999999999, 12.17 fc rgb \"#000000\"",
+ "set object 168 rect from 138.98049999999986, 12.83 to 140.86699999999988, 12.17 fc rgb \"#000000\"",
+ "set object 169 rect from 140.8814999999999, 12.83 to 140.88349999999988, 12.17 fc rgb \"#000000\"",
+ "set object 170 rect from 140.89599999999987, 12.83 to 140.9319999999999, 12.17 fc rgb \"#000000\"",
+ "set object 171 rect from 140.9574999999999, 12.83 to 140.96249999999986, 12.17 fc rgb \"#000000\"",
+ "set object 172 rect from 140.9779999999999, 12.83 to 141.0599999999999, 12.17 fc rgb \"#000000\"",
+ "set object 173 rect from 141.0984999999999, 12.83 to 141.09999999999988, 12.17 fc rgb \"#000000\"",
+ "set object 174 rect from 2.4490000000000003, 13.83 to 2.4545, 13.17 fc rgb \"#3399FF\"",
+ "set object 175 rect from 3.7920000000000003, 13.83 to 3.8075, 13.17 fc rgb \"#3399FF\"",
+ "set object 176 rect from 6.276000000000001, 13.83 to 6.2805, 13.17 fc rgb \"#3399FF\"",
+ "set object 177 rect from 7.373, 13.83 to 7.3865, 13.17 fc rgb \"#3399FF\"",
+ "set object 178 rect from 9.299, 13.83 to 9.302499999999998, 13.17 fc rgb \"#3399FF\"",
+ "set object 179 rect from 10.405000000000001, 13.83 to 10.4235, 13.17 fc rgb \"#3399FF\"",
+ "set object 180 rect from 12.882, 13.83 to 12.8865, 13.17 fc rgb \"#3399FF\"",
+ "set object 181 rect from 13.897, 13.83 to 13.910499999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 182 rect from 55.80349999999998, 13.83 to 55.80399999999998, 13.17 fc rgb \"#3399FF\"",
+ "set object 183 rect from 56.19399999999998, 13.83 to 56.19849999999998, 13.17 fc rgb \"#3399FF\"",
+ "set object 184 rect from 62.16599999999996, 13.83 to 62.166499999999964, 13.17 fc rgb \"#3399FF\"",
+ "set object 185 rect from 62.54499999999995, 13.83 to 62.54849999999996, 13.17 fc rgb \"#3399FF\"",
+ "set object 186 rect from 65.56999999999996, 13.83 to 65.57049999999997, 13.17 fc rgb \"#3399FF\"",
+ "set object 187 rect from 65.61499999999995, 13.83 to 65.61649999999995, 13.17 fc rgb \"#3399FF\"",
+ "set object 188 rect from 68.71249999999995, 13.83 to 68.71399999999994, 13.17 fc rgb \"#3399FF\"",
+ "set object 189 rect from 68.76249999999995, 13.83 to 68.76599999999993, 13.17 fc rgb \"#3399FF\"",
+ "set object 190 rect from 72.22849999999994, 13.83 to 72.22899999999994, 13.17 fc rgb \"#3399FF\"",
+ "set object 191 rect from 72.27749999999995, 13.83 to 72.27999999999994, 13.17 fc rgb \"#3399FF\"",
+ "set object 192 rect from 75.42299999999994, 13.83 to 75.42349999999995, 13.17 fc rgb \"#3399FF\"",
+ "set object 193 rect from 75.46599999999995, 13.83 to 75.46749999999994, 13.17 fc rgb \"#3399FF\"",
+ "set object 194 rect from 78.17099999999994, 13.83 to 78.17149999999994, 13.17 fc rgb \"#3399FF\"",
+ "set object 195 rect from 78.23049999999994, 13.83 to 78.23599999999993, 13.17 fc rgb \"#3399FF\"",
+ "set object 196 rect from 80.91049999999994, 13.83 to 80.91099999999994, 13.17 fc rgb \"#3399FF\"",
+ "set object 197 rect from 80.94849999999994, 13.83 to 80.94999999999993, 13.17 fc rgb \"#3399FF\"",
+ "set object 198 rect from 83.58999999999995, 13.83 to 83.59049999999995, 13.17 fc rgb \"#3399FF\"",
+ "set object 199 rect from 83.63699999999994, 13.83 to 83.63949999999993, 13.17 fc rgb \"#3399FF\"",
+ "set object 200 rect from 88.75849999999993, 13.83 to 88.75899999999993, 13.17 fc rgb \"#3399FF\"",
+ "set object 201 rect from 88.81899999999993, 13.83 to 88.82249999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 202 rect from 91.91649999999991, 13.83 to 91.91699999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 203 rect from 91.96349999999993, 13.83 to 91.96599999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 204 rect from 94.56249999999991, 13.83 to 94.56299999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 205 rect from 94.60349999999991, 13.83 to 94.6049999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 206 rect from 97.21399999999991, 13.83 to 97.21449999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 207 rect from 97.25899999999993, 13.83 to 97.26049999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 208 rect from 99.87599999999992, 13.83 to 99.87649999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 209 rect from 99.91899999999993, 13.83 to 99.92149999999991, 13.17 fc rgb \"#3399FF\"",
+ "set object 210 rect from 102.56599999999992, 13.83 to 102.56649999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 211 rect from 102.6099999999999, 13.83 to 102.61149999999991, 13.17 fc rgb \"#3399FF\"",
+ "set object 212 rect from 105.88749999999992, 13.83 to 105.88799999999992, 13.17 fc rgb \"#3399FF\"",
+ "set object 213 rect from 105.93149999999991, 13.83 to 105.93299999999991, 13.17 fc rgb \"#3399FF\"",
+ "set object 214 rect from 109.27249999999991, 13.83 to 109.27299999999991, 13.17 fc rgb \"#3399FF\"",
+ "set object 215 rect from 109.38599999999991, 13.83 to 109.4024999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 216 rect from 112.6104999999999, 13.83 to 112.6109999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 217 rect from 112.6544999999999, 13.83 to 112.65599999999989, 13.17 fc rgb \"#3399FF\"",
+ "set object 218 rect from 115.37049999999991, 13.83 to 115.37099999999991, 13.17 fc rgb \"#3399FF\"",
+ "set object 219 rect from 115.4104999999999, 13.83 to 115.41199999999989, 13.17 fc rgb \"#3399FF\"",
+ "set object 220 rect from 118.14999999999989, 13.83 to 118.1504999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 221 rect from 118.18999999999991, 13.83 to 118.1914999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 222 rect from 120.9319999999999, 13.83 to 120.9324999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 223 rect from 121.0104999999999, 13.83 to 121.0259999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 224 rect from 123.78149999999991, 13.83 to 123.78199999999991, 13.17 fc rgb \"#3399FF\"",
+ "set object 225 rect from 123.8234999999999, 13.83 to 123.8249999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 226 rect from 126.5569999999999, 13.83 to 126.5574999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 227 rect from 126.5969999999999, 13.83 to 126.59849999999989, 13.17 fc rgb \"#3399FF\"",
+ "set object 228 rect from 129.4124999999999, 13.83 to 129.4249999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 229 rect from 129.4864999999999, 13.83 to 129.4879999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 230 rect from 132.87149999999988, 13.83 to 132.87199999999987, 13.17 fc rgb \"#3399FF\"",
+ "set object 231 rect from 132.9204999999999, 13.83 to 132.92199999999988, 13.17 fc rgb \"#3399FF\"",
+ "set object 232 rect from 136.15099999999987, 13.83 to 136.15149999999986, 13.17 fc rgb \"#3399FF\"",
+ "set object 233 rect from 136.19599999999988, 13.83 to 136.19749999999988, 13.17 fc rgb \"#3399FF\"",
+ "set object 234 rect from 138.93549999999988, 13.83 to 138.93599999999986, 13.17 fc rgb \"#3399FF\"",
+ "set object 235 rect from 138.97849999999988, 13.83 to 138.97999999999988, 13.17 fc rgb \"#3399FF\"",
+ "set object 236 rect from 141.0599999999999, 13.83 to 141.0984999999999, 13.17 fc rgb \"#3399FF\"",
+ "set object 237 rect from 16.9945, 14.83 to 17.7705, 14.17 fc rgb \"#CC0000\"",
+ "set object 238 rect from 18.046, 14.83 to 18.1735, 14.17 fc rgb \"#CC0000\"",
+ "set object 239 rect from 19.0915, 14.83 to 19.152, 14.17 fc rgb \"#CC0000\"",
+ "set object 240 rect from 20.624499999999998, 14.83 to 21.063999999999997, 14.17 fc rgb \"#CC0000\"",
+ "set object 241 rect from 21.148500000000002, 14.83 to 21.175, 14.17 fc rgb \"#CC0000\"",
+ "set object 242 rect from 21.2875, 14.83 to 21.363, 14.17 fc rgb \"#CC0000\"",
+ "set object 243 rect from 21.505, 14.83 to 21.525499999999997, 14.17 fc rgb \"#CC0000\"",
+ "set object 244 rect from 21.604000000000003, 14.83 to 21.619500000000002, 14.17 fc rgb \"#CC0000\"",
+ "set object 245 rect from 21.747, 14.83 to 21.8475, 14.17 fc rgb \"#CC0000\"",
+ "set object 246 rect from 22.400499999999997, 14.83 to 22.416999999999998, 14.17 fc rgb \"#CC0000\"",
+ "set object 247 rect from 22.4715, 14.83 to 22.486, 14.17 fc rgb \"#CC0000\"",
+ "set object 248 rect from 22.517500000000002, 14.83 to 22.528, 14.17 fc rgb \"#CC0000\"",
+ "set object 249 rect from 22.5655, 14.83 to 22.570999999999998, 14.17 fc rgb \"#CC0000\"",
+ "set object 250 rect from 23.1575, 14.83 to 23.189, 14.17 fc rgb \"#CC0000\"",
+ "set object 251 rect from 23.376, 14.83 to 23.3945, 14.17 fc rgb \"#CC0000\"",
+ "set object 252 rect from 23.518, 14.83 to 23.5775, 14.17 fc rgb \"#CC0000\"",
+ "set object 253 rect from 23.617, 14.83 to 23.8365, 14.17 fc rgb \"#CC0000\"",
+ "set object 254 rect from 23.912000000000003, 14.83 to 23.9205, 14.17 fc rgb \"#CC0000\"",
+ "set object 255 rect from 24.4405, 14.83 to 24.458, 14.17 fc rgb \"#CC0000\"",
+ "set object 256 rect from 24.5085, 14.83 to 24.544000000000004, 14.17 fc rgb \"#CC0000\"",
+ "set object 257 rect from 41.91250000000001, 14.83 to 42.05100000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 258 rect from 42.30000000000002, 14.83 to 42.31450000000002, 14.17 fc rgb \"#CC0000\"",
+ "set object 259 rect from 42.66150000000002, 14.83 to 42.70600000000002, 14.17 fc rgb \"#CC0000\"",
+ "set object 260 rect from 42.76550000000001, 14.83 to 42.795000000000016, 14.17 fc rgb \"#CC0000\"",
+ "set object 261 rect from 42.812500000000014, 14.83 to 42.83300000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 262 rect from 42.85950000000001, 14.83 to 42.88200000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 263 rect from 43.622000000000014, 14.83 to 43.632500000000014, 14.17 fc rgb \"#CC0000\"",
+ "set object 264 rect from 44.812500000000014, 14.83 to 44.82200000000002, 14.17 fc rgb \"#CC0000\"",
+ "set object 265 rect from 44.87550000000002, 14.83 to 44.917000000000016, 14.17 fc rgb \"#CC0000\"",
+ "set object 266 rect from 45.01150000000001, 14.83 to 45.02900000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 267 rect from 45.068500000000014, 14.83 to 45.08700000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 268 rect from 45.110500000000016, 14.83 to 45.13500000000002, 14.17 fc rgb \"#CC0000\"",
+ "set object 269 rect from 45.16350000000002, 14.83 to 45.17200000000002, 14.17 fc rgb \"#CC0000\"",
+ "set object 270 rect from 45.34000000000002, 14.83 to 45.361500000000014, 14.17 fc rgb \"#CC0000\"",
+ "set object 271 rect from 45.405000000000015, 14.83 to 45.41750000000002, 14.17 fc rgb \"#CC0000\"",
+ "set object 272 rect from 45.44250000000002, 14.83 to 45.44850000000002, 14.17 fc rgb \"#CC0000\"",
+ "set object 273 rect from 45.466000000000015, 14.83 to 45.470500000000015, 14.17 fc rgb \"#CC0000\"",
+ "set object 274 rect from 45.484000000000016, 14.83 to 45.489500000000014, 14.17 fc rgb \"#CC0000\"",
+ "set object 275 rect from 45.56900000000001, 14.83 to 45.626500000000014, 14.17 fc rgb \"#CC0000\"",
+ "set object 276 rect from 45.71200000000001, 14.83 to 45.76150000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 277 rect from 45.82000000000001, 14.83 to 45.84850000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 278 rect from 45.93000000000001, 14.83 to 45.97850000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 279 rect from 46.08650000000001, 14.83 to 46.13100000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 280 rect from 46.291500000000006, 14.83 to 46.37200000000001, 14.17 fc rgb \"#CC0000\"",
+ "set object 281 rect from 46.59550000000001, 14.83 to 46.621, 14.17 fc rgb \"#CC0000\"",
+ "set object 282 rect from 46.694500000000005, 14.83 to 46.74, 14.17 fc rgb \"#CC0000\"",
+ "set object 283 rect from 46.8975, 14.83 to 46.9925, 14.17 fc rgb \"#CC0000\"",
+ "set object 284 rect from 47.050000000000004, 14.83 to 47.057500000000005, 14.17 fc rgb \"#CC0000\"",
+ "set object 285 rect from 47.141, 14.83 to 47.3935, 14.17 fc rgb \"#CC0000\"",
+ "set object 286 rect from 47.472, 14.83 to 47.5235, 14.17 fc rgb \"#CC0000\"",
+ "set object 287 rect from 49.469, 14.83 to 49.4955, 14.17 fc rgb \"#CC0000\"",
+ "set object 288 rect from 49.732, 14.83 to 49.780499999999996, 14.17 fc rgb \"#CC0000\"",
+ "set object 289 rect from 49.814, 14.83 to 49.82449999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 290 rect from 49.851, 14.83 to 49.8685, 14.17 fc rgb \"#CC0000\"",
+ "set object 291 rect from 49.99849999999999, 14.83 to 50.007, 14.17 fc rgb \"#CC0000\"",
+ "set object 292 rect from 50.147499999999994, 14.83 to 50.20799999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 293 rect from 50.314499999999995, 14.83 to 50.32899999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 294 rect from 50.40249999999999, 14.83 to 50.43099999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 295 rect from 50.52949999999999, 14.83 to 50.53499999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 296 rect from 50.62949999999999, 14.83 to 51.02349999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 297 rect from 51.292999999999985, 14.83 to 51.30249999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 298 rect from 51.57499999999999, 14.83 to 51.78349999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 299 rect from 51.89349999999999, 14.83 to 51.91599999999998, 14.17 fc rgb \"#CC0000\"",
+ "set object 300 rect from 52.13749999999999, 14.83 to 52.15399999999998, 14.17 fc rgb \"#CC0000\"",
+ "set object 301 rect from 52.30249999999998, 14.83 to 52.31499999999998, 14.17 fc rgb \"#CC0000\"",
+ "set object 302 rect from 52.331499999999984, 14.83 to 52.338999999999984, 14.17 fc rgb \"#CC0000\"",
+ "set object 303 rect from 52.40949999999998, 14.83 to 52.448999999999984, 14.17 fc rgb \"#CC0000\"",
+ "set object 304 rect from 70.34449999999994, 14.83 to 70.35799999999995, 14.17 fc rgb \"#CC0000\"",
+ "set object 305 rect from 140.88349999999988, 14.83 to 140.89599999999987, 14.17 fc rgb \"#CC0000\"",
+ "set object 306 rect from 140.96249999999986, 14.83 to 140.9779999999999, 14.17 fc rgb \"#CC0000\"",
+ "set object 307 rect from 141.0404999999999, 14.83 to 141.04699999999988, 14.17 fc rgb \"#CC0000\"",
+ "set object 308 rect from 25.285, 15.83 to 25.4055, 15.17 fc rgb \"#CC0044\"",
+ "set object 309 rect from 25.428000000000004, 15.83 to 25.507500000000004, 15.17 fc rgb \"#CC0044\"",
+ "set object 310 rect from 25.526500000000002, 15.83 to 25.591500000000003, 15.17 fc rgb \"#CC0044\"",
+ "set object 311 rect from 54.55999999999998, 15.83 to 54.566499999999984, 15.17 fc rgb \"#CC0044\"",
+ "set object 312 rect from 54.64299999999998, 15.83 to 54.951999999999984, 15.17 fc rgb \"#CC0044\"",
+ "set object 313 rect from 55.48599999999998, 15.83 to 55.49149999999998, 15.17 fc rgb \"#CC0044\"",
+ "set object 314 rect from 55.53099999999998, 15.83 to 55.66249999999998, 15.17 fc rgb \"#CC0044\"",
+ "set object 315 rect from 56.25149999999998, 15.83 to 56.52499999999998, 15.17 fc rgb \"#CC0044\"",
+ "set object 316 rect from 56.55699999999998, 15.83 to 56.64049999999998, 15.17 fc rgb \"#CC0044\"",
+ "set object 317 rect from 56.64999999999998, 15.83 to 56.69449999999998, 15.17 fc rgb \"#CC0044\"",
+ "set object 318 rect from 56.746499999999976, 15.83 to 56.750999999999976, 15.17 fc rgb \"#CC0044\"",
+ "set object 319 rect from 56.76449999999998, 15.83 to 56.845999999999975, 15.17 fc rgb \"#CC0044\"",
+ "set object 320 rect from 56.85849999999998, 15.83 to 56.97649999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 321 rect from 57.03599999999997, 15.83 to 57.039499999999975, 15.17 fc rgb \"#CC0044\"",
+ "set object 322 rect from 57.076499999999974, 15.83 to 57.205999999999975, 15.17 fc rgb \"#CC0044\"",
+ "set object 323 rect from 57.27249999999997, 15.83 to 57.33299999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 324 rect from 57.565999999999974, 15.83 to 57.64849999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 325 rect from 57.878499999999974, 15.83 to 57.934999999999974, 15.17 fc rgb \"#CC0044\"",
+ "set object 326 rect from 57.97299999999997, 15.83 to 57.97749999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 327 rect from 57.99099999999997, 15.83 to 58.04499999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 328 rect from 58.055499999999974, 15.83 to 58.07699999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 329 rect from 58.09149999999997, 15.83 to 58.12149999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 330 rect from 58.14299999999997, 15.83 to 58.21549999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 331 rect from 58.23599999999997, 15.83 to 58.316999999999965, 15.17 fc rgb \"#CC0044\"",
+ "set object 332 rect from 58.409499999999966, 15.83 to 58.40999999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 333 rect from 58.431499999999964, 15.83 to 58.51699999999997, 15.17 fc rgb \"#CC0044\"",
+ "set object 334 rect from 58.53049999999997, 15.83 to 58.590999999999966, 15.17 fc rgb \"#CC0044\"",
+ "set object 335 rect from 58.60049999999997, 15.83 to 58.65749999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 336 rect from 58.92499999999996, 15.83 to 59.042999999999964, 15.17 fc rgb \"#CC0044\"",
+ "set object 337 rect from 59.641499999999965, 15.83 to 59.65599999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 338 rect from 59.669499999999964, 15.83 to 59.69699999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 339 rect from 59.89099999999996, 15.83 to 59.93649999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 340 rect from 60.04699999999996, 15.83 to 60.05149999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 341 rect from 60.060999999999964, 15.83 to 60.08349999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 342 rect from 60.17149999999996, 15.83 to 60.176999999999964, 15.17 fc rgb \"#CC0044\"",
+ "set object 343 rect from 60.19499999999996, 15.83 to 60.26949999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 344 rect from 60.27999999999996, 15.83 to 60.31149999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 345 rect from 60.34699999999996, 15.83 to 60.471499999999956, 15.17 fc rgb \"#CC0044\"",
+ "set object 346 rect from 60.48399999999996, 15.83 to 60.508499999999955, 15.17 fc rgb \"#CC0044\"",
+ "set object 347 rect from 60.51999999999996, 15.83 to 60.65699999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 348 rect from 60.91649999999996, 15.83 to 60.92099999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 349 rect from 60.98249999999996, 15.83 to 61.253999999999955, 15.17 fc rgb \"#CC0044\"",
+ "set object 350 rect from 61.31249999999996, 15.83 to 61.464999999999954, 15.17 fc rgb \"#CC0044\"",
+ "set object 351 rect from 62.62699999999996, 15.83 to 63.024999999999956, 15.17 fc rgb \"#CC0044\"",
+ "set object 352 rect from 63.14749999999995, 15.83 to 63.15199999999995, 15.17 fc rgb \"#CC0044\"",
+ "set object 353 rect from 63.228499999999954, 15.83 to 63.41299999999995, 15.17 fc rgb \"#CC0044\"",
+ "set object 354 rect from 64.40899999999996, 15.83 to 64.61749999999995, 15.17 fc rgb \"#CC0044\"",
+ "set object 355 rect from 67.34249999999994, 15.83 to 67.34999999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 356 rect from 67.36349999999995, 15.83 to 67.45699999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 357 rect from 67.46599999999995, 15.83 to 67.48749999999995, 15.17 fc rgb \"#CC0044\"",
+ "set object 358 rect from 67.53599999999994, 15.83 to 67.59199999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 359 rect from 69.10849999999995, 15.83 to 69.11299999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 360 rect from 69.12949999999995, 15.83 to 69.19199999999995, 15.17 fc rgb \"#CC0044\"",
+ "set object 361 rect from 69.22649999999994, 15.83 to 69.30799999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 362 rect from 69.31949999999995, 15.83 to 69.34699999999995, 15.17 fc rgb \"#CC0044\"",
+ "set object 363 rect from 69.35749999999994, 15.83 to 69.38399999999996, 15.17 fc rgb \"#CC0044\"",
+ "set object 364 rect from 69.40549999999995, 15.83 to 69.45099999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 365 rect from 69.46349999999994, 15.83 to 69.49599999999995, 15.17 fc rgb \"#CC0044\"",
+ "set object 366 rect from 70.40899999999993, 15.83 to 70.58649999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 367 rect from 74.40699999999995, 15.83 to 74.41449999999995, 15.17 fc rgb \"#CC0044\"",
+ "set object 368 rect from 74.43899999999994, 15.83 to 74.52049999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 369 rect from 74.54499999999993, 15.83 to 74.59549999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 370 rect from 74.60899999999995, 15.83 to 74.63849999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 371 rect from 75.04799999999994, 15.83 to 75.20099999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 372 rect from 84.09549999999993, 15.83 to 84.09999999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 373 rect from 84.15349999999994, 15.83 to 84.26099999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 374 rect from 84.27549999999994, 15.83 to 84.34199999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 375 rect from 84.35349999999993, 15.83 to 84.37299999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 376 rect from 84.40149999999993, 15.83 to 84.43999999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 377 rect from 84.46149999999993, 15.83 to 84.53049999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 378 rect from 84.60099999999994, 15.83 to 84.68049999999992, 15.17 fc rgb \"#CC0044\"",
+ "set object 379 rect from 84.69199999999992, 15.83 to 84.71649999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 380 rect from 84.72799999999992, 15.83 to 84.84549999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 381 rect from 84.91749999999993, 15.83 to 84.92199999999994, 15.17 fc rgb \"#CC0044\"",
+ "set object 382 rect from 84.93849999999993, 15.83 to 84.99799999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 383 rect from 85.01049999999992, 15.83 to 85.06199999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 384 rect from 85.07249999999993, 15.83 to 85.13799999999992, 15.17 fc rgb \"#CC0044\"",
+ "set object 385 rect from 85.37849999999993, 15.83 to 85.38399999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 386 rect from 85.43999999999994, 15.83 to 85.59949999999992, 15.17 fc rgb \"#CC0044\"",
+ "set object 387 rect from 85.61599999999993, 15.83 to 85.63749999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 388 rect from 85.65899999999993, 15.83 to 85.69649999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 389 rect from 85.70599999999993, 15.83 to 85.73249999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 390 rect from 85.76899999999992, 15.83 to 85.86549999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 391 rect from 85.87599999999992, 15.83 to 85.91149999999992, 15.17 fc rgb \"#CC0044\"",
+ "set object 392 rect from 85.92499999999993, 15.83 to 86.05649999999993, 15.17 fc rgb \"#CC0044\"",
+ "set object 393 rect from 86.75549999999993, 15.83 to 87.36899999999991, 15.17 fc rgb \"#CC0044\"",
+ "set object 394 rect from 88.83949999999992, 15.83 to 89.21399999999991, 15.17 fc rgb \"#CC0044\"",
+ "set object 395 rect from 102.74149999999992, 15.83 to 102.74599999999992, 15.17 fc rgb \"#CC0044\"",
+ "set object 396 rect from 102.80749999999992, 15.83 to 102.99499999999992, 15.17 fc rgb \"#CC0044\"",
+ "set object 397 rect from 104.13299999999992, 15.83 to 104.4429999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 398 rect from 107.51699999999991, 15.83 to 107.5244999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 399 rect from 107.57199999999992, 15.83 to 107.62449999999991, 15.17 fc rgb \"#CC0044\"",
+ "set object 400 rect from 107.6389999999999, 15.83 to 107.69849999999991, 15.17 fc rgb \"#CC0044\"",
+ "set object 401 rect from 107.70999999999992, 15.83 to 107.7294999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 402 rect from 107.7469999999999, 15.83 to 107.7834999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 403 rect from 107.79299999999992, 15.83 to 107.82049999999991, 15.17 fc rgb \"#CC0044\"",
+ "set object 404 rect from 107.8529999999999, 15.83 to 107.9294999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 405 rect from 107.94099999999992, 15.83 to 107.9654999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 406 rect from 107.97599999999991, 15.83 to 108.09449999999991, 15.17 fc rgb \"#CC0044\"",
+ "set object 407 rect from 110.0909999999999, 15.83 to 110.4839999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 408 rect from 130.5424999999999, 15.83 to 130.5489999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 409 rect from 130.5954999999999, 15.83 to 130.6469999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 410 rect from 130.6614999999999, 15.83 to 130.68999999999988, 15.17 fc rgb \"#CC0044\"",
+ "set object 411 rect from 130.6994999999999, 15.83 to 130.7219999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 412 rect from 130.7324999999999, 15.83 to 130.7519999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 413 rect from 130.76949999999988, 15.83 to 130.8059999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 414 rect from 130.8154999999999, 15.83 to 130.84299999999988, 15.17 fc rgb \"#CC0044\"",
+ "set object 415 rect from 130.87549999999987, 15.83 to 130.95199999999988, 15.17 fc rgb \"#CC0044\"",
+ "set object 416 rect from 130.9644999999999, 15.83 to 130.99099999999987, 15.17 fc rgb \"#CC0044\"",
+ "set object 417 rect from 131.00249999999988, 15.83 to 131.1209999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 418 rect from 133.04349999999988, 15.83 to 133.4079999999999, 15.17 fc rgb \"#CC0044\"",
+ "set object 419 rect from 54.97249999999998, 16.83 to 56.20849999999998, 16.17 fc rgb \"#CC4499\"",
+ "set object 420 rect from 56.238999999999976, 16.83 to 56.53849999999998, 16.17 fc rgb \"#CC4499\"",
+ "set object 421 rect from 56.71599999999998, 16.83 to 56.830999999999975, 16.17 fc rgb \"#CC4499\"",
+ "set object 422 rect from 56.84349999999998, 16.83 to 57.062999999999974, 16.17 fc rgb \"#CC4499\"",
+ "set object 423 rect from 57.228499999999976, 16.83 to 57.545499999999976, 16.17 fc rgb \"#CC4499\"",
+ "set object 424 rect from 57.957499999999975, 16.83 to 58.00149999999997, 16.17 fc rgb \"#CC4499\"",
+ "set object 425 rect from 58.09499999999997, 16.83 to 58.30249999999997, 16.17 fc rgb \"#CC4499\"",
+ "set object 426 rect from 58.603999999999964, 16.83 to 58.964499999999965, 16.17 fc rgb \"#CC4499\"",
+ "set object 427 rect from 59.773999999999965, 16.83 to 59.88149999999996, 16.17 fc rgb \"#CC4499\"",
+ "set object 428 rect from 60.106999999999964, 16.83 to 60.18549999999996, 16.17 fc rgb \"#CC4499\"",
+ "set object 429 rect from 60.56649999999996, 16.83 to 62.55849999999995, 16.17 fc rgb \"#CC4499\"",
+ "set object 430 rect from 63.43549999999995, 16.83 to 64.39649999999995, 16.17 fc rgb \"#CC4499\"",
+ "set object 431 rect from 67.45849999999994, 16.83 to 67.57949999999995, 16.17 fc rgb \"#CC4499\"",
+ "set object 432 rect from 69.52049999999994, 16.83 to 70.32799999999995, 16.17 fc rgb \"#CC4499\"",
+ "set object 433 rect from 74.66299999999993, 16.83 to 75.03649999999993, 16.17 fc rgb \"#CC4499\"",
+ "set object 434 rect from 84.86699999999993, 16.83 to 88.80849999999992, 16.17 fc rgb \"#CC4499\"",
+ "set object 435 rect from 103.02549999999991, 16.83 to 104.04749999999991, 16.17 fc rgb \"#CC4499\"",
+ "set object 436 rect from 108.1159999999999, 16.83 to 110.07649999999991, 16.17 fc rgb \"#CC4499\"",
+ "set object 437 rect from 131.1424999999999, 16.83 to 133.02899999999988, 16.17 fc rgb \"#CC4499\"",
+ "set object 438 rect from 141.13349999999986, 16.83 to 141.1669999999999, 16.17 fc rgb \"#CC4499\"",
+ "set object 439 rect from 22.2675, 18.83 to 22.3815, 18.17 fc rgb \"#00CC00\"",
+ "set object 440 rect from 22.665, 18.83 to 23.1135, 18.17 fc rgb \"#00CC00\"",
+ "set object 441 rect from 27.951000000000004, 18.83 to 27.972500000000004, 18.17 fc rgb \"#00CC00\"",
+ "set object 442 rect from 27.993000000000002, 18.83 to 28.013500000000004, 18.17 fc rgb \"#00CC00\"",
+ "set object 443 rect from 28.043000000000003, 18.83 to 28.063500000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 444 rect from 28.085000000000004, 18.83 to 28.087500000000002, 18.17 fc rgb \"#00CC00\"",
+ "set object 445 rect from 28.115000000000002, 18.83 to 28.139500000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 446 rect from 28.154000000000007, 18.83 to 28.260000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 447 rect from 28.309500000000003, 18.83 to 28.374000000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 448 rect from 28.383500000000005, 18.83 to 28.385000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 449 rect from 28.396500000000003, 18.83 to 28.445000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 450 rect from 28.459500000000006, 18.83 to 28.463000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 451 rect from 28.489500000000007, 18.83 to 28.499000000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 452 rect from 28.512500000000006, 18.83 to 28.516000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 453 rect from 28.529500000000006, 18.83 to 28.533000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 454 rect from 28.554500000000004, 18.83 to 28.557000000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 455 rect from 28.573500000000006, 18.83 to 28.579000000000008, 18.17 fc rgb \"#00CC00\"",
+ "set object 456 rect from 28.59950000000001, 18.83 to 28.602000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 457 rect from 28.623500000000007, 18.83 to 28.625000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 458 rect from 28.637500000000006, 18.83 to 28.647000000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 459 rect from 28.657500000000006, 18.83 to 28.669000000000008, 18.17 fc rgb \"#00CC00\"",
+ "set object 460 rect from 28.682500000000005, 18.83 to 28.686000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 461 rect from 28.695500000000006, 18.83 to 28.701000000000008, 18.17 fc rgb \"#00CC00\"",
+ "set object 462 rect from 28.72450000000001, 18.83 to 28.811000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 463 rect from 28.83250000000001, 18.83 to 28.907500000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 464 rect from 28.97100000000001, 18.83 to 28.97450000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 465 rect from 28.99600000000001, 18.83 to 28.99850000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 466 rect from 29.01200000000001, 18.83 to 29.01350000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 467 rect from 29.02600000000001, 18.83 to 29.056500000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 468 rect from 29.06900000000001, 18.83 to 29.159500000000012, 18.17 fc rgb \"#00CC00\"",
+ "set object 469 rect from 29.17100000000001, 18.83 to 29.18450000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 470 rect from 29.19400000000001, 18.83 to 41.84850000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 471 rect from 41.87900000000001, 18.83 to 41.88650000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 472 rect from 27.972500000000004, 19.83 to 28.053000000000004, 19.17 fc rgb \"#44CC00\"",
+ "set object 473 rect from 28.063500000000005, 19.83 to 28.169000000000004, 19.17 fc rgb \"#44CC00\"",
+ "set object 474 rect from 28.260000000000005, 19.83 to 28.489500000000007, 19.17 fc rgb \"#44CC00\"",
+ "set object 475 rect from 28.499000000000006, 19.83 to 28.761500000000005, 19.17 fc rgb \"#44CC00\"",
+ "set object 476 rect from 28.78900000000001, 19.83 to 28.847500000000007, 19.17 fc rgb \"#44CC00\"",
+ "set object 477 rect from 28.907500000000006, 19.83 to 29.047000000000008, 19.17 fc rgb \"#44CC00\"",
+ "set object 478 rect from 29.056500000000007, 19.83 to 29.111000000000008, 19.17 fc rgb \"#44CC00\"",
+ "set object 479 rect from 29.12350000000001, 19.83 to 29.21900000000001, 19.17 fc rgb \"#44CC00\"",
+ "set object 480 rect from 41.82650000000001, 19.83 to 41.83500000000001, 19.17 fc rgb \"#44CC00\"",
+ "set object 481 rect from 41.84850000000001, 19.83 to 41.87900000000001, 19.17 fc rgb \"#44CC00\"",
+ "set object 482 rect from 16.737, 20.83 to 16.9595, 20.17 fc rgb \"#00CC44\"",
+ "set object 483 rect from 17.8715, 20.83 to 18.017000000000003, 20.17 fc rgb \"#00CC44\"",
+ "set object 484 rect from 18.992, 20.83 to 19.0685, 20.17 fc rgb \"#00CC44\"",
+ "set object 485 rect from 20.52, 20.83 to 20.5975, 20.17 fc rgb \"#00CC44\"",
+ "set object 486 rect from 21.109, 20.83 to 21.1335, 20.17 fc rgb \"#00CC44\"",
+ "set object 487 rect from 21.212, 20.83 to 21.2695, 20.17 fc rgb \"#00CC44\"",
+ "set object 488 rect from 21.4595, 20.83 to 21.49, 20.17 fc rgb \"#00CC44\"",
+ "set object 489 rect from 21.566499999999998, 20.83 to 21.588, 20.17 fc rgb \"#00CC44\"",
+ "set object 490 rect from 21.6535, 20.83 to 21.727, 20.17 fc rgb \"#00CC44\"",
+ "set object 491 rect from 22.445, 20.83 to 22.4625, 20.17 fc rgb \"#00CC44\"",
+ "set object 492 rect from 22.502000000000002, 20.83 to 22.5165, 20.17 fc rgb \"#00CC44\"",
+ "set object 493 rect from 22.553, 20.83 to 22.5645, 20.17 fc rgb \"#00CC44\"",
+ "set object 494 rect from 23.233, 20.83 to 23.336000000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 495 rect from 23.4255, 20.83 to 23.506, 20.17 fc rgb \"#00CC44\"",
+ "set object 496 rect from 23.5895, 20.83 to 23.613, 20.17 fc rgb \"#00CC44\"",
+ "set object 497 rect from 23.870500000000003, 20.83 to 23.907, 20.17 fc rgb \"#00CC44\"",
+ "set object 498 rect from 24.393, 20.83 to 24.430500000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 499 rect from 24.470000000000002, 20.83 to 24.504500000000004, 20.17 fc rgb \"#00CC44\"",
+ "set object 500 rect from 25.267500000000002, 20.83 to 25.283, 20.17 fc rgb \"#00CC44\"",
+ "set object 501 rect from 25.4195, 20.83 to 25.427, 20.17 fc rgb \"#00CC44\"",
+ "set object 502 rect from 25.519500000000004, 20.83 to 25.526000000000003, 20.17 fc rgb \"#00CC44\"",
+ "set object 503 rect from 42.28050000000001, 20.83 to 42.298000000000016, 20.17 fc rgb \"#00CC44\"",
+ "set object 504 rect from 42.62700000000002, 20.83 to 42.656500000000015, 20.17 fc rgb \"#00CC44\"",
+ "set object 505 rect from 42.747000000000014, 20.83 to 42.763500000000015, 20.17 fc rgb \"#00CC44\"",
+ "set object 506 rect from 42.80300000000001, 20.83 to 42.81050000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 507 rect from 42.844000000000015, 20.83 to 42.858500000000014, 20.17 fc rgb \"#00CC44\"",
+ "set object 508 rect from 43.60550000000001, 20.83 to 43.62000000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 509 rect from 44.796000000000014, 20.83 to 44.81150000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 510 rect from 44.84500000000001, 20.83 to 44.87150000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 511 rect from 44.996000000000016, 20.83 to 45.00850000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 512 rect from 45.04700000000001, 20.83 to 45.06450000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 513 rect from 45.09600000000001, 20.83 to 45.107500000000016, 20.17 fc rgb \"#00CC44\"",
+ "set object 514 rect from 45.14400000000002, 20.83 to 45.16150000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 515 rect from 45.32050000000002, 20.83 to 45.33700000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 516 rect from 45.38750000000002, 20.83 to 45.402000000000015, 20.17 fc rgb \"#00CC44\"",
+ "set object 517 rect from 45.43250000000002, 20.83 to 45.442000000000014, 20.17 fc rgb \"#00CC44\"",
+ "set object 518 rect from 45.46050000000002, 20.83 to 45.46500000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 519 rect from 45.47750000000001, 20.83 to 45.48300000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 520 rect from 45.49750000000001, 20.83 to 45.55900000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 521 rect from 45.66050000000001, 20.83 to 45.70300000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 522 rect from 45.79350000000001, 20.83 to 45.81700000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 523 rect from 45.86950000000001, 20.83 to 45.92300000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 524 rect from 45.99450000000001, 20.83 to 46.060500000000005, 20.17 fc rgb \"#00CC44\"",
+ "set object 525 rect from 46.18500000000001, 20.83 to 46.28150000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 526 rect from 46.550000000000004, 20.83 to 46.5915, 20.17 fc rgb \"#00CC44\"",
+ "set object 527 rect from 46.65500000000001, 20.83 to 46.691500000000005, 20.17 fc rgb \"#00CC44\"",
+ "set object 528 rect from 46.861000000000004, 20.83 to 46.8935, 20.17 fc rgb \"#00CC44\"",
+ "set object 529 rect from 47.039500000000004, 20.83 to 47.049, 20.17 fc rgb \"#00CC44\"",
+ "set object 530 rect from 47.0765, 20.83 to 47.135000000000005, 20.17 fc rgb \"#00CC44\"",
+ "set object 531 rect from 47.4125, 20.83 to 47.465, 20.17 fc rgb \"#00CC44\"",
+ "set object 532 rect from 49.454499999999996, 20.83 to 49.467, 20.17 fc rgb \"#00CC44\"",
+ "set object 533 rect from 49.6855, 20.83 to 49.726, 20.17 fc rgb \"#00CC44\"",
+ "set object 534 rect from 49.799499999999995, 20.83 to 49.812999999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 535 rect from 49.841499999999996, 20.83 to 49.849999999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 536 rect from 49.894499999999994, 20.83 to 49.9695, 20.17 fc rgb \"#00CC44\"",
+ "set object 537 rect from 50.083999999999996, 20.83 to 50.14149999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 538 rect from 50.29299999999999, 20.83 to 50.31249999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 539 rect from 50.36699999999999, 20.83 to 50.39849999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 540 rect from 50.520999999999994, 20.83 to 50.528499999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 541 rect from 50.54899999999999, 20.83 to 50.62049999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 542 rect from 51.27549999999999, 20.83 to 51.29099999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 543 rect from 51.52249999999999, 20.83 to 51.56899999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 544 rect from 51.87299999999998, 20.83 to 51.89049999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 545 rect from 52.115999999999985, 20.83 to 52.13449999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 546 rect from 52.286999999999985, 20.83 to 52.300499999999985, 20.17 fc rgb \"#00CC44\"",
+ "set object 547 rect from 52.326999999999984, 20.83 to 52.33049999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 548 rect from 52.362999999999985, 20.83 to 52.404499999999985, 20.17 fc rgb \"#00CC44\"",
+ "set object 549 rect from 54.566499999999984, 20.83 to 54.64299999999998, 20.17 fc rgb \"#00CC44\"",
+ "set object 550 rect from 55.49149999999998, 20.83 to 55.53099999999998, 20.17 fc rgb \"#00CC44\"",
+ "set object 551 rect from 56.64049999999998, 20.83 to 56.64999999999998, 20.17 fc rgb \"#00CC44\"",
+ "set object 552 rect from 56.750999999999976, 20.83 to 56.76449999999998, 20.17 fc rgb \"#00CC44\"",
+ "set object 553 rect from 57.039499999999975, 20.83 to 57.076499999999974, 20.17 fc rgb \"#00CC44\"",
+ "set object 554 rect from 57.885999999999974, 20.83 to 57.893499999999975, 20.17 fc rgb \"#00CC44\"",
+ "set object 555 rect from 57.97749999999997, 20.83 to 57.99099999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 556 rect from 58.04499999999997, 20.83 to 58.055499999999974, 20.17 fc rgb \"#00CC44\"",
+ "set object 557 rect from 58.14549999999997, 20.83 to 58.15399999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 558 rect from 58.17549999999997, 20.83 to 58.18399999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 559 rect from 58.40999999999997, 20.83 to 58.431499999999964, 20.17 fc rgb \"#00CC44\"",
+ "set object 560 rect from 58.51699999999997, 20.83 to 58.53049999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 561 rect from 58.590999999999966, 20.83 to 58.60049999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 562 rect from 59.65599999999996, 20.83 to 59.669499999999964, 20.17 fc rgb \"#00CC44\"",
+ "set object 563 rect from 60.05149999999996, 20.83 to 60.060999999999964, 20.17 fc rgb \"#00CC44\"",
+ "set object 564 rect from 60.176999999999964, 20.83 to 60.19499999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 565 rect from 60.26949999999996, 20.83 to 60.27999999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 566 rect from 60.31149999999996, 20.83 to 60.34699999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 567 rect from 60.471499999999956, 20.83 to 60.48399999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 568 rect from 60.508499999999955, 20.83 to 60.51999999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 569 rect from 60.92099999999996, 20.83 to 60.98249999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 570 rect from 63.15199999999995, 20.83 to 63.228499999999954, 20.17 fc rgb \"#00CC44\"",
+ "set object 571 rect from 67.34999999999994, 20.83 to 67.36349999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 572 rect from 67.40699999999995, 20.83 to 67.41249999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 573 rect from 67.45699999999994, 20.83 to 67.46599999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 574 rect from 69.11299999999994, 20.83 to 69.12949999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 575 rect from 69.19199999999995, 20.83 to 69.22649999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 576 rect from 69.30799999999994, 20.83 to 69.31949999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 577 rect from 69.34699999999995, 20.83 to 69.35749999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 578 rect from 69.38399999999996, 20.83 to 69.40549999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 579 rect from 69.45099999999994, 20.83 to 69.46349999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 580 rect from 70.31749999999994, 20.83 to 70.33949999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 581 rect from 74.41449999999995, 20.83 to 74.43899999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 582 rect from 74.52049999999994, 20.83 to 74.54499999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 583 rect from 74.59549999999994, 20.83 to 74.60899999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 584 rect from 84.09999999999994, 20.83 to 84.15349999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 585 rect from 84.26099999999994, 20.83 to 84.27549999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 586 rect from 84.31099999999992, 20.83 to 84.31949999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 587 rect from 84.34199999999993, 20.83 to 84.35349999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 588 rect from 84.37299999999993, 20.83 to 84.40149999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 589 rect from 84.43999999999994, 20.83 to 84.46149999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 590 rect from 84.53049999999993, 20.83 to 84.60099999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 591 rect from 84.68049999999992, 20.83 to 84.69199999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 592 rect from 84.71649999999993, 20.83 to 84.72799999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 593 rect from 84.92199999999994, 20.83 to 84.93849999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 594 rect from 84.99799999999993, 20.83 to 85.01049999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 595 rect from 85.03599999999992, 20.83 to 85.04449999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 596 rect from 85.06199999999993, 20.83 to 85.07249999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 597 rect from 85.09499999999994, 20.83 to 85.10249999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 598 rect from 85.38399999999993, 20.83 to 85.43999999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 599 rect from 85.59949999999992, 20.83 to 85.61599999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 600 rect from 85.63749999999993, 20.83 to 85.65899999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 601 rect from 85.69649999999993, 20.83 to 85.70599999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 602 rect from 85.73249999999993, 20.83 to 85.76899999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 603 rect from 85.86549999999993, 20.83 to 85.87599999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 604 rect from 85.91149999999992, 20.83 to 85.92499999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 605 rect from 102.74599999999992, 20.83 to 102.80749999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 606 rect from 107.5244999999999, 20.83 to 107.57199999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 607 rect from 107.62449999999991, 20.83 to 107.6389999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 608 rect from 107.6674999999999, 20.83 to 107.6759999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 609 rect from 107.69849999999991, 20.83 to 107.70999999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 610 rect from 107.7294999999999, 20.83 to 107.7469999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 611 rect from 107.7834999999999, 20.83 to 107.79299999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 612 rect from 107.82049999999991, 20.83 to 107.8529999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 613 rect from 107.9294999999999, 20.83 to 107.94099999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 614 rect from 107.9654999999999, 20.83 to 107.97599999999991, 20.17 fc rgb \"#00CC44\"",
+ "set object 615 rect from 130.5489999999999, 20.83 to 130.5954999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 616 rect from 130.6469999999999, 20.83 to 130.6614999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 617 rect from 130.68999999999988, 20.83 to 130.6994999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 618 rect from 130.7219999999999, 20.83 to 130.7324999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 619 rect from 130.7519999999999, 20.83 to 130.76949999999988, 20.17 fc rgb \"#00CC44\"",
+ "set object 620 rect from 130.8059999999999, 20.83 to 130.8154999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 621 rect from 130.84299999999988, 20.83 to 130.87549999999987, 20.17 fc rgb \"#00CC44\"",
+ "set object 622 rect from 130.95199999999988, 20.83 to 130.9644999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 623 rect from 130.99099999999987, 20.83 to 131.00249999999988, 20.17 fc rgb \"#00CC44\"",
+ "set object 624 rect from 140.86699999999988, 20.83 to 140.8814999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 625 rect from 140.9319999999999, 20.83 to 140.9574999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 626 rect from 141.0299999999999, 20.83 to 141.03849999999989, 20.17 fc rgb \"#00CC44\"",
+ "set object 627 rect from 55.79999999999998, 21.83 to 56.198999999999984, 21.17 fc rgb \"#0044CC\"",
+ "set object 628 rect from 62.16149999999996, 21.83 to 62.548999999999964, 21.17 fc rgb \"#0044CC\"",
+ "set object 629 rect from 65.56449999999995, 21.83 to 65.61699999999995, 21.17 fc rgb \"#0044CC\"",
+ "set object 630 rect from 68.70599999999996, 21.83 to 68.76649999999995, 21.17 fc rgb \"#0044CC\"",
+ "set object 631 rect from 72.22199999999995, 21.83 to 72.28049999999995, 21.17 fc rgb \"#0044CC\"",
+ "set object 632 rect from 75.41849999999994, 21.83 to 75.46799999999995, 21.17 fc rgb \"#0044CC\"",
+ "set object 633 rect from 78.16449999999993, 21.83 to 78.23649999999994, 21.17 fc rgb \"#0044CC\"",
+ "set object 634 rect from 80.90399999999994, 21.83 to 80.95049999999993, 21.17 fc rgb \"#0044CC\"",
+ "set object 635 rect from 83.58349999999993, 21.83 to 83.63999999999993, 21.17 fc rgb \"#0044CC\"",
+ "set object 636 rect from 88.75199999999992, 21.83 to 88.82299999999992, 21.17 fc rgb \"#0044CC\"",
+ "set object 637 rect from 91.90999999999991, 21.83 to 91.96649999999993, 21.17 fc rgb \"#0044CC\"",
+ "set object 638 rect from 94.55599999999993, 21.83 to 94.6054999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 639 rect from 97.20749999999991, 21.83 to 97.26099999999992, 21.17 fc rgb \"#0044CC\"",
+ "set object 640 rect from 99.86649999999992, 21.83 to 99.92199999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 641 rect from 102.56049999999992, 21.83 to 102.61199999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 642 rect from 105.88099999999991, 21.83 to 105.93349999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 643 rect from 109.2659999999999, 21.83 to 109.38599999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 644 rect from 109.4024999999999, 21.83 to 109.41799999999989, 21.17 fc rgb \"#0044CC\"",
+ "set object 645 rect from 112.6029999999999, 21.83 to 112.6564999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 646 rect from 115.36399999999989, 21.83 to 115.4124999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 647 rect from 118.1434999999999, 21.83 to 118.19199999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 648 rect from 120.9194999999999, 21.83 to 121.0104999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 649 rect from 121.0259999999999, 21.83 to 121.0314999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 650 rect from 123.77499999999989, 21.83 to 123.8254999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 651 rect from 126.55149999999989, 21.83 to 126.59899999999989, 21.17 fc rgb \"#0044CC\"",
+ "set object 652 rect from 129.3344999999999, 21.83 to 129.4124999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 653 rect from 129.4249999999999, 21.83 to 129.48849999999987, 21.17 fc rgb \"#0044CC\"",
+ "set object 654 rect from 132.8659999999999, 21.83 to 132.92249999999987, 21.17 fc rgb \"#0044CC\"",
+ "set object 655 rect from 136.14449999999988, 21.83 to 136.19799999999987, 21.17 fc rgb \"#0044CC\"",
+ "set object 656 rect from 138.9289999999999, 21.83 to 138.98049999999986, 21.17 fc rgb \"#0044CC\"",
+ "set object 657 rect from 2.4204999999999997, 22.83 to 3.7920000000000003, 22.17 fc rgb \"#4444CC\"",
+ "set object 658 rect from 3.8075, 22.83 to 3.8129999999999997, 22.17 fc rgb \"#4444CC\"",
+ "set object 659 rect from 6.2695, 22.83 to 7.373, 22.17 fc rgb \"#4444CC\"",
+ "set object 660 rect from 7.3865, 22.83 to 7.3919999999999995, 22.17 fc rgb \"#4444CC\"",
+ "set object 661 rect from 9.2915, 22.83 to 10.405000000000001, 22.17 fc rgb \"#4444CC\"",
+ "set object 662 rect from 10.4235, 22.83 to 10.43, 22.17 fc rgb \"#4444CC\"",
+ "set object 663 rect from 12.8765, 22.83 to 13.897, 22.17 fc rgb \"#4444CC\"",
+ "set object 664 rect from 13.910499999999999, 22.83 to 13.915999999999999, 22.17 fc rgb \"#4444CC\"",
+ "set object 665 rect from 18.803, 10.2 to 19.803, 9.8 fc rgb \"#000000\"",
+ "set object 666 rect from 19.8815, 10.2 to 20.8815, 9.8 fc rgb \"#000000\"",
+ "set object 667 rect from 20.910999999999998, 10.2 to 21.910999999999998, 9.8 fc rgb \"#000000\"",
+ "set object 668 rect from 28.357000000000006, 10.2 to 30.17350000000001, 9.8 fc rgb \"#000000\"",
+ "set object 669 rect from 30.235000000000014, 10.2 to 31.235000000000014, 9.8 fc rgb \"#000000\"",
+ "set object 670 rect from 31.28350000000001, 10.2 to 32.28350000000001, 9.8 fc rgb \"#000000\"",
+ "set object 671 rect from 32.353000000000016, 10.2 to 33.353000000000016, 9.8 fc rgb \"#000000\"",
+ "set object 672 rect from 33.42150000000001, 10.2 to 34.42150000000001, 9.8 fc rgb \"#000000\"",
+ "set object 673 rect from 34.47700000000001, 10.2 to 35.47700000000001, 9.8 fc rgb \"#000000\"",
+ "set object 674 rect from 35.531500000000015, 10.2 to 36.531500000000015, 9.8 fc rgb \"#000000\"",
+ "set object 675 rect from 36.59600000000001, 10.2 to 37.59600000000001, 9.8 fc rgb \"#000000\"",
+ "set object 676 rect from 37.65150000000001, 10.2 to 38.65150000000001, 9.8 fc rgb \"#000000\"",
+ "set object 677 rect from 38.70800000000001, 10.2 to 39.70800000000001, 9.8 fc rgb \"#000000\"",
+ "set object 678 rect from 39.76650000000001, 10.2 to 40.76650000000001, 9.8 fc rgb \"#000000\"",
+ "set object 679 rect from 40.82900000000001, 10.2 to 42.82200000000002, 9.8 fc rgb \"#000000\"",
+ "set object 680 rect from 43.79150000000001, 10.2 to 44.79150000000001, 9.8 fc rgb \"#000000\"",
+ "set object 681 rect from 45.50300000000001, 10.2 to 48.4035, 9.8 fc rgb \"#000000\"",
+ "set object 682 rect from 48.49, 10.2 to 53.228999999999985, 9.8 fc rgb \"#000000\"",
+ "set object 683 rect from 54.33049999999998, 10.2 to 55.33049999999998, 9.8 fc rgb \"#000000\"",
+ "set object 684 rect from 55.37549999999998, 10.2 to 56.37549999999998, 9.8 fc rgb \"#000000\"",
+ "set object 685 rect from 58.19399999999997, 10.2 to 59.19399999999997, 9.8 fc rgb \"#000000\"",
+ "set object 686 rect from 62.20049999999995, 10.2 to 63.20049999999995, 9.8 fc rgb \"#000000\"",
+ "set object 687 rect from 65.31249999999996, 10.2 to 66.31249999999996, 9.8 fc rgb \"#000000\"",
+ "set object 688 rect from 66.37099999999995, 10.2 to 67.37099999999995, 9.8 fc rgb \"#000000\"",
+ "set object 689 rect from 68.37699999999995, 10.2 to 70.37999999999994, 9.8 fc rgb \"#000000\"",
+ "set object 690 rect from 72.48699999999994, 10.2 to 73.48699999999994, 9.8 fc rgb \"#000000\"",
+ "set object 691 rect from 73.55549999999994, 10.2 to 74.55549999999994, 9.8 fc rgb \"#000000\"",
+ "set object 692 rect from 76.64899999999994, 10.2 to 77.64899999999994, 9.8 fc rgb \"#000000\"",
+ "set object 693 rect from 79.81349999999993, 10.2 to 80.81349999999993, 9.8 fc rgb \"#000000\"",
+ "set object 694 rect from 81.90449999999993, 10.2 to 82.90449999999993, 9.8 fc rgb \"#000000\"",
+ "set object 695 rect from 83.96999999999994, 10.2 to 85.90049999999992, 9.8 fc rgb \"#000000\"",
+ "set object 696 rect from 90.04699999999991, 10.2 to 91.04699999999991, 9.8 fc rgb \"#000000\"",
+ "set object 697 rect from 91.11749999999992, 10.2 to 92.11749999999992, 9.8 fc rgb \"#000000\"",
+ "set object 698 rect from 94.24599999999992, 10.2 to 95.24599999999992, 9.8 fc rgb \"#000000\"",
+ "set object 699 rect from 96.36399999999992, 10.2 to 97.36399999999992, 9.8 fc rgb \"#000000\"",
+ "set object 700 rect from 108.83549999999991, 10.2 to 109.83549999999991, 9.8 fc rgb \"#000000\"",
+ "set object 701 rect from 109.87949999999991, 10.2 to 110.87949999999991, 9.8 fc rgb \"#000000\"",
+ "set object 702 rect from 112.9859999999999, 10.2 to 113.9859999999999, 9.8 fc rgb \"#000000\"",
+ "set object 703 rect from 114.0484999999999, 10.2 to 115.0484999999999, 9.8 fc rgb \"#000000\"",
+ "set object 704 rect from 119.3669999999999, 10.2 to 120.3669999999999, 9.8 fc rgb \"#000000\"",
+ "set object 705 rect from 120.4439999999999, 10.2 to 121.4439999999999, 9.8 fc rgb \"#000000\"",
+ "set object 706 rect from 121.46899999999991, 10.2 to 122.46899999999991, 9.8 fc rgb \"#000000\"",
+ "set object 707 rect from 126.7234999999999, 10.2 to 127.7234999999999, 9.8 fc rgb \"#000000\"",
+ "set object 708 rect from 127.77299999999991, 10.2 to 128.7729999999999, 9.8 fc rgb \"#000000\"",
+ "set object 709 rect from 132.9344999999999, 10.2 to 133.9344999999999, 9.8 fc rgb \"#000000\"",
+ "set object 710 rect from 135.0459999999999, 10.2 to 136.0459999999999, 9.8 fc rgb \"#000000\"",
+ "set object 711 rect from 137.1609999999999, 10.2 to 138.1609999999999, 9.8 fc rgb \"#000000\"",
+ "set object 712 rect from 139.24899999999988, 10.2 to 140.24899999999988, 9.8 fc rgb \"#000000\"",
+ "set object 713 rect from 61.17299999999995, 10.2 to 62.17299999999995, 9.8 fc rgb \"#DD0000\"",
+ "set object 714 rect from 63.23999999999996, 10.2 to 64.23999999999995, 9.8 fc rgb \"#DD0000\"",
+ "set object 715 rect from 88.98349999999992, 10.2 to 89.98349999999992, 9.8 fc rgb \"#DD0000\"",
+ "set object 716 rect from 106.85949999999991, 10.2 to 107.85949999999991, 9.8 fc rgb \"#DD0000\"",
+ "set object 717 rect from 18.803, 9.399999999999999 to 19.803, 9.2 fc rgb \"#DD0000\"",
+ "set object 718 rect from 19.8815, 9.399999999999999 to 20.8815, 9.2 fc rgb \"#DD0000\"",
+ "set object 719 rect from 20.910999999999998, 9.399999999999999 to 21.910999999999998, 9.2 fc rgb \"#DD0000\"",
+ "set object 720 rect from 41.82200000000002, 9.399999999999999 to 42.82200000000002, 9.2 fc rgb \"#DD0000\"",
+ "set object 721 rect from 43.79150000000001, 9.399999999999999 to 44.79150000000001, 9.2 fc rgb \"#DD0000\"",
+ "set object 722 rect from 45.50300000000001, 9.399999999999999 to 48.4035, 9.2 fc rgb \"#DD0000\"",
+ "set object 723 rect from 48.49, 9.399999999999999 to 53.228999999999985, 9.2 fc rgb \"#DD0000\"",
+ "set object 724 rect from 54.33049999999998, 9.399999999999999 to 55.33049999999998, 9.2 fc rgb \"#DD0000\"",
+ "set object 725 rect from 57.33799999999997, 9.399999999999999 to 59.19399999999997, 9.2 fc rgb \"#DD0000\"",
+ "set object 726 rect from 62.20049999999995, 9.399999999999999 to 63.20049999999995, 9.2 fc rgb \"#DD0000\"",
+ "set object 727 rect from 64.28099999999995, 9.399999999999999 to 65.28099999999995, 9.2 fc rgb \"#DD0000\"",
+ "set object 728 rect from 67.35049999999995, 9.399999999999999 to 68.35049999999995, 9.2 fc rgb \"#DD0000\"",
+ "set object 729 rect from 71.44949999999994, 9.399999999999999 to 72.44949999999994, 9.2 fc rgb \"#DD0000\"",
+ "set object 730 rect from 75.59449999999994, 9.399999999999999 to 76.59449999999994, 9.2 fc rgb \"#DD0000\"",
+ "set object 731 rect from 77.68599999999995, 9.399999999999999 to 78.68599999999995, 9.2 fc rgb \"#DD0000\"",
+ "set object 732 rect from 78.74099999999993, 9.399999999999999 to 79.74099999999993, 9.2 fc rgb \"#DD0000\"",
+ "set object 733 rect from 82.96499999999995, 9.399999999999999 to 84.96999999999994, 9.2 fc rgb \"#DD0000\"",
+ "set object 734 rect from 88.98349999999992, 9.399999999999999 to 89.98349999999992, 9.2 fc rgb \"#DD0000\"",
+ "set object 735 rect from 90.04699999999991, 9.399999999999999 to 91.04699999999991, 9.2 fc rgb \"#DD0000\"",
+ "set object 736 rect from 91.11749999999992, 9.399999999999999 to 92.11749999999992, 9.2 fc rgb \"#DD0000\"",
+ "set object 737 rect from 94.24599999999992, 9.399999999999999 to 95.24599999999992, 9.2 fc rgb \"#DD0000\"",
+ "set object 738 rect from 98.46299999999991, 9.399999999999999 to 99.46299999999991, 9.2 fc rgb \"#DD0000\"",
+ "set object 739 rect from 106.85949999999991, 9.399999999999999 to 108.7474999999999, 9.2 fc rgb \"#DD0000\"",
+ "set object 740 rect from 109.87949999999991, 9.399999999999999 to 110.87949999999991, 9.2 fc rgb \"#DD0000\"",
+ "set object 741 rect from 115.0789999999999, 9.399999999999999 to 116.0789999999999, 9.2 fc rgb \"#DD0000\"",
+ "set object 742 rect from 119.3669999999999, 9.399999999999999 to 120.3669999999999, 9.2 fc rgb \"#DD0000\"",
+ "set object 743 rect from 123.56399999999988, 9.399999999999999 to 124.56399999999988, 9.2 fc rgb \"#DD0000\"",
+ "set object 744 rect from 127.77299999999991, 9.399999999999999 to 128.7729999999999, 9.2 fc rgb \"#DD0000\"",
+ "set object 745 rect from 128.8479999999999, 9.399999999999999 to 129.8479999999999, 9.2 fc rgb \"#DD0000\"",
+ "set object 746 rect from 131.90949999999987, 9.399999999999999 to 132.90949999999987, 9.2 fc rgb \"#DD0000\"",
+ "set object 747 rect from 132.9344999999999, 9.399999999999999 to 133.9344999999999, 9.2 fc rgb \"#DD0000\"",
+ "set object 748 rect from 138.2054999999999, 9.399999999999999 to 139.2054999999999, 9.2 fc rgb \"#DD0000\"",
+ "set object 749 rect from 18.803, 9.149999999999999 to 19.803, 8.95 fc rgb \"#DD0000\"",
+ "set object 750 rect from 19.8815, 9.149999999999999 to 20.8815, 8.95 fc rgb \"#DD0000\"",
+ "set object 751 rect from 20.910999999999998, 9.149999999999999 to 21.910999999999998, 8.95 fc rgb \"#DD0000\"",
+ "set object 752 rect from 45.50300000000001, 9.149999999999999 to 48.4035, 8.95 fc rgb \"#DD0000\"",
+ "set object 753 rect from 48.49, 9.149999999999999 to 53.228999999999985, 8.95 fc rgb \"#DD0000\"",
+ "set object 754 rect from 54.33049999999998, 9.149999999999999 to 55.33049999999998, 8.95 fc rgb \"#DD0000\"",
+ "set object 755 rect from 57.33799999999997, 9.149999999999999 to 59.19399999999997, 8.95 fc rgb \"#DD0000\"",
+ "set object 756 rect from 61.17299999999995, 9.149999999999999 to 62.17299999999995, 8.95 fc rgb \"#DD0000\"",
+ "set object 757 rect from 62.20049999999995, 9.149999999999999 to 63.20049999999995, 8.95 fc rgb \"#DD0000\"",
+ "set object 758 rect from 63.23999999999996, 9.149999999999999 to 64.23999999999995, 8.95 fc rgb \"#DD0000\"",
+ "set object 759 rect from 64.28099999999995, 9.149999999999999 to 65.28099999999995, 8.95 fc rgb \"#DD0000\"",
+ "set object 760 rect from 65.31249999999996, 9.149999999999999 to 66.31249999999996, 8.95 fc rgb \"#DD0000\"",
+ "set object 761 rect from 66.37099999999995, 9.149999999999999 to 68.35049999999995, 8.95 fc rgb \"#DD0000\"",
+ "set object 762 rect from 68.37699999999995, 9.149999999999999 to 70.37999999999994, 8.95 fc rgb \"#DD0000\"",
+ "set object 763 rect from 70.39199999999994, 9.149999999999999 to 71.39199999999994, 8.95 fc rgb \"#DD0000\"",
+ "set object 764 rect from 71.44949999999994, 9.149999999999999 to 72.44949999999994, 8.95 fc rgb \"#DD0000\"",
+ "set object 765 rect from 72.48699999999994, 9.149999999999999 to 73.48699999999994, 8.95 fc rgb \"#DD0000\"",
+ "set object 766 rect from 73.55549999999994, 9.149999999999999 to 75.56249999999994, 8.95 fc rgb \"#DD0000\"",
+ "set object 767 rect from 75.59449999999994, 9.149999999999999 to 76.59449999999994, 8.95 fc rgb \"#DD0000\"",
+ "set object 768 rect from 76.64899999999994, 9.149999999999999 to 77.64899999999994, 8.95 fc rgb \"#DD0000\"",
+ "set object 769 rect from 77.68599999999995, 9.149999999999999 to 78.68599999999995, 8.95 fc rgb \"#DD0000\"",
+ "set object 770 rect from 78.74099999999993, 9.149999999999999 to 79.74099999999993, 8.95 fc rgb \"#DD0000\"",
+ "set object 771 rect from 79.81349999999993, 9.149999999999999 to 80.81349999999993, 8.95 fc rgb \"#DD0000\"",
+ "set object 772 rect from 80.83699999999993, 9.149999999999999 to 81.83699999999993, 8.95 fc rgb \"#DD0000\"",
+ "set object 773 rect from 81.90449999999993, 9.149999999999999 to 82.90449999999993, 8.95 fc rgb \"#DD0000\"",
+ "set object 774 rect from 82.96499999999995, 9.149999999999999 to 84.96999999999994, 8.95 fc rgb \"#DD0000\"",
+ "set object 775 rect from 85.90099999999993, 9.149999999999999 to 86.90099999999993, 8.95 fc rgb \"#DD0000\"",
+ "set object 776 rect from 87.97249999999993, 9.149999999999999 to 88.97249999999993, 8.95 fc rgb \"#DD0000\"",
+ "set object 777 rect from 88.98349999999992, 9.149999999999999 to 89.98349999999992, 8.95 fc rgb \"#DD0000\"",
+ "set object 778 rect from 90.04699999999991, 9.149999999999999 to 91.04699999999991, 8.95 fc rgb \"#DD0000\"",
+ "set object 779 rect from 91.11749999999992, 9.149999999999999 to 92.11749999999992, 8.95 fc rgb \"#DD0000\"",
+ "set object 780 rect from 92.14499999999992, 9.149999999999999 to 93.14499999999992, 8.95 fc rgb \"#DD0000\"",
+ "set object 781 rect from 93.21149999999992, 9.149999999999999 to 94.21149999999992, 8.95 fc rgb \"#DD0000\"",
+ "set object 782 rect from 96.36399999999992, 9.149999999999999 to 97.36399999999992, 8.95 fc rgb \"#DD0000\"",
+ "set object 783 rect from 98.46299999999991, 9.149999999999999 to 99.46299999999991, 8.95 fc rgb \"#DD0000\"",
+ "set object 784 rect from 101.62199999999993, 9.149999999999999 to 102.62199999999993, 8.95 fc rgb \"#DD0000\"",
+ "set object 785 rect from 102.63099999999991, 9.149999999999999 to 103.63099999999991, 8.95 fc rgb \"#DD0000\"",
+ "set object 786 rect from 104.73949999999991, 9.149999999999999 to 105.73949999999991, 8.95 fc rgb \"#DD0000\"",
+ "set object 787 rect from 106.85949999999991, 9.149999999999999 to 108.7474999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 788 rect from 109.87949999999991, 9.149999999999999 to 110.87949999999991, 8.95 fc rgb \"#DD0000\"",
+ "set object 789 rect from 110.93299999999991, 9.149999999999999 to 111.93299999999991, 8.95 fc rgb \"#DD0000\"",
+ "set object 790 rect from 112.00149999999991, 9.149999999999999 to 113.9859999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 791 rect from 115.0789999999999, 9.149999999999999 to 116.0789999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 792 rect from 117.26799999999992, 9.149999999999999 to 118.26799999999992, 8.95 fc rgb \"#DD0000\"",
+ "set object 793 rect from 120.4439999999999, 9.149999999999999 to 121.4439999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 794 rect from 121.46899999999991, 9.149999999999999 to 122.46899999999991, 8.95 fc rgb \"#DD0000\"",
+ "set object 795 rect from 122.52449999999989, 9.149999999999999 to 123.52449999999989, 8.95 fc rgb \"#DD0000\"",
+ "set object 796 rect from 123.56399999999988, 9.149999999999999 to 124.56399999999988, 8.95 fc rgb \"#DD0000\"",
+ "set object 797 rect from 126.7234999999999, 9.149999999999999 to 127.7234999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 798 rect from 127.77299999999991, 9.149999999999999 to 128.7729999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 799 rect from 128.8479999999999, 9.149999999999999 to 129.8479999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 800 rect from 131.90949999999987, 9.149999999999999 to 132.90949999999987, 8.95 fc rgb \"#DD0000\"",
+ "set object 801 rect from 132.9344999999999, 9.149999999999999 to 133.9344999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 802 rect from 133.9804999999999, 9.149999999999999 to 134.9804999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 803 rect from 137.1609999999999, 9.149999999999999 to 138.1609999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 804 rect from 138.2054999999999, 9.149999999999999 to 139.2054999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 805 rect from 140.3004999999999, 9.149999999999999 to 141.3004999999999, 8.95 fc rgb \"#DD0000\"",
+ "set object 806 rect from 18.803, 8.899999999999999 to 19.803, 8.7 fc rgb \"#DD0000\"",
+ "set object 807 rect from 19.8815, 8.899999999999999 to 20.8815, 8.7 fc rgb \"#DD0000\"",
+ "set object 808 rect from 20.910999999999998, 8.899999999999999 to 21.910999999999998, 8.7 fc rgb \"#DD0000\"",
+ "set object 809 rect from 45.50300000000001, 8.899999999999999 to 48.4035, 8.7 fc rgb \"#DD0000\"",
+ "set object 810 rect from 48.49, 8.899999999999999 to 53.228999999999985, 8.7 fc rgb \"#DD0000\"",
+ "set object 811 rect from 54.33049999999998, 8.899999999999999 to 55.33049999999998, 8.7 fc rgb \"#DD0000\"",
+ "set object 812 rect from 57.33799999999997, 8.899999999999999 to 59.19399999999997, 8.7 fc rgb \"#DD0000\"",
+ "set object 813 rect from 61.17299999999995, 8.899999999999999 to 62.17299999999995, 8.7 fc rgb \"#DD0000\"",
+ "set object 814 rect from 62.20049999999995, 8.899999999999999 to 63.20049999999995, 8.7 fc rgb \"#DD0000\"",
+ "set object 815 rect from 63.23999999999996, 8.899999999999999 to 64.23999999999995, 8.7 fc rgb \"#DD0000\"",
+ "set object 816 rect from 64.28099999999995, 8.899999999999999 to 65.28099999999995, 8.7 fc rgb \"#DD0000\"",
+ "set object 817 rect from 65.31249999999996, 8.899999999999999 to 66.31249999999996, 8.7 fc rgb \"#DD0000\"",
+ "set object 818 rect from 66.37099999999995, 8.899999999999999 to 68.35049999999995, 8.7 fc rgb \"#DD0000\"",
+ "set object 819 rect from 68.37699999999995, 8.899999999999999 to 70.37999999999994, 8.7 fc rgb \"#DD0000\"",
+ "set object 820 rect from 70.39199999999994, 8.899999999999999 to 71.39199999999994, 8.7 fc rgb \"#DD0000\"",
+ "set object 821 rect from 71.44949999999994, 8.899999999999999 to 72.44949999999994, 8.7 fc rgb \"#DD0000\"",
+ "set object 822 rect from 72.48699999999994, 8.899999999999999 to 73.48699999999994, 8.7 fc rgb \"#DD0000\"",
+ "set object 823 rect from 73.55549999999994, 8.899999999999999 to 75.56249999999994, 8.7 fc rgb \"#DD0000\"",
+ "set object 824 rect from 75.59449999999994, 8.899999999999999 to 76.59449999999994, 8.7 fc rgb \"#DD0000\"",
+ "set object 825 rect from 76.64899999999994, 8.899999999999999 to 77.64899999999994, 8.7 fc rgb \"#DD0000\"",
+ "set object 826 rect from 77.68599999999995, 8.899999999999999 to 78.68599999999995, 8.7 fc rgb \"#DD0000\"",
+ "set object 827 rect from 78.74099999999993, 8.899999999999999 to 79.74099999999993, 8.7 fc rgb \"#DD0000\"",
+ "set object 828 rect from 79.81349999999993, 8.899999999999999 to 80.81349999999993, 8.7 fc rgb \"#DD0000\"",
+ "set object 829 rect from 80.83699999999993, 8.899999999999999 to 81.83699999999993, 8.7 fc rgb \"#DD0000\"",
+ "set object 830 rect from 81.90449999999993, 8.899999999999999 to 82.90449999999993, 8.7 fc rgb \"#DD0000\"",
+ "set object 831 rect from 82.96499999999995, 8.899999999999999 to 86.90099999999993, 8.7 fc rgb \"#DD0000\"",
+ "set object 832 rect from 87.97249999999993, 8.899999999999999 to 88.97249999999993, 8.7 fc rgb \"#DD0000\"",
+ "set object 833 rect from 88.98349999999992, 8.899999999999999 to 89.98349999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 834 rect from 90.04699999999991, 8.899999999999999 to 91.04699999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 835 rect from 91.11749999999992, 8.899999999999999 to 92.11749999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 836 rect from 92.14499999999992, 8.899999999999999 to 93.14499999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 837 rect from 93.21149999999992, 8.899999999999999 to 94.21149999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 838 rect from 94.24599999999992, 8.899999999999999 to 95.24599999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 839 rect from 95.31249999999991, 8.899999999999999 to 96.31249999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 840 rect from 96.36399999999992, 8.899999999999999 to 97.36399999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 841 rect from 97.39349999999992, 8.899999999999999 to 98.39349999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 842 rect from 98.46299999999991, 8.899999999999999 to 99.46299999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 843 rect from 99.48249999999992, 8.899999999999999 to 100.48249999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 844 rect from 100.56199999999993, 8.899999999999999 to 101.56199999999993, 8.7 fc rgb \"#DD0000\"",
+ "set object 845 rect from 101.62199999999993, 8.899999999999999 to 102.62199999999993, 8.7 fc rgb \"#DD0000\"",
+ "set object 846 rect from 102.63099999999991, 8.899999999999999 to 103.63099999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 847 rect from 103.67849999999991, 8.899999999999999 to 104.67849999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 848 rect from 104.73949999999991, 8.899999999999999 to 105.73949999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 849 rect from 105.77499999999992, 8.899999999999999 to 106.77499999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 850 rect from 106.85949999999991, 8.899999999999999 to 108.7474999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 851 rect from 109.87949999999991, 8.899999999999999 to 110.87949999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 852 rect from 110.93299999999991, 8.899999999999999 to 111.93299999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 853 rect from 112.00149999999991, 8.899999999999999 to 113.9859999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 854 rect from 114.0484999999999, 8.899999999999999 to 115.0484999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 855 rect from 115.0789999999999, 8.899999999999999 to 116.0789999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 856 rect from 116.14449999999991, 8.899999999999999 to 117.14449999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 857 rect from 117.26799999999992, 8.899999999999999 to 118.26799999999992, 8.7 fc rgb \"#DD0000\"",
+ "set object 858 rect from 118.3064999999999, 8.899999999999999 to 119.3064999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 859 rect from 119.3669999999999, 8.899999999999999 to 120.3669999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 860 rect from 120.4439999999999, 8.899999999999999 to 121.4439999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 861 rect from 121.46899999999991, 8.899999999999999 to 122.46899999999991, 8.7 fc rgb \"#DD0000\"",
+ "set object 862 rect from 122.52449999999989, 8.899999999999999 to 123.52449999999989, 8.7 fc rgb \"#DD0000\"",
+ "set object 863 rect from 123.56399999999988, 8.899999999999999 to 124.56399999999988, 8.7 fc rgb \"#DD0000\"",
+ "set object 864 rect from 124.6324999999999, 8.899999999999999 to 125.6324999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 865 rect from 125.6929999999999, 8.899999999999999 to 126.6929999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 866 rect from 126.7234999999999, 8.899999999999999 to 127.7234999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 867 rect from 127.77299999999991, 8.899999999999999 to 128.7729999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 868 rect from 128.8479999999999, 8.899999999999999 to 129.8479999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 869 rect from 129.8879999999999, 8.899999999999999 to 131.8519999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 870 rect from 131.90949999999987, 8.899999999999999 to 132.90949999999987, 8.7 fc rgb \"#DD0000\"",
+ "set object 871 rect from 132.9344999999999, 8.899999999999999 to 133.9344999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 872 rect from 133.9804999999999, 8.899999999999999 to 134.9804999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 873 rect from 135.0459999999999, 8.899999999999999 to 136.0459999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 874 rect from 136.08249999999987, 8.899999999999999 to 137.08249999999987, 8.7 fc rgb \"#DD0000\"",
+ "set object 875 rect from 137.1609999999999, 8.899999999999999 to 138.1609999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 876 rect from 138.2054999999999, 8.899999999999999 to 139.2054999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 877 rect from 139.24899999999988, 8.899999999999999 to 140.24899999999988, 8.7 fc rgb \"#DD0000\"",
+ "set object 878 rect from 140.3004999999999, 8.899999999999999 to 141.3004999999999, 8.7 fc rgb \"#DD0000\"",
+ "set object 879 rect from 18.803, 8.649999999999999 to 19.803, 8.45 fc rgb \"#DD0000\"",
+ "set object 880 rect from 19.8815, 8.649999999999999 to 20.8815, 8.45 fc rgb \"#DD0000\"",
+ "set object 881 rect from 20.910999999999998, 8.649999999999999 to 21.910999999999998, 8.45 fc rgb \"#DD0000\"",
+ "set object 882 rect from 45.50300000000001, 8.649999999999999 to 48.4035, 8.45 fc rgb \"#DD0000\"",
+ "set object 883 rect from 48.49, 8.649999999999999 to 53.228999999999985, 8.45 fc rgb \"#DD0000\"",
+ "set object 884 rect from 54.33049999999998, 8.649999999999999 to 55.33049999999998, 8.45 fc rgb \"#DD0000\"",
+ "set object 885 rect from 57.33799999999997, 8.649999999999999 to 59.19399999999997, 8.45 fc rgb \"#DD0000\"",
+ "set object 886 rect from 61.17299999999995, 8.649999999999999 to 62.17299999999995, 8.45 fc rgb \"#DD0000\"",
+ "set object 887 rect from 62.20049999999995, 8.649999999999999 to 63.20049999999995, 8.45 fc rgb \"#DD0000\"",
+ "set object 888 rect from 63.23999999999996, 8.649999999999999 to 64.23999999999995, 8.45 fc rgb \"#DD0000\"",
+ "set object 889 rect from 64.28099999999995, 8.649999999999999 to 65.28099999999995, 8.45 fc rgb \"#DD0000\"",
+ "set object 890 rect from 65.31249999999996, 8.649999999999999 to 66.31249999999996, 8.45 fc rgb \"#DD0000\"",
+ "set object 891 rect from 66.37099999999995, 8.649999999999999 to 68.35049999999995, 8.45 fc rgb \"#DD0000\"",
+ "set object 892 rect from 68.37699999999995, 8.649999999999999 to 70.37999999999994, 8.45 fc rgb \"#DD0000\"",
+ "set object 893 rect from 70.39199999999994, 8.649999999999999 to 71.39199999999994, 8.45 fc rgb \"#DD0000\"",
+ "set object 894 rect from 71.44949999999994, 8.649999999999999 to 72.44949999999994, 8.45 fc rgb \"#DD0000\"",
+ "set object 895 rect from 72.48699999999994, 8.649999999999999 to 73.48699999999994, 8.45 fc rgb \"#DD0000\"",
+ "set object 896 rect from 73.55549999999994, 8.649999999999999 to 75.56249999999994, 8.45 fc rgb \"#DD0000\"",
+ "set object 897 rect from 75.59449999999994, 8.649999999999999 to 76.59449999999994, 8.45 fc rgb \"#DD0000\"",
+ "set object 898 rect from 76.64899999999994, 8.649999999999999 to 77.64899999999994, 8.45 fc rgb \"#DD0000\"",
+ "set object 899 rect from 77.68599999999995, 8.649999999999999 to 78.68599999999995, 8.45 fc rgb \"#DD0000\"",
+ "set object 900 rect from 78.74099999999993, 8.649999999999999 to 79.74099999999993, 8.45 fc rgb \"#DD0000\"",
+ "set object 901 rect from 79.81349999999993, 8.649999999999999 to 80.81349999999993, 8.45 fc rgb \"#DD0000\"",
+ "set object 902 rect from 80.83699999999993, 8.649999999999999 to 81.83699999999993, 8.45 fc rgb \"#DD0000\"",
+ "set object 903 rect from 81.90449999999993, 8.649999999999999 to 82.90449999999993, 8.45 fc rgb \"#DD0000\"",
+ "set object 904 rect from 82.96499999999995, 8.649999999999999 to 86.90099999999993, 8.45 fc rgb \"#DD0000\"",
+ "set object 905 rect from 87.97249999999993, 8.649999999999999 to 88.97249999999993, 8.45 fc rgb \"#DD0000\"",
+ "set object 906 rect from 88.98349999999992, 8.649999999999999 to 89.98349999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 907 rect from 90.04699999999991, 8.649999999999999 to 91.04699999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 908 rect from 91.11749999999992, 8.649999999999999 to 92.11749999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 909 rect from 92.14499999999992, 8.649999999999999 to 93.14499999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 910 rect from 93.21149999999992, 8.649999999999999 to 94.21149999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 911 rect from 94.24599999999992, 8.649999999999999 to 95.24599999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 912 rect from 95.31249999999991, 8.649999999999999 to 96.31249999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 913 rect from 96.36399999999992, 8.649999999999999 to 97.36399999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 914 rect from 97.39349999999992, 8.649999999999999 to 98.39349999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 915 rect from 98.46299999999991, 8.649999999999999 to 99.46299999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 916 rect from 99.48249999999992, 8.649999999999999 to 100.48249999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 917 rect from 100.56199999999993, 8.649999999999999 to 101.56199999999993, 8.45 fc rgb \"#DD0000\"",
+ "set object 918 rect from 101.62199999999993, 8.649999999999999 to 102.62199999999993, 8.45 fc rgb \"#DD0000\"",
+ "set object 919 rect from 102.63099999999991, 8.649999999999999 to 103.63099999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 920 rect from 103.67849999999991, 8.649999999999999 to 104.67849999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 921 rect from 104.73949999999991, 8.649999999999999 to 105.73949999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 922 rect from 105.77499999999992, 8.649999999999999 to 106.77499999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 923 rect from 106.85949999999991, 8.649999999999999 to 108.7474999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 924 rect from 109.87949999999991, 8.649999999999999 to 110.87949999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 925 rect from 110.93299999999991, 8.649999999999999 to 111.93299999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 926 rect from 112.00149999999991, 8.649999999999999 to 113.9859999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 927 rect from 114.0484999999999, 8.649999999999999 to 115.0484999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 928 rect from 115.0789999999999, 8.649999999999999 to 116.0789999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 929 rect from 116.14449999999991, 8.649999999999999 to 117.14449999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 930 rect from 117.26799999999992, 8.649999999999999 to 118.26799999999992, 8.45 fc rgb \"#DD0000\"",
+ "set object 931 rect from 118.3064999999999, 8.649999999999999 to 119.3064999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 932 rect from 119.3669999999999, 8.649999999999999 to 120.3669999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 933 rect from 120.4439999999999, 8.649999999999999 to 121.4439999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 934 rect from 121.46899999999991, 8.649999999999999 to 122.46899999999991, 8.45 fc rgb \"#DD0000\"",
+ "set object 935 rect from 122.52449999999989, 8.649999999999999 to 123.52449999999989, 8.45 fc rgb \"#DD0000\"",
+ "set object 936 rect from 123.56399999999988, 8.649999999999999 to 124.56399999999988, 8.45 fc rgb \"#DD0000\"",
+ "set object 937 rect from 124.6324999999999, 8.649999999999999 to 125.6324999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 938 rect from 125.6929999999999, 8.649999999999999 to 126.6929999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 939 rect from 126.7234999999999, 8.649999999999999 to 127.7234999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 940 rect from 127.77299999999991, 8.649999999999999 to 128.7729999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 941 rect from 128.8479999999999, 8.649999999999999 to 129.8479999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 942 rect from 129.8879999999999, 8.649999999999999 to 131.8519999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 943 rect from 131.90949999999987, 8.649999999999999 to 132.90949999999987, 8.45 fc rgb \"#DD0000\"",
+ "set object 944 rect from 132.9344999999999, 8.649999999999999 to 133.9344999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 945 rect from 133.9804999999999, 8.649999999999999 to 134.9804999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 946 rect from 135.0459999999999, 8.649999999999999 to 136.0459999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 947 rect from 136.08249999999987, 8.649999999999999 to 137.08249999999987, 8.45 fc rgb \"#DD0000\"",
+ "set object 948 rect from 137.1609999999999, 8.649999999999999 to 138.1609999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 949 rect from 138.2054999999999, 8.649999999999999 to 139.2054999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 950 rect from 139.24899999999988, 8.649999999999999 to 140.24899999999988, 8.45 fc rgb \"#DD0000\"",
+ "set object 951 rect from 140.3004999999999, 8.649999999999999 to 141.3004999999999, 8.45 fc rgb \"#DD0000\"",
+ "set object 952 rect from 18.803, 8.399999999999999 to 19.803, 8.2 fc rgb \"#DD0000\"",
+ "set object 953 rect from 19.8815, 8.399999999999999 to 20.8815, 8.2 fc rgb \"#DD0000\"",
+ "set object 954 rect from 20.910999999999998, 8.399999999999999 to 21.910999999999998, 8.2 fc rgb \"#DD0000\"",
+ "set object 955 rect from 45.50300000000001, 8.399999999999999 to 48.4035, 8.2 fc rgb \"#DD0000\"",
+ "set object 956 rect from 48.49, 8.399999999999999 to 53.228999999999985, 8.2 fc rgb \"#DD0000\"",
+ "set object 957 rect from 54.33049999999998, 8.399999999999999 to 55.33049999999998, 8.2 fc rgb \"#DD0000\"",
+ "set object 958 rect from 57.33799999999997, 8.399999999999999 to 59.19399999999997, 8.2 fc rgb \"#DD0000\"",
+ "set object 959 rect from 61.17299999999995, 8.399999999999999 to 62.17299999999995, 8.2 fc rgb \"#DD0000\"",
+ "set object 960 rect from 62.20049999999995, 8.399999999999999 to 63.20049999999995, 8.2 fc rgb \"#DD0000\"",
+ "set object 961 rect from 63.23999999999996, 8.399999999999999 to 64.23999999999995, 8.2 fc rgb \"#DD0000\"",
+ "set object 962 rect from 64.28099999999995, 8.399999999999999 to 65.28099999999995, 8.2 fc rgb \"#DD0000\"",
+ "set object 963 rect from 65.31249999999996, 8.399999999999999 to 66.31249999999996, 8.2 fc rgb \"#DD0000\"",
+ "set object 964 rect from 66.37099999999995, 8.399999999999999 to 68.35049999999995, 8.2 fc rgb \"#DD0000\"",
+ "set object 965 rect from 68.37699999999995, 8.399999999999999 to 70.37999999999994, 8.2 fc rgb \"#DD0000\"",
+ "set object 966 rect from 70.39199999999994, 8.399999999999999 to 71.39199999999994, 8.2 fc rgb \"#DD0000\"",
+ "set object 967 rect from 71.44949999999994, 8.399999999999999 to 72.44949999999994, 8.2 fc rgb \"#DD0000\"",
+ "set object 968 rect from 72.48699999999994, 8.399999999999999 to 73.48699999999994, 8.2 fc rgb \"#DD0000\"",
+ "set object 969 rect from 73.55549999999994, 8.399999999999999 to 75.56249999999994, 8.2 fc rgb \"#DD0000\"",
+ "set object 970 rect from 75.59449999999994, 8.399999999999999 to 76.59449999999994, 8.2 fc rgb \"#DD0000\"",
+ "set object 971 rect from 76.64899999999994, 8.399999999999999 to 77.64899999999994, 8.2 fc rgb \"#DD0000\"",
+ "set object 972 rect from 77.68599999999995, 8.399999999999999 to 78.68599999999995, 8.2 fc rgb \"#DD0000\"",
+ "set object 973 rect from 78.74099999999993, 8.399999999999999 to 79.74099999999993, 8.2 fc rgb \"#DD0000\"",
+ "set object 974 rect from 79.81349999999993, 8.399999999999999 to 80.81349999999993, 8.2 fc rgb \"#DD0000\"",
+ "set object 975 rect from 80.83699999999993, 8.399999999999999 to 81.83699999999993, 8.2 fc rgb \"#DD0000\"",
+ "set object 976 rect from 81.90449999999993, 8.399999999999999 to 82.90449999999993, 8.2 fc rgb \"#DD0000\"",
+ "set object 977 rect from 82.96499999999995, 8.399999999999999 to 86.90099999999993, 8.2 fc rgb \"#DD0000\"",
+ "set object 978 rect from 87.97249999999993, 8.399999999999999 to 88.97249999999993, 8.2 fc rgb \"#DD0000\"",
+ "set object 979 rect from 88.98349999999992, 8.399999999999999 to 89.98349999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 980 rect from 90.04699999999991, 8.399999999999999 to 91.04699999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 981 rect from 91.11749999999992, 8.399999999999999 to 92.11749999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 982 rect from 92.14499999999992, 8.399999999999999 to 93.14499999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 983 rect from 93.21149999999992, 8.399999999999999 to 94.21149999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 984 rect from 94.24599999999992, 8.399999999999999 to 95.24599999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 985 rect from 95.31249999999991, 8.399999999999999 to 96.31249999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 986 rect from 96.36399999999992, 8.399999999999999 to 97.36399999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 987 rect from 97.39349999999992, 8.399999999999999 to 98.39349999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 988 rect from 98.46299999999991, 8.399999999999999 to 99.46299999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 989 rect from 99.48249999999992, 8.399999999999999 to 100.48249999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 990 rect from 100.56199999999993, 8.399999999999999 to 101.56199999999993, 8.2 fc rgb \"#DD0000\"",
+ "set object 991 rect from 101.62199999999993, 8.399999999999999 to 102.62199999999993, 8.2 fc rgb \"#DD0000\"",
+ "set object 992 rect from 102.63099999999991, 8.399999999999999 to 103.63099999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 993 rect from 103.67849999999991, 8.399999999999999 to 104.67849999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 994 rect from 104.73949999999991, 8.399999999999999 to 105.73949999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 995 rect from 105.77499999999992, 8.399999999999999 to 106.77499999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 996 rect from 106.85949999999991, 8.399999999999999 to 108.7474999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 997 rect from 109.87949999999991, 8.399999999999999 to 110.87949999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 998 rect from 110.93299999999991, 8.399999999999999 to 111.93299999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 999 rect from 112.00149999999991, 8.399999999999999 to 113.9859999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1000 rect from 114.0484999999999, 8.399999999999999 to 115.0484999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1001 rect from 115.0789999999999, 8.399999999999999 to 116.0789999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1002 rect from 116.14449999999991, 8.399999999999999 to 117.14449999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 1003 rect from 117.26799999999992, 8.399999999999999 to 118.26799999999992, 8.2 fc rgb \"#DD0000\"",
+ "set object 1004 rect from 118.3064999999999, 8.399999999999999 to 119.3064999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1005 rect from 119.3669999999999, 8.399999999999999 to 120.3669999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1006 rect from 120.4439999999999, 8.399999999999999 to 121.4439999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1007 rect from 121.46899999999991, 8.399999999999999 to 122.46899999999991, 8.2 fc rgb \"#DD0000\"",
+ "set object 1008 rect from 122.52449999999989, 8.399999999999999 to 123.52449999999989, 8.2 fc rgb \"#DD0000\"",
+ "set object 1009 rect from 123.56399999999988, 8.399999999999999 to 124.56399999999988, 8.2 fc rgb \"#DD0000\"",
+ "set object 1010 rect from 124.6324999999999, 8.399999999999999 to 125.6324999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1011 rect from 125.6929999999999, 8.399999999999999 to 126.6929999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1012 rect from 126.7234999999999, 8.399999999999999 to 127.7234999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1013 rect from 127.77299999999991, 8.399999999999999 to 128.7729999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1014 rect from 128.8479999999999, 8.399999999999999 to 129.8479999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1015 rect from 129.8879999999999, 8.399999999999999 to 131.8519999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1016 rect from 131.90949999999987, 8.399999999999999 to 132.90949999999987, 8.2 fc rgb \"#DD0000\"",
+ "set object 1017 rect from 132.9344999999999, 8.399999999999999 to 133.9344999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1018 rect from 133.9804999999999, 8.399999999999999 to 134.9804999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1019 rect from 135.0459999999999, 8.399999999999999 to 136.0459999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1020 rect from 136.08249999999987, 8.399999999999999 to 137.08249999999987, 8.2 fc rgb \"#DD0000\"",
+ "set object 1021 rect from 137.1609999999999, 8.399999999999999 to 138.1609999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1022 rect from 138.2054999999999, 8.399999999999999 to 139.2054999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1023 rect from 139.24899999999988, 8.399999999999999 to 140.24899999999988, 8.2 fc rgb \"#DD0000\"",
+ "set object 1024 rect from 140.3004999999999, 8.399999999999999 to 141.3004999999999, 8.2 fc rgb \"#DD0000\"",
+ "set object 1025 rect from 19.8815, 8.149999999999999 to 20.8815, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1026 rect from 20.910999999999998, 8.149999999999999 to 21.910999999999998, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1027 rect from 45.50300000000001, 8.149999999999999 to 48.4035, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1028 rect from 48.49, 8.149999999999999 to 53.228999999999985, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1029 rect from 54.33049999999998, 8.149999999999999 to 55.33049999999998, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1030 rect from 57.33799999999997, 8.149999999999999 to 59.19399999999997, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1031 rect from 61.17299999999995, 8.149999999999999 to 62.17299999999995, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1032 rect from 62.20049999999995, 8.149999999999999 to 63.20049999999995, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1033 rect from 63.23999999999996, 8.149999999999999 to 64.23999999999995, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1034 rect from 64.28099999999995, 8.149999999999999 to 65.28099999999995, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1035 rect from 65.31249999999996, 8.149999999999999 to 66.31249999999996, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1036 rect from 66.37099999999995, 8.149999999999999 to 68.35049999999995, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1037 rect from 68.37699999999995, 8.149999999999999 to 70.37999999999994, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1038 rect from 70.39199999999994, 8.149999999999999 to 71.39199999999994, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1039 rect from 71.44949999999994, 8.149999999999999 to 72.44949999999994, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1040 rect from 72.48699999999994, 8.149999999999999 to 73.48699999999994, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1041 rect from 73.55549999999994, 8.149999999999999 to 75.56249999999994, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1042 rect from 75.59449999999994, 8.149999999999999 to 76.59449999999994, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1043 rect from 76.64899999999994, 8.149999999999999 to 77.64899999999994, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1044 rect from 77.68599999999995, 8.149999999999999 to 78.68599999999995, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1045 rect from 78.74099999999993, 8.149999999999999 to 79.74099999999993, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1046 rect from 79.81349999999993, 8.149999999999999 to 80.81349999999993, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1047 rect from 80.83699999999993, 8.149999999999999 to 81.83699999999993, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1048 rect from 81.90449999999993, 8.149999999999999 to 82.90449999999993, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1049 rect from 82.96499999999995, 8.149999999999999 to 86.90099999999993, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1050 rect from 87.97249999999993, 8.149999999999999 to 88.97249999999993, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1051 rect from 88.98349999999992, 8.149999999999999 to 89.98349999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1052 rect from 90.04699999999991, 8.149999999999999 to 91.04699999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1053 rect from 91.11749999999992, 8.149999999999999 to 92.11749999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1054 rect from 92.14499999999992, 8.149999999999999 to 93.14499999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1055 rect from 93.21149999999992, 8.149999999999999 to 94.21149999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1056 rect from 94.24599999999992, 8.149999999999999 to 95.24599999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1057 rect from 95.31249999999991, 8.149999999999999 to 96.31249999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1058 rect from 96.36399999999992, 8.149999999999999 to 97.36399999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1059 rect from 97.39349999999992, 8.149999999999999 to 98.39349999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1060 rect from 98.46299999999991, 8.149999999999999 to 99.46299999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1061 rect from 99.48249999999992, 8.149999999999999 to 100.48249999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1062 rect from 100.56199999999993, 8.149999999999999 to 101.56199999999993, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1063 rect from 101.62199999999993, 8.149999999999999 to 102.62199999999993, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1064 rect from 102.63099999999991, 8.149999999999999 to 103.63099999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1065 rect from 103.67849999999991, 8.149999999999999 to 104.67849999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1066 rect from 104.73949999999991, 8.149999999999999 to 105.73949999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1067 rect from 105.77499999999992, 8.149999999999999 to 106.77499999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1068 rect from 106.85949999999991, 8.149999999999999 to 108.7474999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1069 rect from 109.87949999999991, 8.149999999999999 to 110.87949999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1070 rect from 110.93299999999991, 8.149999999999999 to 111.93299999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1071 rect from 112.00149999999991, 8.149999999999999 to 113.9859999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1072 rect from 114.0484999999999, 8.149999999999999 to 115.0484999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1073 rect from 115.0789999999999, 8.149999999999999 to 116.0789999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1074 rect from 116.14449999999991, 8.149999999999999 to 117.14449999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1075 rect from 117.26799999999992, 8.149999999999999 to 118.26799999999992, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1076 rect from 118.3064999999999, 8.149999999999999 to 119.3064999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1077 rect from 119.3669999999999, 8.149999999999999 to 120.3669999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1078 rect from 120.4439999999999, 8.149999999999999 to 121.4439999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1079 rect from 121.46899999999991, 8.149999999999999 to 122.46899999999991, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1080 rect from 122.52449999999989, 8.149999999999999 to 123.52449999999989, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1081 rect from 123.56399999999988, 8.149999999999999 to 124.56399999999988, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1082 rect from 124.6324999999999, 8.149999999999999 to 125.6324999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1083 rect from 125.6929999999999, 8.149999999999999 to 126.6929999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1084 rect from 126.7234999999999, 8.149999999999999 to 127.7234999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1085 rect from 127.77299999999991, 8.149999999999999 to 128.7729999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1086 rect from 128.8479999999999, 8.149999999999999 to 129.8479999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1087 rect from 129.8879999999999, 8.149999999999999 to 131.8519999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1088 rect from 131.90949999999987, 8.149999999999999 to 132.90949999999987, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1089 rect from 132.9344999999999, 8.149999999999999 to 133.9344999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1090 rect from 133.9804999999999, 8.149999999999999 to 134.9804999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1091 rect from 135.0459999999999, 8.149999999999999 to 136.0459999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1092 rect from 136.08249999999987, 8.149999999999999 to 137.08249999999987, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1093 rect from 137.1609999999999, 8.149999999999999 to 138.1609999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1094 rect from 138.2054999999999, 8.149999999999999 to 139.2054999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1095 rect from 139.24899999999988, 8.149999999999999 to 140.24899999999988, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1096 rect from 140.3004999999999, 8.149999999999999 to 141.3004999999999, 7.949999999999999 fc rgb \"#DD0000\"",
+ "set object 1097 rect from 19.8815, 7.899999999999999 to 20.8815, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1098 rect from 20.910999999999998, 7.899999999999999 to 21.910999999999998, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1099 rect from 45.50300000000001, 7.899999999999999 to 48.4035, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1100 rect from 48.49, 7.899999999999999 to 53.228999999999985, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1101 rect from 54.33049999999998, 7.899999999999999 to 55.33049999999998, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1102 rect from 57.33799999999997, 7.899999999999999 to 59.19399999999997, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1103 rect from 61.17299999999995, 7.899999999999999 to 62.17299999999995, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1104 rect from 62.20049999999995, 7.899999999999999 to 63.20049999999995, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1105 rect from 63.23999999999996, 7.899999999999999 to 64.23999999999995, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1106 rect from 64.28099999999995, 7.899999999999999 to 65.28099999999995, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1107 rect from 65.31249999999996, 7.899999999999999 to 66.31249999999996, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1108 rect from 66.37099999999995, 7.899999999999999 to 68.35049999999995, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1109 rect from 68.37699999999995, 7.899999999999999 to 70.37999999999994, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1110 rect from 70.39199999999994, 7.899999999999999 to 71.39199999999994, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1111 rect from 71.44949999999994, 7.899999999999999 to 72.44949999999994, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1112 rect from 72.48699999999994, 7.899999999999999 to 73.48699999999994, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1113 rect from 73.55549999999994, 7.899999999999999 to 75.56249999999994, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1114 rect from 75.59449999999994, 7.899999999999999 to 76.59449999999994, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1115 rect from 76.64899999999994, 7.899999999999999 to 77.64899999999994, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1116 rect from 77.68599999999995, 7.899999999999999 to 78.68599999999995, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1117 rect from 78.74099999999993, 7.899999999999999 to 79.74099999999993, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1118 rect from 79.81349999999993, 7.899999999999999 to 80.81349999999993, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1119 rect from 80.83699999999993, 7.899999999999999 to 81.83699999999993, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1120 rect from 81.90449999999993, 7.899999999999999 to 82.90449999999993, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1121 rect from 82.96499999999995, 7.899999999999999 to 86.90099999999993, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1122 rect from 87.97249999999993, 7.899999999999999 to 88.97249999999993, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1123 rect from 88.98349999999992, 7.899999999999999 to 89.98349999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1124 rect from 90.04699999999991, 7.899999999999999 to 91.04699999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1125 rect from 91.11749999999992, 7.899999999999999 to 92.11749999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1126 rect from 92.14499999999992, 7.899999999999999 to 93.14499999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1127 rect from 93.21149999999992, 7.899999999999999 to 94.21149999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1128 rect from 94.24599999999992, 7.899999999999999 to 95.24599999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1129 rect from 95.31249999999991, 7.899999999999999 to 96.31249999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1130 rect from 96.36399999999992, 7.899999999999999 to 97.36399999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1131 rect from 97.39349999999992, 7.899999999999999 to 98.39349999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1132 rect from 98.46299999999991, 7.899999999999999 to 99.46299999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1133 rect from 99.48249999999992, 7.899999999999999 to 100.48249999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1134 rect from 100.56199999999993, 7.899999999999999 to 101.56199999999993, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1135 rect from 101.62199999999993, 7.899999999999999 to 102.62199999999993, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1136 rect from 102.63099999999991, 7.899999999999999 to 103.63099999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1137 rect from 103.67849999999991, 7.899999999999999 to 104.67849999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1138 rect from 104.73949999999991, 7.899999999999999 to 105.73949999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1139 rect from 105.77499999999992, 7.899999999999999 to 106.77499999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1140 rect from 106.85949999999991, 7.899999999999999 to 108.7474999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1141 rect from 109.87949999999991, 7.899999999999999 to 110.87949999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1142 rect from 110.93299999999991, 7.899999999999999 to 111.93299999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1143 rect from 112.00149999999991, 7.899999999999999 to 113.9859999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1144 rect from 114.0484999999999, 7.899999999999999 to 115.0484999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1145 rect from 115.0789999999999, 7.899999999999999 to 116.0789999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1146 rect from 116.14449999999991, 7.899999999999999 to 117.14449999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1147 rect from 117.26799999999992, 7.899999999999999 to 118.26799999999992, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1148 rect from 118.3064999999999, 7.899999999999999 to 119.3064999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1149 rect from 119.3669999999999, 7.899999999999999 to 120.3669999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1150 rect from 120.4439999999999, 7.899999999999999 to 121.4439999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1151 rect from 121.46899999999991, 7.899999999999999 to 122.46899999999991, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1152 rect from 122.52449999999989, 7.899999999999999 to 123.52449999999989, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1153 rect from 123.56399999999988, 7.899999999999999 to 124.56399999999988, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1154 rect from 124.6324999999999, 7.899999999999999 to 125.6324999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1155 rect from 125.6929999999999, 7.899999999999999 to 126.6929999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1156 rect from 126.7234999999999, 7.899999999999999 to 127.7234999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1157 rect from 127.77299999999991, 7.899999999999999 to 128.7729999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1158 rect from 128.8479999999999, 7.899999999999999 to 129.8479999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1159 rect from 129.8879999999999, 7.899999999999999 to 131.8519999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1160 rect from 131.90949999999987, 7.899999999999999 to 132.90949999999987, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1161 rect from 132.9344999999999, 7.899999999999999 to 133.9344999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1162 rect from 133.9804999999999, 7.899999999999999 to 134.9804999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1163 rect from 135.0459999999999, 7.899999999999999 to 136.0459999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1164 rect from 136.08249999999987, 7.899999999999999 to 137.08249999999987, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1165 rect from 137.1609999999999, 7.899999999999999 to 138.1609999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1166 rect from 138.2054999999999, 7.899999999999999 to 139.2054999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1167 rect from 139.24899999999988, 7.899999999999999 to 140.24899999999988, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1168 rect from 140.3004999999999, 7.899999999999999 to 141.3004999999999, 7.699999999999999 fc rgb \"#DD0000\"",
+ "set object 1169 rect from 19.8815, 7.649999999999999 to 20.8815, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1170 rect from 20.910999999999998, 7.649999999999999 to 21.910999999999998, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1171 rect from 45.50300000000001, 7.649999999999999 to 48.4035, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1172 rect from 48.49, 7.649999999999999 to 53.228999999999985, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1173 rect from 54.33049999999998, 7.649999999999999 to 55.33049999999998, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1174 rect from 57.33799999999997, 7.649999999999999 to 59.19399999999997, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1175 rect from 61.17299999999995, 7.649999999999999 to 62.17299999999995, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1176 rect from 62.20049999999995, 7.649999999999999 to 63.20049999999995, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1177 rect from 63.23999999999996, 7.649999999999999 to 64.23999999999995, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1178 rect from 64.28099999999995, 7.649999999999999 to 65.28099999999995, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1179 rect from 65.31249999999996, 7.649999999999999 to 66.31249999999996, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1180 rect from 66.37099999999995, 7.649999999999999 to 68.35049999999995, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1181 rect from 68.37699999999995, 7.649999999999999 to 70.37999999999994, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1182 rect from 70.39199999999994, 7.649999999999999 to 71.39199999999994, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1183 rect from 71.44949999999994, 7.649999999999999 to 72.44949999999994, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1184 rect from 72.48699999999994, 7.649999999999999 to 73.48699999999994, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1185 rect from 73.55549999999994, 7.649999999999999 to 75.56249999999994, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1186 rect from 75.59449999999994, 7.649999999999999 to 76.59449999999994, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1187 rect from 76.64899999999994, 7.649999999999999 to 77.64899999999994, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1188 rect from 77.68599999999995, 7.649999999999999 to 78.68599999999995, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1189 rect from 78.74099999999993, 7.649999999999999 to 79.74099999999993, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1190 rect from 79.81349999999993, 7.649999999999999 to 80.81349999999993, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1191 rect from 80.83699999999993, 7.649999999999999 to 81.83699999999993, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1192 rect from 81.90449999999993, 7.649999999999999 to 82.90449999999993, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1193 rect from 82.96499999999995, 7.649999999999999 to 86.90099999999993, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1194 rect from 87.97249999999993, 7.649999999999999 to 88.97249999999993, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1195 rect from 88.98349999999992, 7.649999999999999 to 89.98349999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1196 rect from 90.04699999999991, 7.649999999999999 to 91.04699999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1197 rect from 91.11749999999992, 7.649999999999999 to 92.11749999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1198 rect from 92.14499999999992, 7.649999999999999 to 93.14499999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1199 rect from 93.21149999999992, 7.649999999999999 to 94.21149999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1200 rect from 94.24599999999992, 7.649999999999999 to 95.24599999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1201 rect from 95.31249999999991, 7.649999999999999 to 96.31249999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1202 rect from 96.36399999999992, 7.649999999999999 to 97.36399999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1203 rect from 97.39349999999992, 7.649999999999999 to 98.39349999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1204 rect from 98.46299999999991, 7.649999999999999 to 99.46299999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1205 rect from 99.48249999999992, 7.649999999999999 to 100.48249999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1206 rect from 100.56199999999993, 7.649999999999999 to 101.56199999999993, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1207 rect from 101.62199999999993, 7.649999999999999 to 102.62199999999993, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1208 rect from 102.63099999999991, 7.649999999999999 to 103.63099999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1209 rect from 103.67849999999991, 7.649999999999999 to 104.67849999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1210 rect from 104.73949999999991, 7.649999999999999 to 105.73949999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1211 rect from 105.77499999999992, 7.649999999999999 to 106.77499999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1212 rect from 106.85949999999991, 7.649999999999999 to 108.7474999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1213 rect from 109.87949999999991, 7.649999999999999 to 110.87949999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1214 rect from 110.93299999999991, 7.649999999999999 to 111.93299999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1215 rect from 112.00149999999991, 7.649999999999999 to 113.9859999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1216 rect from 114.0484999999999, 7.649999999999999 to 115.0484999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1217 rect from 115.0789999999999, 7.649999999999999 to 116.0789999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1218 rect from 116.14449999999991, 7.649999999999999 to 117.14449999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1219 rect from 117.26799999999992, 7.649999999999999 to 118.26799999999992, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1220 rect from 118.3064999999999, 7.649999999999999 to 119.3064999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1221 rect from 119.3669999999999, 7.649999999999999 to 120.3669999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1222 rect from 120.4439999999999, 7.649999999999999 to 121.4439999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1223 rect from 121.46899999999991, 7.649999999999999 to 122.46899999999991, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1224 rect from 122.52449999999989, 7.649999999999999 to 123.52449999999989, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1225 rect from 123.56399999999988, 7.649999999999999 to 124.56399999999988, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1226 rect from 124.6324999999999, 7.649999999999999 to 125.6324999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1227 rect from 125.6929999999999, 7.649999999999999 to 126.6929999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1228 rect from 126.7234999999999, 7.649999999999999 to 127.7234999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1229 rect from 127.77299999999991, 7.649999999999999 to 128.7729999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1230 rect from 128.8479999999999, 7.649999999999999 to 129.8479999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1231 rect from 129.8879999999999, 7.649999999999999 to 131.8519999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1232 rect from 131.90949999999987, 7.649999999999999 to 132.90949999999987, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1233 rect from 132.9344999999999, 7.649999999999999 to 133.9344999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1234 rect from 133.9804999999999, 7.649999999999999 to 134.9804999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1235 rect from 135.0459999999999, 7.649999999999999 to 136.0459999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1236 rect from 136.08249999999987, 7.649999999999999 to 137.08249999999987, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1237 rect from 137.1609999999999, 7.649999999999999 to 138.1609999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1238 rect from 138.2054999999999, 7.649999999999999 to 139.2054999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1239 rect from 139.24899999999988, 7.649999999999999 to 140.24899999999988, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1240 rect from 140.3004999999999, 7.649999999999999 to 141.3004999999999, 7.449999999999999 fc rgb \"#DD0000\"",
+ "set object 1241 rect from 64.28099999999995, 10.2 to 65.28099999999995, 9.8 fc rgb \"#00EE00\"",
+ "set object 1242 rect from 70.39199999999994, 10.2 to 71.39199999999994, 9.8 fc rgb \"#00EE00\"",
+ "set object 1243 rect from 74.56249999999994, 10.2 to 75.56249999999994, 9.8 fc rgb \"#00EE00\"",
+ "set object 1244 rect from 75.59449999999994, 10.2 to 76.59449999999994, 9.8 fc rgb \"#00EE00\"",
+ "set object 1245 rect from 77.68599999999995, 10.2 to 78.68599999999995, 9.8 fc rgb \"#00EE00\"",
+ "set object 1246 rect from 82.96499999999995, 10.2 to 83.96499999999995, 9.8 fc rgb \"#00EE00\"",
+ "set object 1247 rect from 85.90099999999993, 10.2 to 86.90099999999993, 9.8 fc rgb \"#00EE00\"",
+ "set object 1248 rect from 87.97249999999993, 10.2 to 88.97249999999993, 9.8 fc rgb \"#00EE00\"",
+ "set object 1249 rect from 92.14499999999992, 10.2 to 93.14499999999992, 9.8 fc rgb \"#00EE00\"",
+ "set object 1250 rect from 95.31249999999991, 10.2 to 96.31249999999991, 9.8 fc rgb \"#00EE00\"",
+ "set object 1251 rect from 97.39349999999992, 10.2 to 98.39349999999992, 9.8 fc rgb \"#00EE00\"",
+ "set object 1252 rect from 98.46299999999991, 10.2 to 99.46299999999991, 9.8 fc rgb \"#00EE00\"",
+ "set object 1253 rect from 99.48249999999992, 10.2 to 100.48249999999992, 9.8 fc rgb \"#00EE00\"",
+ "set object 1254 rect from 100.56199999999993, 10.2 to 101.56199999999993, 9.8 fc rgb \"#00EE00\"",
+ "set object 1255 rect from 101.62199999999993, 10.2 to 102.62199999999993, 9.8 fc rgb \"#00EE00\"",
+ "set object 1256 rect from 102.63099999999991, 10.2 to 103.63099999999991, 9.8 fc rgb \"#00EE00\"",
+ "set object 1257 rect from 103.67849999999991, 10.2 to 104.67849999999991, 9.8 fc rgb \"#00EE00\"",
+ "set object 1258 rect from 104.73949999999991, 10.2 to 105.73949999999991, 9.8 fc rgb \"#00EE00\"",
+ "set object 1259 rect from 105.77499999999992, 10.2 to 106.77499999999992, 9.8 fc rgb \"#00EE00\"",
+ "set object 1260 rect from 107.7474999999999, 10.2 to 108.7474999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1261 rect from 110.93299999999991, 10.2 to 111.93299999999991, 9.8 fc rgb \"#00EE00\"",
+ "set object 1262 rect from 115.0789999999999, 10.2 to 116.0789999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1263 rect from 116.14449999999991, 10.2 to 117.14449999999991, 9.8 fc rgb \"#00EE00\"",
+ "set object 1264 rect from 117.26799999999992, 10.2 to 118.26799999999992, 9.8 fc rgb \"#00EE00\"",
+ "set object 1265 rect from 118.3064999999999, 10.2 to 119.3064999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1266 rect from 122.52449999999989, 10.2 to 123.52449999999989, 9.8 fc rgb \"#00EE00\"",
+ "set object 1267 rect from 123.56399999999988, 10.2 to 124.56399999999988, 9.8 fc rgb \"#00EE00\"",
+ "set object 1268 rect from 124.6324999999999, 10.2 to 125.6324999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1269 rect from 125.6929999999999, 10.2 to 126.6929999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1270 rect from 128.8479999999999, 10.2 to 129.8479999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1271 rect from 129.8879999999999, 10.2 to 131.8519999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1272 rect from 131.90949999999987, 10.2 to 132.90949999999987, 9.8 fc rgb \"#00EE00\"",
+ "set object 1273 rect from 133.9804999999999, 10.2 to 134.9804999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1274 rect from 136.08249999999987, 10.2 to 137.08249999999987, 9.8 fc rgb \"#00EE00\"",
+ "set object 1275 rect from 140.3004999999999, 10.2 to 141.3004999999999, 9.8 fc rgb \"#00EE00\"",
+ "set object 1276 rect from 61.17299999999995, 9.399999999999999 to 62.17299999999995, 9.2 fc rgb \"#00EE00\"",
+ "set object 1277 rect from 63.23999999999996, 9.399999999999999 to 64.23999999999995, 9.2 fc rgb \"#00EE00\"",
+ "set object 1278 rect from 65.31249999999996, 9.399999999999999 to 66.31249999999996, 9.2 fc rgb \"#00EE00\"",
+ "set object 1279 rect from 66.37099999999995, 9.399999999999999 to 67.37099999999995, 9.2 fc rgb \"#00EE00\"",
+ "set object 1280 rect from 68.37699999999995, 9.399999999999999 to 70.37999999999994, 9.2 fc rgb \"#00EE00\"",
+ "set object 1281 rect from 70.39199999999994, 9.399999999999999 to 71.39199999999994, 9.2 fc rgb \"#00EE00\"",
+ "set object 1282 rect from 72.48699999999994, 9.399999999999999 to 73.48699999999994, 9.2 fc rgb \"#00EE00\"",
+ "set object 1283 rect from 73.55549999999994, 9.399999999999999 to 75.56249999999994, 9.2 fc rgb \"#00EE00\"",
+ "set object 1284 rect from 76.64899999999994, 9.399999999999999 to 77.64899999999994, 9.2 fc rgb \"#00EE00\"",
+ "set object 1285 rect from 79.81349999999993, 9.399999999999999 to 80.81349999999993, 9.2 fc rgb \"#00EE00\"",
+ "set object 1286 rect from 80.83699999999993, 9.399999999999999 to 81.83699999999993, 9.2 fc rgb \"#00EE00\"",
+ "set object 1287 rect from 81.90449999999993, 9.399999999999999 to 82.90449999999993, 9.2 fc rgb \"#00EE00\"",
+ "set object 1288 rect from 84.90049999999992, 9.399999999999999 to 86.90099999999993, 9.2 fc rgb \"#00EE00\"",
+ "set object 1289 rect from 87.97249999999993, 9.399999999999999 to 88.97249999999993, 9.2 fc rgb \"#00EE00\"",
+ "set object 1290 rect from 92.14499999999992, 9.399999999999999 to 93.14499999999992, 9.2 fc rgb \"#00EE00\"",
+ "set object 1291 rect from 93.21149999999992, 9.399999999999999 to 94.21149999999992, 9.2 fc rgb \"#00EE00\"",
+ "set object 1292 rect from 95.31249999999991, 9.399999999999999 to 96.31249999999991, 9.2 fc rgb \"#00EE00\"",
+ "set object 1293 rect from 96.36399999999992, 9.399999999999999 to 97.36399999999992, 9.2 fc rgb \"#00EE00\"",
+ "set object 1294 rect from 97.39349999999992, 9.399999999999999 to 98.39349999999992, 9.2 fc rgb \"#00EE00\"",
+ "set object 1295 rect from 99.48249999999992, 9.399999999999999 to 100.48249999999992, 9.2 fc rgb \"#00EE00\"",
+ "set object 1296 rect from 100.56199999999993, 9.399999999999999 to 101.56199999999993, 9.2 fc rgb \"#00EE00\"",
+ "set object 1297 rect from 101.62199999999993, 9.399999999999999 to 102.62199999999993, 9.2 fc rgb \"#00EE00\"",
+ "set object 1298 rect from 102.63099999999991, 9.399999999999999 to 103.63099999999991, 9.2 fc rgb \"#00EE00\"",
+ "set object 1299 rect from 103.67849999999991, 9.399999999999999 to 104.67849999999991, 9.2 fc rgb \"#00EE00\"",
+ "set object 1300 rect from 104.73949999999991, 9.399999999999999 to 105.73949999999991, 9.2 fc rgb \"#00EE00\"",
+ "set object 1301 rect from 105.77499999999992, 9.399999999999999 to 106.77499999999992, 9.2 fc rgb \"#00EE00\"",
+ "set object 1302 rect from 110.93299999999991, 9.399999999999999 to 111.93299999999991, 9.2 fc rgb \"#00EE00\"",
+ "set object 1303 rect from 112.00149999999991, 9.399999999999999 to 113.9859999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1304 rect from 114.0484999999999, 9.399999999999999 to 115.0484999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1305 rect from 116.14449999999991, 9.399999999999999 to 117.14449999999991, 9.2 fc rgb \"#00EE00\"",
+ "set object 1306 rect from 117.26799999999992, 9.399999999999999 to 118.26799999999992, 9.2 fc rgb \"#00EE00\"",
+ "set object 1307 rect from 118.3064999999999, 9.399999999999999 to 119.3064999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1308 rect from 120.4439999999999, 9.399999999999999 to 121.4439999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1309 rect from 121.46899999999991, 9.399999999999999 to 122.46899999999991, 9.2 fc rgb \"#00EE00\"",
+ "set object 1310 rect from 122.52449999999989, 9.399999999999999 to 123.52449999999989, 9.2 fc rgb \"#00EE00\"",
+ "set object 1311 rect from 124.6324999999999, 9.399999999999999 to 125.6324999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1312 rect from 125.6929999999999, 9.399999999999999 to 126.6929999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1313 rect from 126.7234999999999, 9.399999999999999 to 127.7234999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1314 rect from 129.8879999999999, 9.399999999999999 to 131.8519999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1315 rect from 133.9804999999999, 9.399999999999999 to 134.9804999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1316 rect from 135.0459999999999, 9.399999999999999 to 136.0459999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1317 rect from 136.08249999999987, 9.399999999999999 to 137.08249999999987, 9.2 fc rgb \"#00EE00\"",
+ "set object 1318 rect from 137.1609999999999, 9.399999999999999 to 138.1609999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1319 rect from 139.24899999999988, 9.399999999999999 to 140.24899999999988, 9.2 fc rgb \"#00EE00\"",
+ "set object 1320 rect from 140.3004999999999, 9.399999999999999 to 141.3004999999999, 9.2 fc rgb \"#00EE00\"",
+ "set object 1321 rect from 84.90049999999992, 9.149999999999999 to 85.90049999999992, 8.95 fc rgb \"#00EE00\"",
+ "set object 1322 rect from 94.24599999999992, 9.149999999999999 to 95.24599999999992, 8.95 fc rgb \"#00EE00\"",
+ "set object 1323 rect from 95.31249999999991, 9.149999999999999 to 96.31249999999991, 8.95 fc rgb \"#00EE00\"",
+ "set object 1324 rect from 97.39349999999992, 9.149999999999999 to 98.39349999999992, 8.95 fc rgb \"#00EE00\"",
+ "set object 1325 rect from 99.48249999999992, 9.149999999999999 to 100.48249999999992, 8.95 fc rgb \"#00EE00\"",
+ "set object 1326 rect from 100.56199999999993, 9.149999999999999 to 101.56199999999993, 8.95 fc rgb \"#00EE00\"",
+ "set object 1327 rect from 103.67849999999991, 9.149999999999999 to 104.67849999999991, 8.95 fc rgb \"#00EE00\"",
+ "set object 1328 rect from 105.77499999999992, 9.149999999999999 to 106.77499999999992, 8.95 fc rgb \"#00EE00\"",
+ "set object 1329 rect from 114.0484999999999, 9.149999999999999 to 115.0484999999999, 8.95 fc rgb \"#00EE00\"",
+ "set object 1330 rect from 116.14449999999991, 9.149999999999999 to 117.14449999999991, 8.95 fc rgb \"#00EE00\"",
+ "set object 1331 rect from 118.3064999999999, 9.149999999999999 to 119.3064999999999, 8.95 fc rgb \"#00EE00\"",
+ "set object 1332 rect from 119.3669999999999, 9.149999999999999 to 120.3669999999999, 8.95 fc rgb \"#00EE00\"",
+ "set object 1333 rect from 124.6324999999999, 9.149999999999999 to 125.6324999999999, 8.95 fc rgb \"#00EE00\"",
+ "set object 1334 rect from 125.6929999999999, 9.149999999999999 to 126.6929999999999, 8.95 fc rgb \"#00EE00\"",
+ "set object 1335 rect from 129.8879999999999, 9.149999999999999 to 131.8519999999999, 8.95 fc rgb \"#00EE00\"",
+ "set object 1336 rect from 135.0459999999999, 9.149999999999999 to 136.0459999999999, 8.95 fc rgb \"#00EE00\"",
+ "set object 1337 rect from 136.08249999999987, 9.149999999999999 to 137.08249999999987, 8.95 fc rgb \"#00EE00\"",
+ "set object 1338 rect from 139.24899999999988, 9.149999999999999 to 140.24899999999988, 8.95 fc rgb \"#00EE00\"",
+ "set object 1339 rect from 78.74099999999993, 10.2 to 79.74099999999993, 9.8 fc rgb \"#FF00FF\"",
+ "set object 1340 rect from 93.21149999999992, 10.2 to 94.21149999999992, 9.8 fc rgb \"#FF00FF\"",
+ "set object 1341 rect from 112.00149999999991, 10.2 to 113.00149999999991, 9.8 fc rgb \"#FF00FF\"",
+ "set object 1342 rect from 80.83699999999993, 10.2 to 81.83699999999993, 9.8 fc rgb \"#AA00AA\"",
+ "set object 1343 rect from 57.33799999999997, 10.2 to 58.33799999999997, 9.8 fc rgb \"#4444AA\"",
+ "set object 1344 rect from 67.35049999999995, 10.2 to 68.35049999999995, 9.8 fc rgb \"#4444AA\"",
+ "set object 1345 rect from 71.44949999999994, 10.2 to 72.44949999999994, 9.8 fc rgb \"#4444AA\"",
+ "set object 1346 rect from 138.2054999999999, 10.2 to 139.2054999999999, 9.8 fc rgb \"#4444AA\"",
+ "set label \"external \" at 11.092156249999992,11 textcolor rgb \"#3399FF\" font \"Helvetica,9'\"",
+ "set label \"runtime \" at 19.763812499999986,11 textcolor rgb \"#000000\" font \"Helvetica,9'\"",
+ "set label \"full code\" at 28.435468749999977,11 textcolor rgb \"#DD0000\" font \"Helvetica,9'\"",
+ "set label \"opt code \" at 37.10712499999997,11 textcolor rgb \"#00EE00\" font \"Helvetica,9'\"",
+ "set label \"code stub\" at 45.77878124999996,11 textcolor rgb \"#FF00FF\" font \"Helvetica,9'\"",
+ "set label \"built-in \" at 54.45043749999995,11 textcolor rgb \"#AA00AA\" font \"Helvetica,9'\"",
+ "set label \"inl.cache\" at 63.12209374999994,11 textcolor rgb \"#4444AA\" font \"Helvetica,9'\"",
+ "set label \"reg.exp. \" at 71.79374999999993,11 textcolor rgb \"#0000FF\" font \"Helvetica,9'\"",
+ "set label \"13 ms\" at 42.32008281250001,5.5 font \"Helvetica,7'\"",
+ "set label \"1 ms\" at 4.2255828125,1 font \"Helvetica,7'\"",
+ "set label \"1 ms\" at 10.8385828125,1 font \"Helvetica,7'\"",
+ "set label \"1 ms\" at 7.8065828125,1 font \"Helvetica,7'\"",
+ "set label \"1 ms\" at 14.3305828125,1 font \"Helvetica,7'\"",
+ "set label \"0 ms\" at 18.204082812499998,1 font \"Helvetica,7'\"",
+ "set label \"0 ms\" at 85.27908281249994,1 font \"Helvetica,7'\"",
+ "set y2range [0:59.54259090909095]",
+ "plot '-' using 1:2 axes x1y2 with impulses ls 1",
+ "41.88650000000001 13.935500000000008",
+ "3.7920000000000003 1.3375000000000004",
+ "10.405000000000001 1.113500000000002",
+ "7.373 1.1035000000000004",
+ "13.897 1.0205000000000002",
+ "17.7705 0.7759999999999998",
+ "84.84549999999993 0.75",
+ "86.05649999999993 0.6779999999999973",
+ "87.36899999999991 0.6134999999999877",
+ "131.1209999999999 0.5784999999999911",
+ "108.09449999999991 0.5775000000000006",
+ "60.65699999999996 0.4855000000000018",
+ "23.1135 0.44849999999999923",
+ "21.063999999999997 0.4394999999999989",
+ "56.198999999999984 0.3990000000000009",
+ "63.024999999999956 0.39799999999999613",
+ "51.02349999999999 0.39399999999999835",
+ "110.4839999999999 0.3930000000000007",
+ "54.951999999999984 0.392000000000003",
+ "69.49599999999995 0.38750000000000284",
+ "62.548999999999964 0.38750000000000284",
+ "89.21399999999991 0.3744999999999976",
+ "133.4079999999999 0.3645000000000209",
+ "61.253999999999955 0.3374999999999986",
+ "104.4429999999999 0.30999999999997385",
+ "56.52499999999998 0.2734999999999985",
+ "63.41299999999995 0.26549999999999585",
+ "102.99499999999992 0.2535000000000025",
+ "47.3935 0.25250000000000483",
+ "58.65749999999996 0.24799999999999756",
+ "23.8365 0.2469999999999999",
+ "74.63849999999994 0.23149999999998272",
+ "16.9595 0.22250000000000014",
+ "85.13799999999992 0.22049999999998704",
+ "51.78349999999999 0.2085000000000008",
+ "64.61749999999995 0.20849999999998658",
+ "70.58649999999994 0.1775000000000091",
+ "55.66249999999998 0.17649999999999721",
+ "57.205999999999975 0.1700000000000017",
+ "75.20099999999994 0.1529999999999916",
+ "61.464999999999954 0.1524999999999963",
+ "18.017000000000003 0.14550000000000196",
+ "67.48749999999995 0.14500000000001023",
+ "42.05100000000001 0.1385000000000005",
+ "25.4055 0.13799999999999812",
+ "46.9925 0.1314999999999955",
+ "18.1735 0.12750000000000128",
+ "109.38599999999991 0.12000000000000455",
+ "59.042999999999964 0.1180000000000021",
+ "56.97649999999997 0.117999999999995",
+ "22.3815 0.11400000000000077",
+ "58.07699999999997 0.1039999999999992",
+ "23.336000000000002 0.10300000000000153",
+ "21.8475 0.10050000000000026",
+ "56.845999999999975 0.09949999999999903",
+ "46.28150000000001 0.09649999999999892",
+ "121.0104999999999 0.09099999999999397",
+ "25.507500000000004 0.08800000000000452",
+ "52.448999999999984 0.08599999999999852",
+ "46.74 0.08499999999999375",
+ "57.64849999999997 0.08249999999999602",
+ "58.316999999999965 0.08099999999999596",
+ "23.506 0.08050000000000068",
+ "46.37200000000001 0.08050000000000068",
+ "42.70600000000002 0.07900000000000063",
+ "129.4124999999999 0.07800000000000296",
+ "20.5975 0.07750000000000057",
+ "56.634499999999974 0.07749999999999346",
+ "19.0685 0.07649999999999935",
+ "21.363 0.07549999999999812",
+ "49.9695 0.07500000000000284",
+ "24.544000000000004 0.07400000000000162",
+ "21.727 0.07349999999999923",
+ "58.21549999999997 0.07249999999999801",
+ "44.917000000000016 0.07200000000000273",
+ "25.591500000000003 0.07199999999999918",
+ "50.62049999999999 0.07150000000000034",
+ "46.621 0.07099999999999795",
+ "88.82299999999992 0.07099999999999795",
+ "78.23049999999994 0.0660000000000025",
+ "46.060500000000005 0.0659999999999954",
+ "50.43099999999999 0.06400000000000006",
+ "129.48849999999987 0.06349999999997635",
+ "45.55900000000001 0.06150000000000233",
+ "19.152 0.06050000000000111",
+ "50.20799999999999 0.060499999999997556",
+ "57.33299999999997 0.060499999999997556",
+ "68.76649999999995 0.06049999999999045",
+ "23.5775 0.059499999999999886",
+ "47.135000000000005 0.05850000000000222",
+ "72.28049999999995 0.05849999999999511",
+ "45.626500000000014 0.05750000000000455",
+ "21.2695 0.057500000000000995",
+ "50.14149999999999 0.05749999999999744",
+ "91.96649999999993 0.056500000000013983",
+ "57.934999999999974 0.05649999999999977",
+ "83.63999999999993 0.05649999999999977",
+ "132.92249999999987 0.05649999999997135",
+ "67.59199999999996 0.056000000000011596",
+ "99.92199999999991 0.055499999999995",
+ "59.69699999999996 0.055499999999995",
+ "45.84850000000001 0.054999999999999716",
+ "56.69449999999998 0.05449999999999733",
+ "97.26099999999992 0.05350000000001387",
+ "112.6564999999999 0.05349999999999966",
+ "45.92300000000001 0.05349999999999966",
+ "136.19799999999987 0.05349999999998545",
+ "47.465 0.05250000000000199",
+ "105.93349999999991 0.052499999999994884",
+ "65.61699999999995 0.052499999999994884",
+ "47.5235 0.051499999999997215",
+ "102.61199999999991 0.05149999999999011",
+ "138.98049999999986 0.0514999999999759",
+ "123.8254999999999 0.050500000000013756",
+ "75.46799999999995 0.04950000000000898",
+ "45.76150000000001 0.049500000000001876",
+ "94.6054999999999 0.04949999999998056",
+ "45.97850000000001 0.048500000000004206",
+ "115.4124999999999 0.048500000000004206",
+ "118.19199999999991 0.048500000000004206",
+ "49.780499999999996 0.0484999999999971",
+ "42.795000000000016 0.04800000000000182",
+ "126.59899999999989 0.04749999999999943",
+ "51.56899999999999 0.04650000000000176",
+ "80.95049999999993 0.04649999999999466",
+ "140.9779999999999 0.04599999999999227",
+ "59.93649999999996 0.04549999999999699",
+ "46.13100000000001 0.04449999999999932",
+ "51.91599999999998 0.04299999999999926",
+ "45.70300000000001 0.042499999999996874",
+ "49.4955 0.04100000000000392",
+ "45.361500000000014 0.04099999999999682",
+ "70.35799999999995 0.04050000000000864",
+ "49.726 0.040500000000001535",
+ "45.08700000000001 0.03999999999999915",
+ "45.13500000000002 0.03900000000000858",
+ "52.15399999999998 0.0379999999999967",
+ "42.88200000000001 0.0379999999999967",
+ "24.430500000000002 0.03750000000000142",
+ "23.907 0.036499999999996646",
+ "60.08349999999996 0.036499999999996646",
+ "50.32899999999999 0.036000000000001364",
+ "42.31450000000002 0.034000000000006025",
+ "45.02900000000001 0.032999999999994145",
+ "23.189 0.031500000000001194",
+ "21.49 0.03049999999999997",
+ "42.83300000000001 0.030000000000001137",
+ "58.12149999999997 0.030000000000001137",
+ "45.41750000000002 0.030000000000001137",
+ "140.89599999999987 0.028999999999996362",
+ "2.4490000000000003 0.028500000000000636",
+ "52.31499999999998 0.027999999999998693",
+ "45.17200000000002 0.027999999999998693",
+ "43.632500000000014 0.027000000000001023",
+ "49.8685 0.027000000000001023",
+ "51.30249999999999 0.027000000000001023",
+ "21.175 0.026499999999998636",
+ "44.82200000000002 0.026000000000003354",
+ "22.528 0.02599999999999625",
+ "49.82449999999999 0.02499999999999858",
+ "21.1335 0.024499999999999744",
+ "21.588 0.021500000000003183",
+ "21.525499999999997 0.02049999999999841",
+ "23.3945 0.018499999999999517",
+ "47.057500000000005 0.018000000000000682",
+ "22.570999999999998 0.01799999999999713",
+ "24.458 0.017499999999998295",
+ "22.4625 0.017499999999998295",
+ "141.04699999999988 0.016999999999995907",
+ "22.416999999999998 0.016500000000000625",
+ "45.44850000000002 0.015999999999998238",
+ "21.619500000000002 0.015499999999999403",
+ "109.41799999999989 0.015499999999988745",
+ "22.486 0.014500000000001734",
+ "50.53499999999999 0.013999999999995794",
+ "52.338999999999984 0.012000000000000455",
+ "45.489500000000014 0.012000000000000455",
+ "45.470500000000015 0.00999999999999801",
+ "50.007 0.008500000000005059",
+ "23.9205 0.008499999999997954",
+ "10.43 0.006499999999999062",
+ "13.915999999999999 0.005499999999999616",
+ "7.3919999999999995 0.005499999999999616",
+ "3.8129999999999997 0.005499999999999616",
+ "121.0314999999999 0.00549999999999784",
+ "78.23649999999994 0.0005000000000023874",
+ "e",
+ "# start: 2.4204999999999997",
+ "# end: 141.1669999999999",
+ "# objects: 1547"
+]
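The baseline added above is a JSON array of gnuplot script lines: "set object ... rect" entries draw the colored per-code-kind strips, "set label" entries place the legend and pause-time annotations, the inline impulse-plot data is terminated by a bare "e", and the final three entries are comment lines recording the plotted range and object count. A minimal sketch of peeking at that trailing metadata in d8, assuming the harness's read() builtin and a working directory containing the file:

  // Hedged sketch: load the baseline and print its trailing metadata.
  var baseline = JSON.parse(read("profviz-test.default"));
  // The last three entries are the "# start", "# end", "# objects" comments.
  print(baseline.slice(-3).join("\n"));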
diff --git a/deps/v8/test/mjsunit/tools/profviz.js b/deps/v8/test/mjsunit/tools/profviz.js
new file mode 100644
index 0000000000..3a14f4e6be
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/profviz.js
@@ -0,0 +1,83 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Load implementations from <project root>/tools.
+// Files: tools/csvparser.js tools/splaytree.js tools/codemap.js
+// Files: tools/consarray.js tools/profile.js tools/profile_view.js
+// Files: tools/logreader.js tools/tickprocessor.js
+// Files: tools/profviz/composer.js
+// Env: TEST_FILE_NAME
+
+assertEquals('string', typeof TEST_FILE_NAME);
+var path_length = TEST_FILE_NAME.lastIndexOf('/');
+if (path_length == -1) {
+ path_length = TEST_FILE_NAME.lastIndexOf('\\');
+}
+assertTrue(path_length != -1);
+
+var path = TEST_FILE_NAME.substr(0, path_length + 1);
+var input_file = path + "profviz-test.log";
+var reference_file = path + "profviz-test.default";
+
+var content_lines = read(input_file).split("\n");
+var line_cursor = 0;
+var output_lines = [];
+
+function input() {
+ return content_lines[line_cursor++];
+}
+
+function output(line) {
+ output_lines.push(line);
+}
+
+function set_range(start, end) {
+ range_start = start;
+ range_end = end;
+}
+
+var distortion = 4500 / 1000000;
+var resx = 1600;
+var resy = 600;
+
+var psc = new PlotScriptComposer(resx, resy);
+psc.collectData(input, distortion);
+psc.findPlotRange(undefined, undefined, set_range);
+var objects = psc.assembleOutput(output);
+
+output("# start: " + range_start);
+output("# end: " + range_end);
+output("# objects: " + objects);
+
+var create_baseline = false;
+
+if (create_baseline) {
+ print(JSON.stringify(output_lines, null, 2));
+} else {
+ assertArrayEquals(output_lines,
+ JSON.parse(read(reference_file)));
+}
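The test above drives PlotScriptComposer entirely through two callbacks: input() yields one log line per call and output() collects the generated gnuplot lines, which are then compared against the JSON baseline (flipping create_baseline to true prints a fresh baseline instead). A minimal sketch of that reader/collector adapter pattern, with hypothetical helper names and assuming the log file sits in the working directory:

  // makeReader/makeCollector are illustrative helpers, not part of the tree.
  function makeReader(lines) {
    var cursor = 0;
    return function() { return lines[cursor++]; };
  }
  function makeCollector(sink) {
    return function(line) { sink.push(line); };
  }

  var collected = [];
  var psc2 = new PlotScriptComposer(1600, 600);
  psc2.collectData(makeReader(read("profviz-test.log").split("\n")),
                   4500 / 1000000);
  psc2.findPlotRange(undefined, undefined, function(start, end) {});
  psc2.assembleOutput(makeCollector(collected));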
diff --git a/deps/v8/test/mjsunit/transition-elements-kind.js b/deps/v8/test/mjsunit/transition-elements-kind.js
new file mode 100644
index 0000000000..ba05c950d9
--- /dev/null
+++ b/deps/v8/test/mjsunit/transition-elements-kind.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --compiled-transitions
+// Flags: --track-allocation-sites
+
+// Allocation site for empty double arrays.
+function foo() {
+ return new Array();
+}
+var a = foo();
+a[0] = 1.1;
+
+// Emit a TransitionElementsKindStub which transitions from double to object.
+function store(a,x) {
+ a[0] = x;
+}
+store([1.1], 'a');
+store([1.1], 1.1);
+%OptimizeFunctionOnNextCall(store);
+
+// Use the TransitionElementsKindStub to transition from double to object.
+var b = foo();
+store(b, 'a');
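The test relies on V8's elements-kind lattice: the first store of 1.1 gives the array FAST_DOUBLE_ELEMENTS, and a later store of a string forces a transition to object elements, which the optimized code must handle through the new TransitionElementsKindStub. A hedged sketch of observing the same transition directly inside an mjsunit test run with --allow-natives-syntax (the %Has* natives exist in this tree):

  var arr = [1.1];                        // double store => double elements
  assertTrue(%HasFastDoubleElements(arr));
  arr[0] = 'a';                           // non-number store transitions the kind
  assertTrue(%HasFastObjectElements(arr));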
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index e773f4b0f7..4e8718eb3f 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -28,7 +28,6 @@
// Test dictionary -> double elements -> dictionary elements round trip
// Flags: --allow-natives-syntax --unbox-double-arrays --expose-gc
-// Flags: --noparallel-recompilation
var large_array_size = 100000;
var approx_dict_to_elements_threshold = 70000;
@@ -346,7 +345,7 @@ function testOneArrayType(allocator) {
-Infinity,
expected_array_value(7));
- assertTrue(%GetOptimizationStatus(test_various_stores) != 2);
+ assertOptimized(test_various_stores);
// Make sure that we haven't converted from fast double.
assertTrue(%HasFastDoubleElements(large_array));
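The assertOptimized change replaces a magic constant with a named helper: %GetOptimizationStatus returns 2 for a function that is not optimized, and mjsunit.js wraps that check under a readable name. Roughly (a paraphrase of the helper's shape, not the exact mjsunit.js source):

  function assertOptimized(fun, opt_message) {
    // Status 2 means "not optimized"; anything else passes.
    assertTrue(%GetOptimizationStatus(fun) != 2, opt_message);
  }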
diff --git a/deps/v8/tools/blink_tests/TestExpectations b/deps/v8/tools/blink_tests/TestExpectations
index 5c75db0d7e..b5f4905f60 100644
--- a/deps/v8/tools/blink_tests/TestExpectations
+++ b/deps/v8/tools/blink_tests/TestExpectations
@@ -17,3 +17,11 @@
[ Linux Debug ] fast/text/international/danda-space.html [ Pass Failure Slow ]
[ Linux Debug ] fast/text/international/thai-baht-space.html [ Pass Failure Slow ]
[ Linux Debug ] fast/text/international/thai-line-breaks.html [ Pass Failure Slow ]
+crbug.com/108833 [ Win Debug ] plugins/geturlnotify-during-document-teardown.html [ Crash Failure Timeout ]
+webkit.org/b/48655 [ Win Debug ] plugins/js-from-destroy.html [ Crash Timeout ]
+crbug.com/178745 [ Win Debug ] plugins/open-and-close-window-with-plugin.html [ Crash Failure Timeout ]
+
+# Slow on the trunk builder:
+[ Linux Debug ] fast/js/regress/function-dot-apply.html [ Slow ]
+crbug.com/249894 [ Linux Debug ] fast/js/regress/inline-arguments-access.html [ Pass Failure Crash Slow ]
+[ Linux Debug ] fast/js/regress/inline-arguments-local-escape.html [ Slow ]
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index ccdc4b379e..9719376d7f 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -918,8 +918,9 @@ INSTANCE_TYPES = {
180: "JS_ARRAY_TYPE",
171: "JS_PROXY_TYPE",
183: "JS_WEAK_MAP_TYPE",
- 184: "JS_REGEXP_TYPE",
- 185: "JS_FUNCTION_TYPE",
+ 184: "JS_WEAK_SET_TYPE",
+ 185: "JS_REGEXP_TYPE",
+ 186: "JS_FUNCTION_TYPE",
170: "JS_FUNCTION_PROXY_TYPE",
165: "DEBUG_INFO_TYPE",
166: "BREAK_POINT_INFO_TYPE",
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 892e437dce..3f666039d7 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -29,7 +29,7 @@
'variables': {
'v8_code': 1,
},
- 'includes': ['../../build/common.gypi'],
+ 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
'targets': [
{
'target_name': 'v8',
@@ -335,6 +335,16 @@
'../../src/heap-snapshot-generator.h',
'../../src/heap.cc',
'../../src/heap.h',
+ '../../src/hydrogen-bce.cc',
+ '../../src/hydrogen-bce.h',
+ '../../src/hydrogen-canonicalize.cc',
+ '../../src/hydrogen-canonicalize.h',
+ '../../src/hydrogen-dce.cc',
+ '../../src/hydrogen-dce.h',
+ '../../src/hydrogen-dehoist.cc',
+ '../../src/hydrogen-dehoist.h',
+ '../../src/hydrogen-deoptimizing-mark.cc',
+ '../../src/hydrogen-deoptimizing-mark.h',
'../../src/hydrogen-environment-liveness.cc',
'../../src/hydrogen-environment-liveness.h',
'../../src/hydrogen-escape-analysis.cc',
@@ -347,12 +357,26 @@
'../../src/hydrogen-gvn.h',
'../../src/hydrogen-infer-representation.cc',
'../../src/hydrogen-infer-representation.h',
+ '../../src/hydrogen-infer-types.cc',
+ '../../src/hydrogen-infer-types.h',
+ '../../src/hydrogen-minus-zero.cc',
+ '../../src/hydrogen-minus-zero.h',
'../../src/hydrogen-range-analysis.cc',
'../../src/hydrogen-range-analysis.h',
+ '../../src/hydrogen-redundant-phi.cc',
+ '../../src/hydrogen-redundant-phi.h',
+ '../../src/hydrogen-removable-simulates.cc',
+ '../../src/hydrogen-removable-simulates.h',
+ '../../src/hydrogen-representation-changes.cc',
+ '../../src/hydrogen-representation-changes.h',
+ '../../src/hydrogen-sce.cc',
+ '../../src/hydrogen-sce.h',
'../../src/hydrogen-uint32-analysis.cc',
'../../src/hydrogen-uint32-analysis.h',
'../../src/hydrogen-osr.cc',
'../../src/hydrogen-osr.h',
+ '../../src/icu_util.cc',
+ '../../src/icu_util.h',
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
@@ -813,9 +837,15 @@
'../../src/extensions/i18n/number-format.h',
],
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:*',
+ '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
+ '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
]
}],
+ ['OS=="win" and v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ ],
+ }],
],
},
{
@@ -874,7 +904,8 @@
'../../src/object-observe.js',
'../../src/arraybuffer.js',
'../../src/typedarray.js',
- '../../src/generator.js'
+ '../../src/generator.js',
+ '../../src/array-iterator.js'
],
'i18n_library_files': [
'../../src/extensions/i18n/header.js',
diff --git a/deps/v8/tools/oom_dump/oom_dump.cc b/deps/v8/tools/oom_dump/oom_dump.cc
index 1bf5ac19fc..5dfb5dff35 100644
--- a/deps/v8/tools/oom_dump/oom_dump.cc
+++ b/deps/v8/tools/oom_dump/oom_dump.cc
@@ -108,6 +108,7 @@ class IndirectSorter {
int* a_;
};
+
void DumpHeapStats(const char *minidump_file) {
Minidump minidump(minidump_file);
CHECK(minidump.Read());
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index c717ee88f5..12475b33c4 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -331,6 +331,14 @@ class SourceProcessor(SourceFileProcessor):
'gnuplot-4.6.3-emscripten.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
+ def EndOfDeclaration(self, line):
+ return line == "}" or line == "};"
+
+ def StartOfDeclaration(self, line):
+ return line.find("//") == 0 or \
+ line.find("/*") == 0 or \
+ line.find(") {") != -1
+
def ProcessContents(self, name, contents):
result = True
base = basename(name)
@@ -342,7 +350,6 @@ class SourceProcessor(SourceFileProcessor):
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print "%s is missing a correct copyright header." % name
result = False
- ext = base.split('.').pop()
if ' \n' in contents or contents.endswith(' '):
line = 0
lines = []
@@ -358,6 +365,30 @@ class SourceProcessor(SourceFileProcessor):
else:
print "%s has trailing whitespaces in line %s." % (name, linenumbers)
result = False
+ # Check two empty lines between declarations.
+ if name.endswith(".cc"):
+ line = 0
+ lines = []
+ parts = contents.split('\n')
+ while line < len(parts) - 2:
+ if self.EndOfDeclaration(parts[line]):
+ if self.StartOfDeclaration(parts[line + 1]):
+ lines.append(str(line + 1))
+ line += 1
+ elif parts[line + 1] == "" and \
+ self.StartOfDeclaration(parts[line + 2]):
+ lines.append(str(line + 1))
+ line += 2
+ line += 1
+ if len(lines) >= 1:
+ linenumbers = ', '.join(lines)
+ if len(lines) > 1:
+ print "%s does not have two empty lines between declarations " \
+ "in lines %s." % (name, linenumbers)
+ else:
+ print "%s does not have two empty lines between declarations " \
+ "in line %s." % (name, linenumbers)
+ result = False
return result
def ProcessFiles(self, files, path):
@@ -391,7 +422,8 @@ def Main():
print "Running C++ lint check..."
if not options.no_lint:
success = CppLintProcessor().Run(workspace) and success
- print "Running copyright header and trailing whitespaces check..."
+ print "Running copyright header, trailing whitespaces and " \
+ "two empty lines between declarations check..."
success = SourceProcessor().Run(workspace) and success
if success:
return 0
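The new presubmit pass scans each .cc file line by line: a line that closes a declaration ("}" or "};") must be followed by at least two blank lines before the next declaration begins; zero or exactly one blank line is flagged. The same scan, transliterated to JavaScript purely for illustration:

  function endOfDecl(line) { return line == "}" || line == "};"; }
  function startOfDecl(line) {
    return line.indexOf("//") == 0 || line.indexOf("/*") == 0 ||
           line.indexOf(") {") != -1;
  }

  // Returns the (0-based) indices of lines that should be blank but are not.
  function findMissingSeparators(parts) {
    var flagged = [];
    for (var line = 0; line < parts.length - 2; line++) {
      if (!endOfDecl(parts[line])) continue;
      if (startOfDecl(parts[line + 1])) {
        flagged.push(line + 1);           // no blank line at all
        line += 1;
      } else if (parts[line + 1] == "" && startOfDecl(parts[line + 2])) {
        flagged.push(line + 1);           // only one blank line
        line += 2;
      }
    }
    return flagged;
  }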
diff --git a/deps/v8/tools/profviz/composer.js b/deps/v8/tools/profviz/composer.js
index bcc17b2ac8..cdfc0b7b39 100644
--- a/deps/v8/tools/profviz/composer.js
+++ b/deps/v8/tools/profviz/composer.js
@@ -337,7 +337,7 @@ function PlotScriptComposer(kResX, kResY) {
};
var processTickEvent = function(
- pc, sp, timer, unused_x, unused_y, vmstate, stack) {
+ pc, timer, unused_x, unused_y, vmstate, stack) {
var tick = new Tick(timer);
var entry = code_map.findEntry(pc);
@@ -365,7 +365,7 @@ function PlotScriptComposer(kResX, kResY) {
processor: processCodeDeleteEvent },
'code-deopt': { parsers: [parseTimeStamp, parseInt],
processor: processCodeDeoptEvent },
- 'tick': { parsers: [parseInt, parseInt, parseTimeStamp,
+ 'tick': { parsers: [parseInt, parseTimeStamp,
null, null, parseInt, 'var-args'],
processor: processTickEvent }
});
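This composer.js change tracks a V8 log-format change: the 'tick' event no longer carries a stack-pointer field, so the parser table drops one parseInt and processTickEvent loses its sp argument. For context, a minimal sketch of the logreader-style dispatch pattern in play (field handling simplified; the real reader also supports the 'var-args' marker for trailing stack entries):

  // Each log row is CSV: the first field picks a handler, the remaining
  // fields run through 'parsers' (null = keep as string) and become the
  // arguments of 'processor'.
  function dispatchLogRow(dispatch, row) {
    var entry = dispatch[row[0]];
    if (!entry) return;
    var args = [];
    for (var i = 0; i < entry.parsers.length; i++) {
      var parse = entry.parsers[i];
      args.push(parse === null ? row[i + 1] : parse(row[i + 1]));
    }
    entry.processor.apply(null, args);
  }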