From 550f73ae3e3b29aa36e793e8ffc5cd23478df099 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Wed, 16 Feb 2011 08:38:33 -0800 Subject: Upgrade V8 to 3.1.5 --- deps/v8/.gitignore | 2 + deps/v8/AUTHORS | 1 + deps/v8/ChangeLog | 65 +- deps/v8/SConstruct | 21 +- deps/v8/include/v8.h | 1 - deps/v8/samples/shell.cc | 3 + deps/v8/src/SConscript | 5 - deps/v8/src/accessors.cc | 1 + deps/v8/src/api.cc | 68 +- deps/v8/src/arguments.h | 2 +- deps/v8/src/arm/assembler-arm-inl.h | 4 + deps/v8/src/arm/assembler-arm.cc | 58 +- deps/v8/src/arm/assembler-arm.h | 9 + deps/v8/src/arm/builtins-arm.cc | 58 +- deps/v8/src/arm/code-stubs-arm.cc | 381 +++++----- deps/v8/src/arm/code-stubs-arm.h | 46 +- deps/v8/src/arm/codegen-arm.cc | 24 +- deps/v8/src/arm/constants-arm.h | 2 + deps/v8/src/arm/deoptimizer-arm.cc | 204 +++++- deps/v8/src/arm/full-codegen-arm.cc | 557 ++++++++++++--- deps/v8/src/arm/ic-arm.cc | 117 +++- deps/v8/src/arm/lithium-arm.cc | 88 ++- deps/v8/src/arm/lithium-arm.h | 106 ++- deps/v8/src/arm/lithium-codegen-arm.cc | 208 +++++- deps/v8/src/arm/lithium-codegen-arm.h | 8 + deps/v8/src/arm/macro-assembler-arm.cc | 172 ++++- deps/v8/src/arm/macro-assembler-arm.h | 27 +- deps/v8/src/arm/regexp-macro-assembler-arm.cc | 71 +- deps/v8/src/arm/regexp-macro-assembler-arm.h | 3 +- deps/v8/src/arm/simulator-arm.h | 15 +- deps/v8/src/arm/stub-cache-arm.cc | 41 ++ deps/v8/src/arm/virtual-frame-arm.cc | 13 +- deps/v8/src/arm/virtual-frame-arm.h | 3 +- deps/v8/src/array.js | 10 +- deps/v8/src/assembler.cc | 2 +- deps/v8/src/assembler.h | 8 +- deps/v8/src/ast.cc | 4 +- deps/v8/src/bignum.cc | 11 +- deps/v8/src/bootstrapper.cc | 51 +- deps/v8/src/builtins.cc | 45 +- deps/v8/src/builtins.h | 170 +++-- deps/v8/src/code-stubs.cc | 4 - deps/v8/src/code-stubs.h | 3 - deps/v8/src/codegen.cc | 1 - deps/v8/src/compiler.cc | 25 +- deps/v8/src/compiler.h | 4 +- deps/v8/src/d8.cc | 2 + deps/v8/src/date.js | 7 +- deps/v8/src/debug.cc | 4 +- deps/v8/src/deoptimizer.cc | 14 +- deps/v8/src/execution.cc | 1 + deps/v8/src/execution.h | 2 - deps/v8/src/factory.cc | 9 +- deps/v8/src/factory.h | 2 + deps/v8/src/flag-definitions.h | 10 +- deps/v8/src/full-codegen.cc | 2 +- deps/v8/src/full-codegen.h | 6 +- deps/v8/src/gdb-jit.cc | 121 +++- deps/v8/src/handles.cc | 12 + deps/v8/src/handles.h | 7 + deps/v8/src/heap.cc | 2 +- deps/v8/src/heap.h | 1 + deps/v8/src/hydrogen-instructions.cc | 186 ++--- deps/v8/src/hydrogen-instructions.h | 441 +++++++++--- deps/v8/src/hydrogen.cc | 462 ++++++------ deps/v8/src/hydrogen.h | 38 +- deps/v8/src/ia32/assembler-ia32.cc | 17 +- deps/v8/src/ia32/assembler-ia32.h | 53 +- deps/v8/src/ia32/builtins-ia32.cc | 15 + deps/v8/src/ia32/code-stubs-ia32.cc | 223 +++--- deps/v8/src/ia32/code-stubs-ia32.h | 38 +- deps/v8/src/ia32/codegen-ia32.cc | 27 +- deps/v8/src/ia32/deoptimizer-ia32.cc | 58 +- deps/v8/src/ia32/disasm-ia32.cc | 12 +- deps/v8/src/ia32/full-codegen-ia32.cc | 133 ++-- deps/v8/src/ia32/ic-ia32.cc | 49 +- deps/v8/src/ia32/lithium-codegen-ia32.cc | 332 ++++++--- deps/v8/src/ia32/lithium-codegen-ia32.h | 26 +- deps/v8/src/ia32/lithium-ia32.cc | 133 +++- deps/v8/src/ia32/lithium-ia32.h | 234 +++++-- deps/v8/src/ia32/macro-assembler-ia32.cc | 133 +++- deps/v8/src/ia32/macro-assembler-ia32.h | 9 + deps/v8/src/ia32/simulator-ia32.h | 9 +- deps/v8/src/ia32/stub-cache-ia32.cc | 36 + deps/v8/src/ia32/virtual-frame-ia32.cc | 14 +- deps/v8/src/ia32/virtual-frame-ia32.h | 3 +- deps/v8/src/ic.cc | 89 ++- deps/v8/src/ic.h | 23 +- deps/v8/src/liveedit.cc | 1 - deps/v8/src/macro-assembler.h | 7 + 
deps/v8/src/math.js | 2 +- deps/v8/src/messages.cc | 9 +- deps/v8/src/messages.js | 4 + deps/v8/src/objects-debug.cc | 2 +- deps/v8/src/objects-inl.h | 22 +- deps/v8/src/objects.cc | 131 +++- deps/v8/src/objects.h | 66 +- deps/v8/src/oprofile-agent.cc | 108 --- deps/v8/src/oprofile-agent.h | 77 -- deps/v8/src/parser.cc | 48 +- deps/v8/src/parser.h | 5 + deps/v8/src/platform-cygwin.cc | 776 --------------------- deps/v8/src/platform.h | 4 - deps/v8/src/profile-generator.cc | 2 +- deps/v8/src/regexp-macro-assembler.cc | 6 +- deps/v8/src/runtime.cc | 310 ++++++-- deps/v8/src/runtime.h | 3 +- deps/v8/src/runtime.js | 4 +- deps/v8/src/scanner.cc | 13 +- deps/v8/src/serialize.cc | 2 +- deps/v8/src/string.js | 19 +- deps/v8/src/stub-cache.cc | 77 +- deps/v8/src/stub-cache.h | 32 +- deps/v8/src/top.cc | 118 +++- deps/v8/src/top.h | 23 +- deps/v8/src/uri.js | 28 +- deps/v8/src/v8-counters.h | 1 + deps/v8/src/v8.cc | 3 - deps/v8/src/v8globals.h | 18 +- deps/v8/src/v8natives.js | 74 +- deps/v8/src/version.cc | 2 +- deps/v8/src/x64/assembler-x64-inl.h | 4 + deps/v8/src/x64/assembler-x64.cc | 23 +- deps/v8/src/x64/assembler-x64.h | 12 + deps/v8/src/x64/builtins-x64.cc | 81 ++- deps/v8/src/x64/code-stubs-x64.cc | 323 +++++---- deps/v8/src/x64/code-stubs-x64.h | 40 +- deps/v8/src/x64/codegen-x64.cc | 29 +- deps/v8/src/x64/cpu-x64.cc | 3 + deps/v8/src/x64/deoptimizer-x64.cc | 41 +- deps/v8/src/x64/full-codegen-x64.cc | 259 +++++-- deps/v8/src/x64/ic-x64.cc | 98 ++- deps/v8/src/x64/lithium-codegen-x64.cc | 517 ++++++++++++-- deps/v8/src/x64/lithium-codegen-x64.h | 4 + deps/v8/src/x64/lithium-x64.cc | 157 ++++- deps/v8/src/x64/lithium-x64.h | 237 +++---- deps/v8/src/x64/macro-assembler-x64.cc | 139 +++- deps/v8/src/x64/macro-assembler-x64.h | 19 +- deps/v8/src/x64/simulator-x64.h | 7 +- deps/v8/src/x64/stub-cache-x64.cc | 37 + deps/v8/src/x64/virtual-frame-x64.cc | 13 +- deps/v8/src/x64/virtual-frame-x64.h | 3 +- deps/v8/test/cctest/cctest.status | 12 - deps/v8/test/cctest/test-api.cc | 206 ++++++ deps/v8/test/cctest/test-debug.cc | 2 +- deps/v8/test/cctest/test-disasm-ia32.cc | 8 + deps/v8/test/cctest/test-strtod.cc | 19 + deps/v8/test/es5conform/es5conform.status | 239 ++++--- deps/v8/test/mjsunit/compiler/regress-arguments.js | 5 +- deps/v8/test/mjsunit/fuzz-natives.js | 3 +- deps/v8/test/mjsunit/getter-in-prototype.js | 8 + deps/v8/test/mjsunit/indexed-value-properties.js | 56 ++ deps/v8/test/mjsunit/json.js | 14 + deps/v8/test/mjsunit/mjsunit.status | 10 +- deps/v8/test/mjsunit/regexp.js | 14 + deps/v8/test/mjsunit/regress/regress-1103.js | 32 + deps/v8/test/mjsunit/regress/regress-1104.js | 37 + deps/v8/test/mjsunit/regress/regress-1105.js | 38 + deps/v8/test/mjsunit/regress/regress-1106.js | 50 ++ deps/v8/test/mjsunit/regress/regress-1107.js | 32 + deps/v8/test/mjsunit/regress/regress-1110.js | 38 + deps/v8/test/mjsunit/regress/regress-1112.js | 36 + deps/v8/test/mjsunit/regress/regress-1117.js | 35 + deps/v8/test/mjsunit/regress/regress-1118.js | 50 ++ deps/v8/test/mjsunit/regress/regress-1119.js | 45 ++ deps/v8/test/mjsunit/regress/regress-1120.js | 33 + deps/v8/test/mjsunit/regress/regress-1121.js | 34 + deps/v8/test/mjsunit/regress/regress-1122.js | 55 ++ deps/v8/test/mjsunit/regress/regress-1125.js | 41 ++ deps/v8/test/mjsunit/regress/regress-1126.js | 35 + deps/v8/test/mjsunit/regress/regress-1129.js | 44 ++ deps/v8/test/mjsunit/regress/regress-1130.js | 38 + deps/v8/test/mjsunit/regress/regress-1131.js | 29 + deps/v8/test/mjsunit/regress/regress-1132.js | 48 ++ 
deps/v8/test/mjsunit/regress/regress-1146.js | 48 ++ deps/v8/test/mjsunit/regress/regress-1149.js | 39 ++ deps/v8/test/mjsunit/regress/regress-1150.js | 33 + deps/v8/test/mjsunit/regress/regress-1151.js | 32 + deps/v8/test/mjsunit/regress/regress-1156.js | 49 ++ deps/v8/test/mjsunit/regress/regress-1160.js | 46 ++ .../v8/test/mjsunit/regress/regress-crbug-72736.js | 37 + deps/v8/test/mjsunit/strict-mode.js | 174 ++++- deps/v8/test/mjsunit/tools/codemap.js | 14 +- deps/v8/test/mjsunit/tools/csvparser.js | 2 +- deps/v8/test/mjsunit/tools/profile.js | 2 +- deps/v8/test/mjsunit/tools/profile_view.js | 4 +- deps/v8/test/mjsunit/tools/splaytree.js | 18 +- deps/v8/tools/codemap.js | 69 +- deps/v8/tools/csvparser.js | 17 +- deps/v8/tools/gyp/v8.gyp | 2 - deps/v8/tools/logreader.js | 24 +- deps/v8/tools/oprofile/annotate | 7 - deps/v8/tools/oprofile/common | 19 - deps/v8/tools/oprofile/dump | 7 - deps/v8/tools/oprofile/report | 7 - deps/v8/tools/oprofile/reset | 7 - deps/v8/tools/oprofile/run | 14 - deps/v8/tools/oprofile/shutdown | 7 - deps/v8/tools/oprofile/start | 7 - deps/v8/tools/profile.js | 153 ++-- deps/v8/tools/profile_view.js | 61 +- deps/v8/tools/splaytree.js | 60 +- deps/v8/tools/tickprocessor.js | 44 +- deps/v8/tools/utils.py | 2 - deps/v8/tools/v8.xcodeproj/project.pbxproj | 14 +- deps/v8/tools/visual_studio/common.vsprops | 2 +- deps/v8/tools/visual_studio/v8_base.vcproj | 8 - deps/v8/tools/visual_studio/v8_base_arm.vcproj | 8 - deps/v8/tools/visual_studio/v8_base_x64.vcproj | 8 - 209 files changed, 8326 insertions(+), 3825 deletions(-) delete mode 100644 deps/v8/src/oprofile-agent.cc delete mode 100644 deps/v8/src/oprofile-agent.h delete mode 100644 deps/v8/src/platform-cygwin.cc create mode 100644 deps/v8/test/mjsunit/indexed-value-properties.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1103.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1104.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1105.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1106.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1107.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1110.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1112.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1117.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1118.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1119.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1120.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1121.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1122.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1125.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1126.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1129.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1130.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1131.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1132.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1146.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1149.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1150.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1151.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1156.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1160.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-72736.js delete mode 100755 deps/v8/tools/oprofile/annotate delete mode 100755 deps/v8/tools/oprofile/common 
 delete mode 100755 deps/v8/tools/oprofile/dump
 delete mode 100755 deps/v8/tools/oprofile/report
 delete mode 100755 deps/v8/tools/oprofile/reset
 delete mode 100755 deps/v8/tools/oprofile/run
 delete mode 100755 deps/v8/tools/oprofile/shutdown
 delete mode 100755 deps/v8/tools/oprofile/start

diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index c68dadbe98..db57d1bb32 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -20,6 +20,8 @@ d8_g
 shell
 shell_g
 /obj/
+/test/es5conform/data/
+/test/mozilla/data/
 /test/sputnik/sputniktests/
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 1b756caf27..92b69cb686 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -9,6 +9,7 @@ ARM Ltd.
 Hewlett-Packard Development Company, LP
 Alexander Botero-Lowry
+Alexander Karpinsky
 Alexandre Vassalotti
 Andreas Anyuru
 Bert Belder
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index d48ded840c..f69be973f0 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,64 @@
+2011-02-16: Version 3.1.5
+
+        Changed RegExp parsing to disallow /(*)/.
+
+        Added GDB JIT support for ARM.
+
+        Fixed several crash bugs.
+
+        Performance improvements on the IA32 platform.
+
+
+2011-02-14: Version 3.1.4
+
+        Fixed incorrect compare of prototypes of the global object (issue
+        1082).
+
+        Fixed a bug in optimizing calls to global functions (issue 1106).
+
+        Made optimized Function.prototype.apply safe for non-JSObject first
+        arguments (issue 1128).
+
+        Fixed an error related to element accessors on Object.prototype and
+        parser errors (issue 1130).
+
+        Fixed a bug in sorting an array with large array indices (issue 1131).
+
+        Properly treat exceptions thrown while compiling (issue 1132).
+
+        Fixed a bug in register requirements for function.apply (issue 1133).
+
+        Fixed a representation change bug in the Hydrogen graph construction
+        (issue 1134).
+
+        Fixed the semantics of delete on parameters (issue 1136).
+
+        Fixed an optimizer bug related to moving instructions with side
+        effects (issue 1138).
+
+        Added support for the global object in Object.keys (issue 1150).
+
+        Fixed incorrect value for Math.LOG10E
+        (issue http://code.google.com/p/chromium/issues/detail?id=72555).
+
+        Performance improvements on the IA32 platform.
+
+        Implemented assignment to undefined reference in ES5 Strict Mode.
+
+
+2011-02-09: Version 3.1.3
+
+        Fixed a bug triggered by functions with huge numbers of declared
+        arguments.
+
+        Fixed zap value aliasing a real object - debug mode only (issue 866).
+
+        Fixed issue where Array.prototype.__proto__ had been set to null
+        (issue 1121).
+
+        Fixed stability bugs in Crankshaft for x86.
+
+
 2011-02-07: Version 3.1.2
 
         Added better security checks when accessing properties via
@@ -56,8 +117,8 @@
 
         Introduced partial strict mode support.
 
-        Changed formatting of recursive error messages to match Firefox and Safari
-        (issue http://crbug.com/70334).
+        Changed formatting of recursive error messages to match Firefox and
+        Safari (issue http://crbug.com/70334).
 
         Fixed incorrect rounding for float-to-integer conversions for
         external array types, which implement the Typed Array spec
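(Illustration only, not part of the patch: several of the ChangeLog entries
above are directly observable from script. A minimal sketch in mjsunit style,
assuming the standard mjsunit assert helpers; property names are hypothetical.)

  // 3.1.5: /(*)/ is now rejected at parse time.
  assertThrows(function() { new RegExp("(*)"); }, SyntaxError);

  // 3.1.4: assignment to an undeclared identifier throws in strict mode.
  assertThrows(function() { "use strict"; undeclared = 1; }, ReferenceError);

  // 3.1.4: Object.keys now accepts the global object (issue 1150).
  this.some_global = 42;
  assertTrue(Object.keys(this).indexOf("some_global") >= 0);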
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index f877392941..bffbba648d 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -136,7 +136,7 @@ LIBRARY_FLAGS = {
   'gcc': {
     'all': {
       'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-      'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
+      'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
     },
     'visibility:hidden': {
       # Use visibility=default to disable this.
@@ -234,9 +234,6 @@ LIBRARY_FLAGS = {
       'CCFLAGS': ['-m64'],
       'LINKFLAGS': ['-m64'],
     },
-    'prof:oprofile': {
-      'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
-    },
     'gdbjit:on': {
       'CPPDEFINES': ['ENABLE_GDB_JIT_INTERFACE']
     }
@@ -538,10 +535,6 @@ SAMPLE_FLAGS = {
       'CCFLAGS': ['-g', '-O0'],
       'CPPDEFINES': ['DEBUG']
     },
-    'prof:oprofile': {
-      'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
-      'LIBS': ['opagent']
-    }
   },
   'msvc': {
     'all': {
@@ -669,8 +662,8 @@ def GuessToolchain(os):
 
 
 def GuessVisibility(os, toolchain):
-  if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc':
-    # MinGW / Cygwin can't do it.
+  if os == 'win32' and toolchain == 'gcc':
+    # MinGW can't do it.
     return 'default'
   elif os == 'solaris':
     return 'default'
@@ -691,7 +684,7 @@ SIMPLE_OPTIONS = {
     'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
  },
   'os': {
-    'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
+    'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
     'default': OS_GUESS,
     'help': 'the os to build for (%s)' % OS_GUESS
   },
@@ -711,7 +704,7 @@ SIMPLE_OPTIONS = {
     'help': 'build using snapshots for faster start-up'
   },
   'prof': {
-    'values': ['on', 'off', 'oprofile'],
+    'values': ['on', 'off'],
     'default': 'off',
     'help': 'enable profiling of build target'
   },
@@ -896,10 +889,8 @@ def VerifyOptions(env):
     return False
   if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
     Abort("Profiling on windows only supported for static library.")
-  if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64')):
+  if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
     Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
-  if env['prof'] == 'oprofile' and env['os'] != 'linux':
-    Abort("OProfile is only supported on Linux.")
   if env['os'] == 'win32' and env['soname'] == 'on':
     Abort("Shared Object soname not applicable for Windows.")
   if env['soname'] == 'on' and env['library'] == 'static':
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 44ff2c00e0..83a5744278 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -462,7 +462,6 @@ class V8EXPORT HandleScope {
 
   void Leave();
 
-
   internal::Object** prev_next_;
   internal::Object** prev_limit_;
 
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 6b67df6c6c..64f78f02c6 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -27,6 +27,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
 
@@ -290,11 +291,13 @@ bool ExecuteString(v8::Handle source,
   } else {
     v8::Handle result = script->Run();
     if (result.IsEmpty()) {
+      assert(try_catch.HasCaught());
       // Print errors that happened during execution.
if (report_exceptions) ReportException(&try_catch); return false; } else { + assert(!try_catch.HasCaught()); if (print_result && !result->IsUndefined()) { // If all went well and the result wasn't undefined then print // the returned value. diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 44129f67ae..c3561be340 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -97,7 +97,6 @@ SOURCES = { objects.cc objects-printer.cc objects-visiting.cc - oprofile-agent.cc parser.cc preparser.cc preparse-data.cc @@ -234,7 +233,6 @@ SOURCES = { 'os:android': ['platform-linux.cc', 'platform-posix.cc'], 'os:macos': ['platform-macos.cc', 'platform-posix.cc'], 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'], - 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'], 'os:nullos': ['platform-nullos.cc'], 'os:win32': ['platform-win32.cc'], 'mode:release': [], @@ -266,9 +264,6 @@ D8_FILES = { 'os:solaris': [ 'd8-posix.cc' ], - 'os:cygwin': [ - 'd8-posix.cc' - ], 'os:win32': [ 'd8-windows.cc' ], diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 2b205d5d74..f6d1daf67a 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -447,6 +447,7 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { JSFunction* function = FindInPrototypeChain(object, &found_it); if (!found_it) return Heap::undefined_value(); if (!function->has_prototype()) { + if (!function->should_have_prototype()) return Heap::undefined_value(); Object* prototype; { MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function); if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index c16244038b..d718c8875b 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -115,7 +115,9 @@ static FatalErrorCallback exception_behavior = NULL; static void DefaultFatalErrorHandler(const char* location, const char* message) { - ENTER_V8; +#ifdef ENABLE_VMSTATE_TRACKING + i::VMState __state__(i::OTHER); +#endif API_Fatal(location, message); } @@ -668,7 +670,7 @@ static void InitializeTemplate(i::Handle that, int type) { void Template::Set(v8::Handle name, v8::Handle value, v8::PropertyAttribute attribute) { - if (IsDeadCheck("v8::Template::SetProperty()")) return; + if (IsDeadCheck("v8::Template::Set()")) return; ENTER_V8; HandleScope scope; i::Handle list(Utils::OpenHandle(this)->property_list()); @@ -2204,6 +2206,12 @@ bool Value::Equals(Handle that) const { ENTER_V8; i::Handle obj = Utils::OpenHandle(this); i::Handle other = Utils::OpenHandle(*that); + // If both obj and other are JSObjects, we'd better compare by identity + // immediately when going into JS builtin. The reason is Invoke + // would overwrite global object receiver with global proxy. + if (obj->IsJSObject() && other->IsJSObject()) { + return *obj == *other; + } i::Object** args[1] = { other.location() }; EXCEPTION_PREAMBLE(); i::Handle result = @@ -2653,26 +2661,38 @@ int v8::Object::GetIdentityHash() { ENTER_V8; HandleScope scope; i::Handle self = Utils::OpenHandle(this); - i::Handle hidden_props(i::GetHiddenProperties(self, true)); - i::Handle hash_symbol = i::Factory::identity_hash_symbol(); - i::Handle hash = i::GetProperty(hidden_props, hash_symbol); - int hash_value; - if (hash->IsSmi()) { - hash_value = i::Smi::cast(*hash)->value(); - } else { - int attempts = 0; - do { - // Generate a random 32-bit hash value but limit range to fit - // within a smi. 
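(Illustration only, not part of the patch: the accessors.cc change above makes
reading `prototype` from a function that should not have one yield undefined
instead of allocating a fresh prototype object. In mjsunit style:)

  // Builtins such as Math.max are not constructors and should have no
  // .prototype; reading it now returns undefined.
  assertSame(undefined, Math.max.prototype);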
- hash_value = i::V8::Random() & i::Smi::kMaxValue; - attempts++; - } while (hash_value == 0 && attempts < 30); - hash_value = hash_value != 0 ? hash_value : 1; // never return 0 - i::SetProperty(hidden_props, - hash_symbol, - i::Handle(i::Smi::FromInt(hash_value)), - static_cast(None)); + i::Handle hidden_props_obj(i::GetHiddenProperties(self, true)); + if (!hidden_props_obj->IsJSObject()) { + // We failed to create hidden properties. That's a detached + // global proxy. + ASSERT(hidden_props_obj->IsUndefined()); + return 0; } + i::Handle hidden_props = + i::Handle::cast(hidden_props_obj); + i::Handle hash_symbol = i::Factory::identity_hash_symbol(); + if (hidden_props->HasLocalProperty(*hash_symbol)) { + i::Handle hash = i::GetProperty(hidden_props, hash_symbol); + CHECK(!hash.is_null()); + CHECK(hash->IsSmi()); + return i::Smi::cast(*hash)->value(); + } + + int hash_value; + int attempts = 0; + do { + // Generate a random 32-bit hash value but limit range to fit + // within a smi. + hash_value = i::V8::Random() & i::Smi::kMaxValue; + attempts++; + } while (hash_value == 0 && attempts < 30); + hash_value = hash_value != 0 ? hash_value : 1; // never return 0 + CHECK(!i::SetLocalPropertyIgnoreAttributes( + hidden_props, + hash_symbol, + i::Handle(i::Smi::FromInt(hash_value)), + static_cast(None)).is_null()); + return hash_value; } @@ -2749,9 +2769,9 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) { return; } i::Handle pixels = i::Factory::NewPixelArray(length, data); - i::Handle slow_map = - i::Factory::GetSlowElementsMap(i::Handle(self->map())); - self->set_map(*slow_map); + i::Handle pixel_array_map = + i::Factory::GetPixelArrayElementsMap(i::Handle(self->map())); + self->set_map(*pixel_array_map); self->set_elements(*pixels); } diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index d51c9e4cb1..5cf8deaa59 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -78,7 +78,7 @@ class Arguments BASE_EMBEDDED { class CustomArguments : public Relocatable { public: inline CustomArguments(Object* data, - JSObject* self, + Object* self, JSObject* holder) { values_[2] = self; values_[1] = holder; diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 68d32f1ebf..3b811021b3 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -198,6 +198,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { visitor->VisitPointer(target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); + } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { + visitor->VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(target_reference_address()); #ifdef ENABLE_DEBUGGER_SUPPORT @@ -221,6 +223,8 @@ void RelocInfo::Visit() { StaticVisitor::VisitPointer(target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(this); + } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { + StaticVisitor::VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(target_reference_address()); #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 243ba4978a..fb9bb488c9 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -272,7 +272,6 @@ static byte* spare_buffer_ = NULL; Assembler::Assembler(void* buffer, int 
buffer_size) : positions_recorder_(this), allow_peephole_optimization_(false) { - // BUG(3245989): disable peephole optimization if crankshaft is enabled. allow_peephole_optimization_ = FLAG_peephole_optimization; if (buffer == NULL) { // Do our own buffer management. @@ -352,6 +351,11 @@ void Assembler::CodeTargetAlign() { } +Condition Assembler::GetCondition(Instr instr) { + return Instruction::ConditionField(instr); +} + + bool Assembler::IsBranch(Instr instr) { return (instr & (B27 | B25)) == (B27 | B25); } @@ -428,6 +432,20 @@ Register Assembler::GetRd(Instr instr) { } +Register Assembler::GetRn(Instr instr) { + Register reg; + reg.code_ = Instruction::RnValue(instr); + return reg; +} + + +Register Assembler::GetRm(Instr instr) { + Register reg; + reg.code_ = Instruction::RmValue(instr); + return reg; +} + + bool Assembler::IsPush(Instr instr) { return ((instr & ~kRdMask) == kPushRegPattern); } @@ -465,6 +483,35 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) { } +bool Assembler::IsTstImmediate(Instr instr) { + return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == + (I | TST | S); +} + + +bool Assembler::IsCmpRegister(Instr instr) { + return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) == + (CMP | S); +} + + +bool Assembler::IsCmpImmediate(Instr instr) { + return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == + (I | CMP | S); +} + + +Register Assembler::GetCmpImmediateRegister(Instr instr) { + ASSERT(IsCmpImmediate(instr)); + return GetRn(instr); +} + + +int Assembler::GetCmpImmediateRawImmediate(Instr instr) { + ASSERT(IsCmpImmediate(instr)); + return instr & kOff12Mask; +} + // Labels refer to positions in the (to be) generated code. // There are bound, linked, and unused labels. // @@ -1052,6 +1099,13 @@ void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { } +void Assembler::cmp_raw_immediate( + Register src, int raw_immediate, Condition cond) { + ASSERT(is_uint12(raw_immediate)); + emit(cond | I | CMP | S | src.code() << 16 | raw_immediate); +} + + void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { addrmod1(cond | CMN | S, src1, r0, src2); } @@ -2363,7 +2417,7 @@ void Assembler::nop(int type) { bool Assembler::IsNop(Instr instr, int type) { - // Check for mov rx, rx. + // Check for mov rx, rx where x = type. ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. 
return instr == (al | 13*B21 | type*B12 | type); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index fc826c727e..3941c84b34 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -729,6 +729,7 @@ class Assembler : public Malloced { void cmp(Register src1, Register src2, Condition cond = al) { cmp(src1, Operand(src2), cond); } + void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al); void cmn(Register src1, const Operand& src2, Condition cond = al); @@ -1099,6 +1100,7 @@ class Assembler : public Malloced { static void instr_at_put(byte* pc, Instr instr) { *reinterpret_cast(pc) = instr; } + static Condition GetCondition(Instr instr); static bool IsBranch(Instr instr); static int GetBranchOffset(Instr instr); static bool IsLdrRegisterImmediate(Instr instr); @@ -1109,6 +1111,8 @@ class Assembler : public Malloced { static bool IsAddRegisterImmediate(Instr instr); static Instr SetAddRegisterImmediateOffset(Instr instr, int offset); static Register GetRd(Instr instr); + static Register GetRn(Instr instr); + static Register GetRm(Instr instr); static bool IsPush(Instr instr); static bool IsPop(Instr instr); static bool IsStrRegFpOffset(Instr instr); @@ -1116,6 +1120,11 @@ class Assembler : public Malloced { static bool IsStrRegFpNegOffset(Instr instr); static bool IsLdrRegFpNegOffset(Instr instr); static bool IsLdrPcImmediateOffset(Instr instr); + static bool IsTstImmediate(Instr instr); + static bool IsCmpRegister(Instr instr); + static bool IsCmpImmediate(Instr instr); + static Register GetCmpImmediateRegister(Instr instr); + static int GetCmpImmediateRawImmediate(Instr instr); static bool IsNop(Instr instr, int type = NON_MARKING_NOP); // Check if is time to emit a constant pool for pending reloc info entries diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index dbb8242c55..6e8fe28a2b 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -1156,12 +1156,48 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { - __ stop("builtins-arm.cc: NotifyOSR"); + // For now, we are relying on the fact that Runtime::NotifyOSR + // doesn't do any garbage collection which allows us to save/restore + // the registers without worrying about which of them contain + // pointers. This seems a bit fragile. + __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); + __ EnterInternalFrame(); + __ CallRuntime(Runtime::kNotifyOSR, 0); + __ LeaveInternalFrame(); + __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); + __ Ret(); } void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - __ stop("builtins-arm.cc: OnStackReplacement"); + // Probe the CPU to set the supported features, because this builtin + // may be called before the initialization performs CPU setup. + CpuFeatures::Probe(false); + + // Lookup the function in the JavaScript frame and push it as an + // argument to the on-stack replacement function. + __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + __ LeaveInternalFrame(); + + // If the result was -1 it means that we couldn't optimize the + // function. Just return and continue in the unoptimized version. 
+ Label skip; + __ cmp(r0, Operand(Smi::FromInt(-1))); + __ b(ne, &skip); + __ Ret(); + + __ bind(&skip); + // Untag the AST id and push it on the stack. + __ SmiUntag(r0); + __ push(r0); + + // Generate the code for doing the frame-to-frame translation using + // the deoptimizer infrastructure. + Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR); + generator.Generate(); } @@ -1195,6 +1231,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Change context eagerly in case we need the global receiver. __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Do not transform the receiver for strict mode functions. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &shift_arguments); + + // Compute the receiver in non-strict mode. __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2)); __ ldr(r2, MemOperand(r2, -kPointerSize)); // r0: actual number of arguments @@ -1358,10 +1402,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Change context eagerly to get the right global object if necessary. __ ldr(r0, MemOperand(fp, kFunctionOffset)); __ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in r0. + __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); // Compute the receiver. Label call_to_object, use_global_receiver, push_receiver; __ ldr(r0, MemOperand(fp, kRecvOffset)); + + // Do not transform the receiver for strict mode functions. + __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &push_receiver); + + // Compute the receiver in non-strict mode. __ tst(r0, Operand(kSmiTagMask)); __ b(eq, &call_to_object); __ LoadRoot(r1, Heap::kNullValueRootIndex); diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 437dfd2733..cc49f7e4e5 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -1298,7 +1298,7 @@ void CompareStub::Generate(MacroAssembler* masm) { void ToBooleanStub::Generate(MacroAssembler* masm) { Label false_result; Label not_heap_number; - Register scratch = r7; + Register scratch = r9.is(tos_) ? r7 : r9; __ LoadRoot(ip, Heap::kNullValueRootIndex); __ cmp(tos_, ip); @@ -2588,6 +2588,39 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation( __ eor(right, left, Operand(right)); __ Ret(); break; + case Token::SAR: + // Remove tags from right operand. + __ GetLeastBitsFromSmi(scratch1, right, 5); + __ mov(right, Operand(left, ASR, scratch1)); + // Smi tag result. + __ bic(right, right, Operand(kSmiTagMask)); + __ Ret(); + break; + case Token::SHR: + // Remove tags from operands. We can't do this on a 31 bit number + // because then the 0s get shifted into bit 30 instead of bit 31. + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ mov(scratch1, Operand(scratch1, LSR, scratch2)); + // Unsigned shift is not allowed to produce a negative number, so + // check the sign bit and the sign bit after Smi tagging. + __ tst(scratch1, Operand(0xc0000000)); + __ b(ne, ¬_smi_result); + // Smi tag result. + __ SmiTag(right, scratch1); + __ Ret(); + break; + case Token::SHL: + // Remove tags from operands. 
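(Illustration only, not part of the patch: the Generate_FunctionCall and
Generate_FunctionApply hunks above skip the receiver transformation when the
callee is a strict mode function, as ES5 specifies. In mjsunit style:)

  var global = this;
  function sloppy() { return this; }
  function strict() { "use strict"; return this; }
  assertSame(global, sloppy.call(null));     // null receiver coerced to global
  assertSame(null, strict.call(null));       // strict: receiver passed through
  assertSame(undefined, strict.apply(undefined));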
+ __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ mov(scratch1, Operand(scratch1, LSL, scratch2)); + // Check that the signed result fits in a Smi. + __ add(scratch2, scratch1, Operand(0x40000000), SetCC); + __ b(mi, ¬_smi_result); + __ SmiTag(right, scratch1); + __ Ret(); + break; default: UNREACHABLE(); } @@ -2703,7 +2736,10 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, } case Token::BIT_OR: case Token::BIT_XOR: - case Token::BIT_AND: { + case Token::BIT_AND: + case Token::SAR: + case Token::SHR: + case Token::SHL: { if (smi_operands) { __ SmiUntag(r3, left); __ SmiUntag(r2, right); @@ -2726,6 +2762,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, d0, not_numbers); } + + Label result_not_a_smi; switch (op_) { case Token::BIT_OR: __ orr(r2, r3, Operand(r2)); @@ -2736,11 +2774,35 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, case Token::BIT_AND: __ and_(r2, r3, Operand(r2)); break; + case Token::SAR: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ GetLeastBitsFromInt32(r2, r2, 5); + __ mov(r2, Operand(r3, ASR, r2)); + break; + case Token::SHR: + // Use only the 5 least significant bits of the shift count. + __ GetLeastBitsFromInt32(r2, r2, 5); + __ mov(r2, Operand(r3, LSR, r2), SetCC); + // SHR is special because it is required to produce a positive answer. + // The code below for writing into heap numbers isn't capable of + // writing the register as an unsigned int so we go to slow case if we + // hit this case. + if (CpuFeatures::IsSupported(VFP3)) { + __ b(mi, &result_not_a_smi); + } else { + __ b(mi, not_numbers); + } + break; + case Token::SHL: + // Use only the 5 least significant bits of the shift count. + __ GetLeastBitsFromInt32(r2, r2, 5); + __ mov(r2, Operand(r3, LSL, r2)); + break; default: UNREACHABLE(); } - Label result_not_a_smi; // Check that the *signed* result fits in a smi. __ add(r3, r2, Operand(0x40000000), SetCC); __ b(mi, &result_not_a_smi); @@ -2760,10 +2822,15 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ mov(r0, Operand(r5)); if (CpuFeatures::IsSupported(VFP3)) { - // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. + // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As + // mentioned above SHR needs to always produce a positive result. 
CpuFeatures::Scope scope(VFP3); __ vmov(s0, r2); - __ vcvt_f64_s32(d0, s0); + if (op_ == Token::SHR) { + __ vcvt_f64_u32(d0, s0); + } else { + __ vcvt_f64_s32(d0, s0); + } __ sub(r3, r0, Operand(kHeapObjectTag)); __ vstr(d0, r3, HeapNumber::kValueOffset); __ Ret(); @@ -2790,15 +2857,6 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { Label not_smis; - ASSERT(op_ == Token::ADD || - op_ == Token::SUB || - op_ == Token::MUL || - op_ == Token::DIV || - op_ == Token::MOD || - op_ == Token::BIT_OR || - op_ == Token::BIT_AND || - op_ == Token::BIT_XOR); - Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2825,15 +2883,6 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { Label not_smis, call_runtime; - ASSERT(op_ == Token::ADD || - op_ == Token::SUB || - op_ == Token::MUL || - op_ == Token::DIV || - op_ == Token::MOD || - op_ == Token::BIT_OR || - op_ == Token::BIT_AND || - op_ == Token::BIT_XOR); - if (result_type_ == TRBinaryOpIC::UNINITIALIZED || result_type_ == TRBinaryOpIC::SMI) { // Only allow smi results. @@ -2864,15 +2913,6 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(op_ == Token::ADD || - op_ == Token::SUB || - op_ == Token::MUL || - op_ == Token::DIV || - op_ == Token::MOD || - op_ == Token::BIT_OR || - op_ == Token::BIT_AND || - op_ == Token::BIT_XOR); - ASSERT(operands_type_ == TRBinaryOpIC::INT32); GenerateTypeTransition(masm); @@ -2880,15 +2920,6 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { - ASSERT(op_ == Token::ADD || - op_ == Token::SUB || - op_ == Token::MUL || - op_ == Token::DIV || - op_ == Token::MOD || - op_ == Token::BIT_OR || - op_ == Token::BIT_AND || - op_ == Token::BIT_XOR); - Label not_numbers, call_runtime; ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); @@ -2903,15 +2934,6 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - ASSERT(op_ == Token::ADD || - op_ == Token::SUB || - op_ == Token::MUL || - op_ == Token::DIV || - op_ == Token::MOD || - op_ == Token::BIT_OR || - op_ == Token::BIT_AND || - op_ == Token::BIT_XOR); - Label call_runtime; GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); @@ -2984,6 +3006,15 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { case Token::BIT_XOR: __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_JS); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_JS); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_JS); + break; default: UNREACHABLE(); } @@ -3268,105 +3299,13 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // r0 holds the exception. - - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop the sp to the top of the handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Restore the next handler and frame pointer, discard handler state. 
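(Illustration only, not part of the patch: the new SAR/SHR/SHL cases above
must preserve JavaScript's shift semantics. >>> is the interesting one: its
result is unsigned, so any result with the top bit set cannot be represented
as a 31-bit smi, which is why the code checks the sign bit and either boxes a
heap number via vcvt_f64_u32 or, without VFP3, bails to the slow case. In
mjsunit style:)

  assertEquals(4294967295, -1 >>> 0);           // unsigned result, not a smi
  assertEquals(2147483648, -2147483648 >>> 0);  // ditto
  assertEquals(-1, -1 >> 0);                    // >> stays signed and smi-safe
  assertEquals(1, 1 << 32);                     // only low 5 bits of the count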
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. - - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0, RelocInfo::NONE)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); + __ Throw(r0); } void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, UncatchableExceptionType type) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop sp to the top stack handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Unwind the handlers until the ENTRY handler is found. - Label loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ ldr(r2, MemOperand(sp, kStateOffset)); - __ cmp(r2, Operand(StackHandler::ENTRY)); - __ b(eq, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ ldr(sp, MemOperand(sp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Top::k_external_caught_exception_address); - __ mov(r0, Operand(false, RelocInfo::NONE)); - __ mov(r2, Operand(external_caught)); - __ str(r0, MemOperand(r2)); - - // Set pending exception and r0 to out of memory exception. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ mov(r0, Operand(reinterpret_cast(out_of_memory))); - __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); - __ str(r0, MemOperand(r2)); - } - - // Stack layout at this point. See also StackHandlerConstants. - // sp -> state (ENTRY) - // fp - // lr - - // Discard handler state (r2 is not used) and restore frame pointer. - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0, RelocInfo::NONE)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); + __ ThrowUncatchable(type, r0); } @@ -3453,7 +3392,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // r0:r1: result // sp: stack pointer // fp: frame pointer - __ LeaveExitFrame(save_doubles_); + // Callee-saved register r4 still holds argc. 
+ __ LeaveExitFrame(save_doubles_, r4); + __ mov(pc, lr); // check if we should retry or throw exception Label retry; @@ -4232,24 +4173,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); static const int kRegExpExecuteArguments = 7; - __ push(lr); - __ PrepareCallCFunction(kRegExpExecuteArguments, r0); + static const int kParameterRegisters = 4; + __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); - // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. + // Stack pointer now points to cell where return address is to be written. + // Arguments are before that on the stack or in registers. + + // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. __ mov(r0, Operand(1)); - __ str(r0, MemOperand(sp, 2 * kPointerSize)); + __ str(r0, MemOperand(sp, 3 * kPointerSize)); - // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. + // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. __ mov(r0, Operand(address_of_regexp_stack_memory_address)); __ ldr(r0, MemOperand(r0, 0)); __ mov(r2, Operand(address_of_regexp_stack_memory_size)); __ ldr(r2, MemOperand(r2, 0)); __ add(r0, r0, Operand(r2)); - __ str(r0, MemOperand(sp, 1 * kPointerSize)); + __ str(r0, MemOperand(sp, 2 * kPointerSize)); - // Argument 5 (sp[0]): static offsets vector buffer. + // Argument 5 (sp[4]): static offsets vector buffer. __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); - __ str(r0, MemOperand(sp, 0 * kPointerSize)); + __ str(r0, MemOperand(sp, 1 * kPointerSize)); // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). @@ -4271,8 +4215,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Locate the code entry and call it. __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(r7, kRegExpExecuteArguments); - __ pop(lr); + DirectCEntryStub stub; + stub.GenerateCall(masm, r7); + + __ LeaveExitFrame(false, no_reg); // r0: result // subject: subject string (callee saved) @@ -4281,6 +4227,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; + __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); __ b(eq, &success); Label failure; @@ -4293,12 +4240,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r0, Operand(ExternalReference::the_hole_value_location())); - __ ldr(r0, MemOperand(r0, 0)); - __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); + __ mov(r1, Operand(ExternalReference::the_hole_value_location())); __ ldr(r1, MemOperand(r1, 0)); + __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); + __ ldr(r0, MemOperand(r2, 0)); __ cmp(r0, r1); __ b(eq, &runtime); + + __ str(r1, MemOperand(r2, 0)); // Clear pending exception. + + // Check if the exception is a termination. If so, throw as uncatchable. + __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex); + __ cmp(r0, ip); + Label termination_exception; + __ b(eq, &termination_exception); + + __ Throw(r0); // Expects thrown value in r0. 
+ + __ bind(&termination_exception); + __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0. + __ bind(&failure); // For failure and exception return null. __ mov(r0, Operand(Factory::null_value())); @@ -5809,10 +5770,9 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { // For equality we do not care about the sign of the result. __ sub(r0, r0, r1, SetCC); } else { - __ sub(r1, r1, r0, SetCC); - // Correct sign of result in case of overflow. - __ rsb(r1, r1, Operand(0), SetCC, vs); - __ mov(r0, r1); + // Untag before subtracting to avoid handling overflow. + __ SmiUntag(r1); + __ sub(r0, r1, SmiUntagOperand(r0)); } __ Ret(); @@ -5923,14 +5883,24 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, ApiFunction *function) { __ mov(lr, Operand(reinterpret_cast(GetCode().location()), RelocInfo::CODE_TARGET)); - // Push return address (accessible to GC through exit frame pc). __ mov(r2, Operand(ExternalReference(function, ExternalReference::DIRECT_CALL))); + // Push return address (accessible to GC through exit frame pc). __ str(pc, MemOperand(sp, 0)); __ Jump(r2); // Call the api function. } +void DirectCEntryStub::GenerateCall(MacroAssembler* masm, + Register target) { + __ mov(lr, Operand(reinterpret_cast(GetCode().location()), + RelocInfo::CODE_TARGET)); + // Push return address (accessible to GC through exit frame pc). + __ str(pc, MemOperand(sp, 0)); + __ Jump(target); // Call the C++ function. +} + + void GenerateFastPixelArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -5998,6 +5968,91 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm, } +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register elements_map, + Register scratch1, + Register scratch2, + bool load_elements_from_receiver, + bool load_elements_map_from_elements, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range) { + // Register use: + // receiver - holds the receiver and is unchanged unless the + // store succeeds. + // key - holds the key (must be a smi) and is unchanged. + // value - holds the value (must be a smi) and is unchanged. + // elements - holds the element object of the receiver on entry if + // load_elements_from_receiver is false, otherwise used + // internally to store the pixel arrays elements and + // external array pointer. + // elements_map - holds the map of the element object if + // load_elements_map_from_elements is false, otherwise + // loaded with the element map. + // + Register external_pointer = elements; + Register untagged_key = scratch1; + Register untagged_value = scratch2; + + if (load_elements_from_receiver) { + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + } + + // By passing NULL as not_pixel_array, callers signal that they have already + // verified that the receiver has pixel array elements. + if (not_pixel_array != NULL) { + if (load_elements_map_from_elements) { + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + } + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(elements_map, ip); + __ b(ne, not_pixel_array); + } else { + if (FLAG_debug_code) { + // Map check should have already made sure that elements is a pixel array. 
+      __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+      __ cmp(elements_map, ip);
+      __ Assert(eq, "Elements isn't a pixel array");
+    }
+  }
+
+  // Some callers have already verified that the key is a smi. key_not_smi is
+  // set to NULL as a sentinel for that case. Otherwise, an explicit check
+  // that the key is a smi is emitted.
+  if (key_not_smi != NULL) {
+    __ JumpIfNotSmi(key, key_not_smi);
+  } else {
+    if (FLAG_debug_code) {
+      __ AbortIfNotSmi(key);
+    }
+  }
+
+  __ SmiUntag(untagged_key, key);
+
+  // Perform bounds check.
+  __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
+  __ cmp(untagged_key, scratch2);
+  __ b(hs, out_of_range);  // unsigned check handles negative keys.
+
+  __ JumpIfNotSmi(value, value_not_smi);
+  __ SmiUntag(untagged_value, value);
+
+  // Clamp the value to [0..255].
+  __ Usat(untagged_value, 8, Operand(untagged_value));
+  // Get the pointer to the external array. This clobbers elements.
+  __ ldr(external_pointer,
+         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+  __ strb(untagged_value, MemOperand(external_pointer, untagged_key));
+  __ Ret();
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index bf7d635487..baaa2f2bda 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -581,6 +581,7 @@ class DirectCEntryStub: public CodeStub {
   DirectCEntryStub() {}
   void Generate(MacroAssembler* masm);
   void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+  void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
   Major MajorKey() { return DirectCEntry; }
 };
 
 
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
+// Generate code to load an element from a pixel array. The receiver is assumed
+// to not be a smi and to have elements, the caller must guarantee this
+// precondition. If key is not a smi, then the generated code branches to
+// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
+// check has already been performed on key so that the smi check is not
+// generated. If key is not a valid index within the bounds of the pixel array,
+// the generated code jumps to out_of_range. receiver, key and elements are
+// unchanged throughout the generated code sequence.
 void GenerateFastPixelArrayLoad(MacroAssembler* masm,
                                 Register receiver,
                                 Register key,
@@ -609,6 +610,35 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
                                 Label* key_not_smi,
                                 Label* out_of_range);
 
+// Generate code to store an element into a pixel array, clamping stored values
+// to the range [0..255]. The receiver is assumed to not be a smi and to have
+// elements, the caller must guarantee this precondition. If key is not a smi,
+// then the generated code branches to key_not_smi.
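(Illustration only, not part of the patch: the script-level behavior that
GenerateFastPixelArrayStore above implements. `pixels` is hypothetical here
and stands for any pixel-array-backed object, e.g. the ImageData.data of a
canvas in an embedder that exposes one:)

  pixels[0] = 300;               // stores are clamped to a byte
  assertEquals(255, pixels[0]);
  pixels[1] = -5;
  assertEquals(0, pixels[1]);
  pixels[2] = 1.5;               // non-smi value: the fast path branches to
                                 // value_not_smi and defers to the slow path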
Callers can specify NULL for +// key_not_smi to signal that a smi check has already been performed on key so +// that the smi check is not generated. If value is not a smi, the generated +// code will branch to value_not_smi. If the receiver doesn't have pixel array +// elements, the generated code will branch to not_pixel_array, unless +// not_pixel_array is NULL, in which case the caller must ensure that the +// receiver has pixel array elements. If key is not a valid index within the +// bounds of the pixel array, the generated code jumps to out_of_range. If +// load_elements_from_receiver is true, then the elements of receiver is loaded +// into elements, otherwise elements is assumed to already be the receiver's +// elements. If load_elements_map_from_elements is true, elements_map is loaded +// from elements, otherwise it is assumed to already contain the element map. +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register elements_map, + Register scratch1, + Register scratch2, + bool load_elements_from_receiver, + bool load_elements_map_from_elements, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range); } } // namespace v8::internal diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 12842230bf..a3921d8efc 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -2192,15 +2192,10 @@ void CodeGenerator::GenerateReturnSequence() { DeleteFrame(); #ifdef DEBUG - // Check that the size of the code used for returning matches what is - // expected by the debugger. If the sp_delts above cannot be encoded in - // the add instruction the add will generate two instructions. - int return_sequence_length = - masm_->InstructionsGeneratedSince(&check_exit_codesize); - CHECK(return_sequence_length == - Assembler::kJSReturnSequenceInstructions || - return_sequence_length == - Assembler::kJSReturnSequenceInstructions + 1); + // Check that the size of the code used for returning is large enough + // for the debugger's requirements. + ASSERT(Assembler::kJSReturnSequenceInstructions <= + masm_->InstructionsGeneratedSince(&check_exit_codesize)); #endif } } @@ -5849,15 +5844,20 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { if (property != NULL) { Load(property->obj()); Load(property->key()); - frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3); frame_->EmitPush(r0); } else if (variable != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // so this code can only be reached in non-strict mode. + ASSERT(strict_mode_flag() == kNonStrictMode); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); frame_->EmitPush(Operand(variable->name())); - frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2); + frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode))); + frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3); frame_->EmitPush(r0); } else if (slot != NULL && slot->type() == Slot::LOOKUP) { @@ -6931,7 +6931,7 @@ void CodeGenerator::EmitNamedStore(Handle name, bool is_contextual) { Result result; if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { - frame()->CallStoreIC(name, is_contextual); + frame()->CallStoreIC(name, is_contextual, strict_mode_flag()); } else { // Inline the in-object property case. 
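(Illustration only, not part of the patch: the VisitUnaryOperation changes
above thread the strict mode flag through to the DELETE builtin. The
script-visible difference, in mjsunit style:)

  // Sloppy mode: deleting a non-configurable property returns false.
  assertFalse(delete Object.prototype);
  // Strict mode: the same delete throws a TypeError, and `delete x` on a
  // plain variable is rejected at parse time.
  assertThrows(function() { "use strict"; delete Object.prototype; },
               TypeError);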
JumpTarget slow, done; diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 5671feecba..7ac38ed3ea 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -582,6 +582,7 @@ class Instruction { inline int TypeValue() const { return Bits(27, 25); } inline int RnValue() const { return Bits(19, 16); } + DECLARE_STATIC_ACCESSOR(RnValue); inline int RdValue() const { return Bits(15, 12); } DECLARE_STATIC_ACCESSOR(RdValue); @@ -625,6 +626,7 @@ class Instruction { inline int SValue() const { return Bit(20); } // with register inline int RmValue() const { return Bits(3, 0); } + DECLARE_STATIC_ACCESSOR(RmValue); inline int ShiftValue() const { return static_cast(Bits(6, 5)); } inline ShiftOp ShiftField() const { return static_cast(BitField(6, 5)); diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index e05001f3c3..9af7a8d190 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -124,19 +124,204 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { - UNIMPLEMENTED(); + const int kInstrSize = Assembler::kInstrSize; + // The call of the stack guard check has the following form: + // e1 5d 00 0c cmp sp, + // 2a 00 00 01 bcs ok + // e5 9f c? ?? ldr ip, [pc, ] + // e1 2f ff 3c blx ip + ASSERT(Memory::int32_at(pc_after - kInstrSize) == + (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code())); + ASSERT(Assembler::IsLdrPcImmediateOffset( + Assembler::instr_at(pc_after - 2 * kInstrSize))); + + // We patch the code to the following form: + // e1 5d 00 0c cmp sp, + // e1 a0 00 00 mov r0, r0 (NOP) + // e5 9f c? ?? ldr ip, [pc, ] + // e1 2f ff 3c blx ip + // and overwrite the constant containing the + // address of the stack check stub. + + // Replace conditional jump with NOP. + CodePatcher patcher(pc_after - 3 * kInstrSize, 1); + patcher.masm()->nop(); + + // Replace the stack check address in the constant pool + // with the entry address of the replacement code. + uint32_t stack_check_address_offset = Memory::uint16_at(pc_after - + 2 * kInstrSize) & 0xfff; + Address stack_check_address_pointer = pc_after + stack_check_address_offset; + ASSERT(Memory::uint32_at(stack_check_address_pointer) == + reinterpret_cast(check_code->entry())); + Memory::uint32_at(stack_check_address_pointer) = + reinterpret_cast(replacement_code->entry()); } void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { - UNIMPLEMENTED(); + const int kInstrSize = Assembler::kInstrSize; + ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c); + ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5); + ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f); + + // Replace NOP with conditional jump. + CodePatcher patcher(pc_after - 3 * kInstrSize, 1); + patcher.masm()->b(+4, cs); + + // Replace the stack check address in the constant pool + // with the entry address of the replacement code. 
+  uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
+      2 * kInstrSize) & 0xfff;
+  Address stack_check_address_pointer = pc_after + stack_check_address_offset;
+  ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
+         reinterpret_cast<uint32_t>(replacement_code->entry()));
+  Memory::uint32_at(stack_check_address_pointer) =
+      reinterpret_cast<uint32_t>(check_code->entry());
+}
+
+
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+  ByteArray* translations = data->TranslationByteArray();
+  int length = data->DeoptCount();
+  for (int i = 0; i < length; i++) {
+    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+      TranslationIterator it(translations, data->TranslationIndex(i)->value());
+      int value = it.Next();
+      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+      // Read the number of frames.
+      value = it.Next();
+      if (value == 1) return i;
+    }
+  }
+  UNREACHABLE();
+  return -1;
+}
 
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
-  UNIMPLEMENTED();
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned ast_id = data->OsrAstId()->value();
+
+  int bailout_id = LookupBailoutId(data, ast_id);
+  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+  ByteArray* translations = data->TranslationByteArray();
+
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  int count = iterator.Next();
+  ASSERT(count == 1);
+  USE(count);
+
+  opcode = static_cast<Translation::Opcode>(iterator.Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  unsigned node_id = iterator.Next();
+  USE(node_id);
+  ASSERT(node_id == ast_id);
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+  USE(function);
+  ASSERT(function == function_);
+  unsigned height = iterator.Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  USE(height_in_bytes);
+
+  unsigned fixed_size = ComputeFixedSize(function_);
+  unsigned input_frame_size = input_->GetFrameSize();
+  ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+  unsigned outgoing_size = outgoing_height * kPointerSize;
+  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" => node=%u, frame=%d->%d]\n",
+           ast_id,
+           input_frame_size,
+           output_frame_size);
+  }
+
+  // There's only one output frame in the OSR case.
+  output_count_ = 1;
+  output_ = new FrameDescription*[1];
+  output_[0] = new(output_frame_size) FrameDescription(
+      output_frame_size, function_);
+
+  // Clear the incoming parameters in the optimized frame to avoid
+  // confusing the garbage collector.
+  unsigned output_offset = output_frame_size - kPointerSize;
+  int parameter_count = function_->shared()->formal_parameter_count() + 1;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_[0]->SetFrameSlot(output_offset, 0);
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the incoming parameters. This may overwrite some of the
+  // incoming argument slots we've just cleared.
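LookupBailoutId above reduces to a linear scan for the single-frame deopt entry matching the OSR point's AST id; schematically (stand-in types, not V8's table layout):

// Parallel-array view of the deoptimization table.
struct DeoptEntry { unsigned ast_id; int frame_count; };

static int FindOsrBailout(const DeoptEntry* entries, int length,
                          unsigned osr_ast_id) {
  for (int i = 0; i < length; ++i) {
    if (entries[i].ast_id == osr_ast_id && entries[i].frame_count == 1) {
      return i;  // a translation describing exactly one frame
    }
  }
  return -1;  // LookupBailoutId treats this case as unreachable
}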
+  int input_offset = input_frame_size - kPointerSize;
+  bool ok = true;
+  int limit = input_offset - (parameter_count * kPointerSize);
+  while (ok && input_offset > limit) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function. Set them up explicitly.
+  for (int i = 0; ok && i < 4; i++) {
+    uint32_t input_value = input_->GetFrameSlot(input_offset);
+    if (FLAG_trace_osr) {
+      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n",
+             output_offset,
+             input_value,
+             input_offset);
+    }
+    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+    input_offset -= kPointerSize;
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the rest of the frame.
+  while (ok && input_offset >= 0) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // If translation of any command failed, continue using the input frame.
+  if (!ok) {
+    delete output_[0];
+    output_[0] = input_;
+    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+  } else {
+    // Set up the frame pointer and the context pointer.
+    output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
+    output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
+
+    unsigned pc_offset = data->OsrPcOffset()->value();
+    uint32_t pc = reinterpret_cast<uint32_t>(
+        optimized_code_->entry() + pc_offset);
+    output_[0]->SetPc(pc);
+  }
+  Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+  output_[0]->SetContinuation(
+      reinterpret_cast<uint32_t>(continuation->entry()));
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+           ok ? "finished" : "aborted",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+  }
+}
@@ -318,7 +503,6 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
 // easily ported.
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();
-  // TOS: bailout-id; TOS+1: return address if not EAGER.
   CpuFeatures::Scope scope(VFP3);
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
@@ -353,6 +537,10 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ mov(r3, Operand(0));
     // Correct one word for bailout id.
    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else if (type() == OSR) {
+    __ mov(r3, lr);
+    // Correct one word for bailout id.
+    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
   } else {
     __ mov(r3, lr);
     // Correct two words for bailout id and return address.
@@ -375,7 +563,6 @@ void Deoptimizer::EntryGenerator::Generate() {
   // frame descriptor pointer to r1 (deoptimizer->input_);
   __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
 
-
   // Copy core registers into FrameDescription::registers_[kNumRegisters].
   ASSERT(Register::kNumRegisters == kNumberOfRegisters);
   for (int i = 0; i < kNumberOfRegisters; i++) {
@@ -396,7 +583,7 @@ void Deoptimizer::EntryGenerator::Generate() {
 
   // Remove the bailout id, eventually return address, and the saved registers
   // from the stack.
-  if (type() == EAGER) {
+  if (type() == EAGER || type() == OSR) {
     __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
   } else {
     __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
@@ -450,11 +637,6 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ cmp(r0, r1);
   __ b(lt, &outer_push_loop);
 
-  // In case of OSR, we have to restore the XMM registers.
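The translation loop above boils down to: translate every slot, and on any failure fall back to the unoptimized input frame. A schematic version (translate_slot models DoOsrTranslateCommand; all types here are stand-ins):

#include <stdint.h>
#include <vector>

// Either fills the output frame slot by slot, or, when any translation
// command fails, keeps the input (unoptimized) frame and reports failure.
static bool BuildOsrFrame(const std::vector<uint32_t>& input,
                          std::vector<uint32_t>* output,
                          bool (*translate_slot)(uint32_t, uint32_t*)) {
  output->resize(input.size());
  for (size_t i = 0; i < input.size(); ++i) {
    if (!translate_slot(input[i], &(*output)[i])) {
      *output = input;  // abort: keep executing the unoptimized frame
      return false;
    }
  }
  return true;  // enter optimized code at OsrPcOffset
}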
- if (type() == OSR) { - UNIMPLEMENTED(); - } - // Push state, pc, and continuation from the last output frame. if (type() != OSR) { __ ldr(r6, MemOperand(r2, FrameDescription::state_offset())); diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index ff446c5e4b..f04a00e052 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -45,6 +45,67 @@ namespace internal { #define __ ACCESS_MASM(masm_) + +// A patch site is a location in the code which it is possible to patch. This +// class has a number of methods to emit the code which is patchable and the +// method EmitPatchInfo to record a marker back to the patchable code. This +// marker is a cmp rx, #yyy instruction, and x * 0x00000fff + yyy (raw 12 bit +// immediate value is used) is the delta from the pc to the first instruction of +// the patchable code. +class JumpPatchSite BASE_EMBEDDED { + public: + explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) { +#ifdef DEBUG + info_emitted_ = false; +#endif + } + + ~JumpPatchSite() { + ASSERT(patch_site_.is_bound() == info_emitted_); + } + + // When initially emitting this ensure that a jump is always generated to skip + // the inlined smi code. + void EmitJumpIfNotSmi(Register reg, Label* target) { + ASSERT(!patch_site_.is_bound() && !info_emitted_); + __ bind(&patch_site_); + __ cmp(reg, Operand(reg)); + // Don't use b(al, ...) as that might emit the constant pool right after the + // branch. After patching when the branch is no longer unconditional + // execution can continue into the constant pool. + __ b(eq, target); // Always taken before patched. + } + + // When initially emitting this ensure that a jump is never generated to skip + // the inlined smi code. + void EmitJumpIfSmi(Register reg, Label* target) { + ASSERT(!patch_site_.is_bound() && !info_emitted_); + __ bind(&patch_site_); + __ cmp(reg, Operand(reg)); + __ b(ne, target); // Never taken before patched. + } + + void EmitPatchInfo() { + int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); + Register reg; + reg.set_code(delta_to_patch_site / kOff12Mask); + __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask); +#ifdef DEBUG + info_emitted_ = true; +#endif + } + + bool is_bound() const { return patch_site_.is_bound(); } + + private: + MacroAssembler* masm_; + Label patch_site_; +#ifdef DEBUG + bool info_emitted_; +#endif +}; + + // Generate code for a JS function. On entry to the function the receiver // and arguments have been pushed on the stack left to right. The actual // argument count matches the formal parameter count expected by the @@ -268,15 +329,10 @@ void FullCodeGenerator::EmitReturnSequence() { } #ifdef DEBUG - // Check that the size of the code used for returning matches what is - // expected by the debugger. If the sp_delts above cannot be encoded in the - // add instruction the add will generate two instructions. - int return_sequence_length = - masm_->InstructionsGeneratedSince(&check_exit_codesize); - CHECK(return_sequence_length == - Assembler::kJSReturnSequenceInstructions || - return_sequence_length == - Assembler::kJSReturnSequenceInstructions + 1); + // Check that the size of the code used for returning is large enough + // for the debugger's requirements. 
+ ASSERT(Assembler::kJSReturnSequenceInstructions <= + masm_->InstructionsGeneratedSince(&check_exit_codesize)); #endif } } @@ -285,7 +341,17 @@ void FullCodeGenerator::EmitReturnSequence() { FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand( Token::Value op, Expression* left, Expression* right) { ASSERT(ShouldInlineSmiCase(op)); - return kNoConstants; + if (op == Token::DIV || op == Token::MOD || op == Token::MUL) { + // We never generate inlined constant smi operations for these. + return kNoConstants; + } else if (right->IsSmiLiteral()) { + return kRightConstant; + } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) { + // Don't inline shifts with constant left hand side. + return kLeftConstant; + } else { + return kNoConstants; + } } @@ -681,18 +747,24 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, } else if (prop != NULL) { if (function != NULL || mode == Variable::CONST) { // We are declaring a function or constant that rewrites to a - // property. Use (keyed) IC to set the initial value. - VisitForStackValue(prop->obj()); + // property. Use (keyed) IC to set the initial value. We + // cannot visit the rewrite because it's shared and we risk + // recording duplicate AST IDs for bailouts from optimized code. + ASSERT(prop->obj()->AsVariableProxy() != NULL); + { AccumulatorValueContext for_object(this); + EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + } if (function != NULL) { - VisitForStackValue(prop->key()); + __ push(r0); VisitForAccumulatorValue(function); - __ pop(r1); // Key. + __ pop(r2); } else { - VisitForAccumulatorValue(prop->key()); - __ mov(r1, result_register()); // Key. - __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex); + __ mov(r2, r0); + __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); } - __ pop(r2); // Receiver. + ASSERT(prop->key()->AsLiteral() != NULL && + prop->key()->AsLiteral()->handle()->IsSmi()); + __ mov(r1, Operand(prop->key()->AsLiteral()->handle())); Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); @@ -752,24 +824,24 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Perform the comparison as if via '==='. __ ldr(r1, MemOperand(sp, 0)); // Switch value. bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT); + JumpPatchSite patch_site(masm_); if (inline_smi_code) { Label slow_case; __ orr(r2, r1, r0); - __ tst(r2, Operand(kSmiTagMask)); - __ b(ne, &slow_case); + patch_site.EmitJumpIfNotSmi(r2, &slow_case); + __ cmp(r1, r0); __ b(ne, &next_test); __ Drop(1); // Switch value is no longer needed. __ b(clause->body_target()->entry_label()); - __ bind(&slow_case); + __ bind(&slow_case); } - CompareFlags flags = inline_smi_code - ? NO_SMI_COMPARE_IN_STUB - : NO_COMPARE_FLAGS; - CompareStub stub(eq, true, flags, r1, r0); - __ CallStub(&stub); - __ cmp(r0, Operand(0, RelocInfo::NONE)); + // Record position before stub call for type feedback. + SetSourcePosition(clause->position()); + Handle ic = CompareIC::GetUninitialized(Token::EQ_STRICT); + EmitCallIC(ic, &patch_site); + __ cmp(r0, Operand(0)); __ b(ne, &next_test); __ Drop(1); // Switch value is no longer needed. __ b(clause->body_target()->entry_label()); @@ -1536,34 +1608,316 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { } +void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value) { + Label call_stub, done; + // Optimistically add smi value with unknown object. 
If result overflows or is + // not a smi then we had either a smi overflow or added a smi with a tagged + // pointer. + __ mov(r1, Operand(value)); + __ add(r2, r0, r1, SetCC); + __ b(vs, &call_stub); + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfNotSmi(r2, &call_stub); + __ mov(r0, r2); + __ b(&done); + + // Call the shared stub. + __ bind(&call_stub); + if (!left_is_constant_smi) { + __ Swap(r0, r1, r2); + } + TypeRecordingBinaryOpStub stub(Token::ADD, mode); + EmitCallIC(stub.GetCode(), &patch_site); + + __ bind(&done); + context()->Plug(r0); +} + + +void FullCodeGenerator::EmitConstantSmiSub(Expression* expr, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value) { + Label call_stub, done; + // Optimistically subtract smi value and unknown object. If result overflows + // or is not a smi then we had either a smi overflow or subtraction between a + // smi and a tagged pointer. + __ mov(r1, Operand(value)); + if (left_is_constant_smi) { + __ sub(r2, r1, r0, SetCC); + } else { + __ sub(r2, r0, r1, SetCC); + } + __ b(vs, &call_stub); + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfNotSmi(r2, &call_stub); + __ mov(r0, r2); + __ b(&done); + + // Call the shared stub. + __ bind(&call_stub); + if (!left_is_constant_smi) { + __ Swap(r0, r1, r2); + } + TypeRecordingBinaryOpStub stub(Token::SUB, mode); + EmitCallIC(stub.GetCode(), &patch_site); + + __ bind(&done); + context()->Plug(r0); +} + + +void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr, + Token::Value op, + OverwriteMode mode, + Smi* value) { + Label call_stub, smi_case, done; + int shift_value = value->value() & 0x1f; + + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfSmi(r0, &smi_case); + + // Call stub. + __ bind(&call_stub); + __ mov(r1, r0); + __ mov(r0, Operand(value)); + TypeRecordingBinaryOpStub stub(op, mode); + EmitCallIC(stub.GetCode(), &patch_site); + __ b(&done); + + // Smi case. + __ bind(&smi_case); + switch (op) { + case Token::SHL: + if (shift_value != 0) { + __ mov(r1, r0); + if (shift_value > 1) { + __ mov(r1, Operand(r1, LSL, shift_value - 1)); + } + // Convert int result to smi, checking that it is in int range. + __ SmiTag(r1, SetCC); + __ b(vs, &call_stub); + __ mov(r0, r1); // Put result back into r0. + } + break; + case Token::SAR: + if (shift_value != 0) { + __ mov(r0, Operand(r0, ASR, shift_value)); + __ bic(r0, r0, Operand(kSmiTagMask)); + } + break; + case Token::SHR: + // SHR must return a positive value. When shifting by 0 or 1 we need to + // check that smi tagging the result will not create a negative value. + if (shift_value < 2) { + __ mov(r2, Operand(shift_value)); + __ SmiUntag(r1, r0); + if (shift_value != 0) { + __ mov(r1, Operand(r1, LSR, shift_value)); + } + __ tst(r1, Operand(0xc0000000)); + __ b(ne, &call_stub); + __ SmiTag(r0, r1); // result in r0. + } else { + __ SmiUntag(r0); + __ mov(r0, Operand(r0, LSR, shift_value)); + __ SmiTag(r0); + } + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + context()->Plug(r0); +} + + +void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr, + Token::Value op, + OverwriteMode mode, + Smi* value) { + Label smi_case, done; + + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfSmi(r0, &smi_case); + + // The order of the arguments does not matter for bit-ops with a + // constant operand. + __ mov(r1, Operand(value)); + TypeRecordingBinaryOpStub stub(op, mode); + EmitCallIC(stub.GetCode(), &patch_site); + __ jmp(&done); + + // Smi case. 
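The SHR arm above only re-tags when the top two bits of the shifted value are clear; the same check in portable terms (a sketch, not V8 code):

#include <stdint.h>

// A smi payload has 31 bits and SHR must yield a non-negative result, so
// shifts by 0 or 1 can produce values that do not fit; shifts by 2 or more
// always fit and need no check.
static bool TrySmiShr(int32_t tagged, int shift, int32_t* result_tagged) {
  uint32_t payload = static_cast<uint32_t>(tagged >> 1);  // SmiUntag
  uint32_t shifted = payload >> shift;
  if (shift < 2 && (shifted & 0xc0000000u) != 0) return false;  // stub path
  *result_tagged = static_cast<int32_t>(shifted << 1);  // SmiTag
  return true;
}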
+ __ bind(&smi_case); + __ mov(r1, Operand(value)); + switch (op) { + case Token::BIT_OR: + __ orr(r0, r0, Operand(r1)); + break; + case Token::BIT_XOR: + __ eor(r0, r0, Operand(r1)); + break; + case Token::BIT_AND: + __ and_(r0, r0, Operand(r1)); + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + context()->Plug(r0); +} + + +void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr, + Token::Value op, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value) { + switch (op) { + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + EmitConstantSmiBitOp(expr, op, mode, value); + break; + case Token::SHL: + case Token::SAR: + case Token::SHR: + ASSERT(!left_is_constant_smi); + EmitConstantSmiShiftOp(expr, op, mode, value); + break; + case Token::ADD: + EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value); + break; + case Token::SUB: + EmitConstantSmiSub(expr, mode, left_is_constant_smi, value); + break; + default: + UNREACHABLE(); + } +} + + void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, Token::Value op, OverwriteMode mode, - Expression* left, - Expression* right, + Expression* left_expr, + Expression* right_expr, ConstantOperand constant) { - ASSERT(constant == kNoConstants); // Only handled case. - EmitBinaryOp(op, mode); + if (constant == kRightConstant) { + Smi* value = Smi::cast(*right_expr->AsLiteral()->handle()); + EmitConstantSmiBinaryOp(expr, op, mode, false, value); + return; + } else if (constant == kLeftConstant) { + Smi* value = Smi::cast(*left_expr->AsLiteral()->handle()); + EmitConstantSmiBinaryOp(expr, op, mode, true, value); + return; + } + + Label done, smi_case, stub_call; + + Register scratch1 = r2; + Register scratch2 = r3; + + // Get the arguments. + Register left = r1; + Register right = r0; + __ pop(left); + + // Perform combined smi check on both operands. + __ orr(scratch1, left, Operand(right)); + STATIC_ASSERT(kSmiTag == 0); + JumpPatchSite patch_site(masm_); + patch_site.EmitJumpIfSmi(scratch1, &smi_case); + + __ bind(&stub_call); + TypeRecordingBinaryOpStub stub(op, mode); + EmitCallIC(stub.GetCode(), &patch_site); + __ jmp(&done); + + __ bind(&smi_case); + // Smi case. This code works the same way as the smi-smi case in the type + // recording binary operation stub, see + // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments. 
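The bitwise arms in the switch below operate directly on tagged values. With a zero tag bit this is sound: AND, OR and XOR of two valid smis is again a valid smi, as this sketch illustrates:

#include <stdint.h>

static inline int32_t SmiTag(int32_t v) { return v << 1; }

// (a << 1) op (b << 1) == (a op b) << 1 for op in {&, |, ^}, so the fast
// path never needs to untag or retag.
static inline int32_t SmiBitAnd(int32_t a_tagged, int32_t b_tagged) {
  return a_tagged & b_tagged;
}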
+ switch (op) { + case Token::SAR: + __ b(&stub_call); + __ GetLeastBitsFromSmi(scratch1, right, 5); + __ mov(right, Operand(left, ASR, scratch1)); + __ bic(right, right, Operand(kSmiTagMask)); + break; + case Token::SHL: { + __ b(&stub_call); + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ mov(scratch1, Operand(scratch1, LSL, scratch2)); + __ add(scratch2, scratch1, Operand(0x40000000), SetCC); + __ b(mi, &stub_call); + __ SmiTag(right, scratch1); + break; + } + case Token::SHR: { + __ b(&stub_call); + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ mov(scratch1, Operand(scratch1, LSR, scratch2)); + __ tst(scratch1, Operand(0xc0000000)); + __ b(ne, &stub_call); + __ SmiTag(right, scratch1); + break; + } + case Token::ADD: + __ add(scratch1, left, Operand(right), SetCC); + __ b(vs, &stub_call); + __ mov(right, scratch1); + break; + case Token::SUB: + __ sub(scratch1, left, Operand(right), SetCC); + __ b(vs, &stub_call); + __ mov(right, scratch1); + break; + case Token::MUL: { + __ SmiUntag(ip, right); + __ smull(scratch1, scratch2, left, ip); + __ mov(ip, Operand(scratch1, ASR, 31)); + __ cmp(ip, Operand(scratch2)); + __ b(ne, &stub_call); + __ tst(scratch1, Operand(scratch1)); + __ mov(right, Operand(scratch1), LeaveCC, ne); + __ b(ne, &done); + __ add(scratch2, right, Operand(left), SetCC); + __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); + __ b(mi, &stub_call); + break; + } + case Token::BIT_OR: + __ orr(right, left, Operand(right)); + break; + case Token::BIT_AND: + __ and_(right, left, Operand(right)); + break; + case Token::BIT_XOR: + __ eor(right, left, Operand(right)); + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + context()->Plug(r0); } void FullCodeGenerator::EmitBinaryOp(Token::Value op, OverwriteMode mode) { __ pop(r1); - if (op == Token::ADD || - op == Token::SUB || - op == Token::MUL || - op == Token::DIV || - op == Token::MOD || - op == Token::BIT_OR || - op == Token::BIT_AND || - op == Token::BIT_XOR) { - TypeRecordingBinaryOpStub stub(op, mode); - __ CallStub(&stub); - } else { - GenericBinaryOpStub stub(op, mode, r1, r0); - __ CallStub(&stub); - } + TypeRecordingBinaryOpStub stub(op, mode); + EmitCallIC(stub.GetCode(), NULL); context()->Plug(r0); } @@ -1606,10 +1960,20 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { } case KEYED_PROPERTY: { __ push(r0); // Preserve value. - VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ mov(r1, r0); - __ pop(r2); + if (prop->is_synthetic()) { + ASSERT(prop->obj()->AsVariableProxy() != NULL); + ASSERT(prop->key()->AsLiteral() != NULL); + { AccumulatorValueContext for_object(this); + EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + } + __ mov(r2, r0); + __ mov(r1, Operand(prop->key()->AsLiteral()->handle())); + } else { + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); + __ mov(r1, r0); + __ pop(r2); + } __ pop(r0); // Restore value. Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET); @@ -1635,8 +1999,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // r2, and the global object in r1. __ mov(r2, Operand(var->name())); __ ldr(r1, GlobalObjectOperand()); - Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); - EmitCallIC(ic, RelocInfo::CODE_TARGET); + Handle ic(Builtins::builtin(is_strict() + ? 
Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) { // Perform the assignment for non-const variables and for initialization @@ -2991,39 +3357,50 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { Comment cmnt(masm_, "[ UnaryOperation (DELETE)"); Property* prop = expr->expression()->AsProperty(); Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); - if (prop == NULL && var == NULL) { - // Result of deleting non-property, non-variable reference is true. - // The subexpression may have side effects. - VisitForEffect(expr->expression()); - context()->Plug(true); - } else if (var != NULL && - !var->is_global() && - var->AsSlot() != NULL && - var->AsSlot()->type() != Slot::LOOKUP) { - // Result of deleting non-global, non-dynamic variables is false. - // The subexpression does not have side effects. - context()->Plug(false); - } else { - // Property or variable reference. Call the delete builtin with - // object and property name as arguments. - if (prop != NULL) { + + if (prop != NULL) { + if (prop->is_synthetic()) { + // Result of deleting parameters is false, even when they rewrite + // to accesses on the arguments object. + context()->Plug(false); + } else { VisitForStackValue(prop->obj()); VisitForStackValue(prop->key()); + __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); + __ push(r1); __ InvokeBuiltin(Builtins::DELETE, CALL_JS); - } else if (var->is_global()) { - __ ldr(r1, GlobalObjectOperand()); - __ mov(r0, Operand(var->name())); - __ Push(r1, r0); + context()->Plug(r0); + } + } else if (var != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // so this code can only be reached in non-strict mode. + ASSERT(strict_mode_flag() == kNonStrictMode); + if (var->is_global()) { + __ ldr(r2, GlobalObjectOperand()); + __ mov(r1, Operand(var->name())); + __ mov(r0, Operand(Smi::FromInt(kNonStrictMode))); + __ Push(r2, r1, r0); __ InvokeBuiltin(Builtins::DELETE, CALL_JS); + context()->Plug(r0); + } else if (var->AsSlot() != NULL && + var->AsSlot()->type() != Slot::LOOKUP) { + // Result of deleting non-global, non-dynamic variables is false. + // The subexpression does not have side effects. + context()->Plug(false); } else { - // Non-global variable. Call the runtime to delete from the + // Non-global variable. Call the runtime to try to delete from the // context where the variable was introduced. __ push(context_register()); __ mov(r2, Operand(var->name())); __ push(r2); __ CallRuntime(Runtime::kDeleteContextSlot, 2); + context()->Plug(r0); } - context()->Plug(r0); + } else { + // Result of deleting non-property, non-variable reference is true. + // The subexpression may have side effects. + VisitForEffect(expr->expression()); + context()->Plug(true); } break; } @@ -3214,13 +3591,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Inline smi case if we are in a loop. Label stub_call, done; + JumpPatchSite patch_site(masm_); + int count_value = expr->op() == Token::INC ? 1 : -1; if (ShouldInlineSmiCase(expr->op())) { __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC); __ b(vs, &stub_call); // We could eliminate this smi check if we split the code at // the first smi check before calling ToNumber. - __ JumpIfSmi(r0, &done); + patch_site.EmitJumpIfSmi(r0, &done); + __ bind(&stub_call); // Call stub. Undo operation first. 
   __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -3230,8 +3610,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
 
   // Record position before stub call.
   SetSourcePosition(expr->position());
-  GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
-  __ CallStub(&stub);
+  TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);
 
   // Store the value returned in r0.
@@ -3510,21 +3890,22 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
       }
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
         Label slow_case;
         __ orr(r2, r0, Operand(r1));
-        __ JumpIfNotSmi(r2, &slow_case);
+        patch_site.EmitJumpIfNotSmi(r2, &slow_case);
         __ cmp(r1, r0);
         Split(cond, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
-      CompareFlags flags = inline_smi_code
-          ? NO_SMI_COMPARE_IN_STUB
-          : NO_COMPARE_FLAGS;
-      CompareStub stub(cond, strict, flags, r1, r0);
-      __ CallStub(&stub);
+
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      EmitCallIC(ic, &patch_site);
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-      __ cmp(r0, Operand(0, RelocInfo::NONE));
+      __ cmp(r0, Operand(0));
       Split(cond, if_true, if_false, fall_through);
     }
   }
@@ -3591,6 +3972,16 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
 }
 
 
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  if (patch_site != NULL && patch_site->is_bound()) {
+    patch_site->EmitPatchInfo();
+  } else {
+    __ nop();  // Signals no inlined code.
+  }
+}
+
+
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ str(value, MemOperand(fp, frame_offset));
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 1aa031d39b..6c7aa0643a 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -115,6 +115,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm,
                                            Register name,
                                            Register scratch1,
                                            Register scratch2) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
   // Compute the capacity mask.
   const int kCapacityOffset = StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
@@ -843,7 +846,14 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   //  -- lr     : return address
   // -----------------------------------
 
+  // Check if the name is a string.
+  Label miss;
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+  __ IsObjectJSStringType(r2, r0, &miss);
+
   GenerateCallNormal(masm, argc);
+  __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
@@ -1465,24 +1475,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
 
   // Check whether the elements is a pixel array.
   // r4: elements map.
   __ bind(&check_pixel_array);
-  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
-  __ cmp(r4, ip);
-  __ b(ne, &slow);
-  // Check that the value is a smi. If a conversion is needed call into the
-  // runtime to convert and clamp.
-  __ JumpIfNotSmi(value, &slow);
-  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the key.
-  __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
-  __ cmp(r4, Operand(ip));
-  __ b(hs, &slow);
-  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
-  __ Usat(r5, 8, Operand(r5));  // Clamp the value to [0..255].
-
-  // Get the pointer to the external array. This clobbers elements.
-  __ ldr(elements,
-         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
-  __ strb(r5, MemOperand(elements, r4));  // Elements is now external array.
-  __ Ret();
+  GenerateFastPixelArrayStore(masm,
+                              r2,
+                              r1,
+                              r0,
+                              elements,
+                              r4,
+                              r5,
+                              r6,
+                              false,
+                              false,
+                              NULL,
+                              &slow,
+                              &slow,
+                              &slow);
 
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
@@ -1533,7 +1539,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
 }
 
 
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+                                  Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -1544,7 +1551,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   // Get the receiver from the stack and probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                          NOT_IN_LOOP,
-                                         MONOMORPHIC);
+                                         MONOMORPHIC,
+                                         extra_ic_state);
   StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
 
   // Cache miss: Jump to runtime.
@@ -1700,11 +1708,78 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
            Token::Name(op_));
   }
 #endif
+
+  // Activate inlined smi code.
+  if (previous_state == UNINITIALIZED) {
+    PatchInlinedSmiCode(address());
+  }
 }
 
 
 void PatchInlinedSmiCode(Address address) {
-  // Currently there is no smi inlining in the ARM full code generator.
+  Address cmp_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a cmp rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(cmp_instruction_address);
+  if (!Assembler::IsCmpImmediate(instr)) {
+    return;
+  }
+
+  // The delta to the start of the map check instruction and the
+  // condition code uses at the patched jump.
+  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
+  delta +=
+      Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
+  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
+  // nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
+           address, cmp_instruction_address, delta);
+  }
+#endif
+
+  Address patch_address =
+      cmp_instruction_address - delta * Instruction::kInstrSize;
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
+  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+            Assembler::GetRm(instr_at_patch).code());
+  ASSERT(Assembler::IsBranch(branch_instr));
+  if (Assembler::GetCondition(branch_instr) == eq) {
+    // This is patching a "jump if not smi" site to be active.
+    // Changing
+    //   cmp rx, rx
+    //   b eq, <target>
+    // to
+    //   tst rx, #kSmiTagMask
+    //   b ne, <target>
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Assembler::GetRn(instr_at_patch);
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
+    patcher.EmitCondition(ne);
+  } else {
+    ASSERT(Assembler::GetCondition(branch_instr) == ne);
+    // This is patching a "jump if smi" site to be active.
+    // Changing
+    //   cmp rx, rx
+    //   b ne, <target>
+    // to
+    //   tst rx, #kSmiTagMask
+    //   b eq, <target>
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Assembler::GetRn(instr_at_patch);
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
+    patcher.EmitCondition(eq);
+  }
 }
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index f672d4908e..903f77bbf0 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -62,15 +62,13 @@ void LInstruction::VerifyCall() {
   // Call instructions can use only fixed registers as
   // temporaries and outputs because all registers
   // are blocked by the calling convention.
-  // Inputs can use either fixed register or have a short lifetime (be
-  // used at start of the instruction).
+  // Inputs must use a fixed register.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); it.HasNext(); it.Advance()) {
     LOperand* operand = it.Next();
     ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           LUnallocated::cast(operand)->IsUsedAtStart() ||
            !LUnallocated::cast(operand)->HasRegisterPolicy());
   }
   for (TempIterator it(this); it.HasNext(); it.Advance()) {
@@ -186,6 +184,9 @@ const char* LArithmeticT::Mnemonic() const {
     case Token::BIT_AND: return "bit-and-t";
     case Token::BIT_OR: return "bit-or-t";
     case Token::BIT_XOR: return "bit-xor-t";
+    case Token::SHL: return "shl-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
     default:
       UNREACHABLE();
       return NULL;
@@ -802,6 +803,16 @@ LInstruction* LChunkBuilder::DoBit(Token::Value op,
 
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsTagged()) {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), r1);
+    LOperand* right = UseFixed(instr->right(), r0);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
+
   ASSERT(instr->representation().IsInteger32());
   ASSERT(instr->OperandAt(0)->representation().IsInteger32());
   ASSERT(instr->OperandAt(1)->representation().IsInteger32());
@@ -1021,7 +1032,7 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
       ASSERT(left->representation().IsInteger32());
       ASSERT(right->representation().IsInteger32());
       return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                 UseOrConstantAtStart(right));
+                                 UseRegisterAtStart(right));
     } else if (r.IsDouble()) {
       ASSERT(left->representation().IsDouble());
       ASSERT(right->representation().IsDouble());
@@ -1077,6 +1088,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
     } else if (v->IsTypeofIs()) {
       HTypeofIs* typeof_is = HTypeofIs::cast(v);
       return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+    } else if (v->IsIsConstructCall()) {
+      return new LIsConstructCallAndBranch(TempRegister());
     } else {
       if (v->IsConstant()) {
         if (HConstant::cast(v)->handle()->IsTrue()) {
@@ -1131,8 +1144,8 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
   LOperand* function = UseFixed(instr->function(), r1);
   LOperand* receiver = UseFixed(instr->receiver(), r0);
-  LOperand* length = UseRegisterAtStart(instr->length());
-  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* length = UseFixed(instr->length(), r2);
+  LOperand* elements = UseFixed(instr->elements(), r3);
   LApplyArguments* 
result = new LApplyArguments(function, receiver, length, @@ -1304,10 +1317,10 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { // the generated code, which requires registers r0 // and r1 to be used. We should remove that // when we provide a native implementation. - LOperand* value = UseFixed(instr->left(), r0); + LOperand* dividend = UseFixed(instr->left(), r0); LOperand* divisor = UseFixed(instr->right(), r1); return AssignEnvironment(AssignPointerMap( - DefineFixed(new LDivI(value, divisor), r0))); + DefineFixed(new LDivI(dividend, divisor), r0))); } else { return DoArithmeticT(Token::DIV, instr); } @@ -1417,7 +1430,7 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseOrConstantAtStart(instr->right()); + LOperand* right = UseRegisterAtStart(instr->right()); return DefineAsRegister(new LCmpID(left, right)); } else if (r.IsDouble()) { ASSERT(instr->left()->representation().IsDouble()); @@ -1478,6 +1491,15 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) { } +LInstruction* LChunkBuilder::DoGetCachedArrayIndex( + HGetCachedArrayIndex* instr) { + ASSERT(instr->value()->representation().IsTagged()); + LOperand* value = UseRegister(instr->value()); + + return DefineAsRegister(new LGetCachedArrayIndex(value)); +} + + LInstruction* LChunkBuilder::DoHasCachedArrayIndex( HHasCachedArrayIndex* instr) { ASSERT(instr->value()->representation().IsTagged()); @@ -1500,6 +1522,12 @@ LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) { } +LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) { + LOperand* array = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LPixelArrayLength(array)); +} + + LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) { LOperand* array = UseRegisterAtStart(instr->value()); return DefineAsRegister(new LFixedArrayLength(array)); @@ -1642,13 +1670,11 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { Representation r = instr->representation(); if (r.IsInteger32()) { - int32_t value = instr->Integer32Value(); - return DefineAsRegister(new LConstantI(value)); + return DefineAsRegister(new LConstantI); } else if (r.IsDouble()) { - double value = instr->DoubleValue(); - return DefineAsRegister(new LConstantD(value)); + return DefineAsRegister(new LConstantD); } else if (r.IsTagged()) { - return DefineAsRegister(new LConstantT(instr->handle())); + return DefineAsRegister(new LConstantT); } else { UNREACHABLE(); return NULL; @@ -1716,7 +1742,14 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype( LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) { LOperand* input = UseRegisterAtStart(instr->value()); - return DefineSameAsFirst(new LLoadElements(input)); + return DefineAsRegister(new LLoadElements(input)); +} + + +LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer( + HLoadPixelArrayExternalPointer* instr) { + LOperand* input = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LLoadPixelArrayExternalPointer(input)); } @@ -1731,6 +1764,19 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement( } +LInstruction* LChunkBuilder::DoLoadPixelArrayElement( + HLoadPixelArrayElement* instr) { + ASSERT(instr->representation().IsInteger32()); + 
ASSERT(instr->key()->representation().IsInteger32()); + LOperand* external_pointer = + UseRegisterAtStart(instr->external_pointer()); + LOperand* key = UseRegisterAtStart(instr->key()); + LLoadPixelArrayElement* result = + new LLoadPixelArrayElement(external_pointer, key); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { LOperand* object = UseFixed(instr->object(), r1); LOperand* key = UseFixed(instr->key(), r0); @@ -1832,8 +1878,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - LOperand* key = UseRegisterAtStart(instr->key()); + LOperand* object = UseFixed(instr->object(), r0); + LOperand* key = UseFixed(instr->key(), r1); LDeleteProperty* result = new LDeleteProperty(object, key); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1881,7 +1927,7 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LTypeof* result = new LTypeof(UseRegisterAtStart(instr->value())); + LTypeof* result = new LTypeof(UseFixed(instr->value(), r0)); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1890,6 +1936,12 @@ LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) { return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value()))); } + +LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) { + return DefineAsRegister(new LIsConstructCall()); +} + + LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { HEnvironment* env = current_block_->last_environment(); ASSERT(env != NULL); diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index a076c80c75..57338f16d5 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -41,7 +41,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ - V(Constant) \ V(Call) \ V(StoreKeyed) \ V(StoreNamed) \ @@ -95,6 +94,7 @@ class LCodeGen; V(FixedArrayLength) \ V(FunctionLiteral) \ V(Gap) \ + V(GetCachedArrayIndex) \ V(GlobalObject) \ V(GlobalReceiver) \ V(Goto) \ @@ -123,6 +123,8 @@ class LCodeGen; V(LoadKeyedGeneric) \ V(LoadNamedField) \ V(LoadNamedGeneric) \ + V(LoadPixelArrayElement) \ + V(LoadPixelArrayExternalPointer) \ V(ModI) \ V(MulI) \ V(NumberTagD) \ @@ -132,6 +134,7 @@ class LCodeGen; V(OsrEntry) \ V(OuterContext) \ V(Parameter) \ + V(PixelArrayLength) \ V(PushArgument) \ V(RegExpLiteral) \ V(Return) \ @@ -153,6 +156,8 @@ class LCodeGen; V(Typeof) \ V(TypeofIs) \ V(TypeofIsAndBranch) \ + V(IsConstructCall) \ + V(IsConstructCallAndBranch) \ V(UnaryMathOperation) \ V(UnknownOSRValue) \ V(ValueOf) @@ -735,6 +740,17 @@ class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { }; +class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { + public: + explicit LGetCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index") + DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex) +}; + + class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> { public: explicit LHasCachedArrayIndexAndBranch(LOperand* value) { @@ -903,44 +919,30 @@ class LSubI: public LTemplateInstruction<1, 2, 0> { }; -class LConstant: public LTemplateInstruction<1, 0, 0> { - DECLARE_INSTRUCTION(Constant) -}; - - -class LConstantI: public LConstant { +class LConstantI: 
public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LConstantI(int32_t value) : value_(value) { }
-  int32_t value() const { return value_; }
-
   DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
 
- private:
-  int32_t value_;
+  int32_t value() const { return hydrogen()->Integer32Value(); }
 };
 
 
-class LConstantD: public LConstant {
+class LConstantD: public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LConstantD(double value) : value_(value) { }
-  double value() const { return value_; }
-
   DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
 
- private:
-  double value_;
+  double value() const { return hydrogen()->DoubleValue(); }
 };
 
 
-class LConstantT: public LConstant {
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LConstantT(Handle<Object> value) : value_(value) { }
-  Handle<Object> value() const { return value_; }
-
   DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
 
- private:
-  Handle<Object> value_;
+  Handle<Object> value() const { return hydrogen()->handle(); }
 };
 
@@ -990,6 +992,17 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
 };
 
 
+class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LPixelArrayLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
+  DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+};
+
+
 class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFixedArrayLength(LOperand* value) {
@@ -1139,6 +1152,17 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
 };
 
 
+class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
+                               "load-pixel-array-external-pointer")
+};
+
+
 class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
@@ -1154,6 +1178,22 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 };
 
 
+class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+    inputs_[0] = external_pointer;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
+                               "load-pixel-array-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+
+  LOperand* external_pointer() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
 class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
@@ -1716,6 +1756,24 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
 };
 
 
+class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
+  DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+  explicit LIsConstructCallAndBranch(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+                               "is-construct-call-and-branch")
+};
+
+
 class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
  public:
   LDeleteProperty(LOperand* obj, LOperand* key) {
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 855ed461b5..1bfb3ad943 100644
--- 
a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -647,7 +647,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { return; } - if (cc == kNoCondition) { + if (cc == al) { if (FLAG_trap_on_deopt) __ stop("trap_on_deopt"); __ Jump(entry, RelocInfo::RUNTIME_ENTRY); } else { @@ -1188,8 +1188,8 @@ void LCodeGen::DoMulI(LMulI* instr) { __ tst(left, Operand(left)); __ b(ne, &done); if (instr->InputAt(1)->IsConstantOperand()) { - if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) { - DeoptimizeIf(kNoCondition, instr->environment()); + if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) { + DeoptimizeIf(al, instr->environment()); } } else { // Test the non-zero operand for negative sign. @@ -1322,6 +1322,13 @@ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) { } +void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) { + Register result = ToRegister(instr->result()); + Register array = ToRegister(instr->InputAt(0)); + __ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset)); +} + + void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) { Register result = ToRegister(instr->result()); Register array = ToRegister(instr->InputAt(0)); @@ -1605,7 +1612,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { - __ cmp(ToRegister(left), ToOperand(right)); + __ cmp(ToRegister(left), ToRegister(right)); } @@ -1619,8 +1626,7 @@ void LCodeGen::DoCmpID(LCmpID* instr) { if (instr->is_double()) { // Compare left and right as doubles and load the // resulting flags into the normal status register. - __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right)); - __ vmrs(pc); + __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); // If a NaN is involved, i.e. the result is unordered (V set), // jump to unordered to return false. __ b(vs, &unordered); @@ -1647,8 +1653,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { if (instr->is_double()) { // Compare left and right as doubles and load the // resulting flags into the normal status register. - __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right)); - __ vmrs(pc); + __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); // If a NaN is involved, i.e. the result is unordered (V set), // jump to false block label. 
__ b(vs, chunk_->GetAssemblyLabel(false_block)); @@ -1891,14 +1896,42 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { } +void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + __ ldr(scratch, FieldMemOperand(input, String::kHashFieldOffset)); + __ IndexFromHash(scratch, result); +} + + void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { - Abort("DoHasCachedArrayIndex unimplemented."); + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + __ ldr(scratch, + FieldMemOperand(input, String::kHashFieldOffset)); + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); + __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); + __ LoadRoot(result, Heap::kFalseValueRootIndex, ne); } void LCodeGen::DoHasCachedArrayIndexAndBranch( LHasCachedArrayIndexAndBranch* instr) { - Abort("DoHasCachedArrayIndexAndBranch unimplemented."); + Register input = ToRegister(instr->InputAt(0)); + Register scratch = scratch0(); + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + __ ldr(scratch, + FieldMemOperand(input, String::kHashFieldOffset)); + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); + EmitBranch(true_block, false_block, eq); } @@ -2180,12 +2213,12 @@ void LCodeGen::DoCmpT(LCmpT* instr) { Handle ic = CompareIC::GetUninitialized(op); CallCode(ic, RelocInfo::CODE_TARGET, instr); + __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined. Condition condition = ComputeCompareCondition(op); if (op == Token::GT || op == Token::LTE) { condition = ReverseCondition(condition); } - __ cmp(r0, Operand(0)); __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex, condition); @@ -2196,7 +2229,21 @@ void LCodeGen::DoCmpT(LCmpT* instr) { void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) { - Abort("DoCmpTAndBranch unimplemented."); + Token::Value op = instr->op(); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + Handle ic = CompareIC::GetUninitialized(op); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + + // The compare stub expects compare condition and the input operands + // reversed for GT and LTE. 
+ Condition condition = ComputeCompareCondition(op); + if (op == Token::GT || op == Token::LTE) { + condition = ReverseCondition(condition); + } + __ cmp(r0, Operand(0)); + EmitBranch(true_block, false_block, condition); } @@ -2342,17 +2389,20 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { void LCodeGen::DoLoadElements(LLoadElements* instr) { - ASSERT(instr->result()->Equals(instr->InputAt(0))); - Register reg = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register input = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); - __ ldr(reg, FieldMemOperand(reg, JSObject::kElementsOffset)); + __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset)); if (FLAG_debug_code) { Label done; - __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); + __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ cmp(scratch, ip); __ b(eq, &done); + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(scratch, ip); + __ b(eq, &done); __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); __ cmp(scratch, ip); __ Check(eq, "Check for fast elements failed."); @@ -2361,6 +2411,14 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { } +void LCodeGen::DoLoadPixelArrayExternalPointer( + LLoadPixelArrayExternalPointer* instr) { + Register to_reg = ToRegister(instr->result()); + Register from_reg = ToRegister(instr->InputAt(0)); + __ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset)); +} + + void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { Register arguments = ToRegister(instr->arguments()); Register length = ToRegister(instr->length()); @@ -2397,6 +2455,16 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { } +void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) { + Register external_elements = ToRegister(instr->external_pointer()); + Register key = ToRegister(instr->key()); + Register result = ToRegister(instr->result()); + + // Load the result. + __ ldrb(result, MemOperand(external_elements, key)); +} + + void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r1)); ASSERT(ToRegister(instr->key()).is(r0)); @@ -2448,29 +2516,33 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { void LCodeGen::DoApplyArguments(LApplyArguments* instr) { Register receiver = ToRegister(instr->receiver()); Register function = ToRegister(instr->function()); + Register length = ToRegister(instr->length()); + Register elements = ToRegister(instr->elements()); Register scratch = scratch0(); - - ASSERT(receiver.is(r0)); - ASSERT(function.is(r1)); + ASSERT(receiver.is(r0)); // Used for parameter count. + ASSERT(function.is(r1)); // Required by InvokeFunction. ASSERT(ToRegister(instr->result()).is(r0)); - // If the receiver is null or undefined, we have to pass the - // global object as a receiver. - Label global_receiver, receiver_ok; + // If the receiver is null or undefined, we have to pass the global object + // as a receiver. 
+ Label global_object, receiver_ok; __ LoadRoot(scratch, Heap::kNullValueRootIndex); __ cmp(receiver, scratch); - __ b(eq, &global_receiver); + __ b(eq, &global_object); __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); __ cmp(receiver, scratch); - __ b(ne, &receiver_ok); - __ bind(&global_receiver); - __ ldr(receiver, GlobalObjectOperand()); - __ bind(&receiver_ok); + __ b(eq, &global_object); - Register length = ToRegister(instr->length()); - Register elements = ToRegister(instr->elements()); + // Deoptimize if the receiver is not a JS object. + __ tst(receiver, Operand(kSmiTagMask)); + DeoptimizeIf(eq, instr->environment()); + __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE); + DeoptimizeIf(lo, instr->environment()); + __ jmp(&receiver_ok); - Label invoke; + __ bind(&global_object); + __ ldr(receiver, GlobalObjectOperand()); + __ bind(&receiver_ok); // Copy the arguments to this function possibly from the // adaptor frame below it. @@ -2487,7 +2559,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { // Loop through the arguments pushing them onto the execution // stack. - Label loop; + Label invoke, loop; // length is a small non-negative integer, due to the test above. __ tst(length, Operand(length)); __ b(eq, &invoke); @@ -2510,6 +2582,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { // by InvokeFunction. v8::internal::ParameterCount actual(receiver); __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -2899,7 +2972,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { // Name is always in r2. __ mov(r2, Operand(instr->name())); - Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Handle ic(Builtins::builtin(info_->is_strict() + ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -3778,6 +3853,55 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, } +void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) { + Register result = ToRegister(instr->result()); + Label true_label; + Label false_label; + Label done; + + EmitIsConstructCall(result, scratch0()); + __ b(eq, &true_label); + + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ b(&done); + + + __ bind(&true_label); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + + __ bind(&done); +} + + +void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { + Register temp1 = ToRegister(instr->TempAt(0)); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + EmitIsConstructCall(temp1, scratch0()); + EmitBranch(true_block, false_block, eq); +} + + +void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { + ASSERT(!temp1.is(temp2)); + // Get the frame pointer for the calling frame. + __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); + __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ b(ne, &check_frame_marker); + __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. 
+ __ bind(&check_frame_marker); + __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); + __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); +} + + void LCodeGen::DoLazyBailout(LLazyBailout* instr) { // No code for lazy bailout instruction. Used to capture environment after a // call for populating the safepoint data with deoptimization data. @@ -3785,14 +3909,16 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) { void LCodeGen::DoDeoptimize(LDeoptimize* instr) { - DeoptimizeIf(kNoCondition, instr->environment()); + DeoptimizeIf(al, instr->environment()); } void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { Register object = ToRegister(instr->object()); Register key = ToRegister(instr->key()); - __ Push(object, key); + Register strict = scratch0(); + __ mov(strict, Operand(Smi::FromInt(strict_mode_flag()))); + __ Push(object, key, strict); ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); LPointerMap* pointers = instr->pointer_map(); LEnvironment* env = instr->deoptimization_environment(); @@ -3818,7 +3944,19 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { void LCodeGen::DoOsrEntry(LOsrEntry* instr) { - Abort("DoOsrEntry unimplemented."); + // This is a pseudo-instruction that ensures that the environment here is + // properly registered for deoptimization and records the assembler's PC + // offset. + LEnvironment* environment = instr->environment(); + environment->SetSpilledRegisters(instr->SpilledRegisterArray(), + instr->SpilledDoubleRegisterArray()); + + // If the environment were already registered, we would have no way of + // backpatching it with the spill slot operands. + ASSERT(!environment->HasBeenRegistered()); + RegisterEnvironmentForDeoptimization(environment); + ASSERT(osr_pc_offset_ == -1); + osr_pc_offset_ = masm()->pc_offset(); } diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 3f7fe4519b..732db44517 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -129,6 +129,10 @@ class LCodeGen BASE_EMBEDDED { bool is_done() const { return status_ == DONE; } bool is_aborted() const { return status_ == ABORTED; } + int strict_mode_flag() const { + return info_->is_strict() ? kStrictMode : kNonStrictMode; + } + LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } HGraph* graph() const { return chunk_->graph(); } @@ -264,6 +268,10 @@ class LCodeGen BASE_EMBEDDED { Label* is_not_object, Label* is_object); + // Emits optimized code for %_IsConstructCall(). + // Caller should branch on equal condition. + void EmitIsConstructCall(Register temp1, Register temp2); + LChunk* const chunk_; MacroAssembler* const masm_; CompilationInfo* const info_; diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index c11d664f07..eb850cd948 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -714,7 +714,8 @@ int MacroAssembler::ActivationFrameAlignment() { } -void MacroAssembler::LeaveExitFrame(bool save_doubles) { +void MacroAssembler::LeaveExitFrame(bool save_doubles, + Register argument_count) { // Optionally restore all double registers. if (save_doubles) { for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { @@ -736,12 +737,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { str(r3, MemOperand(ip)); #endif - // Tear down the exit frame, pop the arguments, and return. Callee-saved - // register r4 still holds argc. 
+ // Tear down the exit frame, pop the arguments, and return. mov(sp, Operand(fp)); ldm(ia_w, sp, fp.bit() | lr.bit()); - add(sp, sp, Operand(r4, LSL, kPointerSizeLog2)); - mov(pc, lr); + if (argument_count.is_valid()) { + add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); + } } @@ -929,8 +930,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, void MacroAssembler::IsObjectJSStringType(Register object, - Register scratch, - Label* fail) { + Register scratch, + Label* fail) { ASSERT(kNotStringTag != 0); ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); @@ -1005,6 +1006,117 @@ void MacroAssembler::PopTryHandler() { } +void MacroAssembler::Throw(Register value) { + // r0 is expected to hold the exception. + if (!value.is(r0)) { + mov(r0, value); + } + + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop the sp to the top of the handler. + mov(r3, Operand(ExternalReference(Top::k_handler_address))); + ldr(sp, MemOperand(r3)); + + // Restore the next handler and frame pointer, discard handler state. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(r2); + str(r2, MemOperand(r3)); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. + + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of a + // JS entry frame. + cmp(fp, Operand(0, RelocInfo::NONE)); + // Set cp to NULL if fp is NULL. + mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); + // Restore cp otherwise. + ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + pop(pc); +} + + +void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, + Register value) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // r0 is expected to hold the exception. + if (!value.is(r0)) { + mov(r0, value); + } + + // Drop sp to the top stack handler. + mov(r3, Operand(ExternalReference(Top::k_handler_address))); + ldr(sp, MemOperand(r3)); + + // Unwind the handlers until the ENTRY handler is found. + Label loop, done; + bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + ldr(r2, MemOperand(sp, kStateOffset)); + cmp(r2, Operand(StackHandler::ENTRY)); + b(eq, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + ldr(sp, MemOperand(sp, kNextOffset)); + jmp(&loop); + bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(r2); + str(r2, MemOperand(r3)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + mov(r0, Operand(false, RelocInfo::NONE)); + mov(r2, Operand(external_caught)); + str(r0, MemOperand(r2)); + + // Set pending exception and r0 to out of memory exception. + Failure* out_of_memory = Failure::OutOfMemoryException(); + mov(r0, Operand(reinterpret_cast(out_of_memory))); + mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); + str(r0, MemOperand(r2)); + } + + // Stack layout at this point. 
See also StackHandlerConstants. + // sp -> state (ENTRY) + // fp + // lr + + // Discard handler state (r2 is not used) and restore frame pointer. + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of a + // JS entry frame. + cmp(fp, Operand(0, RelocInfo::NONE)); + // Set cp to NULL if fp is NULL. + mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); + // Restore cp otherwise. + ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + pop(pc); +} + + void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss) { @@ -1150,7 +1262,8 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Calculate new top and bail out if new space is exhausted. Use result // to calculate the new top. - add(scratch2, result, Operand(obj_size_reg)); + add(scratch2, result, Operand(obj_size_reg), SetCC); + b(cs, gc_required); cmp(scratch2, Operand(ip)); b(hi, gc_required); str(scratch2, MemOperand(topaddr)); @@ -1229,10 +1342,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, // to calculate the new top. Object size may be in words so a shift is // required to get the number of bytes. if ((flags & SIZE_IN_WORDS) != 0) { - add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2)); + add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC); } else { - add(scratch2, result, Operand(object_size)); + add(scratch2, result, Operand(object_size), SetCC); } + b(cs, gc_required); cmp(scratch2, Operand(ip)); b(hi, gc_required); @@ -1552,9 +1666,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( cmp(r4, r5); b(ne, &promote_scheduled_exception); - // LeaveExitFrame expects unwind space to be in r4. + // LeaveExitFrame expects unwind space to be in a register. mov(r4, Operand(stack_space)); - LeaveExitFrame(false); + LeaveExitFrame(false, r4); + mov(pc, lr); bind(&promote_scheduled_exception); MaybeObject* result = TryTailCallExternalReference( @@ -1771,6 +1886,13 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst, } +void MacroAssembler::GetLeastBitsFromInt32(Register dst, + Register src, + int num_least_bits) { + and_(dst, src, Operand((1 << num_least_bits) - 1)); +} + + void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { // All parameters are on the stack. r0 has the return value after call. 
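The AllocateInNewSpace hunks above are a correctness fix rather than a cleanup: the candidate top is computed as result + size in 32-bit registers, so the addition itself can wrap around, and the old unsigned cmp/b(hi, gc_required) comparison alone would then accept a bogus, small new top. The added SetCC and b(cs, gc_required) pair bails out to the GC whenever the add carries. A minimal standalone sketch of that hazard in plain C++ (illustrative only; CanAllocate is not a V8 function):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the patched check: carry test first, then the limit compare.
    bool CanAllocate(uint32_t top, uint32_t limit, uint32_t size) {
      uint32_t new_top = top + size;    // add(scratch2, result, ..., SetCC)
      if (new_top < top) return false;  // wrapped: carry set -> b(cs, gc_required)
      return new_top <= limit;          // cmp(scratch2, ip); b(hi, gc_required)
    }

    int main() {
      // Near the top of the 32-bit address space the old check alone passed:
      // 0xFFFFF000 + 0x2000 wraps to 0x1000, which compares below the limit.
      std::printf("%d\n", CanAllocate(0xFFFFF000u, 0xFFFFFF00u, 0x2000u));  // 0
      std::printf("%d\n", CanAllocate(0xFFFFF000u, 0xFFFFFF00u, 0x0800u));  // 1
      return 0;
    }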
@@ -2113,6 +2235,19 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
 }
 
 
+void MacroAssembler::AbortIfNotString(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  tst(object, Operand(kSmiTagMask));
+  Assert(ne, "Operand is not a string");
+  push(object);
+  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+  CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+  pop(object);
+  Assert(lo, "Operand is not a string");
+}
+
+
 void MacroAssembler::AbortIfNotRootValue(Register src,
                                          Heap::RootListIndex root_value_index,
                                          const char* message) {
@@ -2379,7 +2514,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
 }
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
@@ -2402,15 +2536,21 @@ CodePatcher::~CodePatcher() {
 }
 
 
-void CodePatcher::Emit(Instr x) {
-  masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+  masm()->emit(instr);
 }
 
 
 void CodePatcher::Emit(Address addr) {
   masm()->emit(reinterpret_cast<Instr>(addr));
 }
-#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
+void CodePatcher::EmitCondition(Condition cond) {
+  Instr instr = Assembler::instr_at(masm_.pc_);
+  instr = (instr & ~kCondMask) | cond;
+  masm_.emit(instr);
+}
 
 
 } }  // namespace v8::internal
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index c9ffde8981..354662da32 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -45,6 +45,12 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
 }
 
 
+static inline Operand SmiUntagOperand(Register object) {
+  return Operand(object, ASR, kSmiTagSize);
+}
+
+
+
 // Give alias names to registers
 const Register cp = { 8 };  // JavaScript context pointer
 const Register roots = { 10 };  // Roots array pointer.
@@ -291,7 +297,9 @@ class MacroAssembler: public Assembler {
   void EnterExitFrame(bool save_doubles, int stack_space = 0);
 
   // Leave the current exit frame. Expects the return value in r0.
-  void LeaveExitFrame(bool save_doubles);
+  // Expects the number of values to remove (pushed prior to the exit
+  // frame) in a register, or no_reg if there is nothing to remove.
+  void LeaveExitFrame(bool save_doubles, Register argument_count);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
@@ -365,6 +373,13 @@ class MacroAssembler: public Assembler {
   // Must preserve the result register.
   void PopTryHandler();
 
+  // Passes thrown value (in r0) to the handler at the top of the try handler chain.
+  void Throw(Register value);
+
+  // Propagates an uncatchable exception to the top of the current JS stack's
+  // handler chain.
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
   // ---------------------------------------------------------------------------
   // Inline caching support
 
@@ -558,6 +573,7 @@ class MacroAssembler: public Assembler {
   // Get the number of least significant bits from a register
   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
 
   // Uses VFP instructions to Convert a Smi to a double.
   void IntegerToDoubleConversionWithVFP3(Register inReg,
@@ -784,6 +800,9 @@ class MacroAssembler: public Assembler {
   void AbortIfSmi(Register object);
   void AbortIfNotSmi(Register object);
 
+  // Abort execution if argument is not a string. Used in debug code.
+  void AbortIfNotString(Register object);
+
   // Abort execution if argument is not the root value with the given index.
   void AbortIfNotRootValue(Register src,
                            Heap::RootListIndex root_value_index,
                            const char* message);
@@ -886,11 +905,15 @@ class CodePatcher {
   MacroAssembler* masm() { return &masm_; }
 
   // Emit an instruction directly.
-  void Emit(Instr x);
+  void Emit(Instr instr);
 
   // Emit an address directly.
   void Emit(Address addr);
 
+  // Emit the condition part of an instruction leaving the rest of the current
+  // instruction unchanged.
+  void EmitCondition(Condition cond);
+
  private:
   byte* address_;  // The address of the code being patched.
   int instructions_;  // Number of instructions of the expected patch size.
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 94da04240d..1f6ed6712d 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -57,48 +57,57 @@ namespace internal {
  * - r13/sp : points to tip of C stack.
  *
  * The remaining registers are free for computations.
- *
  * Each call to a public method should retain this convention.
+ *
  * The stack will have the following structure:
- *       - direct_call        (if 1, direct call from JavaScript code, if 0
- *                             call through the runtime system)
- *       - stack_area_base    (High end of the memory area to use as
- *                             backtracking stack)
- *       - int* capture_array (int[num_saved_registers_], for output).
- *       --- sp when called ---
- *       - link address
- *       - backup of registers r4..r11
- *       - end of input       (Address of end of string)
- *       - start of input     (Address of first character in string)
- *       - start index        (character index of start)
- *       --- frame pointer ----
- *       - void* input_string (location of a handle containing the string)
- *       - Offset of location before start of input (effectively character
- *         position -1). Used to initialize capture registers to a non-position.
- *       - At start           (if 1, we are starting at the start of the
- *                             string, otherwise 0)
- *       - register 0         (Only positions must be stored in the first
- *       - register 1          num_saved_registers_ registers)
- *       - ...
- *       - register num_registers-1
- *       --- sp ---
+ *  - fp[48]  direct_call     (if 1, direct call from JavaScript code,
+ *                             if 0, call through the runtime system).
+ *  - fp[44]  stack_area_base (High end of the memory area to use as
+ *                             backtracking stack).
+ *  - fp[40]  int* capture_array (int[num_saved_registers_], for output).
+ *  - fp[36]  secondary link/return address used by native call.
+ *  --- sp when called ---
+ *  - fp[32]  return address (lr).
+ *  - fp[28]  old frame pointer (r11).
+ *  - fp[0..24]  backup of registers r4..r10.
+ *  --- frame pointer ----
+ *  - fp[-4]  end of input       (Address of end of string).
+ *  - fp[-8]  start of input     (Address of first character in string).
+ *  - fp[-12] start index        (character index of start).
+ *  - fp[-16] void* input_string (location of a handle containing the string).
+ *  - fp[-20] Offset of location before start of input (effectively character
+ *            position -1). Used to initialize capture registers to a
+ *            non-position.
+ *  - fp[-24] At start (if 1, we are starting at the start of the
+ *            string, otherwise 0)
+ *  - fp[-28] register 0         (Only positions must be stored in the first
+ *  -         register 1          num_saved_registers_ registers)
+ *  -         ...
+ *  -         register num_registers-1
+ *  --- sp ---
 *
 * The first num_saved_registers_ registers are initialized to point to
 * "character -1" in the string (i.e., char_size() bytes before the first
 * character of the string). The remaining registers start out as garbage.
 *
 * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
 * int (*match)(String* input_string,
 *              int start_index,
 *              Address start,
 *              Address end,
+ *              Address secondary_return_address,  // Only used by native call.
 *              int* capture_output_array,
- *              bool at_start,
 *              byte* stack_area_base,
- *              bool direct_call)
+ *              bool direct_call = false)
 * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc).
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in arm/simulator-arm.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
 */
 
 #define __ ACCESS_MASM(masm_)
 
@@ -598,16 +607,17 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
 
   // Entry code:
   __ bind(&entry_label_);
-  // Push Link register.
   // Push arguments
   // Save callee-save registers.
   // Start new stack frame.
+  // Store link register in existing stack-cell.
   // Order here should correspond to order of offset constants in header file.
   RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
       r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
   RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
   __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
-  // Set frame pointer just above the arguments.
+  // Set frame pointer in space for it if this is not a direct call
+  // from generated code.
   __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
   __ push(r0);  // Make room for "position - 1" constant (value is irrelevant).
   __ push(r0);  // Make room for "at start" constant (value is irrelevant).
@@ -764,10 +774,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   if (stack_overflow_label_.is_linked()) {
     SafeCallTarget(&stack_overflow_label_);
     // Reached if the backtrack-stack limit has been hit.
-    Label grow_failed;
 
-    // Call GrowStack(backtrack_stackpointer())
+    // Call GrowStack(backtrack_stackpointer(), &stack_base)
     static const int num_arguments = 2;
     __ PrepareCallCFunction(num_arguments, r0);
     __ mov(r0, backtrack_stackpointer());
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index b487ba59d1..d9d0b3562e 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -122,8 +122,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   static const int kStoredRegisters = kFramePointer;
   // Return address (stored from link register, read into pc on return).
   static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
+  static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
   // Stack parameters placed by caller.
-  static const int kRegisterOutput = kReturnAddress + kPointerSize;
+  static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
   static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
 
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 5256ae35b9..bdf1f8a106 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -48,10 +48,16 @@ namespace internal {
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
   (entry(p0, p1, p2, p3, p4))
 
-// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
+typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
+                                  void*, int*, Address, int);
+
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type arm_regexp_matcher.
+// The fifth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
-  (entry(p0, p1, p2, p3, p4, p5, p6))
+  (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6))
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   (reinterpret_cast<TryCatch*>(try_catch_address))
@@ -362,8 +368,7 @@ class Simulator {
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
 
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
-  Simulator::current()->Call( \
-      FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+  Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6)
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   try_catch_address == \
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 9ef61158ea..675fdf49b2 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -3259,6 +3259,47 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
+    JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- r3    : scratch
+  //  -- r4    : scratch
+  //  -- r5    : scratch
+  //  -- r6    : scratch
+  //  -- lr    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map matches.
+  __ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false);
+
+  GenerateFastPixelArrayStore(masm(),
+                              r2,
+                              r1,
+                              r0,
+                              r3,
+                              r4,
+                              r5,
+                              r6,
+                              true,
+                              true,
+                              &miss,
+                              &miss,
+                              NULL,
+                              &miss);
+
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 3266a16e3a..b4b518cff6 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -329,18 +329,25 @@ void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
 }
 
 
-void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
-  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+void VirtualFrame::CallStoreIC(Handle<String> name,
+                               bool is_contextual,
+                               StrictModeFlag strict_mode) {
+  Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
+      ? 
Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); PopToR0(); + RelocInfo::Mode mode; if (is_contextual) { SpillAll(); __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + mode = RelocInfo::CODE_TARGET_CONTEXT; } else { EmitPop(r1); SpillAll(); + mode = RelocInfo::CODE_TARGET; } __ mov(r2, Operand(name)); - CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); + CallCodeObject(ic, mode, 0); } diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h index 82b4d08ab7..b6e794a5c0 100644 --- a/deps/v8/src/arm/virtual-frame-arm.h +++ b/deps/v8/src/arm/virtual-frame-arm.h @@ -294,7 +294,8 @@ class VirtualFrame : public ZoneObject { // Call store IC. If the load is contextual, value is found on top of the // frame. If not, value and receiver are on the frame. Both are consumed. // Result is returned in r0. - void CallStoreIC(Handle name, bool is_contextual); + void CallStoreIC(Handle name, bool is_contextual, + StrictModeFlag strict_mode); // Call keyed load IC. Key and receiver are on the stack. Both are consumed. // Result is returned in r0. diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 1298434d59..ef82674d78 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -161,15 +161,7 @@ function Join(array, length, separator, convert) { var result = %_FastAsciiArrayJoin(elements, separator); if (!IS_UNDEFINED(result)) return result; - var length2 = (length << 1) - 1; - var j = length2; - var i = length; - elements[--j] = elements[--i]; - while (i > 0) { - elements[--j] = separator; - elements[--j] = elements[--i]; - } - return %StringBuilderConcat(elements, length2, ''); + return %StringBuilderJoin(elements, length, separator); } finally { // Make sure to remove the last element of the visited array no // matter what happens. diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index ef2094f63a..42a61c2b8d 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -68,7 +68,7 @@ const double DoubleConstant::min_int = kMinInt; const double DoubleConstant::one_half = 0.5; const double DoubleConstant::minus_zero = -0.0; const double DoubleConstant::negative_infinity = -V8_INFINITY; - +const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; // ----------------------------------------------------------------------------- // Implementation of Label diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index e8bc5d6caa..1b71dfc5a1 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -178,10 +178,16 @@ class RelocInfo BASE_EMBEDDED { // invalid/uninitialized position value. static const int kNoPosition = -1; + // This string is used to add padding comments to the reloc info in cases + // where we are not sure to have enough space for patching in during + // lazy deoptimization. This is the case if we have indirect calls for which + // we do not normally record relocation info. + static const char* kFillerCommentString; + enum Mode { // Please note the order is important (see IsCodeTarget, IsGCRelocMode). CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor. - CODE_TARGET_CONTEXT, // Code target used for contextual loads. + CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores. DEBUG_BREAK, // Code target for the debugger statement. CODE_TARGET, // Code target which is not any of the above. 
EMBEDDED_OBJECT, diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index ccfa2b4ecf..772684cf95 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -618,7 +618,9 @@ bool Call::ComputeGlobalTarget(Handle global, cell_ = Handle::null(); LookupResult lookup; global->Lookup(*name, &lookup); - if (lookup.IsProperty() && lookup.type() == NORMAL) { + if (lookup.IsProperty() && + lookup.type() == NORMAL && + lookup.holder() == *global) { cell_ = Handle(global->GetPropertyCell(&lookup)); if (cell_->value()->IsJSFunction()) { Handle candidate(JSFunction::cast(cell_->value())); diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc index dd1537a25a..a973974550 100644 --- a/deps/v8/src/bignum.cc +++ b/deps/v8/src/bignum.cc @@ -67,7 +67,7 @@ void Bignum::AssignUInt64(uint64_t value) { int needed_bigits = kUInt64Size / kBigitSize + 1; EnsureCapacity(needed_bigits); for (int i = 0; i < needed_bigits; ++i) { - bigits_[i] = value & kBigitMask; + bigits_[i] = static_cast(value & kBigitMask); value = value >> kBigitSize; } used_digits_ = needed_bigits; @@ -266,7 +266,7 @@ void Bignum::MultiplyByUInt32(uint32_t factor) { } while (carry != 0) { EnsureCapacity(used_digits_ + 1); - bigits_[used_digits_] = carry & kBigitMask; + bigits_[used_digits_] = static_cast(carry & kBigitMask); used_digits_++; carry >>= kBigitSize; } @@ -287,13 +287,13 @@ void Bignum::MultiplyByUInt64(uint64_t factor) { uint64_t product_low = low * bigits_[i]; uint64_t product_high = high * bigits_[i]; uint64_t tmp = (carry & kBigitMask) + product_low; - bigits_[i] = tmp & kBigitMask; + bigits_[i] = static_cast(tmp & kBigitMask); carry = (carry >> kBigitSize) + (tmp >> kBigitSize) + (product_high << (32 - kBigitSize)); } while (carry != 0) { EnsureCapacity(used_digits_ + 1); - bigits_[used_digits_] = carry & kBigitMask; + bigits_[used_digits_] = static_cast(carry & kBigitMask); used_digits_++; carry >>= kBigitSize; } @@ -748,7 +748,8 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) { for (int i = 0; i < other.used_digits_; ++i) { DoubleChunk product = static_cast(factor) * other.bigits_[i]; DoubleChunk remove = borrow + product; - Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask); + Chunk difference = + bigits_[i + exponent_diff] - static_cast(remove & kBigitMask); bigits_[i + exponent_diff] = difference & kBigitMask; borrow = static_cast((difference >> (kChunkSize - 1)) + (remove >> kBigitSize)); diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 6db9a4819a..415d2dd8cb 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -349,7 +349,7 @@ static Handle InstallFunction(Handle target, prototype, call_code, is_ecma_native); - SetProperty(target, symbol, function, DONT_ENUM); + SetLocalPropertyNoThrow(target, symbol, function, DONT_ENUM); if (is_ecma_native) { function->shared()->set_instance_class_name(*symbol); } @@ -580,8 +580,8 @@ Handle Genesis::CreateNewGlobals( Handle prototype = Handle( JSObject::cast(js_global_function->instance_prototype())); - SetProperty(prototype, Factory::constructor_symbol(), - Top::object_function(), NONE); + SetLocalPropertyNoThrow( + prototype, Factory::constructor_symbol(), Top::object_function(), NONE); } else { Handle js_global_constructor( FunctionTemplateInfo::cast(js_global_template->constructor())); @@ -683,7 +683,8 @@ void Genesis::InitializeGlobal(Handle inner_global, global_context()->set_security_token(*inner_global); Handle object_name = Handle(Heap::Object_symbol()); - SetProperty(inner_global, 
object_name, Top::object_function(), DONT_ENUM); + SetLocalPropertyNoThrow(inner_global, object_name, + Top::object_function(), DONT_ENUM); Handle global = Handle(global_context()->global()); @@ -851,7 +852,7 @@ void Genesis::InitializeGlobal(Handle inner_global, cons->SetInstanceClassName(*name); Handle json_object = Factory::NewJSObject(cons, TENURED); ASSERT(json_object->IsJSObject()); - SetProperty(global, name, json_object, DONT_ENUM); + SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM); global_context()->set_json_object(*json_object); } @@ -880,12 +881,12 @@ void Genesis::InitializeGlobal(Handle inner_global, global_context()->set_arguments_boilerplate(*result); // Note: callee must be added as the first property and // length must be added as the second property. - SetProperty(result, Factory::callee_symbol(), - Factory::undefined_value(), - DONT_ENUM); - SetProperty(result, Factory::length_symbol(), - Factory::undefined_value(), - DONT_ENUM); + SetLocalPropertyNoThrow(result, Factory::callee_symbol(), + Factory::undefined_value(), + DONT_ENUM); + SetLocalPropertyNoThrow(result, Factory::length_symbol(), + Factory::undefined_value(), + DONT_ENUM); #ifdef DEBUG LookupResult lookup; @@ -1085,10 +1086,8 @@ bool Genesis::InstallNatives() { static const PropertyAttributes attributes = static_cast(READ_ONLY | DONT_DELETE); Handle global_symbol = Factory::LookupAsciiSymbol("global"); - SetProperty(builtins, - global_symbol, - Handle(global_context()->global()), - attributes); + Handle global_obj(global_context()->global()); + SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes); // Setup the reference from the global object to the builtins object. JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins); @@ -1480,17 +1479,17 @@ void Genesis::InstallSpecialObjects(Handle global_context) { if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) { Handle natives_string = Factory::LookupAsciiSymbol(FLAG_expose_natives_as); - SetProperty(js_global, natives_string, - Handle(js_global->builtins()), DONT_ENUM); + SetLocalPropertyNoThrow(js_global, natives_string, + Handle(js_global->builtins()), DONT_ENUM); } Handle Error = GetProperty(js_global, "Error"); if (Error->IsJSObject()) { Handle name = Factory::LookupAsciiSymbol("stackTraceLimit"); - SetProperty(Handle::cast(Error), - name, - Handle(Smi::FromInt(FLAG_stack_trace_limit)), - NONE); + SetLocalPropertyNoThrow(Handle::cast(Error), + name, + Handle(Smi::FromInt(FLAG_stack_trace_limit)), + NONE); } #ifdef ENABLE_DEBUGGER_SUPPORT @@ -1507,8 +1506,8 @@ void Genesis::InstallSpecialObjects(Handle global_context) { Handle debug_string = Factory::LookupAsciiSymbol(FLAG_expose_debug_as); - SetProperty(js_global, debug_string, - Handle(Debug::debug_context()->global_proxy()), DONT_ENUM); + Handle global_proxy(Debug::debug_context()->global_proxy()); + SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM); } #endif } @@ -1679,7 +1678,7 @@ void Genesis::TransferNamedProperties(Handle from, Handle key = Handle(descs->GetKey(i)); int index = descs->GetFieldIndex(i); Handle value = Handle(from->FastPropertyAt(index)); - SetProperty(to, key, value, details.attributes()); + SetLocalPropertyNoThrow(to, key, value, details.attributes()); break; } case CONSTANT_FUNCTION: { @@ -1687,7 +1686,7 @@ void Genesis::TransferNamedProperties(Handle from, Handle key = Handle(descs->GetKey(i)); Handle fun = Handle(descs->GetConstantFunction(i)); - SetProperty(to, key, fun, 
details.attributes()); + SetLocalPropertyNoThrow(to, key, fun, details.attributes()); break; } case CALLBACKS: { @@ -1737,7 +1736,7 @@ void Genesis::TransferNamedProperties(Handle from, value = Handle(JSGlobalPropertyCell::cast(*value)->value()); } PropertyDetails details = properties->DetailsAt(i); - SetProperty(to, key, value, details.attributes()); + SetLocalPropertyNoThrow(to, key, value, details.attributes()); } } } diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index d604226d75..8fdc1b1382 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -368,7 +368,9 @@ static bool ArrayPrototypeHasNoElements(Context* global_context, array_proto = JSObject::cast(array_proto->GetPrototype()); ASSERT(array_proto->elements() == Heap::empty_fixed_array()); // Object.prototype - array_proto = JSObject::cast(array_proto->GetPrototype()); + Object* proto = array_proto->GetPrototype(); + if (proto == Heap::null_value()) return false; + array_proto = JSObject::cast(proto); if (array_proto != global_context->initial_object_prototype()) return false; if (array_proto->elements() != Heap::empty_fixed_array()) return false; ASSERT(array_proto->GetPrototype()->IsNull()); @@ -1305,6 +1307,11 @@ static void Generate_StoreIC_Initialize(MacroAssembler* masm) { } +static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) { + StoreIC::GenerateInitialize(masm); +} + + static void Generate_StoreIC_Miss(MacroAssembler* masm) { StoreIC::GenerateMiss(masm); } @@ -1315,8 +1322,18 @@ static void Generate_StoreIC_Normal(MacroAssembler* masm) { } +static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) { + StoreIC::GenerateNormal(masm); +} + + static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) { - StoreIC::GenerateMegamorphic(masm); + StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICNonStrict); +} + + +static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) { + StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICStrict); } @@ -1325,11 +1342,21 @@ static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) { } +static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) { + StoreIC::GenerateArrayLength(masm); +} + + static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) { StoreIC::GenerateGlobalProxy(masm); } +static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) { + StoreIC::GenerateGlobalProxy(masm); +} + + static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) { KeyedStoreIC::GenerateGeneric(masm); } @@ -1442,13 +1469,13 @@ void Builtins::Setup(bool create_heap_objects) { extra_args \ }, -#define DEF_FUNCTION_PTR_A(name, kind, state) \ - { FUNCTION_ADDR(Generate_##name), \ - NULL, \ - #name, \ - name, \ - Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state), \ - NO_EXTRA_ARGUMENTS \ +#define DEF_FUNCTION_PTR_A(name, kind, state, extra) \ + { FUNCTION_ADDR(Generate_##name), \ + NULL, \ + #name, \ + name, \ + Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state, extra), \ + NO_EXTRA_ARGUMENTS \ }, // Define array of pointers to generators and C builtin functions. diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 88d31c7612..2733410ea9 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -63,73 +63,135 @@ enum BuiltinExtraArguments { // Define list of builtins implemented in assembly. 
#define BUILTIN_LIST_A(V) \ - V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \ - V(JSConstructCall, BUILTIN, UNINITIALIZED) \ - V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED) \ - V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \ - V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \ - V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \ - V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \ - V(LazyCompile, BUILTIN, UNINITIALIZED) \ - V(LazyRecompile, BUILTIN, UNINITIALIZED) \ - V(NotifyDeoptimized, BUILTIN, UNINITIALIZED) \ - V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED) \ - V(NotifyOSR, BUILTIN, UNINITIALIZED) \ + V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructCall, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LazyCompile, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LazyRecompile, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(NotifyOSR, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ \ - V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \ - V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \ - V(StoreIC_Miss, BUILTIN, UNINITIALIZED) \ - V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED) \ + V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ \ - V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED) \ - V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC) \ - V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \ - V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC) \ - V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC) \ - V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC) \ - V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC) \ - V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC) \ + V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ \ - V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED) \ - V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \ - V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \ - V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \ + V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \ + Code::kNoExtraICState) \ + 
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ \ - V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \ - V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \ - V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \ - V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \ - V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC) \ + V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ + V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \ + StoreIC::kStoreICStrict) \ + V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \ + StoreIC::kStoreICStrict) \ + V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \ + StoreIC::kStoreICStrict) \ + V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \ + StoreIC::kStoreICStrict) \ + V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \ + StoreIC::kStoreICStrict) \ \ - V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \ - V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \ + V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \ + Code::kNoExtraICState) \ \ /* Uses KeyedLoadIC_Initialize; must be after in list. */ \ - V(FunctionCall, BUILTIN, UNINITIALIZED) \ - V(FunctionApply, BUILTIN, UNINITIALIZED) \ + V(FunctionCall, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(FunctionApply, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ \ - V(ArrayCode, BUILTIN, UNINITIALIZED) \ - V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \ + V(ArrayCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ \ - V(StringConstructCode, BUILTIN, UNINITIALIZED) \ + V(StringConstructCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ \ - V(OnStackReplacement, BUILTIN, UNINITIALIZED) + V(OnStackReplacement, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) #ifdef ENABLE_DEBUGGER_SUPPORT // Define list of builtins used by the debugger implemented in assembly. 
#define BUILTIN_LIST_DEBUG_A(V) \ - V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \ - V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK) \ - V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK) \ - V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \ - V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \ - V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \ - V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) \ - V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK) \ - V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK) \ - V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK) + V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \ + Code::kNoExtraICState) \ + V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \ + Code::kNoExtraICState) #else #define BUILTIN_LIST_DEBUG_A(V) #endif @@ -152,7 +214,7 @@ enum BuiltinExtraArguments { V(SHL, 1) \ V(SAR, 1) \ V(SHR, 1) \ - V(DELETE, 1) \ + V(DELETE, 2) \ V(IN, 1) \ V(INSTANCE_OF, 1) \ V(GET_KEYS, 0) \ @@ -186,7 +248,7 @@ class Builtins : public AllStatic { enum Name { #define DEF_ENUM_C(name, ignore) name, -#define DEF_ENUM_A(name, kind, state) name, +#define DEF_ENUM_A(name, kind, state, extra) name, BUILTIN_LIST_C(DEF_ENUM_C) BUILTIN_LIST_A(DEF_ENUM_A) BUILTIN_LIST_DEBUG_A(DEF_ENUM_A) diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 69f8477f89..ba77b21c60 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -32,7 +32,6 @@ #include "factory.h" #include "gdb-jit.h" #include "macro-assembler.h" -#include "oprofile-agent.h" namespace v8 { namespace internal { @@ -63,9 +62,6 @@ void CodeStub::GenerateCode(MacroAssembler* masm) { void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) { code->set_major_key(MajorKey()); - OPROFILE(CreateNativeCodeRegion(GetName(), - code->instruction_start(), - code->instruction_size())); PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName())); GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code)); Counters::total_stubs_code_size.Increment(code->instruction_size()); diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 0d0e37ffac..96ac7335cf 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -86,9 +86,6 @@ namespace internal { CODE_STUB_LIST_ALL_PLATFORMS(V) \ CODE_STUB_LIST_ARM(V) -// Types of uncatchable exceptions. -enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; - // Mode to overwrite BinaryExpression values. 
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT }; enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE }; diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index f9a2453a09..e6fcecde7b 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -31,7 +31,6 @@ #include "codegen-inl.h" #include "compiler.h" #include "debug.h" -#include "oprofile-agent.h" #include "prettyprinter.h" #include "register-allocator-inl.h" #include "rewriter.h" diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 77111a842e..ae7b2b9f98 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -39,7 +39,6 @@ #include "hydrogen.h" #include "lithium.h" #include "liveedit.h" -#include "oprofile-agent.h" #include "parser.h" #include "rewriter.h" #include "runtime-profiler.h" @@ -289,6 +288,11 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { HGraphBuilder builder(&oracle); HPhase phase(HPhase::kTotal); HGraph* graph = builder.CreateGraph(info); + if (Top::has_pending_exception()) { + info->SetCode(Handle::null()); + return false; + } + if (graph != NULL && FLAG_build_lithium) { Handle code = graph->Compile(); if (!code.is_null()) { @@ -419,9 +423,6 @@ static Handle MakeFunctionInfo(CompilationInfo* info) { : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *info->code(), String::cast(script->name()))); - OPROFILE(CreateNativeCodeRegion(String::cast(script->name()), - info->code()->instruction_start(), - info->code()->instruction_size())); GDBJIT(AddCode(Handle(String::cast(script->name())), script, info->code())); @@ -432,9 +433,6 @@ static Handle MakeFunctionInfo(CompilationInfo* info) { : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *info->code(), "")); - OPROFILE(CreateNativeCodeRegion(info->is_eval() ? "Eval" : "Script", - info->code()->instruction_start(), - info->code()->instruction_size())); GDBJIT(AddCode(Handle(), script, info->code())); } @@ -608,7 +606,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) { // Compile the code. if (!MakeCode(info)) { - Top::StackOverflow(); + if (!Top::has_pending_exception()) { + Top::StackOverflow(); + } } else { ASSERT(!info->code().is_null()); Handle code = info->code(); @@ -783,7 +783,6 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag, // script name and line number. Check explicitly whether logging is // enabled as finding the line number is not free. if (Logger::is_logging() || - OProfileAgent::is_enabled() || CpuProfiler::is_profiling()) { Handle
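The compiler.cc hunks above work as a pair: MakeCrankshaftCode can now leave a pending exception behind when graph building fails, and Compiler::CompileLazy must no longer clobber that exception with a generic stack overflow. A minimal sketch of the resulting control flow; Isolate and make_code here are hypothetical stand-ins for V8's Top::has_pending_exception() and Top::StackOverflow(), not the real API:

    #include <cstdio>

    // Hypothetical stand-in for V8's Top; not the real API surface.
    struct Isolate {
      bool pending_exception = false;
      void ScheduleStackOverflow() { pending_exception = true; }
    };

    // Mirrors the patched CompileLazy: only a "silent" failure is converted
    // into a stack overflow; an exception already scheduled by the compile
    // pass (e.g. a Crankshaft bailout) is preserved.
    bool CompileLazy(Isolate* top, bool (*make_code)(Isolate*)) {
      if (!make_code(top)) {
        if (!top->pending_exception) top->ScheduleStackOverflow();
        return false;
      }
      return true;
    }

    int main() {
      Isolate top;
      // A compile pass that fails after scheduling its own exception:
      CompileLazy(&top, [](Isolate* t) { t->pending_exception = true; return false; });
      std::printf("pending=%d\n", top.pending_exception);  // preserved, not clobbered
      return 0;
    }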