author    Ryan <ry@tinyclouds.org>  2009-06-29 10:55:05 +0200
committer Ryan <ry@tinyclouds.org>  2009-06-29 10:55:05 +0200
commit    e763efdadf4bbd9c0155a4c7f782d271a2fd5814 (patch)
tree      094824e55548cdeb2ce9b796cbf29aaa591bb69f /deps/v8
parent    e876d6629e8682f5d818141bc0710f6d82311373 (diff)
Upgrade v8 to 1.2.10 and libev to 3.6
Diffstat (limited to 'deps/v8')
-rw-r--r--  deps/v8/AUTHORS | 1
-rw-r--r--  deps/v8/ChangeLog | 31
-rw-r--r--  deps/v8/SConstruct | 8
-rw-r--r--  deps/v8/benchmarks/revisions.html | 2
-rwxr-xr-x[-rw-r--r--]  deps/v8/benchmarks/run.html | 37
-rwxr-xr-x[-rw-r--r--]  deps/v8/benchmarks/style.css | 38
-rw-r--r--  deps/v8/include/v8.h | 6
-rw-r--r--  deps/v8/src/accessors.cc | 5
-rw-r--r--  deps/v8/src/api.cc | 20
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 2
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 28
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 134
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 53
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h | 4
-rw-r--r--  deps/v8/src/assembler.h | 10
-rw-r--r--  deps/v8/src/ast.cc | 2
-rw-r--r--  deps/v8/src/ast.h | 16
-rw-r--r--  deps/v8/src/bootstrapper.cc | 5
-rw-r--r--  deps/v8/src/builtins.h | 2
-rw-r--r--  deps/v8/src/codegen.cc | 125
-rw-r--r--  deps/v8/src/codegen.h | 6
-rw-r--r--  deps/v8/src/compilation-cache.cc | 360
-rw-r--r--  deps/v8/src/compilation-cache.h | 17
-rw-r--r--  deps/v8/src/compiler.cc | 16
-rw-r--r--  deps/v8/src/contexts.cc | 2
-rw-r--r--  deps/v8/src/conversions.cc | 2
-rw-r--r--  deps/v8/src/date-delay.js | 5
-rw-r--r--  deps/v8/src/debug-delay.js | 16
-rw-r--r--  deps/v8/src/dtoa-config.c | 5
-rw-r--r--  deps/v8/src/factory.cc | 2
-rw-r--r--  deps/v8/src/frame-element.h | 49
-rw-r--r--  deps/v8/src/globals.h | 2
-rw-r--r--  deps/v8/src/heap-inl.h | 22
-rw-r--r--  deps/v8/src/heap.cc | 221
-rw-r--r--  deps/v8/src/heap.h | 117
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 2
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 15
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 16
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 33
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 625
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 78
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 3
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc | 20
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h | 70
-rw-r--r--  deps/v8/src/ic.cc | 22
-rw-r--r--  deps/v8/src/ic.h | 2
-rw-r--r--  deps/v8/src/jsregexp.cc | 65
-rw-r--r--  deps/v8/src/jump-target.cc | 26
-rw-r--r--  deps/v8/src/log-inl.h | 126
-rw-r--r--  deps/v8/src/log-utils.cc | 18
-rw-r--r--  deps/v8/src/log.cc | 231
-rw-r--r--  deps/v8/src/log.h | 26
-rw-r--r--  deps/v8/src/mark-compact.cc | 17
-rw-r--r--  deps/v8/src/messages.js | 28
-rw-r--r--  deps/v8/src/mirror-delay.js | 26
-rw-r--r--  deps/v8/src/objects-inl.h | 35
-rw-r--r--  deps/v8/src/objects.cc | 112
-rw-r--r--  deps/v8/src/objects.h | 39
-rw-r--r--  deps/v8/src/oprofile-agent.cc | 4
-rw-r--r--  deps/v8/src/parser.cc | 16
-rw-r--r--  deps/v8/src/platform-linux.cc | 27
-rw-r--r--  deps/v8/src/platform-macos.cc | 23
-rw-r--r--  deps/v8/src/platform.h | 23
-rw-r--r--  deps/v8/src/register-allocator.cc | 13
-rw-r--r--  deps/v8/src/register-allocator.h | 94
-rw-r--r--  deps/v8/src/rewriter.cc | 5
-rw-r--r--  deps/v8/src/runtime.cc | 190
-rw-r--r--  deps/v8/src/runtime.js | 5
-rw-r--r--  deps/v8/src/scopeinfo.cc | 86
-rw-r--r--  deps/v8/src/scopeinfo.h | 68
-rw-r--r--  deps/v8/src/scopes.cc | 23
-rw-r--r--  deps/v8/src/scopes.h | 2
-rw-r--r--  deps/v8/src/serialize.cc | 18
-rw-r--r--  deps/v8/src/spaces.h | 1
-rw-r--r--  deps/v8/src/string.js | 48
-rw-r--r--  deps/v8/src/top.cc | 5
-rw-r--r--  deps/v8/src/top.h | 1
-rw-r--r--  deps/v8/src/v8.h | 1
-rw-r--r--  deps/v8/src/variables.cc | 4
-rw-r--r--  deps/v8/src/variables.h | 13
-rw-r--r--  deps/v8/src/version.cc | 4
-rw-r--r--  deps/v8/src/virtual-frame.cc | 18
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 60
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 834
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 192
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 440
-rw-r--r--  deps/v8/src/x64/codegen-x64-inl.h | 7
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 6269
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 14
-rw-r--r--  deps/v8/src/x64/frames-x64.cc | 79
-rw-r--r--  deps/v8/src/x64/frames-x64.h | 10
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 156
-rw-r--r--  deps/v8/src/x64/jump-target-x64.cc | 341
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 190
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 20
-rw-r--r--  deps/v8/src/x64/register-allocator-x64.cc | 30
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 127
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc | 898
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h | 17
-rw-r--r--  deps/v8/test/cctest/cctest.status | 4
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 37
-rw-r--r--  deps/v8/test/cctest/test-func-name-inference.cc | 14
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-mark-compact.cc | 7
-rw-r--r--  deps/v8/test/message/overwritten-builtins.js | 31
-rw-r--r--  deps/v8/test/message/overwritten-builtins.out | 30
-rw-r--r--  deps/v8/test/mjsunit/arguments-apply.js | 134
-rw-r--r--  deps/v8/test/mjsunit/arguments-lazy.js | 47
-rw-r--r--  deps/v8/test/mjsunit/date-parse.js | 2
-rw-r--r--  deps/v8/test/mjsunit/debug-sourceinfo.js | 12
-rw-r--r--  deps/v8/test/mjsunit/regexp-captures.js | 31
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1919169.js | 40
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-386.js | 47
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-392.js | 34
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-6-9-regexp.js | 30
-rw-r--r--  deps/v8/test/mjsunit/toint32.js | 16
-rw-r--r--  deps/v8/test/mjsunit/tools/logreader.js | 82
-rw-r--r--  deps/v8/test/mozilla/mozilla.status | 34
-rw-r--r--  deps/v8/tools/codemap.js | 4
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 1
-rw-r--r--  deps/v8/tools/linux-tick-processor | 12
-rw-r--r--  deps/v8/tools/logreader.js | 317
-rw-r--r--  deps/v8/tools/oprofile/annotate | 7
-rw-r--r--  deps/v8/tools/oprofile/common | 19
-rw-r--r--  deps/v8/tools/oprofile/dump | 7
-rw-r--r--  deps/v8/tools/oprofile/report | 7
-rw-r--r--  deps/v8/tools/oprofile/reset | 7
-rw-r--r--  deps/v8/tools/oprofile/run | 14
-rw-r--r--  deps/v8/tools/oprofile/shutdown | 7
-rw-r--r--  deps/v8/tools/oprofile/start | 7
-rwxr-xr-x  deps/v8/tools/test.py | 4
-rw-r--r--  deps/v8/tools/tickprocessor.js | 185
-rwxr-xr-x  deps/v8/tools/v8.xcodeproj/project.pbxproj | 2
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj | 4
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj | 4
-rw-r--r--  deps/v8/tools/windows-tick-processor.bat | 2
136 files changed, 12452 insertions, 2176 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 9b198d077a..bfe58a2c37 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -6,6 +6,7 @@
Google Inc.
Alexander Botero-Lowry <alexbl@FreeBSD.org>
+Alexandre Vassalotti <avassalotti@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 41b3234581..13061120d3 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,34 @@
+2009-06-29: Version 1.2.10
+
+ Improved debugger support.
+
+ Fixed bug in exception message reporting (issue 390).
+
+ Improved overall performance.
+
+
+2009-06-23: Version 1.2.9
+
+ Improved math performance on ARM.
+
+ Fixed profiler name-inference bug.
+
+ Fixed handling of shared libraries in the profiler tick processor
+ scripts.
+
+ Fixed handling of tests that time out in the test scripts.
+
+ Fixed compilation on MacOS X version 10.4.
+
+ Fixed two bugs in the regular expression engine.
+
+ Fixed a bug in the string type inference.
+
+ Fixed a bug in the handling of 'constant function' properties.
+
+ Improved overall performance.
+
+
2009-06-16: Version 1.2.8
Optimized math on ARM platforms.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 3b14eea271..0baf71b5d3 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -125,7 +125,7 @@ LIBRARY_FLAGS = {
}
},
'os:macos': {
- 'CCFLAGS': ['-ansi'],
+ 'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
},
'os:freebsd': {
'CPPPATH' : ['/usr/local/include'],
@@ -641,7 +641,7 @@ def GetVersionComponents():
def GetVersion():
version_components = GetVersionComponents()
-
+
if version_components[len(version_components) - 1] == '0':
version_components.pop()
return '.'.join(version_components)
@@ -649,10 +649,10 @@ def GetVersion():
def GetSpecificSONAME():
SONAME_PATTERN = re.compile(r"#define\s+SONAME\s+\"(.*)\"")
-
+
source = open(join(root_dir, 'src', 'version.cc')).read()
match = SONAME_PATTERN.search(source)
-
+
if match:
return match.group(1).strip()
else:
diff --git a/deps/v8/benchmarks/revisions.html b/deps/v8/benchmarks/revisions.html
index 458f8db697..b86c876fe4 100644
--- a/deps/v8/benchmarks/revisions.html
+++ b/deps/v8/benchmarks/revisions.html
@@ -1,7 +1,7 @@
<html>
<head>
<title>V8 Benchmark Suite Revisions</title>
-<link type="text/css" rel="stylesheet" href="style.css"></link>
+<link type="text/css" rel="stylesheet" href="style.css" />
</head>
<body>
<div>
diff --git a/deps/v8/benchmarks/run.html b/deps/v8/benchmarks/run.html
index 6adb6d27a0..050764e013 100644..100755
--- a/deps/v8/benchmarks/run.html
+++ b/deps/v8/benchmarks/run.html
@@ -1,5 +1,10 @@
-<html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html lang="en">
<head>
+<meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<meta http-equiv="Content-Script-Type" content="text/javascript">
+<meta http-equiv="Content-Style-Type" content="text/css">
<title>V8 Benchmark Suite</title>
<script type="text/javascript" src="base.js"></script>
<script type="text/javascript" src="richards.js"></script>
@@ -9,7 +14,7 @@
<script type="text/javascript" src="earley-boyer.js"></script>
<script type="text/javascript" src="regexp.js"></script>
<script type="text/javascript" src="splay.js"></script>
-<link type="text/css" rel="stylesheet" href="style.css"></link>
+<link type="text/css" rel="stylesheet" href="style.css" />
<script type="text/javascript">
var completed = 0;
var benchmarks = BenchmarkSuite.CountBenchmarks();
@@ -25,12 +30,12 @@ function ShowProgress(name) {
function AddResult(name, result) {
var text = name + ': ' + result;
var results = document.getElementById("results");
- results.innerHTML += (text + "<br/>");
+ results.innerHTML += (text + "<br>");
}
function AddError(name, error) {
- AddResult(name, '<b>error</b>');
+ AddResult(name, '<b>error<\/b>');
success = false;
}
@@ -53,11 +58,11 @@ function Run() {
function Load() {
var version = BenchmarkSuite.version;
document.getElementById("version").innerHTML = version;
- window.setTimeout(Run, 200);
+ setTimeout(Run, 200);
}
</script>
</head>
-<body onLoad="Load()">
+<body onload="Load()">
<div>
<div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div>
<table>
@@ -71,15 +76,15 @@ the individual benchmarks and of a reference system (score
higher scores means better performance: <em>Bigger is better!</em>
<ul>
-<li><b>Richards</b><br/>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
-<li><b>DeltaBlue</b><br/>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
-<li><b>Crypto</b><br/>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
-<li><b>RayTrace</b><br/>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
-<li><b>EarleyBoyer</b><br/>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
-<li><b>RegExp</b><br/>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
+<li><b>Richards</b><br>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
+<li><b>DeltaBlue</b><br>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
+<li><b>Crypto</b><br>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
+<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
+<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
+<li><b>RegExp</b><br>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
(<i>1614 lines</i>).
</li>
-<li><b>Splay</b><br/>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
+<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
</ul>
<p>
@@ -92,9 +97,9 @@ the <a href="http://v8.googlecode.com/svn/data/benchmarks/current/revisions.html
</td><td style="text-align: center">
<div class="run">
- <div id="status" style="text-align: center; margin-top: 50px; font-size: 120%; font-weight: bold;">Starting...</div>
- <div style="text-align: left; margin: 30px 0 0 90px;" id="results">
- <div>
+ <div id="status">Starting...</div>
+ <div id="results">
+ </div>
</div>
</td></tr></table>
diff --git a/deps/v8/benchmarks/style.css b/deps/v8/benchmarks/style.css
index d976cdd3e7..46320c1ebe 100644..100755
--- a/deps/v8/benchmarks/style.css
+++ b/deps/v8/benchmarks/style.css
@@ -1,11 +1,7 @@
-body {
- font-family: sans-serif;
-}
-
-hr{
+hr {
border: 1px solid;
border-color: #36C;
- margin: 1em 0
+ margin: 1em 0;
}
h1, h2, h3, h4 {
@@ -14,27 +10,17 @@ h1, h2, h3, h4 {
}
h1 {
- font-size: 190%;
- height: 1.2em;
-}
-
-
-h2{
- font-size: 140%;
+ font-size: 154%;
height: 1.2em;
}
-h3{
- font-size: 100%;
-}
-li{
+li {
margin: .3em 0 1em 0;
}
-body{
+body {
font-family: Helvetica,Arial,sans-serif;
- font-size: small;
color: #000;
background-color: #fff;
}
@@ -54,7 +40,7 @@ div.subtitle {
}
td.contents {
- text-align: start;
+ text-align: left;
}
div.run {
@@ -68,3 +54,15 @@ div.run {
background-repeat: no-repeat;
border: 1px solid rgb(51, 102, 204);
}
+
+#status {
+ text-align: center;
+ margin-top: 50px;
+ font-size: 120%;
+ font-weight: bold;
+}
+
+#results {
+ text-align: left;
+ margin: 30px 0 0 90px;
+}
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index e7b26778a9..8f22c81b65 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1176,6 +1176,12 @@ class V8EXPORT Array : public Object {
public:
uint32_t Length() const;
+ /**
+ * Clones an element at index |index|. Returns an empty
+ * handle if cloning fails (for any reason).
+ */
+ Local<Object> CloneElementAt(uint32_t index);
+
static Local<Array> New(int length = 0);
static Array* Cast(Value* obj);
private:
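
The new Array::CloneElementAt declared above returns an empty handle whenever the clone cannot be made (the array does not use fast elements, the element is not a JSObject, or the copy throws), as the api.cc hunk further below shows. A minimal embedder-side sketch of how the call might be used, assuming a context and handle scope are already entered; the helper name is illustrative, not part of the patch:

#include <v8.h>

// Hypothetical helper: clone element |index| of |array|, or return an empty
// handle if the element is missing, is not an object, or cloning fails.
static v8::Local<v8::Object> CloneElement(v8::Local<v8::Array> array,
                                          uint32_t index) {
  if (index >= array->Length()) return v8::Local<v8::Object>();
  v8::Local<v8::Object> clone = array->CloneElementAt(index);
  if (clone.IsEmpty()) {
    // Failure path: fall back, report, or propagate as appropriate.
  }
  return clone;  // May still be empty; callers must check before use.
}
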
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index ac6cdf95ac..82ae702fd9 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -511,7 +511,10 @@ Object* Accessors::FunctionGetArguments(Object* object, void*) {
// If there is an arguments variable in the stack, we return that.
int index = ScopeInfo<>::StackSlotIndex(frame->code(),
Heap::arguments_symbol());
- if (index >= 0) return frame->GetExpression(index);
+ if (index >= 0) {
+ Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
+ if (!arguments->IsTheHole()) return *arguments;
+ }
// If there isn't an arguments variable in the stack, we need to
// find the frame that holds the actual arguments passed to the
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 1001847904..b9e0cec8b6 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -3012,6 +3012,26 @@ uint32_t v8::Array::Length() const {
}
+Local<Object> Array::CloneElementAt(uint32_t index) {
+ ON_BAILOUT("v8::Array::CloneElementAt()", return Local<Object>());
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ if (!self->HasFastElements()) {
+ return Local<Object>();
+ }
+ i::FixedArray* elms = self->elements();
+ i::Object* paragon = elms->get(index);
+ if (!paragon->IsJSObject()) {
+ return Local<Object>();
+ }
+ i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(Local<Object>());
+ return Utils::ToLocal(result);
+}
+
+
Local<String> v8::String::NewSymbol(const char* data, int length) {
EnsureInitialized("v8::String::NewSymbol()");
LOG_API("String::NewSymbol(char)");
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 824a5fda52..4dda7ec5b5 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -50,7 +50,7 @@ Condition NegateCondition(Condition cc) {
}
-void RelocInfo::apply(int delta) {
+void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 6d23a19bb2..b5332ece4c 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -67,6 +67,24 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function_call);
+ // Jump to the function-specific construct stub.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // r0: number of arguments
+ // r1: called object
+ __ bind(&non_function_call);
+
+ // Set expected number of arguments to zero (not changing r0).
+ __ mov(r2, Operand(0));
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -177,16 +195,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
-
- // r0: number of arguments
- // r1: called object
- __ bind(&non_function_call);
-
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 8c28b24347..6626619912 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1471,85 +1471,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
}
-int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
- return kFastSwitchMaxOverheadFactor;
-}
-
-int CodeGenerator::FastCaseSwitchMinCaseCount() {
- return kFastSwitchMinCaseCount;
-}
-
-
-void CodeGenerator::GenerateFastCaseSwitchJumpTable(
- SwitchStatement* node,
- int min_index,
- int range,
- Label* default_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels) {
- VirtualFrame::SpilledScope spilled_scope;
- JumpTarget setup_default;
- JumpTarget is_smi;
-
- // A non-null default label pointer indicates a default case among
- // the case labels. Otherwise we use the break target as a
- // "default" for failure to hit the jump table.
- JumpTarget* default_target =
- (default_label == NULL) ? node->break_target() : &setup_default;
-
- ASSERT(kSmiTag == 0 && kSmiTagSize <= 2);
- frame_->EmitPop(r0);
-
- // Test for a Smi value in a HeapNumber.
- __ tst(r0, Operand(kSmiTagMask));
- is_smi.Branch(eq);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
- default_target->Branch(ne);
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kNumberToSmi, 1);
- is_smi.Bind();
-
- if (min_index != 0) {
- // Small positive numbers can be immediate operands.
- if (min_index < 0) {
- // If min_index is Smi::kMinValue, -min_index is not a Smi.
- if (Smi::IsValid(-min_index)) {
- __ add(r0, r0, Operand(Smi::FromInt(-min_index)));
- } else {
- __ add(r0, r0, Operand(Smi::FromInt(-min_index - 1)));
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- }
- } else {
- __ sub(r0, r0, Operand(Smi::FromInt(min_index)));
- }
- }
- __ tst(r0, Operand(0x80000000 | kSmiTagMask));
- default_target->Branch(ne);
- __ cmp(r0, Operand(Smi::FromInt(range)));
- default_target->Branch(ge);
- VirtualFrame* start_frame = new VirtualFrame(frame_);
- __ SmiJumpTable(r0, case_targets);
-
- GenerateFastCaseSwitchCases(node, case_labels, start_frame);
-
- // If there was a default case among the case labels, we need to
- // emit code to jump to it from the default target used for failure
- // to hit the jump table.
- if (default_label != NULL) {
- if (has_valid_frame()) {
- node->break_target()->Jump();
- }
- setup_default.Bind();
- frame_->MergeTo(start_frame);
- __ b(default_label);
- DeleteFrame();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
-}
-
-
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -1560,10 +1481,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
LoadAndSpill(node->tag());
- if (TryGenerateFastCaseSwitchStatement(node)) {
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
- return;
- }
JumpTarget next_test;
JumpTarget fall_through;
@@ -4728,27 +4645,53 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Only succeeds for doubles that are in the ranges
+// number. Rounds towards 0. Fastest for doubles that are in the ranges
// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
// almost to the range of signed int32 values that are not Smis. Jumps to the
-// label if the double isn't in the range it can cope with.
+// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
+// (excluding the endpoints).
static void GetInt32(MacroAssembler* masm,
Register source,
Register dest,
Register scratch,
+ Register scratch2,
Label* slow) {
- Register scratch2 = dest;
+ Label right_exponent, done;
// Get exponent word.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
__ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+ // Load dest with zero. We use this either for the final shift or
+ // for the answer.
+ __ mov(dest, Operand(0));
// Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+ // the exponent that we are fastest at and also the highest exponent we can
+ // handle here.
const uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
__ cmp(scratch2, Operand(non_smi_exponent));
- // If not, then we go slow.
- __ b(ne, slow);
+ // If we have a match of the int32-but-not-Smi exponent then skip some logic.
+ __ b(eq, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ b(gt, slow);
+
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
+ // Dest already has a Smi zero.
+ __ b(lt, &done);
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ __ rsb(dest, dest, Operand(30));
+
+ __ bind(&right_exponent);
// Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
// Put back the implicit 1.
@@ -4760,12 +4703,17 @@ static void GetInt32(MacroAssembler* masm,
__ mov(scratch2, Operand(scratch2, LSL, shift_distance));
// Put sign in zero flag.
__ tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double.
+ // Get the second half of the double. For some exponents we don't actually
+ // need this because the bits get shifted out again, but it's probably slower
+ // to test than just to do it.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
// Shift down 22 bits to get the last 10 bits.
- __ orr(dest, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ // Move down according to the exponent.
+ __ mov(dest, Operand(scratch, LSR, dest));
// Fix sign if sign bit was set.
__ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ __ bind(&done);
}
@@ -4785,7 +4733,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r1, r3, r4, &slow);
+ GetInt32(masm, r1, r3, r4, r5, &slow);
__ jmp(&done_checking_r1);
__ bind(&r1_is_smi);
__ mov(r3, Operand(r1, ASR, 1));
@@ -4795,7 +4743,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r0, r2, r4, &slow);
+ GetInt32(masm, r0, r2, r4, r5, &slow);
__ jmp(&done_checking_r0);
__ bind(&r0_is_smi);
__ mov(r2, Operand(r0, ASR, 1));
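
The reworked GetInt32 above widens the fast path: instead of bailing out unless the biased exponent is exactly 30, it now derives a right-shift amount from the exponent, so smaller-magnitude doubles are also truncated in registers, and only exponents above 30 (values that do not fit in a signed int32, infinities, NaNs) fall through to the slow case. A rough portable sketch of the same idea in plain C++, not the ARM assembly, assuming the input is finite with magnitude below 2^31:

#include <cstdint>
#include <cstring>

// Illustrative only: truncate |value| toward zero via exponent/mantissa bit
// manipulation, assuming |value| is finite and |value| < 2^31.
int32_t TruncateToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // IEEE-754 bit pattern.
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // Unbiased.
  if (exponent < 0) return 0;  // |value| < 1.0 rounds to zero.
  // Reattach the implicit leading 1 to the 52-bit mantissa.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  // Shift so the binary point lands at bit 0; this discards the fraction.
  int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
  return (bits >> 63) ? -magnitude : magnitude;  // Reapply the sign.
}
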
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 0df793a4ae..4fab900166 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -358,59 +358,6 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
- // Methods and constants for fast case switch statement support.
- //
- // Only allow fast-case switch if the range of labels is at most
- // this factor times the number of case labels.
- // Value is derived from comparing the size of code generated by the normal
- // switch code for Smi-labels to the size of a single pointer. If code
- // quality increases this number should be decreased to match.
- static const int kFastSwitchMaxOverheadFactor = 10;
-
- // Minimal number of switch cases required before we allow jump-table
- // optimization.
- static const int kFastSwitchMinCaseCount = 5;
-
- // The limit of the range of a fast-case switch, as a factor of the number
- // of cases of the switch. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMaxOverheadFactor();
-
- // The minimal number of cases in a switch before the fast-case switch
- // optimization is enabled. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMinCaseCount();
-
- // Allocate a jump table and create code to jump through it.
- // Should call GenerateFastCaseSwitchCases to generate the code for
- // all the cases at the appropriate point.
- void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
- int min_index,
- int range,
- Label* default_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels);
-
- // Generate the code for cases for the fast case switch.
- // Called by GenerateFastCaseSwitchJumpTable.
- void GenerateFastCaseSwitchCases(SwitchStatement* node,
- Vector<Label> case_labels,
- VirtualFrame* start_frame);
-
- // Fast support for constant-Smi switches.
- void GenerateFastCaseSwitchStatement(SwitchStatement* node,
- int min_index,
- int range,
- int default_index);
-
- // Fast support for constant-Smi switches. Tests whether switch statement
- // permits optimization and calls GenerateFastCaseSwitch if it does.
- // Returns true if the fast-case switch was generated, and false if not.
- bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
-
-
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index ebebd534a7..2f36f108bb 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -359,14 +359,14 @@ class VirtualFrame : public ZoneObject {
void EmitPush(Register reg);
// Push an element on the virtual frame.
- void Push(Register reg, StaticType static_type = StaticType());
+ void Push(Register reg);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
// Pushing a result invalidates it (its contents become owned by the frame).
void Push(Result* result) {
if (result->is_register()) {
- Push(result->reg(), result->static_type());
+ Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 0abd852a16..979dd90f3e 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -183,7 +183,7 @@ class RelocInfo BASE_EMBEDDED {
intptr_t data() const { return data_; }
// Apply a relocation by delta bytes
- INLINE(void apply(int delta));
+ INLINE(void apply(intptr_t delta));
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
@@ -265,8 +265,12 @@ class RelocInfoWriter BASE_EMBEDDED {
last_pc_ = pc;
}
- // Max size (bytes) of a written RelocInfo.
- static const int kMaxSize = 12;
+ // Max size (bytes) of a written RelocInfo. Longest encoding is
+ // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
+ // On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
+ // On x64 this is 1 + 4 + 1 + 1 + 1 + 8 == 16;
+ // Here we use the maximum of the two.
+ static const int kMaxSize = 16;
private:
inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index eef8da7151..d8a323267d 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -68,7 +68,7 @@ VariableProxy::VariableProxy(Handle<String> name,
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
// at least one access, otherwise no need for a VariableProxy
- var_uses_.RecordAccess(1);
+ var_uses_.RecordRead(1);
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 80a4aa5f2e..15d762f051 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -802,13 +802,20 @@ class VariableProxy: public Expression {
Variable* AsVariable() {
return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
}
+
virtual bool IsValidLeftHandSide() {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
+
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
+ bool IsArguments() {
+ Variable* variable = AsVariable();
+ return (variable == NULL) ? false : variable->is_arguments();
+ }
+
// If this assertion fails it means that some code has tried to
// treat the special "this" variable as an ordinary variable with
// the name "this".
@@ -890,12 +897,13 @@ class Slot: public Expression {
virtual void Accept(AstVisitor* v);
// Type testing & conversion
- virtual Slot* AsSlot() { return this; }
+ virtual Slot* AsSlot() { return this; }
// Accessors
- Variable* var() const { return var_; }
- Type type() const { return type_; }
- int index() const { return index_; }
+ Variable* var() const { return var_; }
+ Type type() const { return type_; }
+ int index() const { return index_; }
+ bool is_arguments() const { return var_->is_arguments(); }
private:
Variable* var_;
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 2dbc030327..3810c6a191 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1113,11 +1113,8 @@ bool Genesis::InstallNatives() {
}
#ifdef V8_HOST_ARCH_64_BIT
- // TODO(X64): Remove these tests when code generation works and is stable.
- MacroAssembler::ConstructAndTestJSFunction();
+ // TODO(X64): Remove this test when code generation works and is stable.
CodeGenerator::TestCodeGenerator();
- // TODO(X64): Reenable remaining initialization when code generation works.
- return true;
#endif // V8_HOST_ARCH_64_BIT
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 6e0f832565..0f4a610b83 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -51,6 +51,7 @@ namespace internal {
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructCall, BUILTIN, UNINITIALIZED) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
\
@@ -210,6 +211,7 @@ class Builtins : public AllStatic {
static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id);
static void Generate_JSConstructCall(MacroAssembler* masm);
+ static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index e359c348ae..ad5b1eaf7d 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -225,7 +225,7 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
- if (!Logger::IsEnabled()) return false;
+ if (!Logger::is_logging()) return false;
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
static Vector<const char> kRegexp = CStrVector("regexp");
@@ -472,129 +472,6 @@ bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
}
-void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
- int min_index,
- int range,
- int default_index) {
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
-
- // Label pointer per number in range.
- SmartPointer<Label*> case_targets(NewArray<Label*>(range));
-
- // Label per switch case.
- SmartPointer<Label> case_labels(NewArray<Label>(length));
-
- Label* fail_label =
- default_index >= 0 ? &(case_labels[default_index]) : NULL;
-
- // Populate array of label pointers for each number in the range.
- // Initally put the failure label everywhere.
- for (int i = 0; i < range; i++) {
- case_targets[i] = fail_label;
- }
-
- // Overwrite with label of a case for the number value of that case.
- // (In reverse order, so that if the same label occurs twice, the
- // first one wins).
- for (int i = length - 1; i >= 0 ; i--) {
- CaseClause* clause = cases->at(i);
- if (!clause->is_default()) {
- Object* label_value = *(clause->label()->AsLiteral()->handle());
- int case_value = Smi::cast(label_value)->value();
- case_targets[case_value - min_index] = &(case_labels[i]);
- }
- }
-
- GenerateFastCaseSwitchJumpTable(node,
- min_index,
- range,
- fail_label,
- Vector<Label*>(*case_targets, range),
- Vector<Label>(*case_labels, length));
-}
-
-
-void CodeGenerator::GenerateFastCaseSwitchCases(
- SwitchStatement* node,
- Vector<Label> case_labels,
- VirtualFrame* start_frame) {
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
-
- for (int i = 0; i < length; i++) {
- Comment cmnt(masm(), "[ Case clause");
-
- // We may not have a virtual frame if control flow did not fall
- // off the end of the previous case. In that case, use the start
- // frame. Otherwise, we have to merge the existing one to the
- // start frame as part of the previous case.
- if (!has_valid_frame()) {
- RegisterFile empty;
- SetFrame(new VirtualFrame(start_frame), &empty);
- } else {
- frame_->MergeTo(start_frame);
- }
- masm()->bind(&case_labels[i]);
- VisitStatements(cases->at(i)->statements());
- }
-}
-
-
-bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
- // TODO(238): Due to issue 238, fast case switches can crash on ARM
- // and possibly IA32. They are disabled for now.
- // See http://code.google.com/p/v8/issues/detail?id=238
- return false;
-
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
-
- if (length < FastCaseSwitchMinCaseCount()) {
- return false;
- }
-
- // Test whether fast-case should be used.
- int default_index = -1;
- int min_index = Smi::kMaxValue;
- int max_index = Smi::kMinValue;
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
- if (clause->is_default()) {
- if (default_index >= 0) {
- // There is more than one default label. Defer to the normal case
- // for error.
- return false;
- }
- default_index = i;
- } else {
- Expression* label = clause->label();
- Literal* literal = label->AsLiteral();
- if (literal == NULL) {
- return false; // fail fast case
- }
- Object* value = *(literal->handle());
- if (!value->IsSmi()) {
- return false;
- }
- int int_value = Smi::cast(value)->value();
- min_index = Min(int_value, min_index);
- max_index = Max(int_value, max_index);
- }
- }
-
- // All labels are known to be Smis.
- int range = max_index - min_index + 1; // |min..max| inclusive
- if (range / FastCaseSwitchMaxOverheadFactor() > length) {
- return false; // range of labels is too sparse
- }
-
- // Optimization accepted, generate code.
- GenerateFastCaseSwitchStatement(node, min_index, range, default_index);
- return true;
-}
-
-
void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) {
int pos = fun->start_position();
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 0b42935f5b..fa414d4128 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -61,12 +61,6 @@
// FindInlineRuntimeLUT
// CheckForInlineRuntimeCall
// PatchInlineRuntimeEntry
-// GenerateFastCaseSwitchStatement
-// GenerateFastCaseSwitchCases
-// TryGenerateFastCaseSwitchStatement
-// GenerateFastCaseSwitchJumpTable
-// FastCaseSwitchMinCaseCount
-// FastCaseSwitchMaxOverheadFactor
// CodeForFunctionPosition
// CodeForReturnPosition
// CodeForStatementPosition
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 421b6766fe..fd706af88d 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -32,28 +32,123 @@
namespace v8 {
namespace internal {
-enum {
- // The number of script generations tell how many GCs a script can
- // survive in the compilation cache, before it will be flushed if it
- // hasn't been used.
- NUMBER_OF_SCRIPT_GENERATIONS = 5,
-
- // The compilation cache consists of tables - one for each entry
- // kind plus extras for the script generations.
- NUMBER_OF_TABLE_ENTRIES =
- CompilationCache::LAST_ENTRY + NUMBER_OF_SCRIPT_GENERATIONS
+
+// The number of sub caches covering the different types to cache.
+static const int kSubCacheCount = 4;
+
+// The number of generations for each sub cache.
+static const int kScriptGenerations = 5;
+static const int kEvalGlobalGenerations = 2;
+static const int kEvalContextualGenerations = 2;
+static const int kRegExpGenerations = 2;
+
+// Initial of each compilation cache table allocated.
+static const int kInitialCacheSize = 64;
+
+// The compilation cache consists of several generational sub-caches which uses
+// this class as a base class. A sub-cache contains a compilation cache tables
+// for each generation of the sub-cache. As the same source code string has
+// different compiled code for scripts and evals. Internally, we use separate
+// sub-caches to avoid getting the wrong kind of result when looking up.
+class CompilationSubCache {
+ public:
+ explicit CompilationSubCache(int generations): generations_(generations) {
+ tables_ = NewArray<Object*>(generations);
+ }
+
+ // Get the compilation cache tables for a specific generation.
+ Handle<CompilationCacheTable> GetTable(int generation);
+
+ // Age the sub-cache by evicting the oldest generation and creating a new
+ // young generation.
+ void Age();
+
+ // GC support.
+ void Iterate(ObjectVisitor* v);
+
+ // Clear this sub-cache evicting all its content.
+ void Clear();
+
+ // Number of generations in this sub-cache.
+ inline int generations() { return generations_; }
+
+ private:
+ int generations_; // Number of generations.
+ Object** tables_; // Compilation cache tables - one for each generation.
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
};
+// Sub-cache for scripts.
+class CompilationCacheScript : public CompilationSubCache {
+ public:
+ explicit CompilationCacheScript(int generations)
+ : CompilationSubCache(generations) { }
+
+ Handle<JSFunction> Lookup(Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
+ void Put(Handle<String> source, Handle<JSFunction> boilerplate);
+
+ private:
+ bool HasOrigin(Handle<JSFunction> boilerplate,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
+};
+
+
+// Sub-cache for eval scripts.
+class CompilationCacheEval: public CompilationSubCache {
+ public:
+ explicit CompilationCacheEval(int generations)
+ : CompilationSubCache(generations) { }
+
+ Handle<JSFunction> Lookup(Handle<String> source, Handle<Context> context);
+
+ void Put(Handle<String> source,
+ Handle<Context> context,
+ Handle<JSFunction> boilerplate);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
+};
+
+
+// Sub-cache for regular expressions.
+class CompilationCacheRegExp: public CompilationSubCache {
+ public:
+ explicit CompilationCacheRegExp(int generations)
+ : CompilationSubCache(generations) { }
+
+ Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
+
+ void Put(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
+};
+
+
+// Statically allocate all the sub-caches.
+static CompilationCacheScript script(kScriptGenerations);
+static CompilationCacheEval eval_global(kEvalGlobalGenerations);
+static CompilationCacheEval eval_contextual(kEvalContextualGenerations);
+static CompilationCacheRegExp reg_exp(kRegExpGenerations);
+static CompilationSubCache* subcaches[kSubCacheCount] =
+ {&script, &eval_global, &eval_contextual, &reg_exp};
+
+
// Current enable state of the compilation cache.
static bool enabled = true;
static inline bool IsEnabled() {
return FLAG_compilation_cache && enabled;
}
-// Keep separate tables for the different entry kinds.
-static Object* tables[NUMBER_OF_TABLE_ENTRIES] = { 0, };
-
static Handle<CompilationCacheTable> AllocateTable(int size) {
CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size),
@@ -61,54 +156,40 @@ static Handle<CompilationCacheTable> AllocateTable(int size) {
}
-static Handle<CompilationCacheTable> GetTable(int index) {
- ASSERT(index >= 0 && index < NUMBER_OF_TABLE_ENTRIES);
+Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
+ ASSERT(generation < generations_);
Handle<CompilationCacheTable> result;
- if (tables[index]->IsUndefined()) {
- static const int kInitialCacheSize = 64;
+ if (tables_[generation]->IsUndefined()) {
result = AllocateTable(kInitialCacheSize);
- tables[index] = *result;
+ tables_[generation] = *result;
} else {
- CompilationCacheTable* table = CompilationCacheTable::cast(tables[index]);
+ CompilationCacheTable* table =
+ CompilationCacheTable::cast(tables_[generation]);
result = Handle<CompilationCacheTable>(table);
}
return result;
}
-static Handle<JSFunction> Lookup(Handle<String> source,
- Handle<Context> context,
- CompilationCache::Entry entry) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result;
- { HandleScope scope;
- Handle<CompilationCacheTable> table = GetTable(entry);
- result = table->LookupEval(*source, *context);
- }
- if (result->IsJSFunction()) {
- return Handle<JSFunction>(JSFunction::cast(result));
- } else {
- return Handle<JSFunction>::null();
+void CompilationSubCache::Age() {
+ // Age the generations implicitly killing off the oldest.
+ for (int i = generations_ - 1; i > 0; i--) {
+ tables_[i] = tables_[i - 1];
}
+
+ // Set the first generation as unborn.
+ tables_[0] = Heap::undefined_value();
}
-static Handle<FixedArray> Lookup(Handle<String> source,
- JSRegExp::Flags flags) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result;
- { HandleScope scope;
- Handle<CompilationCacheTable> table = GetTable(CompilationCache::REGEXP);
- result = table->LookupRegExp(*source, flags);
- }
- if (result->IsFixedArray()) {
- return Handle<FixedArray>(FixedArray::cast(result));
- } else {
- return Handle<FixedArray>::null();
+void CompilationSubCache::Iterate(ObjectVisitor* v) {
+ v->VisitPointers(&tables_[0], &tables_[generations_]);
+}
+
+
+void CompilationSubCache::Clear() {
+ for (int i = 0; i < generations_; i++) {
+ tables_[i] = Heap::undefined_value();
}
}
@@ -116,10 +197,10 @@ static Handle<FixedArray> Lookup(Handle<String> source,
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
-static bool HasOrigin(Handle<JSFunction> boilerplate,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
+bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset) {
Handle<Script> script =
Handle<Script>(Script::cast(boilerplate->shared()->script()));
// If the script name isn't set, the boilerplate script should have
@@ -141,24 +222,17 @@ static bool HasOrigin(Handle<JSFunction> boilerplate,
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
-Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
+Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset) {
- if (!IsEnabled()) {
- return Handle<JSFunction>::null();
- }
-
- // Use an int for the generation index, so value range propagation
- // in gcc 4.3+ won't assume it can only go up to LAST_ENTRY when in
- // fact it can go up to SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS.
- int generation = SCRIPT;
Object* result = NULL;
+ int generation;
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
{ HandleScope scope;
- while (generation < SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS) {
+ for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
Handle<Object> probe(table->Lookup(*source));
if (probe->IsJSFunction()) {
@@ -170,20 +244,18 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
break;
}
}
- // Go to the next generation.
- generation++;
}
}
static void* script_histogram = StatsTable::CreateHistogram(
"V8.ScriptCache",
0,
- NUMBER_OF_SCRIPT_GENERATIONS,
- NUMBER_OF_SCRIPT_GENERATIONS + 1);
+ kScriptGenerations,
+ kScriptGenerations + 1);
if (script_histogram != NULL) {
// The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
- StatsTable::AddHistogramSample(script_histogram, generation - SCRIPT);
+ StatsTable::AddHistogramSample(script_histogram, generation);
}
// Once outside the manacles of the handle scope, we need to recheck
@@ -194,7 +266,7 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
- if (generation != SCRIPT) PutScript(source, boilerplate);
+ if (generation != 0) Put(source, boilerplate);
Counters::compilation_cache_hits.Increment();
return boilerplate;
} else {
@@ -204,19 +276,118 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
}
+void CompilationCacheScript::Put(Handle<String> source,
+ Handle<JSFunction> boilerplate) {
+ HandleScope scope;
+ ASSERT(boilerplate->IsBoilerplate());
+ Handle<CompilationCacheTable> table = GetTable(0);
+ CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
+}
+
+
+Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
+ Handle<Context> context) {
+ // Make sure not to leak the table into the surrounding handle
+ // scope. Otherwise, we risk keeping old tables around even after
+ // having cleared the cache.
+ Object* result = NULL;
+ int generation;
+ { HandleScope scope;
+ for (generation = 0; generation < generations(); generation++) {
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ result = table->LookupEval(*source, *context);
+ if (result->IsJSFunction()) {
+ break;
+ }
+ }
+ }
+ if (result->IsJSFunction()) {
+ Handle<JSFunction> boilerplate(JSFunction::cast(result));
+ if (generation != 0) {
+ Put(source, context, boilerplate);
+ }
+ Counters::compilation_cache_hits.Increment();
+ return boilerplate;
+ } else {
+ Counters::compilation_cache_misses.Increment();
+ return Handle<JSFunction>::null();
+ }
+}
+
+
+void CompilationCacheEval::Put(Handle<String> source,
+ Handle<Context> context,
+ Handle<JSFunction> boilerplate) {
+ HandleScope scope;
+ ASSERT(boilerplate->IsBoilerplate());
+ Handle<CompilationCacheTable> table = GetTable(0);
+ CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
+}
+
+
+Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
+ JSRegExp::Flags flags) {
+ // Make sure not to leak the table into the surrounding handle
+ // scope. Otherwise, we risk keeping old tables around even after
+ // having cleared the cache.
+ Object* result = NULL;
+ int generation;
+ { HandleScope scope;
+ for (generation = 0; generation < generations(); generation++) {
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ result = table->LookupRegExp(*source, flags);
+ if (result->IsFixedArray()) {
+ break;
+ }
+ }
+ }
+ if (result->IsFixedArray()) {
+ Handle<FixedArray> data(FixedArray::cast(result));
+ if (generation != 0) {
+ Put(source, flags, data);
+ }
+ Counters::compilation_cache_hits.Increment();
+ return data;
+ } else {
+ Counters::compilation_cache_misses.Increment();
+ return Handle<FixedArray>::null();
+ }
+}
+
+
+void CompilationCacheRegExp::Put(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data) {
+ HandleScope scope;
+ Handle<CompilationCacheTable> table = GetTable(0);
+ CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
+}
+
+
+Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset) {
+ if (!IsEnabled()) {
+ return Handle<JSFunction>::null();
+ }
+
+ return script.Lookup(source, name, line_offset, column_offset);
+}
+
+
Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
Handle<Context> context,
- Entry entry) {
+ bool is_global) {
if (!IsEnabled()) {
return Handle<JSFunction>::null();
}
- ASSERT(entry == EVAL_GLOBAL || entry == EVAL_CONTEXTUAL);
- Handle<JSFunction> result = Lookup(source, context, entry);
- if (result.is_null()) {
- Counters::compilation_cache_misses.Increment();
+ Handle<JSFunction> result;
+ if (is_global) {
+ result = eval_global.Lookup(source, context);
} else {
- Counters::compilation_cache_hits.Increment();
+ result = eval_contextual.Lookup(source, context);
}
return result;
}
@@ -228,13 +399,7 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return Handle<FixedArray>::null();
}
- Handle<FixedArray> result = Lookup(source, flags);
- if (result.is_null()) {
- Counters::compilation_cache_misses.Increment();
- } else {
- Counters::compilation_cache_hits.Increment();
- }
- return result;
+ return reg_exp.Lookup(source, flags);
}
@@ -244,16 +409,14 @@ void CompilationCache::PutScript(Handle<String> source,
return;
}
- HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
- Handle<CompilationCacheTable> table = GetTable(SCRIPT);
- CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
+ script.Put(source, boilerplate);
}
void CompilationCache::PutEval(Handle<String> source,
Handle<Context> context,
- Entry entry,
+ bool is_global,
Handle<JSFunction> boilerplate) {
if (!IsEnabled()) {
return;
@@ -261,8 +424,11 @@ void CompilationCache::PutEval(Handle<String> source,
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
- Handle<CompilationCacheTable> table = GetTable(entry);
- CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
+ if (is_global) {
+ eval_global.Put(source, context, boilerplate);
+ } else {
+ eval_contextual.Put(source, context, boilerplate);
+ }
}
@@ -274,31 +440,27 @@ void CompilationCache::PutRegExp(Handle<String> source,
return;
}
- HandleScope scope;
- Handle<CompilationCacheTable> table = GetTable(REGEXP);
- CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
+ reg_exp.Put(source, flags, data);
}
void CompilationCache::Clear() {
- for (int i = 0; i < NUMBER_OF_TABLE_ENTRIES; i++) {
- tables[i] = Heap::undefined_value();
+ for (int i = 0; i < kSubCacheCount; i++) {
+ subcaches[i]->Clear();
}
}
void CompilationCache::Iterate(ObjectVisitor* v) {
- v->VisitPointers(&tables[0], &tables[NUMBER_OF_TABLE_ENTRIES]);
+ for (int i = 0; i < kSubCacheCount; i++) {
+ subcaches[i]->Iterate(v);
+ }
}
void CompilationCache::MarkCompactPrologue() {
- ASSERT(LAST_ENTRY == SCRIPT);
- for (int i = NUMBER_OF_TABLE_ENTRIES - 1; i > SCRIPT; i--) {
- tables[i] = tables[i - 1];
- }
- for (int j = 0; j <= LAST_ENTRY; j++) {
- tables[j] = Heap::undefined_value();
+ for (int i = 0; i < kSubCacheCount; i++) {
+ subcaches[i]->Age();
}
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 4545defc5d..3487c08a15 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -34,20 +34,9 @@ namespace internal {
// The compilation cache keeps function boilerplates for compiled
// scripts and evals. The boilerplates are looked up using the source
-// string as the key.
+// string as the key. For regular expressions the compilation data is cached.
class CompilationCache {
public:
- // The same source code string has different compiled code for
- // scripts and evals. Internally, we use separate caches to avoid
- // getting the wrong kind of entry when looking up.
- enum Entry {
- EVAL_GLOBAL,
- EVAL_CONTEXTUAL,
- REGEXP,
- SCRIPT,
- LAST_ENTRY = SCRIPT
- };
-
// Finds the script function boilerplate for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
@@ -61,7 +50,7 @@ class CompilationCache {
// contain a script for the given source string.
static Handle<JSFunction> LookupEval(Handle<String> source,
Handle<Context> context,
- Entry entry);
+ bool is_global);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
@@ -77,7 +66,7 @@ class CompilationCache {
// with the boilerplate. This may overwrite an existing mapping.
static void PutEval(Handle<String> source,
Handle<Context> context,
- Entry entry,
+ bool is_global,
Handle<JSFunction> boilerplate);
// Associate the (source, flags) pair to the given regexp data.
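
The compilation cache rework above replaces the flat table array with per-kind sub-caches (script, global eval, contextual eval, regexp), each holding a fixed number of generations. MarkCompactPrologue now ages every sub-cache by shifting each generation down one slot and clearing generation 0, and a hit found in an older generation is re-inserted at generation 0 so frequently used entries survive longer. A toy sketch of that aging/promotion scheme, where std::map stands in for CompilationCacheTable and all names are illustrative:

#include <map>
#include <string>
#include <vector>

// Toy generational cache: generation 0 is youngest; Age() evicts the oldest.
// Assumes generations >= 1.
class GenerationalCache {
 public:
  explicit GenerationalCache(int generations) : tables_(generations) {}

  void Put(const std::string& key, int value) { tables_[0][key] = value; }

  bool Lookup(const std::string& key, int* value) {
    for (size_t gen = 0; gen < tables_.size(); gen++) {
      auto it = tables_[gen].find(key);
      if (it == tables_[gen].end()) continue;
      *value = it->second;
      if (gen != 0) Put(key, *value);  // Promote hits back to generation 0.
      return true;
    }
    return false;
  }

  // Called on each mark-compact GC: drop the oldest generation and start a
  // fresh, empty young generation.
  void Age() {
    for (size_t i = tables_.size() - 1; i > 0; i--) tables_[i] = tables_[i - 1];
    tables_[0].clear();
  }

 private:
  std::vector<std::map<std::string, int>> tables_;
};
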
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 73d200226e..aecdfb9aa8 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -175,7 +175,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
// Log the code generation for the script. Check explicit whether logging is
// to avoid allocating when not required.
- if (Logger::IsEnabled() || OProfileAgent::is_enabled()) {
+ if (Logger::is_logging() || OProfileAgent::is_enabled()) {
if (script->name()->IsString()) {
SmartPointer<char> data =
String::cast(script->name())->ToCString(DISALLOW_NULLS);
@@ -295,14 +295,11 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
- CompilationCache::Entry entry = is_global
- ? CompilationCache::EVAL_GLOBAL
- : CompilationCache::EVAL_CONTEXTUAL;
// Do a lookup in the compilation cache; if the entry is not there,
// invoke the compiler and add the result to the cache.
Handle<JSFunction> result =
- CompilationCache::LookupEval(source, context, entry);
+ CompilationCache::LookupEval(source, context, is_global);
if (result.is_null()) {
// Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source);
@@ -314,7 +311,7 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
NULL,
NULL);
if (!result.is_null()) {
- CompilationCache::PutEval(source, context, entry, result);
+ CompilationCache::PutEval(source, context, is_global, result);
}
}
@@ -376,14 +373,11 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// Log the code generation. If source information is available include script
// name and line number. Check explicitly whether logging is enabled, as
// finding the line number is not free.
- if (Logger::IsEnabled() || OProfileAgent::is_enabled()) {
+ if (Logger::is_logging() || OProfileAgent::is_enabled()) {
Handle<String> func_name(name->length() > 0 ?
*name : shared->inferred_name());
if (script->name()->IsString()) {
- int line_num = GetScriptLineNumber(script, start_position);
- if (line_num > 0) {
- line_num += script->line_offset()->value() + 1;
- }
+ int line_num = GetScriptLineNumber(script, start_position) + 1;
LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
String::cast(script->name()), line_num));
OProfileAgent::CreateNativeCodeRegion(*func_name,
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 873c23ca54..ead73ee035 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -149,7 +149,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// check parameter locals in context
int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
if (param_index >= 0) {
- // slot found
+ // slot found.
int index =
ScopeInfo<>::ContextSlotIndex(*code,
Heap::arguments_shadow_symbol(),
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 7f63d9b337..2a3db7bb69 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -327,7 +327,7 @@ static double InternalStringToDouble(S* str,
index++;
if (!SubStringEquals(str, index, "Infinity"))
return JUNK_STRING_VALUE;
- result = is_negative ? -INFINITY : INFINITY;
+ result = is_negative ? -V8_INFINITY : V8_INFINITY;
index += 8;
}
}
diff --git a/deps/v8/src/date-delay.js b/deps/v8/src/date-delay.js
index 9aecadbec6..0a89783b4f 100644
--- a/deps/v8/src/date-delay.js
+++ b/deps/v8/src/date-delay.js
@@ -150,6 +150,8 @@ var DST_offset_cache = {
};
+// NOTE: The implementation relies on the fact that no time zones have
+// more than one daylight savings offset change per month.
function DaylightSavingsOffset(t) {
// Load the cache object from the builtins object.
var cache = DST_offset_cache;
@@ -530,7 +532,8 @@ function GetUTCHoursFrom(aDate) {
function GetFullYearFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
- return YearFromTime(LocalTimeNoCheck(t));
+ // Ignore the DST offset for year computations.
+ return YearFromTime(t + local_time_offset);
}
diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js
index 21cd68a2cb..423a1185eb 100644
--- a/deps/v8/src/debug-delay.js
+++ b/deps/v8/src/debug-delay.js
@@ -388,7 +388,7 @@ ScriptBreakPoint.prototype.clear = function () {
function UpdateScriptBreakPoints(script) {
for (var i = 0; i < script_break_points.length; i++) {
if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
- script_break_points[i].script_name() == script.name) {
+ script_break_points[i].matchesScript(script)) {
script_break_points[i].set(script);
}
}
@@ -1194,6 +1194,13 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
throw new Error('Command not specified');
}
+ // TODO(yurys): remove request.arguments.compactFormat check once
+ // ChromeDevTools are switched to 'inlineRefs'
+ if (request.arguments && (request.arguments.inlineRefs ||
+ request.arguments.compactFormat)) {
+ response.setOption('inlineRefs', true);
+ }
+
if (request.command == 'continue') {
this.continueRequest_(request, response);
} else if (request.command == 'break') {
@@ -1504,9 +1511,6 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response)
if (from_index < 0 || to_index < 0) {
return response.failed('Invalid frame number');
}
- if (request.arguments.compactFormat) {
- response.setOption('compactFormat', true);
- }
}
// Adjust the index.
@@ -1696,10 +1700,6 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
response.setOption('includeSource', includeSource);
}
- if (request.arguments.compactFormat) {
- response.setOption('compactFormat', true);
- }
-
// Lookup handles.
var mirrors = {};
for (var i = 0; i < handles.length; i++) {
diff --git a/deps/v8/src/dtoa-config.c b/deps/v8/src/dtoa-config.c
index 9fcd0ddfa8..bc0a58a174 100644
--- a/deps/v8/src/dtoa-config.c
+++ b/deps/v8/src/dtoa-config.c
@@ -77,6 +77,11 @@
#define __NO_ISOCEXT
#endif /* __MINGW32__ */
+/* On 64-bit systems, we need to make sure that a Long is only 32 bits. */
+#ifdef V8_TARGET_ARCH_X64
+#define Long int
+#endif /* V8_TARGET_ARCH_X64 */
+
/* Make sure we use the David M. Gay version of strtod(). On Linux, we
* cannot use the same name (maybe the function does not have weak
* linkage?). */
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index fad3e9c281..fe19873ab3 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -92,8 +92,6 @@ Handle<String> Factory::NewRawTwoByteString(int length,
Handle<String> Factory::NewConsString(Handle<String> first,
Handle<String> second) {
- if (first->length() == 0) return second;
- if (second->length() == 0) return first;
CALL_HEAP_FUNCTION(Heap::AllocateConsString(*first, *second), String);
}
diff --git a/deps/v8/src/frame-element.h b/deps/v8/src/frame-element.h
index d16eb481a3..666aabb269 100644
--- a/deps/v8/src/frame-element.h
+++ b/deps/v8/src/frame-element.h
@@ -54,8 +54,7 @@ class FrameElement BASE_EMBEDDED {
// The default constructor creates an invalid frame element.
FrameElement() {
- value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
- | TypeField::encode(INVALID)
+ value_ = TypeField::encode(INVALID)
| CopiedField::encode(false)
| SyncedField::encode(false)
| DataField::encode(0);
@@ -75,9 +74,8 @@ class FrameElement BASE_EMBEDDED {
// Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg,
- SyncFlag is_synced,
- StaticType static_type = StaticType()) {
- return FrameElement(REGISTER, reg, is_synced, static_type);
+ SyncFlag is_synced) {
+ return FrameElement(REGISTER, reg, is_synced);
}
// Factory function to construct a frame element whose value is known at
@@ -143,15 +141,6 @@ class FrameElement BASE_EMBEDDED {
return DataField::decode(value_);
}
- StaticType static_type() {
- return StaticType(StaticTypeField::decode(value_));
- }
-
- void set_static_type(StaticType static_type) {
- value_ = value_ & ~StaticTypeField::mask();
- value_ = value_ | StaticTypeField::encode(static_type.static_type_);
- }
-
bool Equals(FrameElement other) {
uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
if (!masked_difference) {
@@ -184,13 +173,8 @@ class FrameElement BASE_EMBEDDED {
if (!other->is_valid()) return other;
if (!SameLocation(other)) return NULL;
- // If either is unsynced, the result is. The result static type is
- // the merge of the static types. It's safe to set it on one of the
- // frame elements, and harmless too (because we are only going to
- // merge the reaching frames and will ensure that the types are
- // coherent, and changing the static type does not emit code).
+ // If either is unsynced, the result is.
FrameElement* result = is_synced() ? other : this;
- result->set_static_type(static_type().merge(other->static_type()));
return result;
}
@@ -205,16 +189,7 @@ class FrameElement BASE_EMBEDDED {
// Used to construct memory and register elements.
FrameElement(Type type, Register reg, SyncFlag is_synced) {
- value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
- | TypeField::encode(type)
- | CopiedField::encode(false)
- | SyncedField::encode(is_synced != NOT_SYNCED)
- | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
- }
-
- FrameElement(Type type, Register reg, SyncFlag is_synced, StaticType stype) {
- value_ = StaticTypeField::encode(stype.static_type_)
- | TypeField::encode(type)
+ value_ = TypeField::encode(type)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
| DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
@@ -222,8 +197,7 @@ class FrameElement BASE_EMBEDDED {
// Used to construct constant elements.
FrameElement(Handle<Object> value, SyncFlag is_synced) {
- value_ = StaticTypeField::encode(StaticType::TypeOf(*value).static_type_)
- | TypeField::encode(CONSTANT)
+ value_ = TypeField::encode(CONSTANT)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
| DataField::encode(ConstantList()->length());
@@ -248,14 +222,13 @@ class FrameElement BASE_EMBEDDED {
value_ = value_ | DataField::encode(new_reg.code_);
}
- // Encode static type, type, copied, synced and data in one 32 bit integer.
+ // Encode type, copied, synced and data in one 32 bit integer.
uint32_t value_;
- class StaticTypeField: public BitField<StaticType::StaticTypeEnum, 0, 3> {};
- class TypeField: public BitField<Type, 3, 3> {};
- class CopiedField: public BitField<uint32_t, 6, 1> {};
- class SyncedField: public BitField<uint32_t, 7, 1> {};
- class DataField: public BitField<uint32_t, 8, 32 - 9> {};
+ class TypeField: public BitField<Type, 0, 3> {};
+ class CopiedField: public BitField<uint32_t, 3, 1> {};
+ class SyncedField: public BitField<uint32_t, 4, 1> {};
+ class DataField: public BitField<uint32_t, 5, 32 - 6> {};
friend class VirtualFrame;
};
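With StaticType removed, the remaining FrameElement fields are repacked at the low end of the 32-bit word: type in bits 0-2, copied in bit 3, synced in bit 4, and the data payload in the 26 bits starting at bit 5. The self-contained sketch below uses a simplified stand-in for v8's BitField template to show how such encode/decode helpers pack and unpack a word.

#include <cassert>
#include <cstdint>

// Simplified BitField stand-in (illustrative only).
template <class T, int shift, int size>
struct BitField {
  static uint32_t mask() { return ((1u << size) - 1u) << shift; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & mask()) >> shift);
  }
};

enum Type { INVALID, MEMORY, REGISTER, CONSTANT, COPY };

// The repacked layout: type (3 bits), copied (1), synced (1), data (26).
typedef BitField<Type, 0, 3>      TypeField;
typedef BitField<uint32_t, 3, 1>  CopiedField;
typedef BitField<uint32_t, 4, 1>  SyncedField;
typedef BitField<uint32_t, 5, 26> DataField;

int main() {
  uint32_t word = TypeField::encode(REGISTER)
                | CopiedField::encode(0)
                | SyncedField::encode(1)
                | DataField::encode(7);  // e.g. a register code
  assert(TypeField::decode(word) == REGISTER);
  assert(SyncedField::decode(word) == 1);
  assert(DataField::decode(word) == 7);
  return 0;
}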
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 2b0fe15d13..bf83d0d75c 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -120,8 +120,10 @@ const int kIntptrSize = sizeof(intptr_t); // NOLINT
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
+const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
#else
const int kPointerSizeLog2 = 2;
+const intptr_t kIntptrSignBit = 0x80000000;
#endif
const int kObjectAlignmentBits = kPointerSizeLog2;
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 8dd09d77d6..810d3d42a1 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -34,7 +34,7 @@
namespace v8 {
namespace internal {
-int Heap::MaxHeapObjectSize() {
+int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
@@ -215,26 +215,6 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
-Object* Heap::GetKeyedLookupCache() {
- if (keyed_lookup_cache()->IsUndefined()) {
- Object* obj = LookupCache::Allocate(4);
- if (obj->IsFailure()) return obj;
- keyed_lookup_cache_ = obj;
- }
- return keyed_lookup_cache();
-}
-
-
-void Heap::SetKeyedLookupCache(LookupCache* cache) {
- keyed_lookup_cache_ = cache;
-}
-
-
-void Heap::ClearKeyedLookupCache() {
- keyed_lookup_cache_ = undefined_value();
-}
-
-
void Heap::SetLastScriptId(Object* last_script_id) {
last_script_id_ = last_script_id;
}
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index eb70f21a80..bf6fccd9f4 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -500,7 +500,9 @@ void Heap::MarkCompact(GCTracer* tracer) {
void Heap::MarkCompactPrologue(bool is_compacting) {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
- ClearKeyedLookupCache();
+ KeyedLookupCache::Clear();
+ ContextSlotCache::Clear();
+ DescriptorLookupCache::Clear();
CompilationCache::MarkCompactPrologue();
@@ -629,6 +631,9 @@ void Heap::Scavenge() {
// Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin"));
+ // Clear descriptor cache.
+ DescriptorLookupCache::Clear();
+
// Used for updating survived_since_last_expansion_ at function end.
int survived_watermark = PromotedSpaceSize();
@@ -943,17 +948,15 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
// If the object should be promoted, we try to copy it to old space.
if (ShouldBePromoted(object->address(), object_size)) {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space_ ||
- target_space == Heap::old_data_space_);
- Object* result = target_space->AllocateRaw(object_size);
- if (!result->IsFailure()) {
- HeapObject* target = HeapObject::cast(result);
- if (target_space == Heap::old_pointer_space_) {
+ Object* result;
+ if (object_size > MaxObjectSizeInPagedSpace()) {
+ result = lo_space_->AllocateRawFixedArray(object_size);
+ if (!result->IsFailure()) {
// Save the from-space object pointer and its map pointer at the
// top of the to space to be swept and copied later. Write the
// forwarding address over the map word of the from-space
// object.
+ HeapObject* target = HeapObject::cast(result);
promotion_queue.insert(object, first_word.ToMap());
object->set_map_word(MapWord::FromForwardingAddress(target));
@@ -964,21 +967,45 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
node->set_size(object_size);
*p = target;
- } else {
- // Objects promoted to the data space can be copied immediately
- // and not revisited---we will never sweep that space for
- // pointers and the copied objects do not contain pointers to
- // new space objects.
- *p = MigrateObject(object, target, object_size);
+ return;
+ }
+ } else {
+ OldSpace* target_space = Heap::TargetSpace(object);
+ ASSERT(target_space == Heap::old_pointer_space_ ||
+ target_space == Heap::old_data_space_);
+ result = target_space->AllocateRaw(object_size);
+ if (!result->IsFailure()) {
+ HeapObject* target = HeapObject::cast(result);
+ if (target_space == Heap::old_pointer_space_) {
+ // Save the from-space object pointer and its map pointer at the
+ // top of the to space to be swept and copied later. Write the
+ // forwarding address over the map word of the from-space
+ // object.
+ promotion_queue.insert(object, first_word.ToMap());
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+
+ // Give the space allocated for the result a proper map by
+ // treating it as a free list node (not linked into the free
+ // list).
+ FreeListNode* node = FreeListNode::FromAddress(target->address());
+ node->set_size(object_size);
+
+ *p = target;
+ } else {
+ // Objects promoted to the data space can be copied immediately
+ // and not revisited---we will never sweep that space for
+ // pointers and the copied objects do not contain pointers to
+ // new space objects.
+ *p = MigrateObject(object, target, object_size);
#ifdef DEBUG
- VerifyNonPointerSpacePointersVisitor v;
- (*p)->Iterate(&v);
+ VerifyNonPointerSpacePointersVisitor v;
+ (*p)->Iterate(&v);
#endif
+ }
+ return;
}
- return;
}
}
-
// The object should remain in new space or the old space allocation failed.
Object* result = new_space_.AllocateRaw(object_size);
// Failed allocation at this point is utterly unexpected.
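The rewritten promotion path makes the space choice explicit: objects too large for a page go to the large-object space, pointer-carrying objects go to old pointer space through the promotion queue, and pointer-free objects are migrated to old data space immediately. A standalone sketch of that decision follows; the constant and the names are illustrative stand-ins, not the real Heap values.

#include <cassert>

// Illustrative limit; the real value comes from Page::kMaxHeapObjectSize.
const int kMaxObjectSizeInPagedSpaceSketch = 8 * 1024 - 64;

enum PromotionTarget {
  LARGE_OBJECT_SPACE,   // too big for a regular page
  OLD_POINTER_SPACE,    // may contain new-space pointers: queued and revisited
  OLD_DATA_SPACE        // pointer-free: copied immediately, never revisited
};

// Sketch of the space choice made when a scavenged object gets promoted.
PromotionTarget ChoosePromotionTarget(int object_size,
                                      bool may_contain_pointers) {
  if (object_size > kMaxObjectSizeInPagedSpaceSketch) return LARGE_OBJECT_SPACE;
  return may_contain_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
}

int main() {
  assert(ChoosePromotionTarget(16 * 1024, true) == LARGE_OBJECT_SPACE);
  assert(ChoosePromotionTarget(64, true) == OLD_POINTER_SPACE);
  assert(ChoosePromotionTarget(64, false) == OLD_DATA_SPACE);
  return 0;
}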
@@ -1364,7 +1391,13 @@ bool Heap::CreateInitialObjects() {
last_script_id_ = undefined_value();
// Initialize keyed lookup cache.
- ClearKeyedLookupCache();
+ KeyedLookupCache::Clear();
+
+ // Initialize context slot cache.
+ ContextSlotCache::Clear();
+
+ // Initialize descriptor cache.
+ DescriptorLookupCache::Clear();
// Initialize compilation cache.
CompilationCache::Clear();
@@ -1488,6 +1521,8 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = Builtins::builtin(Builtins::Illegal);
share->set_code(illegal);
+ Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ share->set_construct_stub(construct_stub);
share->set_expected_nof_properties(0);
share->set_length(0);
share->set_formal_parameter_count(0);
@@ -1501,14 +1536,24 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
}
-Object* Heap::AllocateConsString(String* first,
- String* second) {
+Object* Heap::AllocateConsString(String* first, String* second) {
int first_length = first->length();
+ if (first_length == 0) return second;
+
int second_length = second->length();
+ if (second_length == 0) return first;
+
int length = first_length + second_length;
bool is_ascii = first->IsAsciiRepresentation()
&& second->IsAsciiRepresentation();
+ // Make sure that an out of memory exception is thrown if the length
+ // of the new cons string is too large to fit in a Smi.
+ if (length > Smi::kMaxValue || length < 0) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
+
// If the resulting string is small make a flat string.
if (length < String::kMinNonFlatLength) {
ASSERT(first->IsFlat());
@@ -1518,8 +1563,12 @@ Object* Heap::AllocateConsString(String* first,
if (result->IsFailure()) return result;
// Copy the characters into the new object.
char* dest = SeqAsciiString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
+ // Copy first part.
+ char* src = SeqAsciiString::cast(first)->GetChars();
+ for (int i = 0; i < first_length; i++) *dest++ = src[i];
+ // Copy second part.
+ src = SeqAsciiString::cast(second)->GetChars();
+ for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
Object* result = AllocateRawTwoByteString(length);
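Heap::AllocateConsString now owns the empty-operand shortcuts that used to live in Factory::NewConsString, rejects results whose length cannot be represented as a Smi, and eagerly flattens short results into a sequential string. A minimal standalone sketch of that decision order; the limits below are illustrative stand-ins for Smi::kMaxValue and String::kMinNonFlatLength.

#include <cassert>

// Illustrative stand-ins for Smi::kMaxValue and String::kMinNonFlatLength.
const int kMaxSmiLengthSketch = (1 << 30) - 1;
const int kMinNonFlatLengthSketch = 13;

enum ConsDecision {
  RETURN_FIRST,         // second operand is empty
  RETURN_SECOND,        // first operand is empty
  THROW_OUT_OF_MEMORY,  // result length does not fit in a Smi
  MAKE_FLAT_COPY,       // short result: copy the characters eagerly
  MAKE_CONS_CELL        // long result: share both halves in a cons string
};

// Sketch of the decision order in Heap::AllocateConsString.
ConsDecision DecideConcat(int first_length, int second_length) {
  if (first_length == 0) return RETURN_SECOND;
  if (second_length == 0) return RETURN_FIRST;
  int length = first_length + second_length;
  if (length > kMaxSmiLengthSketch) return THROW_OUT_OF_MEMORY;
  if (length < kMinNonFlatLengthSketch) return MAKE_FLAT_COPY;
  return MAKE_CONS_CELL;
}

int main() {
  assert(DecideConcat(0, 5) == RETURN_SECOND);
  assert(DecideConcat(3, 4) == MAKE_FLAT_COPY);
  assert(DecideConcat(100, 200) == MAKE_CONS_CELL);
  return 0;
}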
@@ -1698,7 +1747,7 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
}
int size = ByteArray::SizeFor(length);
AllocationSpace space =
- size > MaxHeapObjectSize() ? LO_SPACE : OLD_DATA_SPACE;
+ size > MaxObjectSizeInPagedSpace() ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -1713,7 +1762,7 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Object* Heap::AllocateByteArray(int length) {
int size = ByteArray::SizeFor(length);
AllocationSpace space =
- size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
+ size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -1748,7 +1797,7 @@ Object* Heap::CreateCode(const CodeDesc& desc,
int obj_size = Code::SizeFor(body_size, sinfo_size);
ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
Object* result;
- if (obj_size > MaxHeapObjectSize()) {
+ if (obj_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawCode(obj_size);
} else {
result = code_space_->AllocateRaw(obj_size);
@@ -1788,7 +1837,7 @@ Object* Heap::CopyCode(Code* code) {
// Allocate an object the same size as the code object.
int obj_size = code->Size();
Object* result;
- if (obj_size > MaxHeapObjectSize()) {
+ if (obj_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawCode(obj_size);
} else {
result = code_space_->AllocateRaw(obj_size);
@@ -1963,7 +2012,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// Allocate the JSObject.
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
+ if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
@@ -2250,7 +2299,7 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
// Allocate string.
AllocationSpace space =
- (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
+ (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -2272,13 +2321,16 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = SeqAsciiString::SizeFor(length);
- if (size > MaxHeapObjectSize()) {
- space = LO_SPACE;
- }
- // Use AllocateRaw rather than Allocate because the object's size cannot be
- // determined from the map.
- Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ Object* result = Failure::OutOfMemoryException();
+ if (space == NEW_SPACE) {
+ result = size <= kMaxObjectSizeInNewSpace
+ ? new_space_.AllocateRaw(size)
+ : lo_space_->AllocateRawFixedArray(size);
+ } else {
+ if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+ result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ }
if (result->IsFailure()) return result;
// Determine the map based on the string's length.
@@ -2302,13 +2354,16 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = SeqTwoByteString::SizeFor(length);
- if (size > MaxHeapObjectSize()) {
- space = LO_SPACE;
- }
- // Use AllocateRaw rather than Allocate because the object's size cannot be
- // determined from the map.
- Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ Object* result = Failure::OutOfMemoryException();
+ if (space == NEW_SPACE) {
+ result = size <= kMaxObjectSizeInNewSpace
+ ? new_space_.AllocateRaw(size)
+ : lo_space_->AllocateRawFixedArray(size);
+ } else {
+ if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+ result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ }
if (result->IsFailure()) return result;
// Determine the map based on the string's length.
@@ -2345,9 +2400,9 @@ Object* Heap::AllocateRawFixedArray(int length) {
if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
// Allocate the raw data for a fixed array.
int size = FixedArray::SizeFor(length);
- return (size > MaxHeapObjectSize())
- ? lo_space_->AllocateRawFixedArray(size)
- : new_space_.AllocateRaw(size);
+ return size <= kMaxObjectSizeInNewSpace
+ ? new_space_.AllocateRaw(size)
+ : lo_space_->AllocateRawFixedArray(size);
}
@@ -2395,16 +2450,22 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
if (length == 0) return empty_fixed_array();
int size = FixedArray::SizeFor(length);
- Object* result;
- if (size > MaxHeapObjectSize()) {
- result = lo_space_->AllocateRawFixedArray(size);
- } else {
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- result = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ Object* result = Failure::OutOfMemoryException();
+ if (pretenure != TENURED) {
+ result = size <= kMaxObjectSizeInNewSpace
+ ? new_space_.AllocateRaw(size)
+ : lo_space_->AllocateRawFixedArray(size);
+ }
+ if (result->IsFailure()) {
+ if (size > MaxObjectSizeInPagedSpace()) {
+ result = lo_space_->AllocateRawFixedArray(size);
+ } else {
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
+ result = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ }
+ if (result->IsFailure()) return result;
}
- if (result->IsFailure()) return result;
-
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
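Fixed-array allocation (like the raw string allocators above) now tries new space first whenever the object is small enough and not pretenured, and only falls back to paged old space or the large-object space otherwise. Below is a hedged standalone sketch of the first-attempt choice with simplified names and illustrative limits; the real retry path additionally threads a retry space through AllocateRaw.

#include <cassert>

enum Space { NEW_SPACE, OLD_POINTER_SPACE, LO_SPACE };

// Illustrative limits; the real ones are Heap::kMaxObjectSizeInNewSpace and
// Heap::MaxObjectSizeInPagedSpace().
const int kMaxObjectSizeInNewSpaceSketch = 256 * 1024;
const int kMaxObjectSizeInPagedSpaceSketch = 8 * 1024 - 64;

// Where a fixed-array allocation of `size` bytes is attempted first.
Space FirstAttempt(int size, bool tenured) {
  if (!tenured) {
    // Young allocation: new space for ordinary sizes, large-object space
    // for anything above the new-space object size cap.
    return size <= kMaxObjectSizeInNewSpaceSketch ? NEW_SPACE : LO_SPACE;
  }
  // Pretenured allocation: paged old space, unless the object is larger
  // than a page can hold.
  return size > kMaxObjectSizeInPagedSpaceSketch ? LO_SPACE : OLD_POINTER_SPACE;
}

int main() {
  assert(FirstAttempt(1024, false) == NEW_SPACE);
  assert(FirstAttempt(512 * 1024, false) == LO_SPACE);
  assert(FirstAttempt(1024, true) == OLD_POINTER_SPACE);
  return 0;
}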
@@ -2504,7 +2565,7 @@ STRUCT_LIST(MAKE_CASE)
}
int size = map->instance_size();
AllocationSpace space =
- (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE;
+ (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
Object* result = Heap::Allocate(map, space);
if (result->IsFailure()) return result;
Struct::cast(result)->InitializeBody(size);
@@ -3478,6 +3539,58 @@ const char* GCTracer::CollectorString() {
}
+int KeyedLookupCache::Hash(Map* map, String* name) {
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t addr_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
+ return (addr_hash ^ name->Hash()) % kLength;
+}
+
+
+int KeyedLookupCache::Lookup(Map* map, String* name) {
+ int index = Hash(map, name);
+ Key& key = keys_[index];
+ if ((key.map == map) && key.name->Equals(name)) {
+ return field_offsets_[index];
+ }
+ return -1;
+}
+
+
+void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
+ String* symbol;
+ if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ int index = Hash(map, symbol);
+ Key& key = keys_[index];
+ key.map = map;
+ key.name = symbol;
+ field_offsets_[index] = field_offset;
+ }
+}
+
+
+void KeyedLookupCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
+}
+
+
+KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
+
+
+int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
+
+
+void DescriptorLookupCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
+}
+
+
+DescriptorLookupCache::Key
+DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
+
+int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
+
+
#ifdef DEBUG
bool Heap::GarbageCollectionGreedyCheck() {
ASSERT(FLAG_gc_greedy);
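KeyedLookupCache is a direct-mapped table: the slot is chosen by hashing the map pointer together with the name's string hash, and entries are only written for names that exist as symbols so later hits stay cheap. The standalone model below mirrors the (map, name) to field-offset structure with plain pointers and std::string; the symbol-interning detail is deliberately elided, and the class name is invented for illustration.

#include <cstdint>
#include <functional>
#include <string>

// Standalone model of a direct-mapped (map, name) -> field offset cache.
class DirectMappedLookupCache {
 public:
  // Returns the cached field offset, or -1 on a miss.
  int Lookup(const void* map, const std::string& name) const {
    int index = Hash(map, name);
    const Entry& e = entries_[index];
    if (e.map == map && e.name == name) return e.field_offset;
    return -1;
  }

  void Update(const void* map, const std::string& name, int field_offset) {
    int index = Hash(map, name);
    entries_[index].map = map;
    entries_[index].name = name;
    entries_[index].field_offset = field_offset;
  }

  void Clear() {
    for (Entry& e : entries_) e.map = nullptr;
  }

 private:
  static const int kLength = 64;

  // Mix the map address with the name hash to pick one of kLength slots.
  static int Hash(const void* map, const std::string& name) {
    uintptr_t addr_hash = reinterpret_cast<uintptr_t>(map) >> 2;
    return static_cast<int>(
        (addr_hash ^ std::hash<std::string>()(name)) % kLength);
  }

  struct Entry {
    const void* map = nullptr;
    std::string name;
    int field_offset = -1;
  };
  Entry entries_[kLength];
};

int main() {
  DirectMappedLookupCache cache;
  int map_a = 0;  // any distinct address will do for the sketch
  cache.Update(&map_a, "length", 3);
  return cache.Lookup(&map_a, "length") == 3 ? 0 : 1;
}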
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 08b2a99350..31adcbdba8 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -126,7 +126,6 @@ namespace internal {
V(FixedArray, number_string_cache) \
V(FixedArray, single_character_string_cache) \
V(FixedArray, natives_source_cache) \
- V(Object, keyed_lookup_cache) \
V(Object, last_script_id)
@@ -243,9 +242,8 @@ class Heap : public AllStatic {
// all available bytes. Check MaxHeapObjectSize() instead.
static int Available();
- // Returns the maximum object size that heap supports. Objects larger than
- // the maximum heap object size are allocated in a large object space.
- static inline int MaxHeapObjectSize();
+ // Returns the maximum object size in paged space.
+ static inline int MaxObjectSizeInPagedSpace();
// Returns of size of all objects residing in the heap.
static int SizeOfObjects();
@@ -446,17 +444,6 @@ class Heap : public AllStatic {
// Allocates a new utility object in the old generation.
static Object* AllocateStruct(InstanceType type);
-
- // Initializes a function with a shared part and prototype.
- // Returns the function.
- // Note: this code was factored out of AllocateFunction such that
- // other parts of the VM could use it. Specifically, a function that creates
- // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
- // Please note this does not perform a garbage collection.
- static Object* InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype);
-
// Allocates a function initialized with a shared part.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -520,8 +507,7 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateConsString(String* first,
- String* second);
+ static Object* AllocateConsString(String* first, String* second);
// Allocates a new sliced string object which is a slice of an underlying
// string buffer stretching from the index start (inclusive) to the index
@@ -700,11 +686,6 @@ class Heap : public AllStatic {
non_monomorphic_cache_ = value;
}
- // Gets, sets and clears the lookup cache used for keyed access.
- static inline Object* GetKeyedLookupCache();
- static inline void SetKeyedLookupCache(LookupCache* cache);
- static inline void ClearKeyedLookupCache();
-
// Update the next script id.
static inline void SetLastScriptId(Object* last_script_id);
@@ -836,6 +817,8 @@ class Heap : public AllStatic {
static const int kMaxMapSpaceSize = 8*MB;
+ static const int kMaxObjectSizeInNewSpace = 256*KB;
+
static NewSpace new_space_;
static OldSpace* old_pointer_space_;
static OldSpace* old_data_space_;
@@ -989,7 +972,17 @@ class Heap : public AllStatic {
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Copy memory from src to dst.
- inline static void CopyBlock(Object** dst, Object** src, int byte_size);
+ static inline void CopyBlock(Object** dst, Object** src, int byte_size);
+
+ // Initializes a function with a shared part and prototype.
+ // Returns the function.
+ // Note: this code was factored out of AllocateFunction such that
+ // other parts of the VM could use it. Specifically, a function that creates
+ // instances of type JS_FUNCTION_TYPE benefits from the use of this function.
+ // Please note this does not perform a garbage collection.
+ static inline Object* InitializeFunction(JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype);
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@@ -1140,6 +1133,84 @@ class HeapIterator BASE_EMBEDDED {
};
+// Cache for mapping (map, property name) into field offset.
+// Cleared at startup and prior to mark sweep collection.
+class KeyedLookupCache {
+ public:
+ // Lookup field offset for (map, name). If absent, -1 is returned.
+ static int Lookup(Map* map, String* name);
+
+ // Update an element in the cache.
+ static void Update(Map* map, String* name, int field_offset);
+
+ // Clear the cache.
+ static void Clear();
+ private:
+ inline static int Hash(Map* map, String* name);
+ static const int kLength = 64;
+ struct Key {
+ Map* map;
+ String* name;
+ };
+ static Key keys_[kLength];
+ static int field_offsets_[kLength];
+};
+
+
+
+// Cache for mapping (array, property name) into descriptor index.
+// The cache contains both positive and negative results.
+ // A descriptor index equal to kNotFound means the property is absent.
+// Cleared at startup and prior to any gc.
+class DescriptorLookupCache {
+ public:
+ // Lookup descriptor index for (array, name).
+ // If absent, kAbsent is returned.
+ static int Lookup(DescriptorArray* array, String* name) {
+ if (!StringShape(name).IsSymbol()) return kAbsent;
+ int index = Hash(array, name);
+ Key& key = keys_[index];
+ if ((key.array == array) && (key.name == name)) return results_[index];
+ return kAbsent;
+ }
+
+ // Update an element in the cache.
+ static void Update(DescriptorArray* array, String* name, int result) {
+ ASSERT(result != kAbsent);
+ if (StringShape(name).IsSymbol()) {
+ int index = Hash(array, name);
+ Key& key = keys_[index];
+ key.array = array;
+ key.name = name;
+ results_[index] = result;
+ }
+ }
+
+ // Clear the cache.
+ static void Clear();
+
+ static const int kAbsent = -2;
+ private:
+ static int Hash(DescriptorArray* array, String* name) {
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t array_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
+ uintptr_t name_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
+ return (array_hash ^ name_hash) % kLength;
+ }
+
+ static const int kLength = 64;
+ struct Key {
+ DescriptorArray* array;
+ String* name;
+ };
+
+ static Key keys_[kLength];
+ static int results_[kLength];
+};
+
+
// ----------------------------------------------------------------------------
// Marking stack for tracing live objects.
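DescriptorLookupCache hashes and compares raw pointers only, which is valid for symbol names precisely because symbols are interned: equal symbol strings are the same object, so pointer identity implies string equality. The tiny standalone example below illustrates that interning property with an std::unordered_set pool; it is an analogy, not V8's symbol table.

#include <cassert>
#include <string>
#include <unordered_set>

// Tiny interning table: equal strings map to one canonical object, so
// pointer comparison becomes a valid (and cheap) equality test, which is
// the property the descriptor cache relies on for symbol names.
const std::string* Intern(const std::string& s) {
  static std::unordered_set<std::string> pool;
  return &*pool.insert(s).first;
}

int main() {
  const std::string* a = Intern(std::string("foo"));
  const std::string* b = Intern(std::string("fo") + "o");
  assert(a == b);       // same canonical pointer
  assert(*a == "foo");
  return 0;
}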
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 045f17682f..9a5352b418 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -48,7 +48,7 @@ Condition NegateCondition(Condition cc) {
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(int delta) {
+void RelocInfo::apply(intptr_t delta) {
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // relocate entry
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index b5efe9e4c7..596861009b 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -1417,7 +1417,7 @@ void Assembler::call(const Operand& adr) {
}
-void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
WriteRecordedPositions();
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1815,7 +1815,7 @@ void Assembler::fcompp() {
void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- EMIT(0xdF);
+ EMIT(0xDF);
EMIT(0xE0);
}
@@ -2182,17 +2182,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
-void Assembler::WriteInternalReference(int position, const Label& bound_label) {
- ASSERT(bound_label.is_bound());
- ASSERT(0 <= position);
- ASSERT(position + static_cast<int>(sizeof(uint32_t)) <= pc_offset());
- ASSERT(long_at(position) == 0); // only initialize once!
-
- uint32_t label_loc = reinterpret_cast<uint32_t>(addr_at(bound_label.pos()));
- long_at_put(position, label_loc);
-}
-
-
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index ae16e700f1..92c390cfbb 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -396,10 +396,15 @@ class CpuFeatures : public AllStatic {
class Assembler : public Malloced {
private:
- // The relocation writer's position is kGap bytes below the end of
+ // We check before assembling an instruction that there is sufficient
+ // space to write an instruction and its relocation information.
+ // The relocation writer's position must be kGap bytes above the end of
// the generated instructions. This leaves enough space for the
- // longest possible ia32 instruction (17 bytes as of 9/26/06) and
- // allows for a single, fast space check per instruction.
+ // longest possible ia32 instruction, 15 bytes, and the longest possible
+ // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+ // (There is a 15 byte limit on ia32 instruction length that rules out some
+ // otherwise valid instructions.)
+ // This allows for a single, fast space check per instruction.
static const int kGap = 32;
public:
@@ -731,11 +736,6 @@ class Assembler : public Malloced {
// Used for inline tables, e.g., jump-tables.
void dd(uint32_t data, RelocInfo::Mode reloc_info);
- // Writes the absolute address of a bound label at the given position in
- // the generated code. That positions should have the relocation mode
- // internal_reference!
- void WriteInternalReference(int position, const Label& bound_label);
-
int pc_offset() const { return pc_ - buffer_; }
int current_statement_position() const { return current_statement_position_; }
int current_position() const { return current_position_; }
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index f65074bd49..3cafd904b5 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -63,6 +63,25 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function_call);
+ // Jump to the function-specific construct stub.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+ __ jmp(Operand(ebx));
+
+ // edi: called object
+ // eax: number of arguments
+ __ bind(&non_function_call);
+
+ // Set expected number of arguments to zero (not changing eax).
+ __ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -113,7 +132,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Make sure that the maximum heap object size will never cause us
// problems here, because it is always greater than the maximum
// instance size that can be represented in a byte.
- ASSERT(Heap::MaxHeapObjectSize() >= (1 << kBitsPerByte));
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
__ mov(ebx, Operand::StaticVariable(new_space_allocation_top));
@@ -175,7 +194,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
- ASSERT(Heap::MaxHeapObjectSize() >
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
__ lea(ecx, Operand(edi, edx, times_4, FixedArray::kHeaderSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
@@ -305,16 +324,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
-
- // edi: called object
- // eax: number of arguments
- __ bind(&non_function_call);
-
- // Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
}
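Generate_JSConstructCall no longer inlines the generic construction code; it loads the function-specific construct stub from the SharedFunctionInfo (installed by Heap::AllocateSharedFunctionInfo above, defaulting to JSConstructStubGeneric) and jumps to it. A minimal C++ model of that indirection, with invented structs and a function pointer standing in for SharedFunctionInfo and Code:

#include <cstdio>

// Stand-ins for Code and SharedFunctionInfo (illustrative only).
typedef void (*ConstructStub)(int argc);

void GenericConstructStub(int argc) {
  std::printf("generic construct stub, %d args\n", argc);
}

struct SharedFunctionInfoSketch {
  // Every function starts out with the generic stub; a specialized stub
  // can be installed later without touching the construct-call trampoline.
  ConstructStub construct_stub = GenericConstructStub;
};

// What the JSConstructCall trampoline now does: dispatch through the
// function's own construct stub instead of inlining the generic path.
void ConstructCall(const SharedFunctionInfoSketch& shared, int argc) {
  shared.construct_stub(argc);
}

int main() {
  SharedFunctionInfoSketch f;
  ConstructCall(f, 2);
  return 0;
}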
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 3357f57e45..59c1d45406 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -175,18 +175,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
function_return_is_shadowed_ = false;
- // Allocate the arguments object and copy the parameters into it.
- if (scope_->arguments() != NULL) {
- ASSERT(scope_->arguments_shadow() != NULL);
- Comment cmnt(masm_, "[ Allocate arguments object");
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope_->num_parameters()));
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
- }
-
+ // Allocate the local context if needed.
if (scope_->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@@ -247,27 +236,11 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
}
- // This section stores the pointer to the arguments object that
- // was allocated and copied into above. If the address was not
- // saved to TOS, we push ecx onto the stack.
- //
// Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope_->arguments() != NULL) {
- Comment cmnt(masm_, "[ store arguments object");
- { Reference shadow_ref(this, scope_->arguments_shadow());
- ASSERT(shadow_ref.is_slot());
- { Reference arguments_ref(this, scope_->arguments());
- ASSERT(arguments_ref.is_slot());
- // Here we rely on the convenient property that references to slot
- // take up zero space in the frame (ie, it doesn't matter that the
- // stored value is actually below the reference on the frame).
- arguments_ref.SetValue(NOT_CONST_INIT);
- }
- shadow_ref.SetValue(NOT_CONST_INIT);
- }
- frame_->Drop(); // Value is no longer needed.
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
}
// Generate code to 'execute' declarations and initialize functions
@@ -591,6 +564,71 @@ void CodeGenerator::LoadTypeofExpression(Expression* x) {
}
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
+ if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope_->arguments_shadow() != NULL);
+ // We don't want to do lazy arguments allocation for functions that
+ // have heap-allocated contexts, because it interferes with the
+ // uninitialized const tracking in the context objects.
+ return (scope_->num_heap_slots() > 0)
+ ? EAGER_ARGUMENTS_ALLOCATION
+ : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+ ArgumentsAllocationMode mode = ArgumentsMode();
+ ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+ Comment cmnt(masm_, "[ store arguments object");
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+ // When using lazy arguments allocation, we store the hole value
+ // as a sentinel indicating that the arguments object hasn't been
+ // allocated yet.
+ frame_->Push(Factory::the_hole_value());
+ } else {
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ frame_->PushFunction();
+ frame_->PushReceiverSlotAddress();
+ frame_->Push(Smi::FromInt(scope_->num_parameters()));
+ Result result = frame_->CallStub(&stub, 3);
+ frame_->Push(&result);
+ }
+
+ { Reference shadow_ref(this, scope_->arguments_shadow());
+ Reference arguments_ref(this, scope_->arguments());
+ ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
+ // Here we rely on the convenient property that references to slots
+ // take up zero space in the frame (ie, it doesn't matter that the
+ // stored value is actually below the reference on the frame).
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ Result arguments = frame_->Pop();
+ if (arguments.is_constant()) {
+ // We have to skip updating the arguments object if it has
+ // been assigned a proper value.
+ skip_arguments = !arguments.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(arguments.reg()), Immediate(Factory::the_hole_value()));
+ arguments.Unuse();
+ done.Branch(not_equal);
+ }
+ }
+ if (!skip_arguments) {
+ arguments_ref.SetValue(NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ shadow_ref.SetValue(NOT_CONST_INIT);
+ }
+ return frame_->Pop();
+}
+
+
Reference::Reference(CodeGenerator* cgen, Expression* expression)
: cgen_(cgen), expression_(expression), type_(ILLEGAL) {
cgen->LoadReference(this);
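Under lazy arguments allocation the arguments slot initially holds the hole value as a sentinel, and the real object is only built the first time the slot is read or when the fast apply path bails out. The standalone sketch below captures the sentinel-then-materialize pattern using a null pointer where the generated code uses the hole; the class names are invented for illustration.

#include <cassert>
#include <memory>
#include <vector>

// Sketch: a per-call arguments object that is only built on first use.
struct ArgumentsObjectSketch {
  std::vector<int> values;
};

class FrameSketch {
 public:
  explicit FrameSketch(std::vector<int> actual_args)
      : actual_args_(std::move(actual_args)) {}

  // The slot starts out as a sentinel (nullptr here, the hole in V8).
  // Reading it materializes the object exactly once.
  ArgumentsObjectSketch* arguments() {
    if (!arguments_) {
      arguments_.reset(new ArgumentsObjectSketch{actual_args_});
    }
    return arguments_.get();
  }

  bool arguments_allocated() const { return arguments_ != nullptr; }

 private:
  std::vector<int> actual_args_;
  std::unique_ptr<ArgumentsObjectSketch> arguments_;
};

int main() {
  FrameSketch frame({1, 2, 3});
  assert(!frame.arguments_allocated());             // sentinel still in place
  assert(frame.arguments()->values.size() == 3);
  assert(frame.arguments_allocated());              // built lazily, exactly once
  return 0;
}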
@@ -881,15 +919,15 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
Result left = frame_->Pop();
if (op == Token::ADD) {
- bool left_is_string = left.static_type().is_jsstring();
- bool right_is_string = right.static_type().is_jsstring();
+ bool left_is_string = left.is_constant() && left.handle()->IsString();
+ bool right_is_string = right.is_constant() && right.handle()->IsString();
if (left_is_string || right_is_string) {
frame_->Push(&left);
frame_->Push(&right);
Result answer;
if (left_is_string) {
if (right_is_string) {
- // TODO(lrn): if (left.is_constant() && right.is_constant())
+ // TODO(lrn): if both are constant strings
// -- do a compile time cons, if allocation during codegen is allowed.
answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
} else {
@@ -900,7 +938,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
answer =
frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
}
- answer.set_static_type(StaticType::jsstring());
frame_->Push(&answer);
return;
}
@@ -1387,7 +1424,11 @@ class DeferredInlineSmiOperation: public DeferredCode {
void DeferredInlineSmiOperation::Generate() {
__ push(src_);
__ push(Immediate(value_));
- GenericBinaryOpStub stub(op_, overwrite_mode_, SMI_CODE_INLINED);
+ // For mod we don't generate all the Smi code inline.
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
__ CallStub(&stub);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1772,6 +1813,33 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
break;
}
+ // Generate inline code for mod of powers of 2 and negative powers of 2.
+ case Token::MOD:
+ if (!reversed &&
+ int_value != 0 &&
+ (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ // Check for negative or non-Smi left hand side.
+ __ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
+ deferred->Branch(not_zero);
+ if (int_value < 0) int_value = -int_value;
+ if (int_value == 1) {
+ __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
+ } else {
+ __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
+ }
+ deferred->BindExit();
+ frame_->Push(operand);
+ break;
+ }
+ // Fall through if we did not find a power of 2 on the right hand side!
+
default: {
Result constant_operand(value);
if (reversed) {
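The inlined Token::MOD case depends on the identity x % 2^k == x & (2^k - 1) for non-negative x, applied directly to tagged smis: since a smi is the value shifted left by kSmiTagSize, the mask is (|constant| << kSmiTagSize) - 1, and negative or non-smi inputs are routed to the deferred stub. A quick standalone check of that identity on tagged values:

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;  // as on ia32: a smi is the value shifted left one bit

int32_t TagSmi(int32_t value) { return value << kSmiTagSize; }

// Tagged remainder by a power of two, valid only for non-negative smis:
// masking the tagged value keeps the low bits of the payload and the tag.
int32_t TaggedModPowerOfTwo(int32_t tagged, int32_t power_of_two) {
  return tagged & ((power_of_two << kSmiTagSize) - 1);
}

int main() {
  for (int32_t x = 0; x < 1000; x++) {
    assert(TaggedModPowerOfTwo(TagSmi(x), 8) == TagSmi(x % 8));
    assert(TaggedModPowerOfTwo(TagSmi(x), 1) == TagSmi(0));
  }
  return 0;
}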
@@ -1806,6 +1874,12 @@ class CompareStub: public CodeStub {
return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
}
+ // Branch to the label if the given object isn't a symbol.
+ void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch);
+
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s)\n",
@@ -2053,6 +2127,176 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
}
+void CodeGenerator::CallApplyLazy(Property* apply,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position) {
+ ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+ ASSERT(arguments->IsArguments());
+
+ JumpTarget slow, done;
+
+ // Load the apply function onto the stack. This will usually
+ // give us a megamorphic load site. Not super, but it works.
+ Reference ref(this, apply);
+ ref.GetValue(NOT_INSIDE_TYPEOF);
+ ASSERT(ref.type() == Reference::NAMED);
+
+ // Load the receiver and the existing arguments object onto the
+ // expression stack. Avoid allocating the arguments object here.
+ Load(receiver);
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+
+ // Emit the source position information after having loaded the
+ // receiver and the arguments.
+ CodeForSourcePosition(position);
+
+ // Check if the arguments object has been lazily allocated
+ // already. If so, just use that instead of copying the arguments
+ // from the stack. This also deals with cases where a local variable
+ // named 'arguments' has been introduced.
+ frame_->Dup();
+ Result probe = frame_->Pop();
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ probe.Unuse();
+ slow.Branch(not_equal);
+ }
+
+ if (try_lazy) {
+ JumpTarget build_args;
+
+ // Get rid of the arguments object probe.
+ frame_->Drop();
+
+ // Before messing with the execution stack, we sync all
+ // elements. This is bound to happen anyway because we're
+ // about to call a function.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ // Check that the receiver really is a JavaScript object.
+ { frame_->PushElementAt(0);
+ Result receiver = frame_->Pop();
+ receiver.ToRegister();
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ Result tmp = allocator_->Allocate();
+ // We allow all JSObjects including JSFunctions. As long as
+ // JS_FUNCTION_TYPE is the last instance type and it is right
+ // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+ // bound.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
+ build_args.Branch(less);
+ }
+
+ // Verify that we're invoking Function.prototype.apply.
+ { frame_->PushElementAt(1);
+ Result apply = frame_->Pop();
+ apply.ToRegister();
+ __ test(apply.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ Result tmp = allocator_->Allocate();
+ __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+ build_args.Branch(not_equal);
+ __ mov(tmp.reg(),
+ FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ Immediate(apply_code));
+ build_args.Branch(not_equal);
+ }
+
+ // Get the function receiver from the stack. Check that it
+ // really is a function.
+ __ mov(edi, Operand(esp, 2 * kPointerSize));
+ __ test(edi, Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ build_args.Branch(not_equal);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ mov(eax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ shr(eax, kSmiTagSize);
+ __ mov(ecx, Operand(eax));
+ __ cmp(eax, kArgumentsLimit);
+ build_args.Branch(above);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ __ bind(&loop);
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &invoke);
+ __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
+ __ dec(ecx);
+ __ jmp(&loop);
+
+ // Invoke the function. The virtual frame knows about the receiver
+ // so make sure to forget that explicitly.
+ __ bind(&invoke);
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ frame_->Forget(1);
+ Result result = allocator()->Allocate(eax);
+ frame_->SetElementAt(0, &result);
+ done.Jump();
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // Function.prototype.apply.
+ build_args.Bind();
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->Push(&arguments_object);
+ slow.Bind();
+ }
+
+ // Flip the apply function and the function to call on the stack, so
+ // the function looks like the receiver of the apply call. This way,
+ // the generic Function.prototype.apply implementation can deal with
+ // the call like it usually does.
+ Result a2 = frame_->Pop();
+ Result a1 = frame_->Pop();
+ Result ap = frame_->Pop();
+ Result fn = frame_->Pop();
+ frame_->Push(&ap);
+ frame_->Push(&fn);
+ frame_->Push(&a1);
+ frame_->Push(&a2);
+ CallFunctionStub call_function(2, NOT_IN_LOOP);
+ Result res = frame_->CallStub(&call_function, 3);
+ frame_->Push(&res);
+
+ // All done. Restore context register after call.
+ if (try_lazy) done.Bind();
+ frame_->RestoreContextRegister();
+}
+
+
class DeferredStackCheck: public DeferredCode {
public:
DeferredStackCheck() {
@@ -2420,131 +2664,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
}
-int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
- return kFastSwitchMaxOverheadFactor;
-}
-
-
-int CodeGenerator::FastCaseSwitchMinCaseCount() {
- return kFastSwitchMinCaseCount;
-}
-
-
-// Generate a computed jump to a switch case.
-void CodeGenerator::GenerateFastCaseSwitchJumpTable(
- SwitchStatement* node,
- int min_index,
- int range,
- Label* default_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels) {
- // Notice: Internal references, used by both the jmp instruction and
- // the table entries, need to be relocated if the buffer grows. This
- // prevents the forward use of Labels, since a displacement cannot
- // survive relocation, and it also cannot safely be distinguished
- // from a real address. Instead we put in zero-values as
- // placeholders, and fill in the addresses after the labels have been
- // bound.
-
- JumpTarget setup_default;
- JumpTarget is_smi;
-
- // A non-null default label pointer indicates a default case among
- // the case labels. Otherwise we use the break target as a
- // "default".
- JumpTarget* default_target =
- (default_label == NULL) ? node->break_target() : &setup_default;
-
- // Test whether input is a smi.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- Result switch_value = frame_->Pop();
- switch_value.ToRegister();
- __ test(switch_value.reg(), Immediate(kSmiTagMask));
- is_smi.Branch(equal, &switch_value, taken);
-
- // It's a heap object, not a smi or a failure. Check if it is a
- // heap number.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ CmpObjectType(switch_value.reg(), HEAP_NUMBER_TYPE, temp.reg());
- temp.Unuse();
- default_target->Branch(not_equal);
-
- // The switch value is a heap number. Convert it to a smi.
- frame_->Push(&switch_value);
- Result smi_value = frame_->CallRuntime(Runtime::kNumberToSmi, 1);
-
- is_smi.Bind(&smi_value);
- smi_value.ToRegister();
- // Convert the switch value to a 0-based table index.
- if (min_index != 0) {
- frame_->Spill(smi_value.reg());
- __ sub(Operand(smi_value.reg()), Immediate(min_index << kSmiTagSize));
- }
- // Go to the default case if the table index is negative or not a smi.
- __ test(smi_value.reg(), Immediate(0x80000000 | kSmiTagMask));
- default_target->Branch(not_equal, not_taken);
- __ cmp(smi_value.reg(), range << kSmiTagSize);
- default_target->Branch(greater_equal, not_taken);
-
- // The expected frame at all the case labels is a version of the
- // current one (the bidirectional entry frame, which an arbitrary
- // frame of the correct height can be merged to). Keep a copy to
- // restore at the start of every label. Create a jump target and
- // bind it to set its entry frame properly.
- JumpTarget entry_target(JumpTarget::BIDIRECTIONAL);
- entry_target.Bind(&smi_value);
- VirtualFrame* start_frame = new VirtualFrame(frame_);
-
- // 0 is placeholder.
- // Jump to the address at table_address + 2 * smi_value.reg().
- // The target of the jump is read from table_address + 4 * switch_value.
- // The Smi encoding of smi_value.reg() is 2 * switch_value.
- smi_value.ToRegister();
- __ jmp(Operand(smi_value.reg(), smi_value.reg(),
- times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
- smi_value.Unuse();
- // Calculate address to overwrite later with actual address of table.
- int32_t jump_table_ref = masm_->pc_offset() - sizeof(int32_t);
- __ Align(4);
- Label table_start;
- __ bind(&table_start);
- __ WriteInternalReference(jump_table_ref, table_start);
-
- for (int i = 0; i < range; i++) {
- // These are the table entries. 0x0 is the placeholder for case address.
- __ dd(0x0, RelocInfo::INTERNAL_REFERENCE);
- }
-
- GenerateFastCaseSwitchCases(node, case_labels, start_frame);
-
- // If there was a default case, we need to emit the code to match it.
- if (default_label != NULL) {
- if (has_valid_frame()) {
- node->break_target()->Jump();
- }
- setup_default.Bind();
- frame_->MergeTo(start_frame);
- __ jmp(default_label);
- DeleteFrame();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
-
- for (int i = 0, entry_pos = table_start.pos();
- i < range;
- i++, entry_pos += sizeof(uint32_t)) {
- if (case_targets[i] == NULL) {
- __ WriteInternalReference(entry_pos,
- *node->break_target()->entry_label());
- } else {
- __ WriteInternalReference(entry_pos, *case_targets[i]);
- }
- }
-}
-
-
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ SwitchStatement");
@@ -2554,10 +2673,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Compile the switch value.
Load(node->tag());
- if (TryGenerateFastCaseSwitchStatement(node)) {
- return;
- }
-
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
CaseClause* default_clause = NULL;
@@ -3707,6 +3822,44 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
}
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ LoadFromSlot(slot, state);
+
+ // Bail out quickly if we're not using lazy arguments allocation.
+ if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+ // ... or if the slot isn't a non-parameter arguments slot.
+ if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+ // Pop the loaded value from the stack.
+ Result value = frame_->Pop();
+
+  // If the loaded value is a constant, we know whether the arguments
+  // object has been lazily allocated yet.
+ if (value.is_constant()) {
+ if (value.handle()->IsTheHole()) {
+ Result arguments = StoreArgumentsObject(false);
+ frame_->Push(&arguments);
+ } else {
+ frame_->Push(&value);
+ }
+ return;
+ }
+
+ // The loaded value is in a register. If it is the sentinel that
+ // indicates that we haven't loaded the arguments object yet, we
+ // need to do it now.
+ JumpTarget exit;
+ __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+ frame_->Push(&value);
+ exit.Branch(not_equal);
+ Result arguments = StoreArgumentsObject(false);
+ frame_->SetElementAt(0, &arguments);
+ exit.Bind();
+}
+
+
Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
Slot* slot,
TypeofState typeof_state,
@@ -3879,7 +4032,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
+ LoadFromSlotCheckForArguments(node, typeof_state());
}
@@ -4441,23 +4594,40 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
// ------------------------------------------------------------------
- // Push the name of the function and the receiver onto the stack.
- frame_->Push(literal->handle());
- Load(property->obj());
+ Handle<String> name = Handle<String>::cast(literal->handle());
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
+ if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+ name->IsEqualTo(CStrVector("apply")) &&
+ args->length() == 2 &&
+ args->at(1)->AsVariableProxy() != NULL &&
+ args->at(1)->AsVariableProxy()->IsArguments()) {
+ // Use the optimized Function.prototype.apply that avoids
+        // allocating the lazily allocated arguments object.
+ CallApplyLazy(property,
+ args->at(0),
+ args->at(1)->AsVariableProxy(),
+ node->position());
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting());
- frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ } else {
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(name);
+ Load(property->obj());
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result =
+ frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+ }
} else {
// -------------------------------------------
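
The guard added above only takes CallApplyLazy when the call site has exactly the shape fn.apply(receiver, arguments): the property name is "apply", there are two arguments, and the second is the arguments variable itself. A rough sketch of that kind of syntactic pattern check over a toy AST (Expr and CallSite are invented types, not V8's Ast classes):

#include <iostream>
#include <string>
#include <vector>

// Toy AST: a call expression with a callee property name and argument
// expressions.
struct Expr {
  bool is_variable;
  std::string variable_name;  // meaningful only when is_variable is true
};

struct CallSite {
  std::string callee_property;   // e.g. "apply" in obj.foo.apply(...)
  std::vector<Expr> arguments;
};

// Returns true when the call matches fn.apply(<receiver>, arguments),
// i.e. the only shape the lazy-apply optimization handles.
bool IsLazyApplyCandidate(const CallSite& call) {
  if (call.callee_property != "apply") return false;
  if (call.arguments.size() != 2) return false;
  const Expr& second = call.arguments[1];
  return second.is_variable && second.variable_name == "arguments";
}

int main() {
  CallSite good{"apply", {Expr{false, ""}, Expr{true, "arguments"}}};
  CallSite bad{"apply", {Expr{false, ""}, Expr{true, "array"}}};
  std::cout << IsLazyApplyCandidate(good) << " "    // 1
            << IsLazyApplyCandidate(bad) << "\n";   // 0
  return 0;
}
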
@@ -5925,12 +6095,19 @@ void Reference::GetValue(TypeofState typeof_state) {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
MacroAssembler* masm = cgen_->masm();
+
+ // Record the source position for the property load.
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ cgen_->CodeForSourcePosition(property->position());
+ }
+
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlot(slot, typeof_state);
+ cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
break;
}
@@ -6016,6 +6193,7 @@ void Reference::GetValue(TypeofState typeof_state) {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
+
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
@@ -6143,13 +6321,16 @@ void Reference::TakeValue(TypeofState typeof_state) {
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST) {
+ slot->var()->mode() == Variable::CONST ||
+ slot->is_arguments()) {
GetValue(typeof_state);
return;
}
- // Only non-constant, frame-allocated parameters and locals can reach
- // here.
+ // Only non-constant, frame-allocated parameters and locals can
+ // reach here. Be careful not to use the optimizations for arguments
+ // object access since it may not have been initialized yet.
+ ASSERT(!slot->is_arguments());
if (slot->type() == Slot::PARAMETER) {
cgen_->frame()->TakeParameterAt(slot->index());
} else {
@@ -6687,9 +6868,45 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// result.
__ bind(&call_runtime);
switch (op_) {
- case Token::ADD:
+ case Token::ADD: {
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1;
+ Result answer;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &not_string1);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
+ __ j(above_equal, &not_string1);
+
+    // First argument is a string, test second.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &string1);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &not_strings);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
+ }
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
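
The new Token::ADD case classifies the two operands before giving up to the generic builtin: both strings go to the StringAdd runtime function, a single string operand goes to STRING_ADD_LEFT or STRING_ADD_RIGHT, and everything else falls through to ADD. The same four-way dispatch written as ordinary C++ over a tiny tagged value (Kind, Value and DescribeAddPath are illustrative stand-ins; which operand counts as "left" follows the stub's own comments):

#include <iostream>
#include <string>

enum class Kind { kSmi, kString, kOther };

struct Value {
  Kind kind;
  std::string str;  // used only when kind == kString
  int smi;          // used only when kind == kSmi
};

// Mirrors the dispatch order in the stub: check the first operand, then the
// second, and pick the narrowest handler that applies.
std::string DescribeAddPath(const Value& a, const Value& b) {
  bool a_is_string = (a.kind == Kind::kString);
  bool b_is_string = (b.kind == Kind::kString);
  if (a_is_string && b_is_string) return "Runtime::StringAdd";
  if (a_is_string) return "Builtins::STRING_ADD_LEFT";
  if (b_is_string) return "Builtins::STRING_ADD_RIGHT";
  return "Builtins::ADD";  // neither operand is a string
}

int main() {
  Value s{Kind::kString, "a", 0};
  Value n{Kind::kSmi, "", 1};
  std::cout << DescribeAddPath(s, s) << "\n"   // Runtime::StringAdd
            << DescribeAddPath(s, n) << "\n"   // Builtins::STRING_ADD_LEFT
            << DescribeAddPath(n, s) << "\n"   // Builtins::STRING_ADD_RIGHT
            << DescribeAddPath(n, n) << "\n";  // Builtins::ADD
  return 0;
}
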
@@ -7121,17 +7338,16 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
}
- // Save the return address (and get it off the stack).
+ // Push arguments below the return address.
__ pop(ecx);
-
- // Push arguments.
__ push(eax);
__ push(edx);
__ push(ecx);
// Inlined floating point compare.
// Call builtin if operands are not floating point or smi.
- FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
+ Label check_for_symbols;
+ FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
FloatingPointHelper::LoadFloatOperands(masm, ecx);
__ FCmp();
@@ -7155,6 +7371,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ mov(eax, 1);
__ ret(2 * kPointerSize); // eax, edx were pushed
+ // Fast negative check for symbol-to-symbol equality.
+ __ bind(&check_for_symbols);
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &call_builtin, eax, ecx);
+ BranchIfNonSymbol(masm, &call_builtin, edx, ecx);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(2 * kPointerSize);
+ }
+
__ bind(&call_builtin);
// must swap argument order
__ pop(ecx);
@@ -7188,6 +7416,20 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(zero, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
+}
+
+
void StackCheckStub::Generate(MacroAssembler* masm) {
// Because builtins always remove the receiver from the stack, we
// have to fake one to avoid underflowing the stack. The receiver
@@ -7230,7 +7472,6 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// eax holds the exception.
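
BranchIfNonSymbol above jumps away unless the operand is a non-smi whose instance type has both the string bit and the symbol bit set; once both operands are known symbols and identity has already failed, an equality compare can answer "not equal" without calling the builtin. A small sketch of the same mask-and-compare instance-type test with placeholder tag constants (the real kIsSymbolMask/kIsNotStringMask values differ):

#include <cstdint>
#include <iostream>

// Placeholder layout: two bits of the instance type encode "string-ness"
// and "symbol-ness", loosely mirroring kIsNotStringMask / kIsSymbolMask.
const uint8_t kIsNotStringMask = 0x80;  // 0 => string
const uint8_t kStringTag       = 0x00;
const uint8_t kIsSymbolMask    = 0x40;
const uint8_t kSymbolTag       = 0x40;

bool IsSymbol(uint8_t instance_type) {
  // Same shape as the stub: mask out everything but the two bits of
  // interest and compare against "string and symbol" in one go.
  return (instance_type & (kIsSymbolMask | kIsNotStringMask)) ==
         (kSymbolTag | kStringTag);
}

int main() {
  uint8_t symbol_type = 0x40;         // string + symbol bits set
  uint8_t ascii_string_type = 0x04;   // string, but not a symbol
  uint8_t heap_number_type = 0x86;    // not a string at all
  std::cout << IsSymbol(symbol_type) << " "
            << IsSymbol(ascii_string_type) << " "
            << IsSymbol(heap_number_type) << "\n";  // 1 0 0
  return 0;
}
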
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index e409513488..d25d07c7e2 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -273,6 +273,14 @@ class CodeGenState BASE_EMBEDDED {
};
+// -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+ NO_ARGUMENTS_ALLOCATION,
+ EAGER_ARGUMENTS_ALLOCATION,
+ LAZY_ARGUMENTS_ALLOCATION
+};
// -------------------------------------------------------------------------
@@ -332,12 +340,11 @@ class CodeGenerator: public AstVisitor {
// Accessors
Scope* scope() const { return scope_; }
+ bool is_eval() { return is_eval_; }
// Generating deferred code.
void ProcessDeferred();
- bool is_eval() { return is_eval_; }
-
// State
TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
@@ -373,6 +380,12 @@ class CodeGenerator: public AstVisitor {
// target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value);
+ // Returns the arguments allocation mode.
+ ArgumentsAllocationMode ArgumentsMode() const;
+
+ // Store the arguments object and allocate it if necessary.
+ Result StoreArgumentsObject(bool initial);
+
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
@@ -408,6 +421,7 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
@@ -470,6 +484,14 @@ class CodeGenerator: public AstVisitor {
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+  // Use an optimized version of Function.prototype.apply that avoids
+ // allocating the arguments object and just copies the arguments
+ // from the stack.
+ void CallApplyLazy(Property* apply,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position);
+
void CheckStack();
struct InlineRuntimeLUT {
@@ -527,58 +549,6 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
- // Methods and constants for fast case switch statement support.
- //
- // Only allow fast-case switch if the range of labels is at most
- // this factor times the number of case labels.
- // Value is derived from comparing the size of code generated by the normal
- // switch code for Smi-labels to the size of a single pointer. If code
- // quality increases this number should be decreased to match.
- static const int kFastSwitchMaxOverheadFactor = 5;
-
- // Minimal number of switch cases required before we allow jump-table
- // optimization.
- static const int kFastSwitchMinCaseCount = 5;
-
- // The limit of the range of a fast-case switch, as a factor of the number
- // of cases of the switch. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMaxOverheadFactor();
-
- // The minimal number of cases in a switch before the fast-case switch
- // optimization is enabled. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMinCaseCount();
-
- // Allocate a jump table and create code to jump through it.
- // Should call GenerateFastCaseSwitchCases to generate the code for
- // all the cases at the appropriate point.
- void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
- int min_index,
- int range,
- Label* fail_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels);
-
- // Generate the code for cases for the fast case switch.
- // Called by GenerateFastCaseSwitchJumpTable.
- void GenerateFastCaseSwitchCases(SwitchStatement* node,
- Vector<Label> case_labels,
- VirtualFrame* start_frame);
-
- // Fast support for constant-Smi switches.
- void GenerateFastCaseSwitchStatement(SwitchStatement* node,
- int min_index,
- int range,
- int default_index);
-
- // Fast support for constant-Smi switches. Tests whether switch statement
- // permits optimization and calls GenerateFastCaseSwitch if it does.
- // Returns true if the fast-case switch was generated, and false if not.
- bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
-
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 5da9b2f929..1ba475779b 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -141,6 +141,9 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
}
+const int LoadIC::kOffsetToLoadInstruction = 13;
+
+
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : name
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index 3d97a66fa0..0854636574 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -189,7 +189,7 @@ void VirtualFrame::MakeMergable() {
backing_element = elements_[element.index()];
}
Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
+ ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
elements_[i] =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED);
@@ -218,14 +218,12 @@ void VirtualFrame::MakeMergable() {
}
}
}
- // No need to set the copied flag---there are no copies of
- // copies or constants so the original was not copied.
- elements_[i].set_static_type(element.static_type());
+ // No need to set the copied flag --- there are no copies.
} else {
- // Clear the copy flag of non-constant, non-copy elements above
- // the high water mark. They cannot be copied because copes are
- // always higher than their backing store and copies are not
- // allowed above the water mark.
+ // Clear the copy flag of non-constant, non-copy elements.
+ // They cannot be copied because copies are not allowed.
+ // The copy flag is not relied on before the end of this loop,
+ // including when registers are spilled.
elements_[i].clear_copied();
}
}
@@ -998,7 +996,6 @@ Result VirtualFrame::Pop() {
if (element.is_memory()) {
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
- temp.set_static_type(element.static_type());
__ pop(temp.reg());
return temp;
}
@@ -1030,12 +1027,11 @@ Result VirtualFrame::Pop() {
FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
// Preserve the copy flag on the element.
if (element.is_copied()) new_element.set_copied();
- new_element.set_static_type(element.static_type());
elements_[index] = new_element;
__ mov(temp.reg(), Operand(ebp, fp_relative(index)));
- return Result(temp.reg(), element.static_type());
+ return Result(temp.reg());
} else if (element.is_register()) {
- return Result(element.reg(), element.static_type());
+ return Result(element.reg());
} else {
ASSERT(element.is_constant());
return Result(element.handle());
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index b69b800b04..314ea73b28 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -43,7 +43,7 @@ namespace internal {
// as random access to the expression stack elements, locals, and
// parameters.
-class VirtualFrame : public ZoneObject {
+class VirtualFrame: public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
@@ -65,7 +65,7 @@ class VirtualFrame : public ZoneObject {
private:
bool previous_state_;
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ CodeGenerator* cgen() {return CodeGeneratorScope::Current();}
};
// An illegal index into the virtual frame.
@@ -78,6 +78,7 @@ class VirtualFrame : public ZoneObject {
explicit VirtualFrame(VirtualFrame* original);
CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
@@ -87,9 +88,7 @@ class VirtualFrame : public ZoneObject {
int element_count() { return elements_.length(); }
// The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
+ int height() { return element_count() - expression_base_index(); }
int register_location(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
@@ -255,7 +254,9 @@ class VirtualFrame : public ZoneObject {
void PushReceiverSlotAddress();
// Push the function on top of the frame.
- void PushFunction() { PushFrameSlotAt(function_index()); }
+ void PushFunction() {
+ PushFrameSlotAt(function_index());
+ }
// Save the value of the esi register to the context frame slot.
void SaveContextRegister();
@@ -290,7 +291,9 @@ class VirtualFrame : public ZoneObject {
}
// The receiver frame slot.
- Operand Receiver() { return ParameterAt(-1); }
+ Operand Receiver() {
+ return ParameterAt(-1);
+ }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
@@ -320,9 +323,7 @@ class VirtualFrame : public ZoneObject {
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count);
+ Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
// Call load IC. Name and receiver are found on top of the frame.
// Receiver is not dropped.
@@ -357,10 +358,14 @@ class VirtualFrame : public ZoneObject {
void Drop(int count);
// Drop one element.
- void Drop() { Drop(1); }
+ void Drop() {
+ Drop(1);
+ }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(element_count() - 1); }
+ void Dup() {
+ PushFrameSlotAt(element_count() - 1);
+ }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -378,15 +383,17 @@ class VirtualFrame : public ZoneObject {
void EmitPush(Immediate immediate);
// Push an element on the virtual frame.
- void Push(Register reg, StaticType static_type = StaticType());
+ void Push(Register reg);
void Push(Handle<Object> value);
- void Push(Smi* value) { Push(Handle<Object>(value)); }
+ void Push(Smi* value) {
+ Push(Handle<Object> (value));
+ }
// Pushing a result invalidates it (its contents become owned by the
// frame).
void Push(Result* result) {
if (result->is_register()) {
- Push(result->reg(), result->static_type());
+ Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
@@ -418,32 +425,48 @@ class VirtualFrame : public ZoneObject {
int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
- int parameter_count() { return cgen()->scope()->num_parameters(); }
- int local_count() { return cgen()->scope()->num_stack_slots(); }
+ int parameter_count() {
+ return cgen()->scope()->num_parameters();
+ }
+ int local_count() {
+ return cgen()->scope()->num_stack_slots();
+ }
// The index of the element that is at the processor's frame pointer
// (the ebp register). The parameters, receiver, and return address
// are below the frame pointer.
- int frame_pointer() { return parameter_count() + 2; }
+ int frame_pointer() {
+ return parameter_count() + 2;
+ }
// The index of the first parameter. The receiver lies below the first
// parameter.
- int param0_index() { return 1; }
+ int param0_index() {
+ return 1;
+ }
// The index of the context slot in the frame. It is immediately
// above the frame pointer.
- int context_index() { return frame_pointer() + 1; }
+ int context_index() {
+ return frame_pointer() + 1;
+ }
// The index of the function slot in the frame. It is above the frame
// pointer and the context slot.
- int function_index() { return frame_pointer() + 2; }
+ int function_index() {
+ return frame_pointer() + 2;
+ }
// The index of the first local. Between the frame pointer and the
// locals lie the context and the function.
- int local0_index() { return frame_pointer() + 3; }
+ int local0_index() {
+ return frame_pointer() + 3;
+ }
// The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
+ int expression_base_index() {
+ return local0_index() + local_count();
+ }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
@@ -547,7 +570,6 @@ class VirtualFrame : public ZoneObject {
friend class JumpTarget;
};
-
} } // namespace v8::internal
#endif // V8_IA32_VIRTUAL_FRAME_IA32_H_
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 16235db210..35c40366ec 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -863,6 +863,25 @@ static bool StoreICableLookup(LookupResult* lookup) {
}
+static bool LookupForStoreIC(JSObject* object,
+ String* name,
+ LookupResult* lookup) {
+ object->LocalLookup(name, lookup);
+ if (!StoreICableLookup(lookup)) {
+ return false;
+ }
+
+ if (lookup->type() == INTERCEPTOR) {
+ if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
+ object->LocalLookupRealNamedProperty(name, lookup);
+ return StoreICableLookup(lookup);
+ }
+ }
+
+ return true;
+}
+
+
Object* StoreIC::Store(State state,
Handle<Object> object,
Handle<String> name,
@@ -889,8 +908,7 @@ Object* StoreIC::Store(State state,
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
- receiver->LocalLookup(*name, &lookup);
- if (StoreICableLookup(&lookup)) {
+ if (LookupForStoreIC(*receiver, *name, &lookup)) {
UpdateCaches(&lookup, state, receiver, name, value);
}
}
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 9c96ba2fef..7d033778de 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -221,7 +221,7 @@ class LoadIC: public IC {
// The offset from the inlined patch site to the start of the
// inlined load instruction. It is 7 bytes (test eax, imm) plus
// 6 bytes (jne slow_label).
- static const int kOffsetToLoadInstruction = 13;
+ static const int kOffsetToLoadInstruction;
private:
static void Generate(MacroAssembler* masm, const ExternalReference& f);
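
The ic.h and ic-ia32.cc hunks move the initializer of kOffsetToLoadInstruction out of the class body and into the architecture-specific source file, presumably so each port can define its own patch-site offset. The general C++ pattern, collapsed into one runnable file (ExampleIC is not a V8 class; in real code the declaration and the definition live in separate files):

#include <iostream>

// In a header: declare the constant without a value, so the per-architecture
// .cc file can provide the definition.
class ExampleIC {
 public:
  static const int kOffsetToLoadInstruction;
};

// In the ia32-specific .cc file: 7 bytes (test eax, imm) + 6 bytes (jne).
const int ExampleIC::kOffsetToLoadInstruction = 13;

int main() {
  std::cout << ExampleIC::kOffsetToLoadInstruction << "\n";  // 13
  return 0;
}
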
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 6fce1f5c9b..7b294ec455 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -405,7 +405,6 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
// Prepare space for the return values.
int number_of_capture_registers =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
- OffsetsVector offsets(number_of_capture_registers);
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
@@ -421,15 +420,19 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
- int* offsets_vector = offsets.vector();
bool rc;
+  // We have to initialize this with something to keep gcc happy, but we
+  // can't assign its real value until the GC-causing operations below are
+  // done.
+ FixedArray* array = NULL;
// Dispatch to the correct RegExp implementation.
-
Handle<String> original_subject = subject;
Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
if (UseNativeRegexp()) {
#if V8_TARGET_ARCH_IA32
+ OffsetsVector captures(number_of_capture_registers);
+ int* captures_vector = captures.vector();
RegExpMacroAssemblerIA32::Result res;
do {
bool is_ascii = subject->IsAsciiRepresentation();
@@ -439,8 +442,8 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii));
res = RegExpMacroAssemblerIA32::Match(code,
subject,
- offsets_vector,
- offsets.length(),
+ captures_vector,
+ captures.length(),
previous_index);
      // If result is RETRY, the string has changed representation, and we
// must restart from scratch.
@@ -453,7 +456,16 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
|| res == RegExpMacroAssemblerIA32::FAILURE);
rc = (res == RegExpMacroAssemblerIA32::SUCCESS);
-#else
+ if (!rc) return Factory::null_value();
+
+ array = last_match_info->elements();
+ ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
+ // The captures come in (start, end+1) pairs.
+ for (int i = 0; i < number_of_capture_registers; i += 2) {
+ SetCapture(array, i, captures_vector[i]);
+ SetCapture(array, i + 1, captures_vector[i + 1]);
+ }
+#else // !V8_TARGET_ARCH_IA32
UNREACHABLE();
#endif
} else {
@@ -461,33 +473,36 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
return Handle<Object>::null();
}
+ // Now that we have done EnsureCompiledIrregexp we can get the number of
+ // registers.
+ int number_of_registers =
+ IrregexpNumberOfRegisters(FixedArray::cast(jsregexp->data()));
+ OffsetsVector registers(number_of_registers);
+ int* register_vector = registers.vector();
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- offsets_vector[i] = -1;
+ register_vector[i] = -1;
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii));
rc = IrregexpInterpreter::Match(byte_codes,
subject,
- offsets_vector,
+ register_vector,
previous_index);
+ if (!rc) return Factory::null_value();
+
+ array = last_match_info->elements();
+ ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
+ // The captures come in (start, end+1) pairs.
+ for (int i = 0; i < number_of_capture_registers; i += 2) {
+ SetCapture(array, i, register_vector[i]);
+ SetCapture(array, i + 1, register_vector[i + 1]);
+ }
}
- // Handle results from RegExp implementation.
-
- if (!rc) {
- return Factory::null_value();
- }
-
- FixedArray* array = last_match_info->elements();
- ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
- // The captures come in (start, end+1) pairs.
SetLastCaptureCount(array, number_of_capture_registers);
SetLastSubject(array, *original_subject);
SetLastInput(array, *original_subject);
- for (int i = 0; i < number_of_capture_registers; i+=2) {
- SetCapture(array, i, offsets_vector[i]);
- SetCapture(array, i + 1, offsets_vector[i + 1]);
- }
+
return last_match_info;
}
@@ -896,12 +911,13 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
// The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
+ // Count pushes performed to force a stack limit check occasionally.
+ int pushes = 0;
+
for (int reg = 0; reg <= max_register; reg++) {
if (!affected_registers.Get(reg)) {
continue;
}
- // Count pushes performed to force a stack limit check occasionally.
- int pushes = 0;
// The chronologically first deferred action in the trace
// is used to infer the action needed to restore a register
@@ -1885,7 +1901,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
uint32_t differing_bits = (from ^ to);
// A mask and compare is only perfect if the differing bits form a
// number like 00011111 with one single block of trailing 1s.
- if ((differing_bits & (differing_bits + 1)) == 0) {
+ if ((differing_bits & (differing_bits + 1)) == 0 &&
+ from + differing_bits == to) {
pos->determines_perfectly = true;
}
uint32_t common_bits = ~SmearBitsRight(differing_bits);
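
The added "from + differing_bits == to" condition is what makes the quick check genuinely perfect: a trailing-ones pattern in from ^ to only guarantees that a single mask-and-compare accepts exactly [from, to] when from and to sit at the two ends of that block. A small worked example (the character codes are arbitrary samples):

#include <cstdint>
#include <iostream>

// The quick check wants one (value & mask) == compare test to accept exactly
// the range [from, to]. That only works when the differing bits are a block
// of trailing ones AND the range actually spans that whole block.
bool MaskAndCompareIsPerfect(uint32_t from, uint32_t to) {
  uint32_t differing_bits = from ^ to;
  bool trailing_ones = (differing_bits & (differing_bits + 1)) == 0;
  return trailing_ones && (from + differing_bits == to);
}

int main() {
  // 0x61..0x64: differing bits 0x05, not a trailing-ones block -> no.
  std::cout << MaskAndCompareIsPerfect(0x61, 0x64) << "\n";  // 0
  // 0x60..0x63: differing bits 0x03 and 0x60 + 3 == 0x63 -> yes.
  std::cout << MaskAndCompareIsPerfect(0x60, 0x63) << "\n";  // 1
  // 0x61..0x62: differing bits 0x03 look fine, but 0x61 + 3 != 0x62, so a
  // mask-and-compare would also accept 0x60 and 0x63; the added condition
  // rejects it.
  std::cout << MaskAndCompareIsPerfect(0x61, 0x62) << "\n";  // 0
  return 0;
}
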
diff --git a/deps/v8/src/jump-target.cc b/deps/v8/src/jump-target.cc
index a9d777073a..8168dd0c02 100644
--- a/deps/v8/src/jump-target.cc
+++ b/deps/v8/src/jump-target.cc
@@ -81,17 +81,12 @@ void JumpTarget::ComputeEntryFrame() {
// frame.
for (int i = 0; i < length; i++) {
FrameElement element = initial_frame->elements_[i];
- // We do not allow copies or constants in bidirectional frames. All
- // elements above the water mark on bidirectional frames have
- // unknown static types.
+ // We do not allow copies or constants in bidirectional frames.
if (direction_ == BIDIRECTIONAL) {
if (element.is_constant() || element.is_copy()) {
elements.Add(NULL);
continue;
}
- // It's safe to change the static type on the initial frame
- // element, see comment in JumpTarget::Combine.
- initial_frame->elements_[i].set_static_type(StaticType::unknown());
}
elements.Add(&initial_frame->elements_[i]);
}
@@ -142,18 +137,12 @@ void JumpTarget::ComputeEntryFrame() {
for (int i = length - 1; i >= 0; i--) {
if (elements[i] == NULL) {
// Loop over all the reaching frames to check whether the element
- // is synced on all frames, to count the registers it occupies,
- // and to compute a merged static type.
+ // is synced on all frames and to count the registers it occupies.
bool is_synced = true;
RegisterFile candidate_registers;
int best_count = kMinInt;
int best_reg_num = RegisterAllocator::kInvalidRegister;
- StaticType type; // Initially invalid.
- if (direction_ != BIDIRECTIONAL) {
- type = reaching_frames_[0]->elements_[i].static_type();
- }
-
for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i];
is_synced = is_synced && element.is_synced();
@@ -167,7 +156,6 @@ void JumpTarget::ComputeEntryFrame() {
best_reg_num = num;
}
}
- type = type.merge(element.static_type());
}
// If the value is synced on all frames, put it in memory. This
@@ -175,7 +163,6 @@ void JumpTarget::ComputeEntryFrame() {
// memory-to-register move when the value is needed later.
if (is_synced) {
// Already recorded as a memory element.
- entry_frame_->elements_[i].set_static_type(type);
continue;
}
@@ -190,20 +177,15 @@ void JumpTarget::ComputeEntryFrame() {
}
}
- if (best_reg_num == RegisterAllocator::kInvalidRegister) {
- // If there was no register found, the element is already
- // recorded as in memory.
- entry_frame_->elements_[i].set_static_type(type);
- } else {
+ if (best_reg_num != RegisterAllocator::kInvalidRegister) {
// If there was a register choice, use it. Preserve the copied
- // flag on the element. Set the static type as computed.
+ // flag on the element.
bool is_copied = entry_frame_->elements_[i].is_copied();
Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg,
FrameElement::NOT_SYNCED);
if (is_copied) entry_frame_->elements_[i].set_copied();
- entry_frame_->elements_[i].set_static_type(type);
entry_frame_->set_register_location(reg, i);
}
}
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
new file mode 100644
index 0000000000..1844d2bf72
--- /dev/null
+++ b/deps/v8/src/log-inl.h
@@ -0,0 +1,126 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOG_INL_H_
+#define V8_LOG_INL_H_
+
+#include "log.h"
+
+namespace v8 {
+namespace internal {
+
+//
+// VMState class implementation. A simple stack of VM states held by the
+// logger and partially threaded through the call stack. States are pushed by
+// VMState construction and popped by destruction.
+//
+#ifdef ENABLE_LOGGING_AND_PROFILING
+inline const char* StateToString(StateTag state) {
+ switch (state) {
+ case JS:
+ return "JS";
+ case GC:
+ return "GC";
+ case COMPILER:
+ return "COMPILER";
+ case OTHER:
+ return "OTHER";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+VMState::VMState(StateTag state) : disabled_(true) {
+ if (!Logger::is_logging()) {
+ return;
+ }
+
+ disabled_ = false;
+#if !defined(ENABLE_HEAP_PROTECTION)
+ // When not protecting the heap, there is no difference between
+ // EXTERNAL and OTHER. As an optimization in that case, we will not
+ // perform EXTERNAL->OTHER transitions through the API. We thus
+ // compress the two states into one.
+ if (state == EXTERNAL) state = OTHER;
+#endif
+ state_ = state;
+ previous_ = Logger::current_state_;
+ Logger::current_state_ = this;
+
+ if (FLAG_log_state_changes) {
+ LOG(UncheckedStringEvent("Entering", StateToString(state_)));
+ if (previous_ != NULL) {
+ LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
+ }
+ }
+
+#ifdef ENABLE_HEAP_PROTECTION
+ if (FLAG_protect_heap && previous_ != NULL) {
+ if (state_ == EXTERNAL) {
+ // We are leaving V8.
+ ASSERT(previous_->state_ != EXTERNAL);
+ Heap::Protect();
+ } else if (previous_->state_ == EXTERNAL) {
+ // We are entering V8.
+ Heap::Unprotect();
+ }
+ }
+#endif
+}
+
+
+VMState::~VMState() {
+ if (disabled_) return;
+ Logger::current_state_ = previous_;
+
+ if (FLAG_log_state_changes) {
+ LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
+ if (previous_ != NULL) {
+ LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
+ }
+ }
+
+#ifdef ENABLE_HEAP_PROTECTION
+ if (FLAG_protect_heap && previous_ != NULL) {
+ if (state_ == EXTERNAL) {
+ // We are reentering V8.
+ ASSERT(previous_->state_ != EXTERNAL);
+ Heap::Unprotect();
+ } else if (previous_->state_ == EXTERNAL) {
+ // We are leaving V8.
+ Heap::Protect();
+ }
+ }
+#endif
+}
+#endif
+
+
+} } // namespace v8::internal
+
+#endif // V8_LOG_INL_H_
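
log-inl.h keeps VMState inline: the constructor pushes a state onto the logger's state stack and the destructor pops it, so every scope that enters the VM restores the previous state automatically. A stripped-down RAII version of that stack, without the logging, heap-protection or is_logging checks (not V8's actual class):

#include <cassert>
#include <iostream>

enum StateTag { JS, GC, COMPILER, OTHER, EXTERNAL };

const char* StateToString(StateTag s) {
  switch (s) {
    case JS: return "JS";
    case GC: return "GC";
    case COMPILER: return "COMPILER";
    case OTHER: return "OTHER";
    case EXTERNAL: return "EXTERNAL";
  }
  return "?";
}

// A minimal stand-in for Logger::current_state_: a stack threaded through
// the VMState objects that live on the C++ call stack.
class VMState {
 public:
  explicit VMState(StateTag state) : state_(state), previous_(current_) {
    current_ = this;
    std::cout << "Entering " << StateToString(state_) << "\n";
  }
  ~VMState() {
    std::cout << "Leaving " << StateToString(state_) << "\n";
    current_ = previous_;
  }
  static StateTag current() { return current_ ? current_->state_ : OTHER; }

 private:
  StateTag state_;
  VMState* previous_;
  static VMState* current_;
};

VMState* VMState::current_ = nullptr;

void RunGC() {
  VMState gc_state(GC);            // pushed on entry
  assert(VMState::current() == GC);
}                                  // popped here, even on early return

int main() {
  VMState js_state(JS);
  RunGC();
  assert(VMState::current() == JS);  // restored after RunGC
  return 0;
}
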
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 028eb3a015..b31864be46 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -261,14 +261,20 @@ void LogMessageBuilder::AppendAddress(Address addr) {
void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
- if (!FLAG_compress_log || bias == NULL) {
+ if (!FLAG_compress_log) {
Append("0x%" V8PRIxPTR, addr);
+ } else if (bias == NULL) {
+ Append("%" V8PRIxPTR, addr);
} else {
- intptr_t delta = addr - bias;
- // To avoid printing negative offsets in an unsigned form,
- // we are printing an absolute value with a sign.
- const char sign = delta >= 0 ? '+' : '-';
- if (sign == '-') { delta = -delta; }
+ uintptr_t delta;
+ char sign;
+ if (addr >= bias) {
+ delta = addr - bias;
+ sign = '+';
+ } else {
+ delta = bias - addr;
+ sign = '-';
+ }
Append("%c%" V8PRIxPTR, sign, delta);
}
}
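
The rewritten AppendAddress avoids computing a possibly negative delta and then negating it: it compares the two addresses first, always subtracts the smaller from the larger, and lets the sign character record the direction. The same idea in isolation, with snprintf standing in for LogMessageBuilder and a zero bias meaning "no bias":

#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Format an address either absolutely or as a signed offset from a bias,
// without ever pushing a negative number through an unsigned conversion.
void AppendAddress(char* buf, std::size_t size, uintptr_t addr,
                   uintptr_t bias) {
  if (bias == 0) {
    std::snprintf(buf, size, "%" PRIxPTR, addr);  // no bias: absolute value
    return;
  }
  uintptr_t delta;
  char sign;
  if (addr >= bias) {
    delta = addr - bias;
    sign = '+';
  } else {
    delta = bias - addr;  // subtract the other way instead of negating
    sign = '-';
  }
  std::snprintf(buf, size, "%c%" PRIxPTR, sign, delta);
}

int main() {
  char buf[32];
  AppendAddress(buf, sizeof(buf), 0x1040, 0x1000);
  std::puts(buf);  // +40
  AppendAddress(buf, sizeof(buf), 0x0fe0, 0x1000);
  std::puts(buf);  // -20
  return 0;
}
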
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index af49128eaa..0dba08dae5 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -31,9 +31,7 @@
#include "bootstrapper.h"
#include "log.h"
-#include "log-utils.h"
#include "macro-assembler.h"
-#include "platform.h"
#include "serialize.h"
#include "string-stream.h"
@@ -304,6 +302,7 @@ VMState Logger::bottom_state_(EXTERNAL);
SlidingStateWindow* Logger::sliding_state_window_ = NULL;
const char** Logger::log_events_ = NULL;
CompressionHelper* Logger::compression_helper_ = NULL;
+bool Logger::is_logging_ = false;
#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
@@ -318,11 +317,6 @@ const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
#undef DECLARE_SHORT_EVENT
-bool Logger::IsEnabled() {
- return Log::IsEnabled();
-}
-
-
void Logger::ProfilerBeginEvent() {
if (!Log::IsEnabled()) return;
LogMessageBuilder msg;
@@ -426,26 +420,30 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
void Logger::SharedLibraryEvent(const char* library_path,
- unsigned start,
- unsigned end) {
+ uintptr_t start,
+ uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg;
- msg.Append("shared-library,\"%s\",0x%08x,0x%08x\n", library_path,
- start, end);
+ msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
+ library_path,
+ start,
+ end);
msg.WriteToLogFile();
#endif
}
void Logger::SharedLibraryEvent(const wchar_t* library_path,
- unsigned start,
- unsigned end) {
+ uintptr_t start,
+ uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg;
- msg.Append("shared-library,\"%ls\",0x%08x,0x%08x\n", library_path,
- start, end);
+ msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
+ library_path,
+ start,
+ end);
msg.WriteToLogFile();
#endif
}
@@ -623,6 +621,42 @@ void Logger::DeleteEvent(const char* name, void* object) {
}
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// A class that contains all common code dealing with record compression.
+class CompressionHelper {
+ public:
+ explicit CompressionHelper(int window_size)
+ : compressor_(window_size), repeat_count_(0) { }
+
+ // Handles storing message in compressor, retrieving the previous one and
+ // prefixing it with repeat count, if needed.
+ // Returns true if message needs to be written to log.
+ bool HandleMessage(LogMessageBuilder* msg) {
+ if (!msg->StoreInCompressor(&compressor_)) {
+ // Current message repeats the previous one, don't write it.
+ ++repeat_count_;
+ return false;
+ }
+ if (repeat_count_ == 0) {
+ return msg->RetrieveCompressedPrevious(&compressor_);
+ }
+ OS::SNPrintF(prefix_, "%s,%d,",
+ Logger::log_events_[Logger::REPEAT_META_EVENT],
+ repeat_count_ + 1);
+ repeat_count_ = 0;
+ return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
+ }
+
+ private:
+ LogRecordCompressor compressor_;
+ int repeat_count_;
+ EmbeddedVector<char, 20> prefix_;
+};
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+
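
The CompressionHelper moved above counts consecutive records that compress to "same as the previous one" and later emits the count as a single repeat prefix instead of writing each line. A rough standalone model of that behaviour, simplified to whole-line equality and immediate writes (V8's helper works on compressed records and delays output by one record, which is why its prefix uses repeat_count_ + 1):

#include <iostream>
#include <string>
#include <vector>

// Collapses runs of identical records into a "repeat,N," line, where N is
// the number of additional occurrences of the previously written record.
class RepeatCompressor {
 public:
  // Feed one record; returns the lines that should actually be written now.
  std::vector<std::string> Handle(const std::string& record) {
    std::vector<std::string> out;
    if (record == previous_) {
      ++repeat_count_;  // identical to the last record: just count it
      return out;       // nothing to write yet
    }
    Flush(&out);        // emit any pending repeats of the old record
    out.push_back(record);
    previous_ = record;
    return out;
  }

  void Flush(std::vector<std::string>* out) {
    if (repeat_count_ > 0) {
      out->push_back("repeat," + std::to_string(repeat_count_) + "," +
                     previous_);
      repeat_count_ = 0;
    }
  }

 private:
  std::string previous_;
  int repeat_count_ = 0;
};

int main() {
  RepeatCompressor c;
  const char* records[] = {"tick,a", "tick,a", "tick,a", "tick,b"};
  for (const char* r : records)
    for (const std::string& line : c.Handle(r)) std::cout << line << "\n";
  std::vector<std::string> tail;
  c.Flush(&tail);
  for (const std::string& line : tail) std::cout << line << "\n";
  // Prints: tick,a / repeat,2,tick,a / tick,b
  return 0;
}
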
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
@@ -639,6 +673,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.Append(*p);
}
msg.Append('"');
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -653,7 +691,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s\"\n", code->ExecutableSize(), *str);
+ msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
@@ -671,8 +714,13 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s %s:%d\"\n",
+ msg.Append(",%d,\"%s %s:%d\"",
code->ExecutableSize(), *str, *sourcestr, line);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
@@ -684,7 +732,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
LogMessageBuilder msg;
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.AppendAddress(code->address());
- msg.Append(",%d,\"args_count: %d\"\n", code->ExecutableSize(), args_count);
+ msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
@@ -699,48 +752,17 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(source, false);
- msg.Append("\"\n");
+ msg.Append('\"');
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// A class that contains all common code dealing with record compression.
-class CompressionHelper {
- public:
- explicit CompressionHelper(int window_size)
- : compressor_(window_size), repeat_count_(0) { }
-
- // Handles storing message in compressor, retrieving the previous one and
- // prefixing it with repeat count, if needed.
- // Returns true if message needs to be written to log.
- bool HandleMessage(LogMessageBuilder* msg) {
- if (!msg->StoreInCompressor(&compressor_)) {
- // Current message repeats the previous one, don't write it.
- ++repeat_count_;
- return false;
- }
- if (repeat_count_ == 0) {
- return msg->RetrieveCompressedPrevious(&compressor_);
- }
- OS::SNPrintF(prefix_, "%s,%d,",
- Logger::log_events_[Logger::REPEAT_META_EVENT],
- repeat_count_ + 1);
- repeat_count_ = 0;
- return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
- }
-
- private:
- LogRecordCompressor compressor_;
- int repeat_count_;
- EmbeddedVector<char, 20> prefix_;
-};
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
static Address prev_to_ = NULL;
@@ -918,6 +940,7 @@ void Logger::PauseProfiler() {
// Must be the same message as Log::kDynamicBufferSeal.
LOG(UncheckedStringEvent("profiler", "pause"));
}
+ is_logging_ = false;
}
@@ -925,6 +948,7 @@ void Logger::ResumeProfiler() {
if (!profiler_->paused() || !Log::IsEnabled()) {
return;
}
+ is_logging_ = true;
if (FLAG_prof_lazy) {
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
@@ -998,10 +1022,9 @@ void Logger::LogCompiledFunctions() {
Handle<String> script_name(String::cast(script->name()));
int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) {
- line_num += script->line_offset()->value() + 1;
LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
shared->code(), *func_name,
- *script_name, line_num));
+ *script_name, line_num + 1));
} else {
// Can't distinguish enum and script here, so always use Script.
LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
@@ -1042,9 +1065,11 @@ bool Logger::Setup() {
FLAG_prof_auto = false;
}
- bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
+ bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_prof_lazy;
+ || FLAG_log_regexp || FLAG_log_state_changes;
+
+ bool open_log_file = start_logging || FLAG_prof_lazy;
// If we're logging anything, we need to open the log file.
if (open_log_file) {
@@ -1107,10 +1132,15 @@ bool Logger::Setup() {
compression_helper_ = new CompressionHelper(kCompressionWindowSize);
}
+ is_logging_ = start_logging;
+
if (FLAG_prof) {
profiler_ = new Profiler();
- if (!FLAG_prof_auto)
+ if (!FLAG_prof_auto) {
profiler_->pause();
+ } else {
+ is_logging_ = true;
+ }
profiler_->Engage();
}
@@ -1168,85 +1198,4 @@ void Logger::EnableSlidingStateWindow() {
}
-//
-// VMState class implementation. A simple stack of VM states held by the
-// logger and partially threaded through the call stack. States are pushed by
-// VMState construction and popped by destruction.
-//
-#ifdef ENABLE_LOGGING_AND_PROFILING
-static const char* StateToString(StateTag state) {
- switch (state) {
- case JS:
- return "JS";
- case GC:
- return "GC";
- case COMPILER:
- return "COMPILER";
- case OTHER:
- return "OTHER";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-VMState::VMState(StateTag state) {
-#if !defined(ENABLE_HEAP_PROTECTION)
- // When not protecting the heap, there is no difference between
- // EXTERNAL and OTHER. As an optimization in that case, we will not
- // perform EXTERNAL->OTHER transitions through the API. We thus
- // compress the two states into one.
- if (state == EXTERNAL) state = OTHER;
-#endif
- state_ = state;
- previous_ = Logger::current_state_;
- Logger::current_state_ = this;
-
- if (FLAG_log_state_changes) {
- LOG(UncheckedStringEvent("Entering", StateToString(state_)));
- if (previous_ != NULL) {
- LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
- }
- }
-
-#ifdef ENABLE_HEAP_PROTECTION
- if (FLAG_protect_heap && previous_ != NULL) {
- if (state_ == EXTERNAL) {
- // We are leaving V8.
- ASSERT(previous_->state_ != EXTERNAL);
- Heap::Protect();
- } else if (previous_->state_ == EXTERNAL) {
- // We are entering V8.
- Heap::Unprotect();
- }
- }
-#endif
-}
-
-
-VMState::~VMState() {
- Logger::current_state_ = previous_;
-
- if (FLAG_log_state_changes) {
- LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
- if (previous_ != NULL) {
- LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
- }
- }
-
-#ifdef ENABLE_HEAP_PROTECTION
- if (FLAG_protect_heap && previous_ != NULL) {
- if (state_ == EXTERNAL) {
- // We are reentering V8.
- ASSERT(previous_->state_ != EXTERNAL);
- Heap::Unprotect();
- } else if (previous_->state_ == EXTERNAL) {
- // We are leaving V8.
- Heap::Protect();
- }
- }
-#endif
-}
-#endif
-
} } // namespace v8::internal
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 08e957a12e..f68234f1e6 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -28,6 +28,9 @@
#ifndef V8_LOG_H_
#define V8_LOG_H_
+#include "platform.h"
+#include "log-utils.h"
+
namespace v8 {
namespace internal {
@@ -77,7 +80,7 @@ class CompressionHelper;
#ifdef ENABLE_LOGGING_AND_PROFILING
#define LOG(Call) \
do { \
- if (v8::internal::Logger::IsEnabled()) \
+ if (v8::internal::Logger::is_logging()) \
v8::internal::Logger::Call; \
} while (false)
#else
@@ -88,12 +91,13 @@ class CompressionHelper;
class VMState BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
public:
- explicit VMState(StateTag state);
- ~VMState();
+ inline explicit VMState(StateTag state);
+ inline ~VMState();
StateTag state() { return state_; }
private:
+ bool disabled_;
StateTag state_;
VMState* previous_;
#else
@@ -217,11 +221,11 @@ class Logger {
static void HeapSampleItemEvent(const char* type, int number, int bytes);
static void SharedLibraryEvent(const char* library_path,
- unsigned start,
- unsigned end);
+ uintptr_t start,
+ uintptr_t end);
static void SharedLibraryEvent(const wchar_t* library_path,
- unsigned start,
- unsigned end);
+ uintptr_t start,
+ uintptr_t end);
// ==== Events logged by --log-regexp ====
// Regexp compilation and execution events.
@@ -236,7 +240,9 @@ class Logger {
return current_state_ ? current_state_->state() : OTHER;
}
- static bool IsEnabled();
+ static bool is_logging() {
+ return is_logging_;
+ }
// Pause/Resume collection of profiling data.
// When data collection is paused, Tick events are discarded until
@@ -317,8 +323,10 @@ class Logger {
friend class VMState;
friend class LoggerTestHelper;
+
+ static bool is_logging_;
#else
- static bool is_enabled() { return false; }
+ static bool is_logging() { return false; }
#endif
};
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 56e4ea6caf..89d97e925e 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -947,13 +947,18 @@ void EncodeFreeRegion(Address free_start, int free_size) {
// Try to promote all objects in new space. Heap numbers and sequential
-// strings are promoted to the code space, all others to the old space.
+// strings are promoted to the old data space, large objects to large object
+// space, and all others to the old pointer space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space() ||
- target_space == Heap::old_data_space());
- Object* forwarded = target_space->MCAllocateRaw(object_size);
-
+ Object* forwarded;
+ if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ forwarded = Failure::Exception();
+ } else {
+ OldSpace* target_space = Heap::TargetSpace(object);
+ ASSERT(target_space == Heap::old_pointer_space() ||
+ target_space == Heap::old_data_space());
+ forwarded = target_space->MCAllocateRaw(object_size);
+ }
if (forwarded->IsFailure()) {
forwarded = Heap::new_space()->MCAllocateRaw(object_size);
}
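
The rewritten MCAllocateFromNewSpace treats objects above the paged-space size limit as if promotion had failed, so they fall back to reallocation in new space just like a failed old-space allocation would. A compact sketch of that "try the preferred space, fall back on failure" shape with invented allocator types (a null pointer plays the role of Failure):

#include <cstddef>
#include <iostream>

// Toy allocators standing in for OldSpace::MCAllocateRaw and
// NewSpace::MCAllocateRaw.
struct Space {
  const char* name;
  std::size_t max_object_size;
  void* Allocate(std::size_t size) {
    if (size > max_object_size) return nullptr;  // allocation failure
    return static_cast<void*>(this);             // dummy non-null result
  }
};

const std::size_t kMaxObjectSizeInPagedSpace = 8 * 1024;

const char* Promote(std::size_t object_size, Space* old_space,
                    Space* new_space) {
  void* forwarded = nullptr;
  if (object_size <= kMaxObjectSizeInPagedSpace) {
    forwarded = old_space->Allocate(object_size);  // preferred target
  }                                                // else: treat as failure
  if (forwarded == nullptr) {
    forwarded = new_space->Allocate(object_size);  // fallback target
    return forwarded != nullptr ? new_space->name : "OOM";
  }
  return old_space->name;
}

int main() {
  Space old_space{"old space", 8 * 1024};
  Space new_space{"new space", 512 * 1024};
  std::cout << Promote(100, &old_space, &new_space) << "\n";         // old space
  std::cout << Promote(100 * 1024, &old_space, &new_space) << "\n";  // new space
  return 0;
}
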
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 7805d47578..ec4b3528bb 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -37,13 +37,13 @@ function GetInstanceName(cons) {
if (cons.length == 0) {
return "";
}
- var first = cons.charAt(0).toLowerCase();
+ var first = %StringToLowerCase(StringCharAt.call(cons, 0));
var mapping = kVowelSounds;
- if (cons.length > 1 && (cons.charAt(0) != first)) {
+ if (cons.length > 1 && (StringCharAt.call(cons, 0) != first)) {
// First char is upper case
- var second = cons.charAt(1).toLowerCase();
+ var second = %StringToLowerCase(StringCharAt.call(cons, 1));
// Second char is upper case
- if (cons.charAt(1) != second)
+ if (StringCharAt.call(cons, 1) != second)
mapping = kCapitalVowelSounds;
}
var s = mapping[first] ? "an " : "a ";
@@ -126,7 +126,7 @@ function FormatString(format, args) {
var str;
try { str = ToDetailString(args[i]); }
catch (e) { str = "#<error>"; }
- result = result.split("%" + i).join(str);
+ result = ArrayJoin.call(StringSplit.call(result, "%" + i), str);
}
return result;
}
@@ -146,17 +146,9 @@ function ToDetailString(obj) {
function MakeGenericError(constructor, type, args) {
- if (args instanceof $Array) {
- for (var i = 0; i < args.length; i++) {
- var elem = args[i];
- if (elem instanceof $Array && elem.length > 100) { // arbitrary limit, grab a reasonable slice to report
- args[i] = elem.slice(0,20).concat("...");
- }
- }
- } else if (IS_UNDEFINED(args)) {
+ if (IS_UNDEFINED(args)) {
args = [];
}
-
var e = new constructor(kAddMessageAccessorsMarker);
e.type = type;
e.arguments = args;
@@ -281,7 +273,7 @@ Script.prototype.locationFromPosition = function (position,
// Determine start, end and column.
var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
var end = this.line_ends[line];
- if (end > 0 && this.source.charAt(end - 1) == '\r') end--;
+ if (end > 0 && StringCharAt.call(this.source, end - 1) == '\r') end--;
var column = position - start;
// Adjust according to the offset within the resource.
@@ -394,7 +386,7 @@ Script.prototype.sourceLine = function (opt_line) {
// Return the source line.
var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
var end = this.line_ends[line];
- return this.source.substring(start, end);
+ return StringSubstring.call(this.source, start, end);
}
@@ -498,7 +490,7 @@ SourceLocation.prototype.restrict = function (opt_limit, opt_before) {
* Source text for this location.
*/
SourceLocation.prototype.sourceText = function () {
- return this.script.source.substring(this.start, this.end);
+ return StringSubstring.call(this.script.source, this.start, this.end);
};
@@ -535,7 +527,7 @@ function SourceSlice(script, from_line, to_line, from_position, to_position) {
* the line terminating characters (if any)
*/
SourceSlice.prototype.sourceText = function () {
- return this.script.source.substring(this.from_position, this.to_position);
+ return StringSubstring.call(this.script.source, this.from_position, this.to_position);
};
diff --git a/deps/v8/src/mirror-delay.js b/deps/v8/src/mirror-delay.js
index 060586dc3b..d0e8aa44e2 100644
--- a/deps/v8/src/mirror-delay.js
+++ b/deps/v8/src/mirror-delay.js
@@ -1895,8 +1895,8 @@ JSONProtocolSerializer.prototype.includeSource_ = function() {
}
-JSONProtocolSerializer.prototype.compactFormat_ = function() {
- return this.options_ && this.options_.compactFormat;
+JSONProtocolSerializer.prototype.inlineRefs_ = function() {
+ return this.options_ && this.options_.inlineRefs;
}
@@ -1960,7 +1960,7 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
// the mirror to the referenced mirrors.
if (reference &&
(mirror.isValue() || mirror.isScript() || mirror.isContext())) {
- if (this.compactFormat_() && mirror.isValue()) {
+ if (this.inlineRefs_() && mirror.isValue()) {
return this.serializeReferenceWithDisplayData_(mirror);
} else {
this.add_(mirror);
@@ -2051,7 +2051,10 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
content.sourceLength = mirror.source().length;
content.scriptType = mirror.scriptType();
content.compilationType = mirror.compilationType();
- if (mirror.compilationType() == 1) { // Compilation type eval.
+ // For compilation type eval emit information on the script from which
+ // eval was called if a script is present.
+ if (mirror.compilationType() == 1 &&
+ mirror.evalFromFunction().script()) {
content.evalFromScript =
this.serializeReference(mirror.evalFromFunction().script());
var evalFromLocation = mirror.evalFromLocation()
@@ -2172,7 +2175,7 @@ JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
result.name = propertyMirror.name();
var propertyValue = propertyMirror.value();
- if (this.compactFormat_() && propertyValue.isValue()) {
+ if (this.inlineRefs_() && propertyValue.isValue()) {
result.value = this.serializeReferenceWithDisplayData_(propertyValue);
} else {
if (propertyMirror.attributes() != PropertyAttribute.None) {
@@ -2229,6 +2232,15 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
if (!IS_UNDEFINED(source_line_text)) {
content.sourceLineText = source_line_text;
}
+
+ content.scopes = [];
+ for (var i = 0; i < mirror.scopeCount(); i++) {
+ var scope = mirror.scope(i);
+ content.scopes.push({
+ type: scope.scopeType(),
+ index: i
+ });
+ }
}
@@ -2236,7 +2248,9 @@ JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
content.index = mirror.scopeIndex();
content.frameIndex = mirror.frameIndex();
content.type = mirror.scopeType();
- content.object = this.serializeReference(mirror.scopeObject());
+ content.object = this.inlineRefs_() ?
+ this.serializeValue(mirror.scopeObject()) :
+ this.serializeReference(mirror.scopeObject());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index d34e46539e..8c83715649 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -481,11 +481,6 @@ bool Object::IsMapCache() {
}
-bool Object::IsLookupCache() {
- return IsHashTable();
-}
-
-
bool Object::IsPrimitive() {
return IsOddball() || IsNumber() || IsString();
}
@@ -659,6 +654,12 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define WRITE_INT_FIELD(p, offset, value) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
+#define READ_INTPTR_FIELD(p, offset) \
+ (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INTPTR_FIELD(p, offset, value) \
+ (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_UINT32_FIELD(p, offset) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
@@ -1304,7 +1305,6 @@ int DescriptorArray::Search(String* name) {
}
-
String* DescriptorArray::GetKey(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return String::cast(get(ToKeyIndex(descriptor_number)));
@@ -1388,7 +1388,6 @@ CAST_ACCESSOR(Dictionary)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(MapCache)
-CAST_ACCESSOR(LookupCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqAsciiString)
@@ -1786,11 +1785,17 @@ int Map::inobject_properties() {
int HeapObject::SizeFromMap(Map* map) {
InstanceType instance_type = map->instance_type();
- // Only inline the two most frequent cases.
- if (instance_type == JS_OBJECT_TYPE) return map->instance_size();
+ // Only inline the most frequent cases.
+ if (instance_type == JS_OBJECT_TYPE ||
+ (instance_type & (kIsNotStringMask | kStringRepresentationMask)) ==
+ (kStringTag | kConsStringTag) ||
+ instance_type == JS_ARRAY_TYPE) return map->instance_size();
if (instance_type == FIXED_ARRAY_TYPE) {
return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
}
+ if (instance_type == BYTE_ARRAY_TYPE) {
+ return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
+ }
// Otherwise do the general size computation.
return SlowSizeFromMap(map);
}
@@ -2130,6 +2135,7 @@ ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
@@ -2303,12 +2309,12 @@ void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
Address Proxy::proxy() {
- return AddressFrom<Address>(READ_INT_FIELD(this, kProxyOffset));
+ return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
}
void Proxy::set_proxy(Address value) {
- WRITE_INT_FIELD(this, kProxyOffset, OffsetFrom(value));
+ WRITE_INTPTR_FIELD(this, kProxyOffset, OffsetFrom(value));
}
@@ -2639,6 +2645,13 @@ void Map::ClearCodeCache() {
}
+void JSArray::EnsureSize(int required_size) {
+ ASSERT(HasFastElements());
+ if (elements()->length() >= required_size) return;
+ Expand(required_size);
+}
+
+
void JSArray::SetContent(FixedArray* storage) {
set_length(Smi::FromInt(storage->length()), SKIP_WRITE_BARRIER);
set_elements(storage);
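
Editor's note: the JSArray::EnsureSize change above moves the cheap capacity check into an inlined fast path and leaves the allocating slow path in an out-of-line Expand(). A minimal standalone sketch of that split, using a hypothetical growable array (names and growth policy are illustrative, not V8's):

#include <cassert>
#include <vector>

// Hypothetical container showing the inline-check / out-of-line-grow split
// used by JSArray::EnsureSize / Expand in the hunk above.
class GrowableArray {
 public:
  // Hot path: a single comparison, meant to be inlined at call sites.
  inline void EnsureSize(int required_size) {
    if (static_cast<int>(backing_.size()) >= required_size) return;
    Expand(required_size);  // Rare, out-of-line slow path.
  }
  int capacity() const { return static_cast<int>(backing_.size()); }

 private:
  // Cold path: grow with some slack so repeated small EnsureSize calls do
  // not reallocate every time (the exact slack used by V8 is not shown here).
  void Expand(int required_size) {
    backing_.resize(required_size + (required_size >> 1) + 16);
  }
  std::vector<int> backing_;
};

int main() {
  GrowableArray a;
  a.EnsureSize(10);
  assert(a.capacity() >= 10);
  a.EnsureSize(5);  // Already big enough; no reallocation.
  return 0;
}
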
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index cbd36e0a37..ad57d17fb8 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -1302,16 +1302,19 @@ Object* JSObject::ReplaceSlowProperty(String* name,
Object* value,
PropertyAttributes attributes) {
Dictionary* dictionary = property_dictionary();
- PropertyDetails old_details =
- dictionary->DetailsAt(dictionary->FindStringEntry(name));
- int new_index = old_details.index();
- if (old_details.IsTransition()) new_index = 0;
+ int old_index = dictionary->FindStringEntry(name);
+ int new_enumeration_index = 0; // 0 means "Use the next available index."
+ if (old_index != -1) {
+ // All calls to ReplaceSlowProperty have had all transitions removed.
+ ASSERT(!dictionary->DetailsAt(old_index).IsTransition());
+ new_enumeration_index = dictionary->DetailsAt(old_index).index();
+ }
- PropertyDetails new_details(attributes, NORMAL, old_details.index());
+ PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
Object* result =
- property_dictionary()->SetOrAddStringEntry(name, value, new_details);
+ dictionary->SetOrAddStringEntry(name, value, new_details);
if (result->IsFailure()) return result;
- if (property_dictionary() != result) {
+ if (dictionary != result) {
set_properties(Dictionary::cast(result));
}
return value;
@@ -1562,7 +1565,11 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
DescriptorArray* descriptors = map()->instance_descriptors();
- int number = descriptors->Search(name);
+ int number = DescriptorLookupCache::Lookup(descriptors, name);
+ if (number == DescriptorLookupCache::kAbsent) {
+ number = descriptors->Search(name);
+ DescriptorLookupCache::Update(descriptors, name, number);
+ }
if (number != DescriptorArray::kNotFound) {
result->DescriptorResult(this, descriptors->GetDetails(number), number);
} else {
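
Editor's note: the hunk above consults a DescriptorLookupCache before falling back to the full descriptor Search() and then records the result, including misses. A rough sketch of that probe / miss / update pattern; the std::map keyed only by name is a simplification, since the real cache keys on (descriptors, name) and is a fixed-size table:

#include <map>
#include <string>

static const int kNotFound = -1;   // Search result: key not present.
static const int kAbsent   = -2;   // Cache result: nothing cached yet.

static const char* descriptor_names[] = { "length", "name", "prototype" };
static std::map<std::string, int> cache;

static int Search(const std::string& name) {   // The expensive scan.
  for (int i = 0; i < 3; i++)
    if (name == descriptor_names[i]) return i;
  return kNotFound;
}

static int Lookup(const std::string& name) {
  std::map<std::string, int>::const_iterator it = cache.find(name);
  return it == cache.end() ? kAbsent : it->second;
}

int CachedSearch(const std::string& name) {
  int number = Lookup(name);
  if (number == kAbsent) {      // Miss: do the real search and remember
    number = Search(name);      // the answer, including negative results
    cache[name] = number;       // (kNotFound), so repeated misses stay cheap.
  }
  return number;
}
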
@@ -4632,7 +4639,7 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
- IteratePointers(v, kNameOffset, kCodeOffset + kPointerSize);
+ IteratePointers(v, kNameOffset, kConstructStubOffset + kPointerSize);
IteratePointers(v, kInstanceClassNameOffset, kScriptOffset + kPointerSize);
IteratePointers(v, kDebugInfoOffset, kInferredNameOffset + kPointerSize);
}
@@ -4977,10 +4984,8 @@ Object* JSArray::Initialize(int capacity) {
}
-void JSArray::EnsureSize(int required_size) {
+void JSArray::Expand(int required_size) {
Handle<JSArray> self(this);
- ASSERT(HasFastElements());
- if (elements()->length() >= required_size) return;
Handle<FixedArray> old_backing(elements());
int old_size = old_backing->length();
// Doubling in size would be overkill, but leave some slack to avoid
@@ -6352,8 +6357,8 @@ Object* HashTable<prefix_size, element_size>::EnsureCapacity(
int n, HashTableKey* key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
- // Make sure 25% is free
- if (nof + (nof >> 2) <= capacity) return this;
+ // Make sure 50% is free
+ if (nof + (nof >> 1) <= capacity) return this;
Object* obj = Allocate(nof * 2);
if (obj->IsFailure()) return obj;
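
Editor's note: the capacity test above is tightened so that, with nof elements after the insertion, the table is only kept when at least nof/2 slots remain free; otherwise it is reallocated at 2 * nof entries. A small check of the arithmetic, mirroring the shift in the source:

#include <cstdio>

// True if a table with 'capacity' slots can grow to 'nof' elements while
// keeping at least nof/2 slots free, as in:
//   if (nof + (nof >> 1) <= capacity) return this;
bool HasEnoughCapacity(int nof, int capacity) {
  return nof + (nof >> 1) <= capacity;
}

int main() {
  // Boundary for a 32-slot table:
  printf("%d\n", HasEnoughCapacity(21, 32));  // 1: 21 + 10 = 31 <= 32
  printf("%d\n", HasEnoughCapacity(22, 32));  // 0: 22 + 11 = 33 >  32
  return 0;
}
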
@@ -6756,60 +6761,6 @@ class SymbolsKey : public HashTableKey {
};
-// MapNameKeys are used as keys in lookup caches.
-class MapNameKey : public HashTableKey {
- public:
- MapNameKey(Map* map, String* name)
- : map_(map), name_(name) { }
-
- bool IsMatch(Object* other) {
- if (!other->IsFixedArray()) return false;
- FixedArray* pair = FixedArray::cast(other);
- Map* map = Map::cast(pair->get(0));
- if (map != map_) return false;
- String* name = String::cast(pair->get(1));
- return name->Equals(name_);
- }
-
- typedef uint32_t (*HashFunction)(Object* obj);
-
- virtual HashFunction GetHashFunction() { return MapNameHash; }
-
- static uint32_t MapNameHashHelper(Map* map, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
- return addr_hash ^ name->Hash();
- }
-
- static uint32_t MapNameHash(Object* obj) {
- FixedArray* pair = FixedArray::cast(obj);
- Map* map = Map::cast(pair->get(0));
- String* name = String::cast(pair->get(1));
- return MapNameHashHelper(map, name);
- }
-
- virtual uint32_t Hash() {
- return MapNameHashHelper(map_, name_);
- }
-
- virtual Object* GetObject() {
- Object* obj = Heap::AllocateFixedArray(2);
- if (obj->IsFailure()) return obj;
- FixedArray* pair = FixedArray::cast(obj);
- pair->set(0, map_);
- pair->set(1, name_);
- return pair;
- }
-
- virtual bool IsStringKey() { return false; }
-
- private:
- Map* map_;
- String* name_;
-};
-
-
Object* MapCache::Lookup(FixedArray* array) {
SymbolsKey key(array);
int entry = FindEntry(&key);
@@ -6832,31 +6783,6 @@ Object* MapCache::Put(FixedArray* array, Map* value) {
}
-int LookupCache::Lookup(Map* map, String* name) {
- MapNameKey key(map, name);
- int entry = FindEntry(&key);
- if (entry == -1) return kNotFound;
- return Smi::cast(get(EntryToIndex(entry) + 1))->value();
-}
-
-
-Object* LookupCache::Put(Map* map, String* name, int value) {
- MapNameKey key(map, name);
- Object* obj = EnsureCapacity(1, &key);
- if (obj->IsFailure()) return obj;
- Object* k = key.GetObject();
- if (k->IsFailure()) return k;
-
- LookupCache* cache = reinterpret_cast<LookupCache*>(obj);
- int entry = cache->FindInsertionEntry(k, key.Hash());
- int index = EntryToIndex(entry);
- cache->set(index, k);
- cache->set(index + 1, Smi::FromInt(value), SKIP_WRITE_BARRIER);
- cache->ElementAdded();
- return cache;
-}
-
-
Object* Dictionary::Allocate(int at_least_space_for) {
Object* obj = DictionaryBase::Allocate(at_least_space_for);
// Initialize the next enumeration index.
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 21907f8f3a..fd9af38c30 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -59,7 +59,6 @@
// - SymbolTable
// - CompilationCacheTable
// - MapCache
-// - LookupCache
// - Context
// - GlobalContext
// - String
@@ -678,7 +677,6 @@ class Object BASE_EMBEDDED {
inline bool IsSymbolTable();
inline bool IsCompilationCacheTable();
inline bool IsMapCache();
- inline bool IsLookupCache();
inline bool IsPrimitive();
inline bool IsGlobalObject();
inline bool IsJSGlobalObject();
@@ -1641,6 +1639,9 @@ class FixedArray: public Array {
// Garbage collection support.
static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
+ // Code Generation support.
+ static int OffsetOfElementAt(int index) { return SizeFor(index); }
+
// Casting.
static inline FixedArray* cast(Object* obj);
@@ -2012,27 +2013,6 @@ class MapCache: public HashTable<0, 2> {
};
-// LookupCache.
-//
-// Maps a key consisting of a map and a name to an index within a
-// fast-case properties array.
-//
-// LookupCaches are used to avoid repeatedly searching instance
-// descriptors.
-class LookupCache: public HashTable<0, 2> {
- public:
- int Lookup(Map* map, String* name);
- Object* Put(Map* map, String* name, int offset);
- static inline LookupCache* cast(Object* obj);
-
- // Constant returned by Lookup when the key was not found.
- static const int kNotFound = -1;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(LookupCache);
-};
-
-
// Dictionary for keeping properties and elements in slow case.
//
// One element in the prefix is used for storing non-element
@@ -2056,6 +2036,7 @@ class Dictionary: public DictionaryBase {
// Returns the property details for the property at entry.
PropertyDetails DetailsAt(int entry) {
+ ASSERT(entry >= 0); // Not found is -1, which is not caught by get().
return PropertyDetails(Smi::cast(get(EntryToIndex(entry) + 2)));
}
@@ -2766,6 +2747,9 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ // [construct stub]: Code stub for constructing instances of this function.
+ DECL_ACCESSORS(construct_stub, Code)
+
// Returns whether this function has been compiled to native code yet.
inline bool is_compiled();
@@ -2861,7 +2845,8 @@ class SharedFunctionInfo: public HeapObject {
// (An even number of integers has a size that is a multiple of a pointer.)
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kLengthOffset = kCodeOffset + kPointerSize;
+ static const int kConstructStubOffset = kCodeOffset + kPointerSize;
+ static const int kLengthOffset = kConstructStubOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize;
@@ -4005,7 +3990,7 @@ class JSArray: public JSObject {
// Uses handles. Ensures that the fixed array backing the JSArray has at
// least the stated size.
- void EnsureSize(int minimum_size_of_backing_fixed_array);
+ inline void EnsureSize(int minimum_size_of_backing_fixed_array);
// Dispatched behavior.
#ifdef DEBUG
@@ -4018,6 +4003,10 @@ class JSArray: public JSObject {
static const int kSize = kLengthOffset + kPointerSize;
private:
+ // Expand the fixed array backing of a fast-case JSArray to at least
+ // the requested size.
+ void Expand(int minimum_size_of_backing_fixed_array);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
};
diff --git a/deps/v8/src/oprofile-agent.cc b/deps/v8/src/oprofile-agent.cc
index c4595b40a9..8aa3937f90 100644
--- a/deps/v8/src/oprofile-agent.cc
+++ b/deps/v8/src/oprofile-agent.cc
@@ -52,6 +52,10 @@ bool OProfileAgent::Initialize() {
return true;
}
#else
+ if (FLAG_oprofile) {
+ OS::Print("Warning: --oprofile specified but binary compiled without "
+ "oprofile support.\n");
+ }
return true;
#endif
}
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index a9a5e32e23..2b4be79a0f 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -1582,7 +1582,8 @@ VariableProxy* AstBuildingParser::Declare(Handle<String> name,
// For global const variables we bind the proxy to a variable.
if (mode == Variable::CONST && top_scope_->is_global_scope()) {
ASSERT(resolve); // should be set by all callers
- var = NEW(Variable(top_scope_, name, Variable::CONST, true, false));
+ Variable::Kind kind = Variable::NORMAL;
+ var = NEW(Variable(top_scope_, name, Variable::CONST, true, kind));
}
// If requested and we have a local variable, bind the proxy to the variable
@@ -2653,10 +2654,15 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
double y_val = y->AsLiteral()->handle()->Number();
int64_t y_int = static_cast<int64_t>(y_val);
// There are rounding issues with this optimization, but they don't
- // apply if the number to be divided with has a reciprocal that can
- // be precisely represented as a floating point number. This is
- // the case if the number is an integer power of 2.
- if (static_cast<double>(y_int) == y_val && IsPowerOf2(y_int)) {
+ // apply if the divisor has a reciprocal that can be precisely
+ // represented as a floating point number. This is the case
+ // if the number is an integer power of 2. Negative integer powers of
+ // 2 work too, but for -2, -1, 1 and 2 we don't do the strength
+ // reduction because the inlined optimistic idiv has a reasonable
+ // chance of succeeding by producing a Smi answer with no remainder.
+ if (static_cast<double>(y_int) == y_val &&
+ (IsPowerOf2(y_int) || IsPowerOf2(-y_int)) &&
+ (y_int > 2 || y_int < -2)) {
y = NewNumberLiteral(1 / y_val);
op = Token::MUL;
}
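
Editor's note: the tightened condition above only rewrites x / c into x * (1/c) when c or -c is a power of two with |c| > 2, because 1/c is then exact in floating point and the small divisors are better served by the inlined optimistic idiv. A minimal sketch of the same predicate; IsPowerOf2 is written out here since only its name appears in the hunk:

#include <cstdint>
#include <cstdio>

// Assumed equivalent of V8's IsPowerOf2 helper.
static bool IsPowerOf2(int64_t x) {
  return x > 0 && (x & (x - 1)) == 0;
}

// Decide whether 'x / y_val' may be strength-reduced to 'x * (1 / y_val)'.
static bool CanStrengthReduceDivision(double y_val) {
  int64_t y_int = static_cast<int64_t>(y_val);
  return static_cast<double>(y_int) == y_val &&        // y is an integer
         (IsPowerOf2(y_int) || IsPowerOf2(-y_int)) &&  // |y| is a power of 2
         (y_int > 2 || y_int < -2);                    // skip -2..2
}

int main() {
  printf("%d %d %d %d\n",
         CanStrengthReduceDivision(8.0),    // 1: 1/8 is exact
         CanStrengthReduceDivision(-4.0),   // 1: 1/-4 is exact
         CanStrengthReduceDivision(2.0),    // 0: left to the inlined idiv
         CanStrengthReduceDivision(6.0));   // 0: 1/6 is not exact
  return 0;
}
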
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 79ffe81497..39495ab967 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -224,8 +224,8 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+static uintptr_t StringToULong(char* buffer) {
+ return strtoul(buffer, NULL, 16); // NOLINT
}
#endif
@@ -242,13 +242,13 @@ void OS::LogSharedLibraryAddresses() {
addr_buffer[10] = 0;
int result = read(fd, addr_buffer + 2, 8);
if (result < 8) break;
- unsigned start = StringToLong(addr_buffer);
+ uintptr_t start = StringToULong(addr_buffer);
result = read(fd, addr_buffer + 2, 1);
if (result < 1) break;
if (addr_buffer[2] != '-') break;
result = read(fd, addr_buffer + 2, 8);
if (result < 8) break;
- unsigned end = StringToLong(addr_buffer);
+ uintptr_t end = StringToULong(addr_buffer);
char buffer[MAP_LENGTH];
int bytes_read = -1;
do {
@@ -262,10 +262,21 @@ void OS::LogSharedLibraryAddresses() {
// Ignore mappings that are not executable.
if (buffer[3] != 'x') continue;
char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- LOG(SharedLibraryEvent(start_of_path, start, end));
+ // If there is no filename for this line then log it as an anonymous
+ // mapping and use the address as its name.
+ if (start_of_path == NULL) {
+ // 40 characters is enough to print a 64-bit address range.
+ ASSERT(sizeof(buffer) > 40);
+ snprintf(buffer,
+ sizeof(buffer),
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR,
+ start,
+ end);
+ LOG(SharedLibraryEvent(buffer, start, end));
+ } else {
+ buffer[bytes_read] = 0;
+ LOG(SharedLibraryEvent(start_of_path, start, end));
+ }
}
close(fd);
#endif
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 3e0e2841bc..5a0eae2533 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -35,10 +35,6 @@
#include <AvailabilityMacros.h>
-#ifdef MAC_OS_X_VERSION_10_5
-# include <execinfo.h> // backtrace, backtrace_symbols
-#endif
-
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
@@ -58,6 +54,17 @@
#include "platform.h"
+// Manually define these here as weak imports, rather than including execinfo.h.
+// This lets us launch on 10.4 which does not have these calls.
+extern "C" {
+ extern int backtrace(void**, int) __attribute__((weak_import));
+ extern char** backtrace_symbols(void* const*, int)
+ __attribute__((weak_import));
+ extern void backtrace_symbols_fd(void* const*, int, int)
+ __attribute__((weak_import));
+}
+
+
namespace v8 {
namespace internal {
@@ -214,9 +221,10 @@ int OS::ActivationFrameAlignment() {
int OS::StackWalk(Vector<StackFrame> frames) {
-#ifndef MAC_OS_X_VERSION_10_5
- return 0;
-#else
+ // If the weak link to execinfo has failed, i.e. we are on 10.4, abort.
+ if (backtrace == NULL)
+ return 0;
+
int frames_size = frames.length();
void** addresses = NewArray<void*>(frames_size);
int frames_count = backtrace(addresses, frames_size);
@@ -244,7 +252,6 @@ int OS::StackWalk(Vector<StackFrame> frames) {
free(symbols);
return frames_count;
-#endif
}
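
Editor's note: the Mac change above replaces the compile-time MAC_OS_X_VERSION_10_5 guard with weak imports, so one binary can still launch on 10.4 (where backtrace() is missing) and simply detects the symbol at run time. A reduced sketch of the same pattern; the weak_import attribute is Apple/Mach-O specific, and on platforms where the symbol is strongly linked the check degenerates to "always available":

// Declare the libSystem calls as weak imports instead of including
// <execinfo.h>; if the running OS does not provide them, the symbols
// resolve to NULL instead of aborting the launch.
extern "C" {
  extern int backtrace(void**, int) __attribute__((weak_import));
  extern char** backtrace_symbols(void* const*, int)
      __attribute__((weak_import));
}

int CaptureFrames(void** buffer, int size) {
  // Run-time probe: on 10.4 the weak import is unresolved and compares
  // equal to NULL, so we report zero frames instead of crashing.
  if (backtrace == NULL) return 0;
  return backtrace(buffer, size);
}
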
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 4522c74031..b5123c5aca 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -44,6 +44,8 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
+#define V8_INFINITY INFINITY
+
// Windows specific stuff.
#ifdef WIN32
@@ -58,7 +60,8 @@ enum {
FP_NORMAL
};
-#define INFINITY HUGE_VAL
+#undef V8_INFINITY
+#define V8_INFINITY HUGE_VAL
namespace v8 {
namespace internal {
@@ -75,14 +78,6 @@ int strncasecmp(const char* s1, const char* s2, int n);
#endif // _MSC_VER
-// MinGW specific stuff.
-#ifdef __MINGW32__
-
-// Needed for va_list.
-#include <stdarg.h>
-
-#endif // __MINGW32__
-
// Random is missing on both Visual Studio and MinGW.
int random();
@@ -90,6 +85,10 @@ int random();
// GCC specific stuff
#ifdef __GNUC__
+
+// Needed for va_list on at least MinGW and Android.
+#include <stdarg.h>
+
#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
@@ -100,8 +99,8 @@ int random();
// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
#include <limits>
-#undef INFINITY
-#define INFINITY std::numeric_limits<double>::infinity()
+#undef V8_INFINITY
+#define V8_INFINITY std::numeric_limits<double>::infinity()
#endif
#endif // __GNUC__
@@ -109,6 +108,8 @@ int random();
namespace v8 {
namespace internal {
+class Semaphore;
+
double ceiling(double x);
// Forward declarations.
diff --git a/deps/v8/src/register-allocator.cc b/deps/v8/src/register-allocator.cc
index 2599232ce8..d1b08bbc4c 100644
--- a/deps/v8/src/register-allocator.cc
+++ b/deps/v8/src/register-allocator.cc
@@ -40,18 +40,7 @@ namespace internal {
Result::Result(Register reg) {
ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
CodeGeneratorScope::Current()->allocator()->Use(reg);
- value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
- | TypeField::encode(REGISTER)
- | DataField::encode(reg.code_);
-}
-
-
-Result::Result(Register reg, StaticType type) {
- ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
- CodeGeneratorScope::Current()->allocator()->Use(reg);
- value_ = StaticTypeField::encode(type.static_type_)
- | TypeField::encode(REGISTER)
- | DataField::encode(reg.code_);
+ value_ = TypeField::encode(REGISTER) | DataField::encode(reg.code_);
}
diff --git a/deps/v8/src/register-allocator.h b/deps/v8/src/register-allocator.h
index c5391918d5..f7167d9262 100644
--- a/deps/v8/src/register-allocator.h
+++ b/deps/v8/src/register-allocator.h
@@ -45,80 +45,6 @@ namespace internal {
// -------------------------------------------------------------------------
-// StaticType
-//
-// StaticType represent the type of an expression or a word at runtime.
-// The types are ordered by knowledge, so that if a value can come about
-// in more than one way, and there are different static types inferred
-// for the different ways, the types can be combined to a type that we
-// are still certain of (possibly just "unknown").
-
-class StaticType BASE_EMBEDDED {
- public:
- StaticType() : static_type_(UNKNOWN_TYPE) {}
-
- static StaticType unknown() { return StaticType(); }
- static StaticType smi() { return StaticType(SMI_TYPE); }
- static StaticType jsstring() { return StaticType(STRING_TYPE); }
- static StaticType heap_object() { return StaticType(HEAP_OBJECT_TYPE); }
-
- // Accessors
- bool is_unknown() { return static_type_ == UNKNOWN_TYPE; }
- bool is_smi() { return static_type_ == SMI_TYPE; }
- bool is_heap_object() { return (static_type_ & HEAP_OBJECT_TYPE) != 0; }
- bool is_jsstring() { return static_type_ == STRING_TYPE; }
-
- bool operator==(StaticType other) const {
- return static_type_ == other.static_type_;
- }
-
- // Find the best approximating type for a value.
- // The argument must not be NULL.
- static StaticType TypeOf(Object* object) {
- // Remember to make the most specific tests first. A string is also a heap
- // object, so test for string-ness first.
- if (object->IsSmi()) return smi();
- if (object->IsString()) return jsstring();
- if (object->IsHeapObject()) return heap_object();
- return unknown();
- }
-
- // Merges two static types to a type that combines the knowledge
- // of both. If there is no way to combine (e.g., being a string *and*
- // being a smi), the resulting type is unknown.
- StaticType merge(StaticType other) {
- StaticType x(
- static_cast<StaticTypeEnum>(static_type_ & other.static_type_));
- return x;
- }
-
- private:
- enum StaticTypeEnum {
- // Numbers are chosen so that least upper bound of the following
- // partial order is implemented by bitwise "and":
- //
- // string
- // |
- // heap-object smi
- // \ /
- // unknown
- //
- UNKNOWN_TYPE = 0x00,
- SMI_TYPE = 0x01,
- HEAP_OBJECT_TYPE = 0x02,
- STRING_TYPE = 0x04 | HEAP_OBJECT_TYPE
- };
- explicit StaticType(StaticTypeEnum static_type) : static_type_(static_type) {}
-
- // StaticTypeEnum static_type_;
- StaticTypeEnum static_type_;
-
- friend class FrameElement;
- friend class Result;
-};
-
-
-// -------------------------------------------------------------------------
// Results
//
// Results encapsulate the compile-time values manipulated by the code
@@ -138,13 +64,9 @@ class Result BASE_EMBEDDED {
// Construct a register Result.
explicit Result(Register reg);
- // Construct a register Result with a known static type.
- Result(Register reg, StaticType static_type);
-
// Construct a Result whose value is a compile-time constant.
explicit Result(Handle<Object> value) {
- value_ = StaticTypeField::encode(StaticType::TypeOf(*value).static_type_)
- | TypeField::encode(CONSTANT)
+ value_ = TypeField::encode(CONSTANT)
| DataField::encode(ConstantList()->length());
ConstantList()->Add(value);
}
@@ -182,15 +104,6 @@ class Result BASE_EMBEDDED {
inline void Unuse();
- StaticType static_type() const {
- return StaticType(StaticTypeField::decode(value_));
- }
-
- void set_static_type(StaticType type) {
- value_ = value_ & ~StaticTypeField::mask();
- value_ = value_ | StaticTypeField::encode(type.static_type_);
- }
-
Type type() const { return TypeField::decode(value_); }
void invalidate() { value_ = TypeField::encode(INVALID); }
@@ -225,9 +138,8 @@ class Result BASE_EMBEDDED {
private:
uint32_t value_;
- class StaticTypeField: public BitField<StaticType::StaticTypeEnum, 0, 3> {};
- class TypeField: public BitField<Type, 3, 2> {};
- class DataField: public BitField<uint32_t, 5, 32 - 6> {};
+ class TypeField: public BitField<Type, 0, 2> {};
+ class DataField: public BitField<uint32_t, 2, 32 - 3> {};
inline void CopyTo(Result* destination) const;
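
Editor's note: with StaticType gone, the Result bit layout above shrinks to a 2-bit type field at bit 0 and a data field in the remaining bits. A small sketch of that kind of packing, using a simplified stand-in for the BitField template named in the hunk (the real one takes type, shift, size the same way but lives elsewhere in V8):

#include <cassert>
#include <cstdint>

// Simplified stand-in for BitField<Type, shift, size>.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t value) { return static_cast<T>((value & kMask) >> shift); }
};

enum Type { INVALID, REGISTER, CONSTANT };  // Illustrative Result types.

// Matches the new layout: 2 bits of type at bit 0, data above it.
typedef BitField<Type, 0, 2> TypeField;
typedef BitField<uint32_t, 2, 32 - 3> DataField;

int main() {
  uint32_t value = TypeField::encode(REGISTER) | DataField::encode(5);
  assert(TypeField::decode(value) == REGISTER);
  assert(DataField::decode(value) == 5u);
  return 0;
}
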
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index e0a0226ec8..4d1fbd9dd7 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -283,7 +283,10 @@ void AstOptimizer::VisitAssignment(Assignment* node) {
case Token::ASSIGN:
// No type can be inferred from the general assignment.
- scoped_fni.Enter();
+ // Don't infer if it is "a = function(){...}();"-like expression.
+ if (node->value()->AsCall() == NULL) {
+ scoped_fni.Enter();
+ }
break;
case Token::ASSIGN_BIT_OR:
case Token::ASSIGN_BIT_XOR:
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index d1c9162d1e..dcff28bc36 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -50,9 +50,8 @@ namespace v8 {
namespace internal {
-#define RUNTIME_ASSERT(value) do { \
- if (!(value)) return IllegalOperation(); \
-} while (false)
+#define RUNTIME_ASSERT(value) \
+ if (!(value)) return Top::ThrowIllegalOperation();
// Cast the given object to a value of the specified type and store
// it in a variable with the given name. If the object is not of the
@@ -97,11 +96,6 @@ namespace internal {
static StaticResource<StringInputBuffer> runtime_string_input_buffer;
-static Object* IllegalOperation() {
- return Top::Throw(Heap::illegal_access_symbol());
-}
-
-
static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
StackLimitCheck check;
if (check.HasOverflowed()) return Top::StackOverflow();
@@ -124,7 +118,8 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
}
}
mode = copy->GetWriteBarrierMode();
- for (int i = 0; i < copy->map()->inobject_properties(); i++) {
+ int nof = copy->map()->inobject_properties();
+ for (int i = 0; i < nof; i++) {
Object* value = copy->InObjectPropertyAt(i);
if (value->IsJSObject()) {
JSObject* jsObject = JSObject::cast(value);
@@ -522,12 +517,9 @@ static Object* Runtime_IsConstructCall(Arguments args) {
static Object* Runtime_RegExpCompile(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
- CONVERT_CHECKED(JSRegExp, raw_re, args[0]);
- Handle<JSRegExp> re(raw_re);
- CONVERT_CHECKED(String, raw_pattern, args[1]);
- Handle<String> pattern(raw_pattern);
- CONVERT_CHECKED(String, raw_flags, args[2]);
- Handle<String> flags(raw_flags);
+ CONVERT_ARG_CHECKED(JSRegExp, re, 0);
+ CONVERT_ARG_CHECKED(String, pattern, 1);
+ CONVERT_ARG_CHECKED(String, flags, 2);
Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
if (result.is_null()) return Failure::Exception();
return *result;
@@ -537,8 +529,7 @@ static Object* Runtime_RegExpCompile(Arguments args) {
static Object* Runtime_CreateApiFunction(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
- CONVERT_CHECKED(FunctionTemplateInfo, raw_data, args[0]);
- Handle<FunctionTemplateInfo> data(raw_data);
+ CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
return *Factory::CreateApiFunction(data);
}
@@ -1066,15 +1057,12 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) {
static Object* Runtime_RegExpExec(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 4);
- CONVERT_CHECKED(JSRegExp, raw_regexp, args[0]);
- Handle<JSRegExp> regexp(raw_regexp);
- CONVERT_CHECKED(String, raw_subject, args[1]);
- Handle<String> subject(raw_subject);
+ CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_CHECKED(String, subject, 1);
// Due to the way the JS files are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security.
CONVERT_CHECKED(Smi, index, args[2]);
- CONVERT_CHECKED(JSArray, raw_last_match_info, args[3]);
- Handle<JSArray> last_match_info(raw_last_match_info);
+ CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index->value() >= 0);
RUNTIME_ASSERT(index->value() <= subject->length());
@@ -1217,8 +1205,7 @@ static Object* Runtime_SetCode(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSFunction, raw_target, args[0]);
- Handle<JSFunction> target(raw_target);
+ CONVERT_ARG_CHECKED(JSFunction, target, 0);
Handle<Object> code = args.at<Object>(1);
Handle<Context> context(target->context());
@@ -2633,12 +2620,9 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
String* key = String::cast(args[1]);
if (receiver->HasFastProperties()) {
// Attempt to use lookup cache.
- Object* obj = Heap::GetKeyedLookupCache();
- if (obj->IsFailure()) return obj;
- LookupCache* cache = LookupCache::cast(obj);
Map* receiver_map = receiver->map();
- int offset = cache->Lookup(receiver_map, key);
- if (offset != LookupCache::kNotFound) {
+ int offset = KeyedLookupCache::Lookup(receiver_map, key);
+ if (offset != -1) {
Object* value = receiver->FastPropertyAt(offset);
return value->IsTheHole() ? Heap::undefined_value() : value;
}
@@ -2648,9 +2632,7 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
receiver->LocalLookup(key, &result);
if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) {
int offset = result.GetFieldIndex();
- Object* obj = cache->Put(receiver_map, key, offset);
- if (obj->IsFailure()) return obj;
- Heap::SetKeyedLookupCache(LookupCache::cast(obj));
+ KeyedLookupCache::Update(receiver_map, key, offset);
Object* value = receiver->FastPropertyAt(offset);
return value->IsTheHole() ? Heap::undefined_value() : value;
}
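
Editor's note: the runtime hunk above replaces the heap-allocated LookupCache with a static KeyedLookupCache mapping (map, name) to a fast-property field offset, with -1 meaning "not cached". The cache internals are not shown in this patch section; the following is only a plausible direct-mapped sketch of the probe/update protocol used here:

#include <cstdint>

namespace keyed_lookup_sketch {

const int kLength = 64;
const int kNotFound = -1;

struct Entry { const void* map; uint32_t name_hash; int offset; };
static Entry entries[kLength];  // Zero-initialized; map == NULL means empty.

static int Hash(const void* map, uint32_t name_hash) {
  uint32_t addr = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map) >> 2);
  return static_cast<int>((addr ^ name_hash) % kLength);
}

int Lookup(const void* map, uint32_t name_hash) {
  const Entry& e = entries[Hash(map, name_hash)];
  return (e.map == map && e.name_hash == name_hash) ? e.offset : kNotFound;
}

void Update(const void* map, uint32_t name_hash, int offset) {
  Entry& e = entries[Hash(map, name_hash)];
  e.map = map;
  e.name_hash = name_hash;
  e.offset = offset;
}

}  // namespace keyed_lookup_sketch
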
@@ -2977,9 +2959,7 @@ static Object* Runtime_IsPropertyEnumerable(Arguments args) {
static Object* Runtime_GetPropertyNames(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSObject, raw_object, args[0]);
- Handle<JSObject> object(raw_object);
+ CONVERT_ARG_CHECKED(JSObject, object, 0);
return *GetKeysFor(object);
}
@@ -3718,20 +3698,8 @@ static Object* Runtime_NumberMod(Arguments args) {
static Object* Runtime_StringAdd(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
-
CONVERT_CHECKED(String, str1, args[0]);
CONVERT_CHECKED(String, str2, args[1]);
- int len1 = str1->length();
- int len2 = str2->length();
- if (len1 == 0) return str2;
- if (len2 == 0) return str1;
- int length_sum = len1 + len2;
- // Make sure that an out of memory exception is thrown if the length
- // of the new cons string is too large to fit in a Smi.
- if (length_sum > Smi::kMaxValue || length_sum < 0) {
- Top::context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
return Heap::AllocateConsString(str1, str2);
}
@@ -4166,16 +4134,64 @@ static Object* Runtime_Math_log(Arguments args) {
}
+// Helper function to compute x^y, where y is known to be an
+// integer. Uses binary decomposition to limit the number of
+// multiplications; see the discussion in "Hacker's Delight" by Henry
+// S. Warren, Jr., figure 11-6, page 213.
+static double powi(double x, int y) {
+ ASSERT(y != kMinInt);
+ unsigned n = (y < 0) ? -y : y;
+ double m = x;
+ double p = 1;
+ while (true) {
+ if ((n & 1) != 0) p *= m;
+ n >>= 1;
+ if (n == 0) {
+ if (y < 0) {
+ // Unfortunately, we have to be careful when p has reached
+ // infinity in the computation, because sometimes the higher
+ // internal precision in the pow() implementation would have
+ // given us a finite p. This happens very rarely.
+ double result = 1.0 / p;
+ return (result == 0 && isinf(p))
+ ? pow(x, static_cast<double>(y)) // Avoid pow(double, int).
+ : result;
+ } else {
+ return p;
+ }
+ }
+ m *= m;
+ }
+}
+
+
static Object* Runtime_Math_pow(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(x, args[0]);
+
+ // If the second argument is a smi, it is much faster to call the
+ // custom powi() function than the generic pow().
+ if (args[1]->IsSmi()) {
+ int y = Smi::cast(args[1])->value();
+ return Heap::AllocateHeapNumber(powi(x, y));
+ }
+
CONVERT_DOUBLE_CHECKED(y, args[1]);
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return Heap::nan_value();
+ if (y == 0.5) {
+ // It's not uncommon to use Math.pow(x, 0.5) to compute the square
+ // root of a number. To speed up such computations, we explicitly
+ // check for this case and use the sqrt() function which is faster
+ // than pow().
+ return Heap::AllocateHeapNumber(sqrt(x));
+ } else if (y == -0.5) {
+ // Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5).
+ return Heap::AllocateHeapNumber(1.0 / sqrt(x));
} else if (y == 0) {
return Smi::FromInt(1);
+ } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+ return Heap::nan_value();
} else {
return Heap::AllocateHeapNumber(pow(x, y));
}
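
Editor's note: powi() above computes x^y for integer y by binary decomposition: square the base at each step and fold it into the accumulator whenever the corresponding bit of |y| is set, then invert once for negative y. A standalone copy of the core loop plus a quick check; the overflow-to-infinity fix-up of the original is left out of this sketch:

#include <cassert>
#include <cmath>

// Square-and-multiply exponentiation for integer exponents, as in powi().
// Assumes y != INT_MIN, matching the ASSERT in the original.
static double PowInt(double x, int y) {
  unsigned n = (y < 0) ? -y : y;
  double m = x;
  double p = 1;
  while (n != 0) {
    if ((n & 1) != 0) p *= m;  // This bit of the exponent is set.
    m *= m;                    // Square the base for the next bit.
    n >>= 1;
  }
  return (y < 0) ? 1.0 / p : p;
}

int main() {
  assert(PowInt(2.0, 10) == 1024.0);
  assert(PowInt(3.0, 0) == 1.0);
  assert(PowInt(2.0, -3) == 0.125);
  // The runtime also routes y == 0.5 and y == -0.5 through sqrt(), which
  // is both faster and exact for perfect squares:
  assert(std::sqrt(9.0) == 3.0);
  return 0;
}
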
@@ -4295,45 +4311,61 @@ static Object* Runtime_NewClosure(Arguments args) {
}
+static Handle<Code> ComputeConstructStub(Handle<Map> map) {
+ // TODO(385): Change this to create a construct stub specialized for
+ // the given map to make allocation of simple objects - and maybe
+ // arrays - much faster.
+ return Handle<Code>(Builtins::builtin(Builtins::JSConstructStubGeneric));
+}
+
+
static Object* Runtime_NewObject(Arguments args) {
- NoHandleAllocation ha;
+ HandleScope scope;
ASSERT(args.length() == 1);
- Object* constructor = args[0];
- if (constructor->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(constructor);
+ Handle<Object> constructor = args.at<Object>(0);
+
+ // If the constructor isn't a proper function we throw a type error.
+ if (!constructor->IsJSFunction()) {
+ Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
+ Handle<Object> type_error =
+ Factory::NewTypeError("not_constructor", arguments);
+ return Top::Throw(*type_error);
+ }
- // Handle stepping into constructors if step into is active.
+ Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::StepInActive()) {
- HandleScope scope;
- Debug::HandleStepIn(Handle<JSFunction>(function), 0, true);
- }
+ // Handle stepping into constructors if step into is active.
+ if (Debug::StepInActive()) {
+ Debug::HandleStepIn(function, 0, true);
+ }
#endif
- if (function->has_initial_map() &&
- function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
+ if (function->has_initial_map()) {
+ if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
// The 'Function' function ignores the receiver object when
// called using 'new' and creates a new JSFunction object that
// is returned. The receiver object is only used for error
// reporting if an error occurs when constructing the new
- // JSFunction. AllocateJSObject should not be used to allocate
- // JSFunctions since it does not properly initialize the shared
- // part of the function. Since the receiver is ignored anyway,
- // we use the global object as the receiver instead of a new
- // JSFunction object. This way, errors are reported the same
- // way whether or not 'Function' is called using 'new'.
+ // JSFunction. Factory::NewJSObject() should not be used to
+ // allocate JSFunctions since it does not properly initialize
+ // the shared part of the function. Since the receiver is
+ // ignored anyway, we use the global object as the receiver
+ // instead of a new JSFunction object. This way, errors are
+ // reported the same way whether or not 'Function' is called
+ // using 'new'.
return Top::context()->global();
}
- return Heap::AllocateJSObject(function);
}
- HandleScope scope;
- Handle<Object> cons(constructor);
- // The constructor is not a function; throw a type error.
- Handle<Object> type_error =
- Factory::NewTypeError("not_constructor", HandleVector(&cons, 1));
- return Top::Throw(*type_error);
+ bool first_allocation = !function->has_initial_map();
+ Handle<JSObject> result = Factory::NewJSObject(function);
+ if (first_allocation) {
+ Handle<Map> map = Handle<Map>(function->initial_map());
+ Handle<Code> stub = ComputeConstructStub(map);
+ function->shared()->set_construct_stub(*stub);
+ }
+ return *result;
}
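
Editor's note: Runtime_NewObject above now attaches a construct stub to the shared function info the first time a function is used as a constructor; for now the stub is always the generic builtin, with the TODO leaving room to specialize it per map. A compressed sketch of that first-use initialization, with placeholder types standing in for V8's handles and builtins:

// All types below are placeholders, not V8's real classes.
struct Code {};
struct SharedFunctionInfo { Code* construct_stub; };
struct Function {
  SharedFunctionInfo* shared;
  bool has_initial_map;
};

static Code generic_construct_stub;  // Stands in for JSConstructStubGeneric.

// Per TODO(385), this could later return a stub specialized for the
// function's initial map instead of the generic one.
static Code* ComputeConstructStub(Function* /*function*/) {
  return &generic_construct_stub;
}

void InitializeConstructStubOnFirstAllocation(Function* function) {
  // The initial map is created by the first allocation, so that is also
  // the point where the construct stub gets attached to the shared info.
  bool first_allocation = !function->has_initial_map;
  // ... allocate the object here (this creates the initial map) ...
  if (first_allocation) {
    function->shared->construct_stub = ComputeConstructStub(function);
  }
}
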
@@ -4534,7 +4566,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
ASSERT(args.length() == 2);
if (!args[0]->IsContext() || !args[1]->IsString()) {
- return MakePair(IllegalOperation(), NULL);
+ return MakePair(Top::ThrowIllegalOperation(), NULL);
}
Handle<Context> context = args.at<Context>(0);
Handle<String> name = args.at<String>(1);
@@ -6622,8 +6654,8 @@ static Object* Runtime_GetBreakLocations(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);
- Handle<SharedFunctionInfo> shared(raw_fun->shared());
+ CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ Handle<SharedFunctionInfo> shared(fun->shared());
// Find the number of break points
Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
if (break_locations->IsUndefined()) return Heap::undefined_value();
@@ -6640,8 +6672,8 @@ static Object* Runtime_GetBreakLocations(Arguments args) {
static Object* Runtime_SetFunctionBreakPoint(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);
- Handle<SharedFunctionInfo> shared(raw_fun->shared());
+ CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ Handle<SharedFunctionInfo> shared(fun->shared());
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0);
Handle<Object> break_point_object_arg = args.at<Object>(2);
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index d4b49704ae..df26b8894a 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -391,8 +391,9 @@ function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
function APPLY_PREPARE(args) {
var length;
- // First check whether length is a positive Smi and args is an array. This is the
- // fast case. If this fails, we do the slow case that takes care of more eventualities
+ // First check whether length is a positive Smi and args is an
+ // array. This is the fast case. If this fails, we do the slow case
+ // that takes care of more eventualities.
if (%_IsArray(args)) {
length = args.length;
if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) {
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index fedfbd64fe..8a237fd0ec 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -432,10 +432,13 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
String* name,
Variable::Mode* mode) {
ASSERT(name->IsSymbol());
+ int result = ContextSlotCache::Lookup(code, name, mode);
+ if (result != ContextSlotCache::kNotFound) return result;
if (code->sinfo_size() > 0) {
// Loop below depends on the NULL sentinel after the context slot names.
ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS ||
*(ContextEntriesAddr(code) + 1) == NULL);
+
// slots start after length entry
Object** p0 = ContextEntriesAddr(code) + 1;
Object** p = p0;
@@ -443,14 +446,18 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
while (*p != NULL) {
if (*p == name) {
ASSERT(((p - p0) & 1) == 0);
- if (mode != NULL) {
- ReadInt(p + 1, reinterpret_cast<int*>(mode));
- }
- return ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+ int v;
+ ReadInt(p + 1, &v);
+ Variable::Mode mode_value = static_cast<Variable::Mode>(v);
+ if (mode != NULL) *mode = mode_value;
+ result = ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+ ContextSlotCache::Update(code, name, mode_value, result);
+ return result;
}
p += 2;
}
}
+ ContextSlotCache::Update(code, name, Variable::INTERNAL, -1);
return -1;
}
@@ -526,7 +533,78 @@ int ScopeInfo<Allocator>::NumberOfLocals() const {
}
+int ContextSlotCache::Hash(Code* code, String* name) {
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t addr_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code)) >> 2;
+ return (addr_hash ^ name->Hash()) % kLength;
+}
+
+
+int ContextSlotCache::Lookup(Code* code,
+ String* name,
+ Variable::Mode* mode) {
+ int index = Hash(code, name);
+ Key& key = keys_[index];
+ if ((key.code == code) && key.name->Equals(name)) {
+ Value result(values_[index]);
+ if (mode != NULL) *mode = result.mode();
+ return result.index() + kNotFound;
+ }
+ return kNotFound;
+}
+
+
+void ContextSlotCache::Update(Code* code,
+ String* name,
+ Variable::Mode mode,
+ int slot_index) {
+ String* symbol;
+ ASSERT(slot_index > kNotFound);
+ if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ int index = Hash(code, symbol);
+ Key& key = keys_[index];
+ key.code = code;
+ key.name = symbol;
+ // Note that Value only accepts a non-negative (uint) index.
+ values_[index] = Value(mode, slot_index - kNotFound).raw();
+#ifdef DEBUG
+ ValidateEntry(code, name, mode, slot_index);
+#endif
+ }
+}
+
+
+void ContextSlotCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].code = NULL;
+}
+
+
+ContextSlotCache::Key ContextSlotCache::keys_[ContextSlotCache::kLength];
+
+
+uint32_t ContextSlotCache::values_[ContextSlotCache::kLength];
+
+
#ifdef DEBUG
+
+void ContextSlotCache::ValidateEntry(Code* code,
+ String* name,
+ Variable::Mode mode,
+ int slot_index) {
+ String* symbol;
+ if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ int index = Hash(code, name);
+ Key& key = keys_[index];
+ ASSERT(key.code == code);
+ ASSERT(key.name->Equals(name));
+ Value result(values_[index]);
+ ASSERT(result.mode() == mode);
+ ASSERT(result.index() + kNotFound == slot_index);
+ }
+}
+
+
template <class Allocator>
static void PrintList(const char* list_name,
int nof_internal_slots,
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index a097d34f9f..28d169a394 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -163,6 +163,74 @@ class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> {
};
+// Cache for mapping (code, property name) into context slot index.
+// The cache contains both positive and negative results.
+// A slot index of -1 means the property is absent.
+// Cleared at startup and prior to mark sweep collection.
+class ContextSlotCache {
+ public:
+ // Lookup context slot index for (code, name).
+ // If absent, kNotFound is returned.
+ static int Lookup(Code* code,
+ String* name,
+ Variable::Mode* mode);
+
+ // Update an element in the cache.
+ static void Update(Code* code,
+ String* name,
+ Variable::Mode mode,
+ int slot_index);
+
+ // Clear the cache.
+ static void Clear();
+
+ static const int kNotFound = -2;
+ private:
+ inline static int Hash(Code* code, String* name);
+
+#ifdef DEBUG
+ static void ValidateEntry(Code* code,
+ String* name,
+ Variable::Mode mode,
+ int slot_index);
+#endif
+
+ static const int kLength = 256;
+ struct Key {
+ Code* code;
+ String* name;
+ };
+
+ struct Value {
+ Value(Variable::Mode mode, int index) {
+ ASSERT(ModeField::is_valid(mode));
+ ASSERT(IndexField::is_valid(index));
+ value_ = ModeField::encode(mode) | IndexField::encode(index);
+ ASSERT(mode == this->mode());
+ ASSERT(index == this->index());
+ }
+
+ inline Value(uint32_t value) : value_(value) {}
+
+ uint32_t raw() { return value_; }
+
+ Variable::Mode mode() { return ModeField::decode(value_); }
+
+ int index() { return IndexField::decode(value_); }
+
+ // Bit fields in value_ (type, shift, size). Must be public so the
+ // constants can be embedded in generated code.
+ class ModeField: public BitField<Variable::Mode, 0, 3> {};
+ class IndexField: public BitField<int, 3, 32-3> {};
+ private:
+ uint32_t value_;
+ };
+
+ static Key keys_[kLength];
+ static uint32_t values_[kLength];
+};
+
+
} } // namespace v8::internal
#endif // V8_SCOPEINFO_H_
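
Editor's note: in the new ContextSlotCache, kNotFound is -2 and the cached slot index is stored biased by it (Update() writes slot_index - kNotFound, Lookup() returns index() + kNotFound), so the legitimate negative answer -1 ("property is absent") still fits in the unsigned IndexField. A few lines checking that round trip, independent of the BitField machinery:

#include <cassert>

int main() {
  const int kNotFound = -2;
  for (int slot_index = -1; slot_index < 100; slot_index++) {
    unsigned stored = static_cast<unsigned>(slot_index - kNotFound);
    int recovered = static_cast<int>(stored) + kNotFound;
    assert(recovered == slot_index);
    assert(stored >= 1u);  // Always representable in the unsigned field.
  }
  return 0;
}
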
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 7122eb03cc..88b1c66f3b 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -81,12 +81,12 @@ Variable* LocalsMap::Declare(Scope* scope,
Handle<String> name,
Variable::Mode mode,
bool is_valid_LHS,
- bool is_this) {
+ Variable::Kind kind) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
ASSERT(p->key == name.location());
- p->value = new Variable(scope, name, mode, is_valid_LHS, is_this);
+ p->value = new Variable(scope, name, mode, is_valid_LHS, kind);
}
return reinterpret_cast<Variable*>(p->value);
}
@@ -169,7 +169,8 @@ void Scope::Initialize(bool inside_with) {
// such parameter is 'this' which is passed on the stack when
// invoking scripts
{ Variable* var =
- locals_.Declare(this, Factory::this_symbol(), Variable::VAR, false, true);
+ locals_.Declare(this, Factory::this_symbol(), Variable::VAR,
+ false, Variable::THIS);
var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
receiver_->BindTo(var);
@@ -179,7 +180,8 @@ void Scope::Initialize(bool inside_with) {
// Declare 'arguments' variable which exists in all functions.
// Note that it may never be accessed, in which case it won't
// be allocated during variable allocation.
- Declare(Factory::arguments_symbol(), Variable::VAR);
+ locals_.Declare(this, Factory::arguments_symbol(), Variable::VAR,
+ true, Variable::ARGUMENTS);
}
}
@@ -203,7 +205,7 @@ Variable* Scope::Lookup(Handle<String> name) {
Variable* Scope::DeclareFunctionVar(Handle<String> name) {
ASSERT(is_function_scope() && function_ == NULL);
- function_ = new Variable(this, name, Variable::CONST, true, false);
+ function_ = new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
return function_;
}
@@ -213,7 +215,7 @@ Variable* Scope::Declare(Handle<String> name, Variable::Mode mode) {
// INTERNAL variables are allocated explicitly, and TEMPORARY
// variables are allocated via NewTemporary().
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
- return locals_.Declare(this, name, mode, true, false);
+ return locals_.Declare(this, name, mode, true, Variable::NORMAL);
}
@@ -247,7 +249,8 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
VariableProxy* Scope::NewTemporary(Handle<String> name) {
- Variable* var = new Variable(this, name, Variable::TEMPORARY, true, false);
+ Variable* var = new Variable(this, name, Variable::TEMPORARY, true,
+ Variable::NORMAL);
VariableProxy* tmp = new VariableProxy(name, false, false);
tmp->BindTo(var);
temps_.Add(var);
@@ -503,7 +506,7 @@ Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
Variable* var = map->Lookup(name);
if (var == NULL) {
// Declare a new non-local.
- var = map->Declare(NULL, name, mode, true, false);
+ var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
// Allocate it by giving it a dynamic lookup.
var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
}
@@ -619,7 +622,7 @@ void Scope::ResolveVariable(Scope* global_scope,
// We must have a global variable.
ASSERT(global_scope != NULL);
var = new Variable(global_scope, proxy->name(),
- Variable::DYNAMIC, true, false);
+ Variable::DYNAMIC, true, Variable::NORMAL);
} else if (scope_inside_with_) {
// If we are inside a with statement we give up and look up
@@ -797,7 +800,7 @@ void Scope::AllocateParameterLocals() {
// are never allocated in the context).
Variable* arguments_shadow =
new Variable(this, Factory::arguments_shadow_symbol(),
- Variable::INTERNAL, true, false);
+ Variable::INTERNAL, true, Variable::ARGUMENTS);
arguments_shadow_ =
new VariableProxy(Factory::arguments_shadow_symbol(), false, false);
arguments_shadow_->BindTo(arguments_shadow);
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index b2f61ef660..ea4e0f7466 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -47,7 +47,7 @@ class LocalsMap: public HashMap {
virtual ~LocalsMap();
Variable* Declare(Scope* scope, Handle<String> name, Variable::Mode mode,
- bool is_valid_LHS, bool is_this);
+ bool is_valid_LHS, Variable::Kind kind);
Variable* Lookup(Handle<String> name);
};
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index eb497fb2e8..f45d65d304 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -1261,15 +1261,19 @@ RelativeAddress Serializer::Allocate(HeapObject* obj) {
found = Heap::InSpace(obj, s);
}
CHECK(found);
+ int size = obj->Size();
if (s == NEW_SPACE) {
- Space* space = Heap::TargetSpace(obj);
- ASSERT(space == Heap::old_pointer_space() ||
- space == Heap::old_data_space());
- s = (space == Heap::old_pointer_space()) ?
- OLD_POINTER_SPACE :
- OLD_DATA_SPACE;
+ if (size > Heap::MaxObjectSizeInPagedSpace()) {
+ s = LO_SPACE;
+ } else {
+ OldSpace* space = Heap::TargetSpace(obj);
+ ASSERT(space == Heap::old_pointer_space() ||
+ space == Heap::old_data_space());
+ s = (space == Heap::old_pointer_space()) ?
+ OLD_POINTER_SPACE :
+ OLD_DATA_SPACE;
+ }
}
- int size = obj->Size();
GCTreatment gc_treatment = DataObject;
if (obj->IsFixedArray()) gc_treatment = PointerObject;
else if (obj->IsCode()) gc_treatment = CodeObject;
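
Editor's note: the serializer hunk above computes the object size first and routes new-space objects that exceed the paged-space limit into the large object space instead of falling through to the old spaces. A compact sketch of that decision; the limit and the pointer/data split are stand-ins for Heap::MaxObjectSizeInPagedSpace() and Heap::TargetSpace():

enum SpaceTag { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE, LO_SPACE };

const int kMaxObjectSizeInPagedSpace = 8 * 1024;  // Illustrative value only.

// Where should a new-space object be promoted to when serialized?
// Oversized objects go to the large object space; everything else to the
// old space picked by the pointer-vs-data target-space logic.
SpaceTag TargetSpaceFor(int size, bool has_pointers) {
  if (size > kMaxObjectSizeInPagedSpace) return LO_SPACE;
  return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
}
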
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index a62b0a8d3e..0538c5f366 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -1041,7 +1041,6 @@ class SemiSpaceIterator : public ObjectIterator {
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
- ASSERT_OBJECT_SIZE(size);
current_ += size;
return object;
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index df1f393e01..3d8a11b225 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -370,10 +370,10 @@ function addCaptureString(builder, matchInfo, index) {
// 'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
// should be 'abcd' and not 'dddd' (or anything else).
function StringReplaceRegExpWithFunction(subject, regexp, replace) {
- var result = new ReplaceResultBuilder(subject);
var lastMatchInfo = DoRegExpExec(regexp, subject, 0);
if (IS_NULL(lastMatchInfo)) return subject;
+ var result = new ReplaceResultBuilder(subject);
// There's at least one match. If the regexp is global, we have to loop
// over all matches. The loop is not in C++ code here like the one in
// RegExp.prototype.exec, because of the interleaved function application.
@@ -498,10 +498,8 @@ function StringSlice(start, end) {
// ECMA-262 section 15.5.4.14
function StringSplit(separator, limit) {
var subject = ToString(this);
- var result = [];
- var lim = (limit === void 0) ? 0xffffffff : ToUint32(limit);
-
- if (lim === 0) return result;
+ limit = (limit === void 0) ? 0xffffffff : ToUint32(limit);
+ if (limit === 0) return [];
// ECMA-262 says that if separator is undefined, the result should
// be an array of size 1 containing the entire string. SpiderMonkey
@@ -509,28 +507,31 @@ function StringSplit(separator, limit) {
// undefined is explicitly given, they convert it to a string and
// use that. We do as SpiderMonkey and KJS.
if (%_ArgumentsLength() === 0) {
- result[result.length] = subject;
- return result;
+ return [subject];
}
var length = subject.length;
- var currentIndex = 0;
- var startIndex = 0;
-
- var sep;
if (IS_REGEXP(separator)) {
- sep = separator;
- %_Log('regexp', 'regexp-split,%0S,%1r', [subject, sep]);
+ %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
} else {
- sep = ToString(separator);
+ separator = ToString(separator);
+ // If the separator string is empty then return the characters of the subject.
+ if (separator.length == 0) {
+ var result = $Array(length);
+ for (var i = 0; i < length; i++) result[i] = subject[i];
+ return result;
+ }
}
if (length === 0) {
- if (splitMatch(sep, subject, 0, 0) != null) return result;
- result[result.length] = subject;
- return result;
+ if (splitMatch(separator, subject, 0, 0) != null) return [];
+ return [subject];
}
+ var currentIndex = 0;
+ var startIndex = 0;
+ var result = [];
+
while (true) {
if (startIndex === length) {
@@ -538,7 +539,7 @@ function StringSplit(separator, limit) {
return result;
}
- var lastMatchInfo = splitMatch(sep, subject, currentIndex, startIndex);
+ var lastMatchInfo = splitMatch(separator, subject, currentIndex, startIndex);
if (IS_NULL(lastMatchInfo)) {
result[result.length] = subject.slice(currentIndex, length);
@@ -553,21 +554,18 @@ function StringSplit(separator, limit) {
continue;
}
- result[result.length] =
- SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
- if (result.length === lim) return result;
+ result[result.length] = SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
+ if (result.length === limit) return result;
for (var i = 2; i < NUMBER_OF_CAPTURES(lastMatchInfo); i += 2) {
var start = lastMatchInfo[CAPTURE(i)];
var end = lastMatchInfo[CAPTURE(i + 1)];
if (start != -1 && end != -1) {
- result[result.length] = SubString(subject,
- lastMatchInfo[CAPTURE(i)],
- lastMatchInfo[CAPTURE(i + 1)]);
+ result[result.length] = SubString(subject, start, end);
} else {
result[result.length] = void 0;
}
- if (result.length === lim) return result;
+ if (result.length === limit) return result;
}
startIndex = currentIndex = endIndex;
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index 42a2b7edfb..96d4a01e71 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -611,6 +611,11 @@ Failure* Top::ReThrow(Object* exception, MessageLocation* location) {
}
+Failure* Top::ThrowIllegalOperation() {
+ return Throw(Heap::illegal_access_symbol());
+}
+
+
void Top::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h
index 53d67e555f..25242f7ccf 100644
--- a/deps/v8/src/top.h
+++ b/deps/v8/src/top.h
@@ -239,6 +239,7 @@ class Top {
static Failure* ReThrow(Object* exception, MessageLocation* location = NULL);
static void ScheduleThrow(Object* exception);
static void ReportPendingMessages();
+ static Failure* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
static Object* PromoteScheduledException();
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 4e906df929..2cfce3d198 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -71,6 +71,7 @@
#include "objects-inl.h"
#include "spaces-inl.h"
#include "heap-inl.h"
+#include "log-inl.h"
#include "messages.h"
namespace v8 {
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 6c9f82f080..d9a78a5e7d 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -140,12 +140,12 @@ Variable::Variable(Scope* scope,
Handle<String> name,
Mode mode,
bool is_valid_LHS,
- bool is_this)
+ Kind kind)
: scope_(scope),
name_(name),
mode_(mode),
is_valid_LHS_(is_valid_LHS),
- is_this_(is_this),
+ kind_(kind),
local_if_not_shadowed_(NULL),
is_accessed_from_inner_scope_(false),
rewrite_(NULL) {
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 50620718cf..c0d14356fb 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -137,6 +137,12 @@ class Variable: public ZoneObject {
// in a context
};
+ enum Kind {
+ NORMAL,
+ THIS,
+ ARGUMENTS
+ };
+
// Printing support
static const char* Mode2String(Mode mode);
@@ -172,7 +178,8 @@ class Variable: public ZoneObject {
}
bool is_global() const;
- bool is_this() const { return is_this_; }
+ bool is_this() const { return kind_ == THIS; }
+ bool is_arguments() const { return kind_ == ARGUMENTS; }
Variable* local_if_not_shadowed() const {
ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
@@ -190,13 +197,13 @@ class Variable: public ZoneObject {
private:
Variable(Scope* scope, Handle<String> name, Mode mode, bool is_valid_LHS,
- bool is_this);
+ Kind kind);
Scope* scope_;
Handle<String> name_;
Mode mode_;
bool is_valid_LHS_;
- bool is_this_;
+ Kind kind_;
Variable* local_if_not_shadowed_;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index c10906022a..306249ae6c 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 2
-#define BUILD_NUMBER 8
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 10
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/deps/v8/src/virtual-frame.cc b/deps/v8/src/virtual-frame.cc
index 39dbf17350..44e5fae447 100644
--- a/deps/v8/src/virtual-frame.cc
+++ b/deps/v8/src/virtual-frame.cc
@@ -73,7 +73,6 @@ FrameElement VirtualFrame::CopyElementAt(int index) {
case FrameElement::MEMORY: // Fall through.
case FrameElement::REGISTER:
// All copies are backed by memory or register locations.
- result.set_static_type(target.static_type());
result.set_type(FrameElement::COPY);
result.clear_copied();
result.clear_sync();
@@ -153,7 +152,6 @@ void VirtualFrame::SpillElementAt(int index) {
if (elements_[index].is_register()) {
Unuse(elements_[index].reg());
}
- new_element.set_static_type(elements_[index].static_type());
elements_[index] = new_element;
}
@@ -211,9 +209,6 @@ void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
ASSERT(source.is_valid());
elements_[i].clear_sync();
}
- // No code needs to be generated to change the static type of an
- // element.
- elements_[i].set_static_type(target.static_type());
}
}
@@ -246,11 +241,8 @@ void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
void VirtualFrame::PrepareForReturn() {
// Spill all locals. This is necessary to make sure all locals have
// the right value when breaking at the return site in the debugger.
- // Set their static type to unknown so that they will match the known
- // return frame.
for (int i = 0; i < expression_base_index(); i++) {
SpillElementAt(i);
- elements_[i].set_static_type(StaticType::unknown());
}
}
@@ -283,7 +275,6 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
// register element, or the new element at frame_index, must be made
// a copy.
int i = register_location(value->reg());
- ASSERT(value->static_type() == elements_[i].static_type());
if (i < frame_index) {
// The register FrameElement is lower in the frame than the new copy.
@@ -310,8 +301,7 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
Use(value->reg(), frame_index);
elements_[frame_index] =
FrameElement::RegisterElement(value->reg(),
- FrameElement::NOT_SYNCED,
- value->static_type());
+ FrameElement::NOT_SYNCED);
}
} else {
ASSERT(value->is_constant());
@@ -328,18 +318,16 @@ void VirtualFrame::PushFrameSlotAt(int index) {
}
-void VirtualFrame::Push(Register reg, StaticType static_type) {
+void VirtualFrame::Push(Register reg) {
if (is_used(reg)) {
int index = register_location(reg);
FrameElement element = CopyElementAt(index);
- ASSERT(static_type.merge(element.static_type()) == element.static_type());
elements_.Add(element);
} else {
Use(reg, element_count());
FrameElement element =
FrameElement::RegisterElement(reg,
- FrameElement::NOT_SYNCED,
- static_type);
+ FrameElement::NOT_SYNCED);
elements_.Add(element);
}
}
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index ec27983cc1..196f2eedbd 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -29,6 +29,7 @@
#define V8_X64_ASSEMBLER_X64_INL_H_
#include "cpu.h"
+#include "memory.h"
namespace v8 {
namespace internal {
@@ -70,18 +71,28 @@ void Assembler::emitw(uint16_t x) {
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
+ emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
+}
+
+
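+// As emit_rex_64(Register, Register), but the reg operand is an XMM register;
+// its high bit still goes into REX.R and rm_reg's high bit into REX.B.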
+void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
void Assembler::emit_rex_64(Register reg, const Operand& op) {
+ emit(0x48 | reg.high_bit() << 2 | op.rex_);
+}
+
+
+void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
}
void Assembler::emit_rex_64(Register rm_reg) {
ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
- emit(0x48 | (rm_reg.code() >> 3));
+ emit(0x48 | rm_reg.high_bit());
}
@@ -91,17 +102,17 @@ void Assembler::emit_rex_64(const Operand& op) {
void Assembler::emit_rex_32(Register reg, Register rm_reg) {
- emit(0x40 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+ emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}
void Assembler::emit_rex_32(Register reg, const Operand& op) {
- emit(0x40 | (reg.code() & 0x8) >> 1 | op.rex_);
+ emit(0x40 | reg.high_bit() << 2 | op.rex_);
}
void Assembler::emit_rex_32(Register rm_reg) {
- emit(0x40 | (rm_reg.code() & 0x8) >> 3);
+ emit(0x40 | rm_reg.high_bit());
}
@@ -111,19 +122,37 @@ void Assembler::emit_rex_32(const Operand& op) {
void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3;
+ byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
+ byte rex_bits = reg.high_bit() << 2 | op.rex_;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
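+// Optional REX prefix for an XMM reg operand with a memory operand; only
+// emitted when an extended (r8-r15 / xmm8-xmm15) register is involved.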
+void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
+void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
void Assembler::emit_optional_rex_32(Register rm_reg) {
- if (rm_reg.code() & 0x8 != 0) emit(0x41);
+ if (rm_reg.high_bit()) emit(0x41);
}
@@ -147,11 +176,8 @@ void Assembler::set_target_address_at(Address pc, Address target) {
// Implementation of RelocInfo
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(int delta) {
- if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
- intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
- *p -= delta; // relocate entry
- } else if (IsInternalReference(rmode_)) {
+void RelocInfo::apply(intptr_t delta) {
+ if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
*p += delta; // relocate entry
@@ -244,11 +270,11 @@ Object** RelocInfo::call_object_address() {
// -----------------------------------------------------------------------------
// Implementation of Operand
-void Operand::set_modrm(int mod, Register rm) {
- ASSERT((mod & -4) == 0);
- buf_[0] = (mod << 6) | (rm.code() & 0x7);
+void Operand::set_modrm(int mod, Register rm_reg) {
+ ASSERT(is_uint2(mod));
+ buf_[0] = mod << 6 | rm_reg.low_bits();
// Set REX.B to the high bit of rm.code().
- rex_ |= (rm.code() >> 3);
+ rex_ |= rm_reg.high_bit();
}
@@ -258,8 +284,8 @@ void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
// Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
- buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7);
- rex_ |= (index.code() >> 3) << 1 | base.code() >> 3;
+ buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
+ rex_ |= index.high_bit() << 1 | base.high_bit();
len_ = 2;
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index cc64471356..7da6a8f666 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -77,7 +77,7 @@ Operand::Operand(Register base, int32_t disp): rex_(0) {
len_ = 1;
if (base.is(rsp) || base.is(r12)) {
// SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(kTimes1, rsp, base);
+ set_sib(times_1, rsp, base);
}
if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
@@ -113,20 +113,20 @@ Operand::Operand(Register base,
}
-// Safe default is no features.
-// TODO(X64): Safe defaults include SSE2 for X64.
-uint64_t CpuFeatures::supported_ = 0;
+// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
+// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
+uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
uint64_t CpuFeatures::enabled_ = 0;
void CpuFeatures::Probe() {
ASSERT(Heap::HasBeenSetup());
- ASSERT(supported_ == 0);
+ ASSERT(supported_ == kDefaultCpuFeatures);
if (Serializer::enabled()) return; // No features if we might serialize.
Assembler assm(NULL, 0);
Label cpuid, done;
#define __ assm.
- // Save old esp, since we are going to modify the stack.
+ // Save old rsp, since we are going to modify the stack.
__ push(rbp);
__ pushfq();
__ push(rcx);
@@ -154,11 +154,11 @@ void CpuFeatures::Probe() {
// safe here.
__ bind(&cpuid);
__ movq(rax, Immediate(1));
- supported_ = (1 << CPUID);
+ supported_ = kDefaultCpuFeatures | (1 << CPUID);
{ Scope fscope(CPUID);
__ cpuid();
}
- supported_ = 0;
+ supported_ = kDefaultCpuFeatures;
// Move the result from ecx:edx to rax and make sure to mark the
// CPUID feature as supported.
@@ -187,6 +187,10 @@ void CpuFeatures::Probe() {
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
+ // SSE2 and CMOV must be available on an X64 CPU.
+ ASSERT(IsSupported(CPUID));
+ ASSERT(IsSupported(SSE2));
+ ASSERT(IsSupported(CMOV));
}
// -----------------------------------------------------------------------------
@@ -341,8 +345,9 @@ void Assembler::GrowBuffer() {
#endif
// copy the data
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ intptr_t pc_delta = desc.buffer - buffer_;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(rc_delta + reloc_info_writer.pos(),
reloc_info_writer.pos(), desc.reloc_size);
@@ -365,11 +370,8 @@ void Assembler::GrowBuffer() {
// relocate runtime entries
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::RUNTIME_ENTRY) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- *p -= pc_delta; // relocate entry
- } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ intptr_t* p = reinterpret_cast<intptr_t*>(it.rinfo()->pc());
if (*p != 0) { // 0 means uninitialized.
*p += pc_delta;
}
@@ -380,13 +382,14 @@ void Assembler::GrowBuffer() {
}
-void Assembler::emit_operand(int rm, const Operand& adr) {
- ASSERT_EQ(rm & 0x07, rm);
+void Assembler::emit_operand(int code, const Operand& adr) {
+ ASSERT(is_uint3(code));
const unsigned length = adr.len_;
ASSERT(length > 0);
// Emit updated ModR/M byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (rm << 3);
+ ASSERT((adr.buf_[0] & 0x38) == 0);
+ pc_[0] = adr.buf_[0] | code << 3;
// Emit the rest of the encoded operand.
for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
@@ -413,6 +416,16 @@ void Assembler::arithmetic_op(byte opcode, Register dst, Register src) {
emit_modrm(dst, src);
}
+
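+// As arithmetic_op, but without the REX.W prefix, so the operation is done
+// on 32-bit operands.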
+void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(opcode);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
Immediate src) {
@@ -452,6 +465,27 @@ void Assembler::immediate_arithmetic_op(byte subcode,
void Assembler::immediate_arithmetic_op_32(byte subcode,
+ Register dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);
+ emit_modrm(subcode, dst);
+ emit(src.value_);
+ } else if (dst.is(rax)) {
+ emit(0x05 | (subcode << 3));
+ emitl(src.value_);
+ } else {
+ emit(0x81);
+ emit_modrm(subcode, dst);
+ emitl(src.value_);
+ }
+}
+
+
+void Assembler::immediate_arithmetic_op_32(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
@@ -470,18 +504,34 @@ void Assembler::immediate_arithmetic_op_32(byte subcode,
void Assembler::immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src) {
+ const Operand& dst,
+ Immediate src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
- ASSERT(is_int8(src.value_));
+ ASSERT(is_int8(src.value_) || is_uint8(src.value_));
emit(0x80);
emit_operand(subcode, dst);
emit(src.value_);
}
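+// Byte-sized immediate arithmetic (opcode 0x80 /subcode ib) on a register;
+// registers with code above 3 need a REX prefix to make their low byte
+// addressable.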
+void Assembler::immediate_arithmetic_op_8(byte subcode,
+ Register dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (dst.code() > 3) {
+ // Use 64-bit mode byte registers.
+ emit_rex_64(dst);
+ }
+ ASSERT(is_int8(src.value_) || is_uint8(src.value_));
+ emit(0x80);
+ emit_modrm(subcode, dst);
+ emit(src.value_);
+}
+
+
void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -508,6 +558,15 @@ void Assembler::shift(Register dst, int subcode) {
}
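+// As shift(Register, int), but without REX.W: shifts the low 32 bits of dst
+// by the count in cl.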
+void Assembler::shift_32(Register dst, int subcode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xD3);
+ emit_modrm(subcode, dst);
+}
+
+
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -553,7 +612,7 @@ void Assembler::call(Register adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: FF /2 r64
- if (adr.code() > 7) {
+ if (adr.high_bit()) {
emit_rex_64(adr);
}
emit(0xFF);
@@ -571,6 +630,57 @@ void Assembler::call(const Operand& op) {
}
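+// Conditional moves: dst = src only if condition cc holds. cmovq is the
+// 64-bit (REX.W) form, cmovl the 32-bit form.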
+void Assembler::cmovq(Condition cc, Register dst, Register src) {
+  // No need to check CpuFeatures for CMOV support; it's a required part of the
+ // 64-bit architecture.
+ ASSERT(cc >= 0); // Use mov for unconditional moves.
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: REX.W 0f 40 + cc /r
+ emit_rex_64(dst, src);
+ emit(0x0f);
+ emit(0x40 + cc);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+ ASSERT(cc >= 0);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: REX.W 0f 40 + cc /r
+ emit_rex_64(dst, src);
+ emit(0x0f);
+ emit(0x40 + cc);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cmovl(Condition cc, Register dst, Register src) {
+ ASSERT(cc >= 0);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: 0f 40 + cc /r
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x40 + cc);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+ ASSERT(cc >= 0);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: 0f 40 + cc /r
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x40 + cc);
+ emit_operand(dst, src);
+}
+
+
+
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
EnsureSpace ensure_space(this);
@@ -640,6 +750,16 @@ void Assembler::idiv(Register src) {
}
+void Assembler::imul(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xAF);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::imul(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -666,6 +786,16 @@ void Assembler::imul(Register dst, Register src, Immediate imm) {
}
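+// 32-bit two-operand signed multiply (0F AF /r without REX.W).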
+void Assembler::imull(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xAF);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::incq(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -703,7 +833,7 @@ void Assembler::int3() {
void Assembler::j(Condition cc, Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
+ ASSERT(is_uint4(cc));
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
@@ -771,7 +901,7 @@ void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode FF/4 r64
- if (target.code() > 7) {
+ if (target.high_bit()) {
emit_rex_64(target);
}
emit(0xFF);
@@ -922,7 +1052,7 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
- emit(0xB8 | (dst.code() & 0x7));
+ emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(value), rmode);
}
@@ -931,7 +1061,7 @@ void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
- emit(0xB8 | (dst.code() & 0x7)); // Not a ModR/M byte.
+ emit(0xB8 | dst.low_bits());
emitq(value, rmode);
}
@@ -940,7 +1070,7 @@ void Assembler::movq(Register dst, ExternalReference ref) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
- emit(0xB8 | (dst.code() & 0x7));
+ emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(ref.address()),
RelocInfo::EXTERNAL_REFERENCE);
}
@@ -961,7 +1091,7 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
last_pc_ = pc_;
ASSERT(!Heap::InNewSpace(*value));
emit_rex_64(dst);
- emit(0xB8 | dst.code() & 0x7);
+ emit(0xB8 | dst.low_bits());
if (value->IsHeapObject()) {
emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
} else {
@@ -971,6 +1101,34 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
}
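+// movsxd (REX.W 63 /r): sign-extend a 32-bit source into a 64-bit register.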
+void Assembler::movsxlq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x63);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::movsxlq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x63);
+ emit_operand(dst, src);
+}
+
+
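+// movzx (REX.W 0F B6 /r): zero-extend a byte from memory into a 64-bit
+// register.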
+void Assembler::movzxbq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xB6);
+ emit_operand(dst, src);
+}
+
+
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1113,10 +1271,10 @@ void Assembler::nop(int n) {
void Assembler::pop(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- if (dst.code() > 7) {
+ if (dst.high_bit()) {
emit_rex_64(dst);
}
- emit(0x58 | (dst.code() & 0x7));
+ emit(0x58 | dst.low_bits());
}
@@ -1139,10 +1297,10 @@ void Assembler::popfq() {
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- if (src.code() > 7) {
+ if (src.high_bit()) {
emit_rex_64(src);
}
- emit(0x50 | (src.code() & 0x7));
+ emit(0x50 | src.low_bits());
}
@@ -1216,7 +1374,7 @@ void Assembler::ret(int imm16) {
void Assembler::setcc(Condition cc, Register reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
+ ASSERT(is_uint4(cc));
if (reg.code() > 3) { // Use x64 byte registers, where different.
emit_rex_32(reg);
}
@@ -1252,7 +1410,7 @@ void Assembler::xchg(Register dst, Register src) {
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
emit_rex_64(other);
- emit(0x90 | (other.code() & 0x7));
+ emit(0x90 | other.low_bits());
} else {
emit_rex_64(src, dst);
emit(0x87);
@@ -1364,6 +1522,493 @@ void Assembler::testq(Register dst, Immediate mask) {
}
+// FPU instructions
+
+
+void Assembler::fld(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xD9, 0xC0, i);
+}
+
+
+void Assembler::fld1() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xE8);
+}
+
+
+void Assembler::fldz() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xEE);
+}
+
+
+void Assembler::fld_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit_operand(0, adr);
+}
+
+
+void Assembler::fld_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDD);
+ emit_operand(0, adr);
+}
+
+
+void Assembler::fstp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit_operand(3, adr);
+}
+
+
+void Assembler::fstp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDD);
+ emit_operand(3, adr);
+}
+
+
+void Assembler::fild_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit_operand(0, adr);
+}
+
+
+void Assembler::fild_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDF);
+ emit_operand(5, adr);
+}
+
+
+void Assembler::fistp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit_operand(3, adr);
+}
+
+
+void Assembler::fisttp_s(const Operand& adr) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit_operand(1, adr);
+}
+
+
+void Assembler::fist_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit_operand(2, adr);
+}
+
+
+void Assembler::fistp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDF);
+ emit_operand(8, adr);
+}
+
+
+void Assembler::fabs() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xE1);
+}
+
+
+void Assembler::fchs() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xE0);
+}
+
+
+void Assembler::fcos() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xFF);
+}
+
+
+void Assembler::fsin() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xFE);
+}
+
+
+void Assembler::fadd(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xC0, i);
+}
+
+
+void Assembler::fsub(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xE8, i);
+}
+
+
+void Assembler::fisub_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDA);
+ emit_operand(4, adr);
+}
+
+
+void Assembler::fmul(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xC8, i);
+}
+
+
+void Assembler::fdiv(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xF8, i);
+}
+
+
+void Assembler::faddp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xC0, i);
+}
+
+
+void Assembler::fsubp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xE8, i);
+}
+
+
+void Assembler::fsubrp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xE0, i);
+}
+
+
+void Assembler::fmulp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xC8, i);
+}
+
+
+void Assembler::fdivp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xF8, i);
+}
+
+
+void Assembler::fprem() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xF8);
+}
+
+
+void Assembler::fprem1() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xF5);
+}
+
+
+void Assembler::fxch(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xD9, 0xC8, i);
+}
+
+
+void Assembler::fincstp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xF7);
+}
+
+
+void Assembler::ffree(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDD, 0xC0, i);
+}
+
+
+void Assembler::ftst() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xE4);
+}
+
+
+void Assembler::fucomp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDD, 0xE8, i);
+}
+
+
+void Assembler::fucompp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDA);
+ emit(0xE9);
+}
+
+
+void Assembler::fcompp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDE);
+ emit(0xD9);
+}
+
+
+void Assembler::fnstsw_ax() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDF);
+ emit(0xE0);
+}
+
+
+void Assembler::fwait() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x9B);
+}
+
+
+void Assembler::frndint() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xFC);
+}
+
+
+void Assembler::fnclex() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit(0xE2);
+}
+
+
+void Assembler::sahf() {
+  // TODO(X64): Test for presence. Not all 64-bit Intel CPUs have sahf
+ // in 64-bit mode. Test CpuID.
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x9E);
+}
+
+
+void Assembler::emit_farith(int b1, int b2, int i) {
+ ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
+ ASSERT(is_uint3(i)); // illegal stack offset
+ emit(b1);
+ emit(b2 + i);
+}
+
+// SSE 2 operations
+
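+// movsd moves a single double: F2 0F 11 is the store form, F2 0F 10 the
+// load form.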
+void Assembler::movsd(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2); // double
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11); // store
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2); // double
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11); // store
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(XMMRegister dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2); // double
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10); // load
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2); // double
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10); // load
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvttss2si(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cvttsd2si(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_operand(dst, src);
+}
+
+
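+// cvtsi2sd (F2 0F 2A) converts a signed integer to a double; the l/q suffix
+// selects a 32-bit or 64-bit (REX.W) integer source.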
+void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+
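+// XMM register codes use the same ModR/M field encoding as general registers,
+// so an XMM code can be wrapped in a Register and routed through emit_operand.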
+void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+ Register ireg = { reg.code() };
+ emit_operand(ireg, adr);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
+ emit(0xC0 | (dst.code() << 3) | src.code());
+}
+
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+ emit(0xC0 | (dst.code() << 3) | src.code());
+}
+
+
// Relocation information implementations
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
@@ -1427,9 +2072,7 @@ void Assembler::WriteRecordedPositions() {
}
-const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
} } // namespace v8::internal
@@ -1463,18 +2106,6 @@ const int RelocInfo::kApplyMask =
namespace v8 {
namespace internal {
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* a) {
- UNIMPLEMENTED();
-}
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* a) {
- UNIMPLEMENTED();
-}
-
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* a) {
- UNIMPLEMENTED();
-}
-
void BreakLocationIterator::ClearDebugBreakAtReturn() {
UNIMPLEMENTED();
@@ -1489,109 +2120,4 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
UNIMPLEMENTED();
}
-void CallIC::Generate(MacroAssembler* a, int b, ExternalReference const& c) {
- UNIMPLEMENTED();
-}
-
-void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) {
- UNIMPLEMENTED();
-}
-
-void CallIC::GenerateNormal(MacroAssembler* a, int b) {
- UNIMPLEMENTED();
-}
-
-Object* CallStubCompiler::CompileCallConstant(Object* a,
- JSObject* b,
- JSFunction* c,
- StubCompiler::CheckType d,
- Code::Flags flags) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* CallStubCompiler::CompileCallField(Object* a,
- JSObject* b,
- int c,
- String* d,
- Code::Flags flags) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* CallStubCompiler::CompileCallInterceptor(Object* a,
- JSObject* b,
- String* c) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-void JumpTarget::DoBind() {
- UNIMPLEMENTED();
-}
-
-void JumpTarget::DoBranch(Condition a, Hint b) {
- UNIMPLEMENTED();
-}
-
-void JumpTarget::DoJump() {
- UNIMPLEMENTED();
-}
-
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
- JSObject* b,
- AccessorInfo* c,
- String* d) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
- JSObject* b,
- Object* c,
- String* d) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* LoadStubCompiler::CompileLoadField(JSObject* a,
- JSObject* b,
- int c,
- String* d) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
- JSObject* b,
- String* c) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
- AccessorInfo* b,
- String* c) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* StoreStubCompiler::CompileStoreField(JSObject* a,
- int b,
- Map* c,
- String* d) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-Object* StubCompiler::CompileLazyCompile(Code::Flags a) {
- UNIMPLEMENTED();
- return NULL;
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 650c218f11..0d98e5fe4a 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -45,7 +45,7 @@ namespace internal {
// Test whether a 64-bit value is in a specific range.
static inline bool is_uint32(int64_t x) {
const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
- return x == x & kUInt32Mask;
+ return x == (x & kUInt32Mask);
}
static inline bool is_int32(int64_t x) {
@@ -82,8 +82,6 @@ struct Register {
}
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(Register reg) const { return code_ == reg.code_; }
- // The byte-register distinction of ai32 has dissapeared.
- bool is_byte_register() const { return false; }
int code() const {
ASSERT(is_valid());
return code_;
@@ -92,6 +90,17 @@ struct Register {
return 1 << code_;
}
+ // Return the high bit of the register code as a 0 or 1. Used often
+ // when constructing the REX prefix byte.
+ int high_bit() const {
+ return code_ >> 3;
+ }
+ // Return the 3 low bits of the register code. Used when encoding registers
+ // in modR/M, SIB, and opcode bytes.
+ int low_bits() const {
+ return code_ & 0x7;
+ }
+
// (unfortunately we can't make this private in a struct when initializing
// by assignment.)
int code_;
@@ -115,7 +124,8 @@ extern Register r14;
extern Register r15;
extern Register no_reg;
-struct XMMRegister {
+
+struct MMXRegister {
bool is_valid() const { return 0 <= code_ && code_ < 2; }
int code() const {
ASSERT(is_valid());
@@ -125,6 +135,34 @@ struct XMMRegister {
int code_;
};
+extern MMXRegister mm0;
+extern MMXRegister mm1;
+extern MMXRegister mm2;
+extern MMXRegister mm3;
+extern MMXRegister mm4;
+extern MMXRegister mm5;
+extern MMXRegister mm6;
+extern MMXRegister mm7;
+extern MMXRegister mm8;
+extern MMXRegister mm9;
+extern MMXRegister mm10;
+extern MMXRegister mm11;
+extern MMXRegister mm12;
+extern MMXRegister mm13;
+extern MMXRegister mm14;
+extern MMXRegister mm15;
+
+
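+// SSE registers xmm0..xmm15, all available in 64-bit mode.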
+struct XMMRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ int code_;
+};
+
extern XMMRegister xmm0;
extern XMMRegister xmm1;
extern XMMRegister xmm2;
@@ -238,12 +276,12 @@ class Immediate BASE_EMBEDDED {
// Machine instruction Operands
enum ScaleFactor {
- kTimes1 = 0,
- kTimes2 = 1,
- kTimes4 = 2,
- kTimes8 = 3,
- kTimesIntSize = kTimes4,
- kTimesPointerSize = kTimes8
+ times_1 = 0,
+ times_2 = 1,
+ times_4 = 2,
+ times_8 = 3,
+ times_int_size = times_4,
+ times_pointer_size = times_8
};
@@ -290,11 +328,11 @@ class Operand BASE_EMBEDDED {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
// Example:
-// if (CpuFeatures::IsSupported(SSE2)) {
-// CpuFeatures::Scope fscope(SSE2);
-// // Generate SSE2 floating point code.
+// if (CpuFeatures::IsSupported(SSE3)) {
+// CpuFeatures::Scope fscope(SSE3);
+// // Generate SSE3 floating point code.
// } else {
-// // Generate standard x87 floating point code.
+// // Generate standard x87 or SSE2 floating point code.
// }
class CpuFeatures : public AllStatic {
public:
@@ -331,6 +369,10 @@ class CpuFeatures : public AllStatic {
#endif
};
private:
+  // Safe defaults include SSE2 and CMOV for X64. They are always available
+  // if anyone checks, but code shouldn't need to check for them.
+ static const uint64_t kDefaultCpuFeatures =
+ (1 << CpuFeatures::SSE2 | 1 << CpuFeatures::CMOV);
static uint64_t supported_;
static uint64_t enabled_;
};
@@ -338,11 +380,15 @@ class CpuFeatures : public AllStatic {
class Assembler : public Malloced {
private:
- // The relocation writer's position is kGap bytes below the end of
+ // We check before assembling an instruction that there is sufficient
+ // space to write an instruction and its relocation information.
+ // The relocation writer's position must be kGap bytes above the end of
// the generated instructions. This leaves enough space for the
- // longest possible x64 instruction (There is a 15 byte limit on
- // instruction length, ruling out some otherwise valid instructions) and
- // allows for a single, fast space check per instruction.
+ // longest possible x64 instruction, 15 bytes, and the longest possible
+ // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+ // (There is a 15 byte limit on x64 instruction length that rules out some
+ // otherwise valid instructions.)
+ // This allows for a single, fast space check per instruction.
static const int kGap = 32;
public:
@@ -373,8 +419,8 @@ class Assembler : public Malloced {
static inline void set_target_address_at(Address pc, Address target);
// Distance between the address of the code target in the call instruction
- // and the return address
- static const int kTargetAddrToReturnAddrDist = kPointerSize;
+ // and the return address. Checked in the debug build.
+ static const int kTargetAddrToReturnAddrDist = 3 + kPointerSize;
// ---------------------------------------------------------------------------
@@ -446,12 +492,19 @@ class Assembler : public Malloced {
void movq(Register dst, ExternalReference ext);
void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+ void movsxlq(Register dst, Register src);
+ void movsxlq(Register dst, const Operand& src);
+ void movzxbq(Register dst, const Operand& src);
+
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
- // Conditional moves
- // Implement conditional moves here.
+ // Conditional moves.
+ void cmovq(Condition cc, Register dst, Register src);
+ void cmovq(Condition cc, Register dst, const Operand& src);
+ void cmovl(Condition cc, Register dst, Register src);
+ void cmovl(Condition cc, Register dst, const Operand& src);
// Exchange two registers
void xchg(Register dst, Register src);
@@ -461,6 +514,14 @@ class Assembler : public Malloced {
arithmetic_op(0x03, dst, src);
}
+ void addl(Register dst, Register src) {
+ arithmetic_op_32(0x03, dst, src);
+ }
+
+ void addl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+
void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}
@@ -482,6 +543,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x0, dst, src);
}
+ void cmpb(Register dst, Immediate src) {
+ immediate_arithmetic_op_8(0x7, dst, src);
+ }
+
void cmpb(const Operand& dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -502,6 +567,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x7, dst, src);
}
+ void cmpl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+
void cmpq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src);
}
@@ -540,6 +609,8 @@ class Assembler : public Malloced {
void imul(Register dst, const Operand& src);
// Performs the operation dst = src * imm.
void imul(Register dst, Register src, Immediate imm);
+ // Multiply 32 bit registers
+ void imull(Register dst, Register src);
void incq(Register dst);
void incq(const Operand& dst);
@@ -604,6 +675,10 @@ class Assembler : public Malloced {
shift(dst, 0x4);
}
+ void shll(Register dst) {
+ shift_32(dst, 0x4);
+ }
+
void shr(Register dst, Immediate shift_amount) {
shift(dst, shift_amount, 0x5);
}
@@ -612,6 +687,10 @@ class Assembler : public Malloced {
shift(dst, 0x5);
}
+ void shrl(Register dst) {
+ shift_32(dst, 0x5);
+ }
+
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
@@ -635,6 +714,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x5, dst, src);
}
+ void subl(Register dst, Register src) {
+ arithmetic_op_32(0x2B, dst, src);
+ }
+
void subl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x5, dst, src);
}
@@ -770,19 +853,37 @@ class Assembler : public Malloced {
void fwait();
void fnclex();
+ void fsin();
+ void fcos();
+
void frndint();
+ void sahf();
+
// SSE2 instructions
+ void movsd(const Operand& dst, XMMRegister src);
+  void movsd(Register dst, XMMRegister src);
+ void movsd(XMMRegister dst, Register src);
+  void movsd(XMMRegister dst, const Operand& src);
+
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
- void cvtsi2sd(XMMRegister dst, const Operand& src);
+ void cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void cvtlsi2sd(XMMRegister dst, Register src);
+ void cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void cvtqsi2sd(XMMRegister dst, Register src);
void addsd(XMMRegister dst, XMMRegister src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
+
+ void emit_sse_operand(XMMRegister dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister dst, Register src);
+
// Use either movsd or movlpd.
// void movdbl(XMMRegister dst, const Operand& src);
// void movdbl(const Operand& dst, XMMRegister src);
@@ -812,11 +913,6 @@ class Assembler : public Malloced {
// Used for inline tables, e.g., jump-tables.
// void dd(uint64_t data, RelocInfo::Mode reloc_info);
- // Writes the absolute address of a bound label at the given position in
- // the generated code. That positions should have the relocation mode
- // internal_reference!
- void WriteInternalReference(int position, const Label& bound_label);
-
int pc_offset() const { return pc_ - buffer_; }
int current_statement_position() const { return current_statement_position_; }
int current_position() const { return current_position_; }
@@ -866,6 +962,7 @@ class Assembler : public Malloced {
// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
// REX.W is set.
inline void emit_rex_64(Register reg, Register rm_reg);
+ inline void emit_rex_64(XMMRegister reg, Register rm_reg);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the destination, index, and base register codes.
@@ -873,6 +970,7 @@ class Assembler : public Malloced {
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is set.
inline void emit_rex_64(Register reg, const Operand& op);
+ inline void emit_rex_64(XMMRegister reg, const Operand& op);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the register code.
@@ -917,6 +1015,18 @@ class Assembler : public Malloced {
// is emitted.
inline void emit_optional_rex_32(Register reg, const Operand& op);
+ // As for emit_optional_rex_32(Register, Register), except that
+ // the registers are XMM registers.
+ inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
+
+ // As for emit_optional_rex_32(Register, Register), except that
+ // the registers are XMM registers.
+ inline void emit_optional_rex_32(XMMRegister reg, Register base);
+
+ // As for emit_optional_rex_32(Register, const Operand&), except that
+ // the register is an XMM register.
+ inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
+
// Optionally do as emit_rex_32(Register) if the register number has
// the high bit set.
inline void emit_optional_rex_32(Register rm_reg);
@@ -931,7 +1041,7 @@ class Assembler : public Malloced {
// the second operand of the operation, a register or operation
// subcode, into the reg field of the ModR/M byte.
void emit_operand(Register reg, const Operand& adr) {
- emit_operand(reg.code() & 0x07, adr);
+ emit_operand(reg.low_bits(), adr);
}
// Emit the ModR/M byte, and optionally the SIB byte and
@@ -941,14 +1051,14 @@ class Assembler : public Malloced {
// Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
void emit_modrm(Register reg, Register rm_reg) {
- emit(0xC0 | (reg.code() & 0x7) << 3 | (rm_reg.code() & 0x7));
+ emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
}
// Emit a ModR/M byte with an operation subcode in the reg field and
// a register in the rm_reg field.
void emit_modrm(int code, Register rm_reg) {
- ASSERT((code & ~0x7) == 0);
- emit(0xC0 | (code & 0x7) << 3 | (rm_reg.code() & 0x7));
+ ASSERT(is_uint3(code));
+ emit(0xC0 | code << 3 | rm_reg.low_bits());
}
// Emit the code-object-relative offset of the label's position
@@ -959,23 +1069,31 @@ class Assembler : public Malloced {
// similar, differing just in the opcode or in the reg field of the
// ModR/M byte.
void arithmetic_op(byte opcode, Register dst, Register src);
+ void arithmetic_op_32(byte opcode, Register dst, Register src);
void arithmetic_op(byte opcode, Register reg, const Operand& op);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
- // Operate on a 32-bit word in memory.
+ // Operate on a 32-bit word in memory or register.
void immediate_arithmetic_op_32(byte subcode,
const Operand& dst,
Immediate src);
- // Operate on a byte in memory.
- void immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
+ void immediate_arithmetic_op_32(byte subcode,
+ Register dst,
Immediate src);
+ // Operate on a byte in memory or register.
+ void immediate_arithmetic_op_8(byte subcode,
+ const Operand& dst,
+ Immediate src);
+ void immediate_arithmetic_op_8(byte subcode,
+ Register dst,
+ Immediate src);
// Emit machine code for a shift operation.
void shift(Register dst, Immediate shift_amount, int subcode);
// Shift dst by cl % 64 bits.
void shift(Register dst, int subcode);
+ void shift_32(Register dst, int subcode);
- // void emit_farith(int b1, int b2, int i);
+ void emit_farith(int b1, int b2, int i);
// labels
// void print(Label* L);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index eb9c43f788..459921cd40 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -34,11 +34,20 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- Builtins::CFunctionId id) {
- masm->int3(); // UNIMPLEMENTED.
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
+ // TODO(1238487): Don't pass the function in a static variable.
+ ExternalReference passed = ExternalReference::builtin_passed_function();
+ __ movq(kScratchRegister, passed.address(), RelocInfo::EXTERNAL_REFERENCE);
+ __ movq(Operand(kScratchRegister, 0), rdi);
+
+ // The actual argument count has already been loaded into register
+ // rax, but JumpToBuiltin expects rax to contain the number of
+ // arguments including the receiver.
+ __ incq(rax);
+ __ JumpToBuiltin(ExternalReference(id));
}
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(rbp);
__ movq(rbp, rsp);
@@ -50,10 +59,10 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(rdi);
// Preserve the number of arguments on the stack. Must preserve both
- // eax and ebx because these registers are used when copying the
+ // rax and rbx because these registers are used when copying the
// arguments and the receiver.
ASSERT(kSmiTagSize == 1);
- __ lea(rcx, Operand(rax, rax, kTimes1, kSmiTag));
+ __ lea(rcx, Operand(rax, rax, times_1, kSmiTag));
__ push(rcx);
}
@@ -71,7 +80,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
ASSERT_EQ(kSmiTagSize, 1 && kSmiTag == 0);
ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
__ pop(rcx);
- __ lea(rsp, Operand(rsp, rbx, kTimes4, 1 * kPointerSize)); // 1 ~ receiver
+ __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize)); // 1 ~ receiver
__ push(rcx);
}
@@ -98,7 +107,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, kTimesPointerSize, offset));
+ __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
__ movq(rcx, Immediate(-1)); // account for receiver
Label copy;
@@ -117,7 +126,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, kTimesPointerSize, offset));
+ __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ movq(rcx, Immediate(-1)); // account for receiver
Label copy;
@@ -159,18 +168,421 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ // Stack Layout:
+ // rsp: return address
+ // +1: Argument n
+ // +2: Argument n-1
+ // ...
+ // +n: Argument 1 = receiver
+ // +n+1: Argument 0 = function to call
+ //
+ // rax contains the number of arguments, n, not counting the function.
+ //
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ testq(rax, rax);
+ __ j(not_zero, &done);
+ __ pop(rbx);
+ __ Push(Factory::undefined_value());
+ __ push(rbx);
+ __ incq(rax);
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call from the stack.
+ { Label done, non_function, function;
+ // The function to call is at position n+1 on the stack.
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
+ __ testl(rdi, Immediate(kSmiTagMask));
+ __ j(zero, &non_function);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(equal, &function);
+
+ // Non-function called: Clear the function to force exception.
+ __ bind(&non_function);
+ __ xor_(rdi, rdi);
+ __ jmp(&done);
+
+ // Function called: Change context eagerly to get the right global object.
+ __ bind(&function);
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ __ bind(&done);
+ }
+
+ // 3. Make sure first argument is an object; convert if necessary.
+ { Label call_to_object, use_global_receiver, patch_receiver, done;
+ __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &call_to_object);
+
+ __ Cmp(rbx, Factory::null_value());
+ __ j(equal, &use_global_receiver);
+ __ Cmp(rbx, Factory::undefined_value());
+ __ j(equal, &use_global_receiver);
+
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &call_to_object);
+ __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+ __ j(below_equal, &done);
+
+ __ bind(&call_to_object);
+ __ EnterInternalFrame(); // preserves rax, rbx, rdi
+
+ // Store the arguments count on the stack (smi tagged).
+ ASSERT(kSmiTag == 0);
+ __ shl(rax, Immediate(kSmiTagSize));
+ __ push(rax);
+
+  __ push(rdi);  // save rdi across the call
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+  __ pop(rdi);  // restore rdi after the call
+
+ // Get the arguments count and untag it.
+ __ pop(rax);
+ __ shr(rax, Immediate(kSmiTagSize));
+
+ __ LeaveInternalFrame();
+ __ jmp(&patch_receiver);
+
+ // Use the global receiver object from the called function as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalIndex =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+
+ __ bind(&done);
+ }
+
+ // 4. Shift stuff one slot down the stack.
+ { Label loop;
+ __ lea(rcx, Operand(rax, +1)); // +1 ~ copy receiver too
+ __ bind(&loop);
+ __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+ __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
+ __ decq(rcx);
+ __ j(not_zero, &loop);
+ }
+
+ // 5. Remove TOS (copy of last arguments), but keep return address.
+ __ pop(rbx);
+ __ pop(rcx);
+ __ push(rbx);
+ __ decq(rax);
+
+ // 6. Check that function really was a function and get the code to
+ // call from the function and check that the number of expected
+ // arguments matches what we're providing.
+ { Label invoke, trampoline;
+ __ testq(rdi, rdi);
+ __ j(not_zero, &invoke);
+ __ xor_(rbx, rbx);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ __ bind(&trampoline);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&invoke);
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movsxlq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ cmpq(rax, rbx);
+ __ j(not_equal, &trampoline);
+ }
+
+  // 7. Jump (tail-call) to the code in register rdx without checking arguments.
+ ParameterCount expected(0);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
}
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ // Stack at entry:
+ // rsp: return address
+ // rsp+8: arguments
+ // rsp+16: receiver ("this")
+ // rsp+24: function
+ __ EnterInternalFrame();
+ // Stack frame:
+ // rbp: Old base pointer
+ // rbp[1]: return address
+ // rbp[2]: function arguments
+ // rbp[3]: receiver
+ // rbp[4]: function
+ static const int kArgumentsOffset = 2 * kPointerSize;
+ static const int kReceiverOffset = 3 * kPointerSize;
+ static const int kFunctionOffset = 4 * kPointerSize;
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(Operand(rbp, kArgumentsOffset));
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ if (FLAG_check_stack) {
+ // We need to catch preemptions right here, otherwise an unlucky preemption
+ // could show up as a failed apply.
+ Label retry_preemption;
+ Label no_preemption;
+ __ bind(&retry_preemption);
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ movq(kScratchRegister, stack_guard_limit);
+ __ movq(rcx, rsp);
+ __ subq(rcx, Operand(kScratchRegister, 0));
+ // rcx contains the difference between the stack limit and the stack top.
+ // We use it below to check that there is enough room for the arguments.
+ __ j(above, &no_preemption);
+
+ // Preemption!
+ // Because runtime functions always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack.
+ __ push(rax);
+ __ push(Immediate(Smi::FromInt(0)));
+
+ // Do call to runtime routine.
+ __ CallRuntime(Runtime::kStackGuard, 1);
+ __ pop(rax);
+ __ jmp(&retry_preemption);
+
+ __ bind(&no_preemption);
+
+ Label okay;
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ movq(rdx, rax);
+ __ shl(rdx, Immediate(kPointerSizeLog2 - kSmiTagSize));
+ __ cmpq(rcx, rdx);
+ __ j(greater, &okay);
+
+ // Too bad: Out of stack space.
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ }
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(rax); // limit
+ __ push(Immediate(0)); // index
+
+ // Change context eagerly to get the right global object if
+ // necessary.
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ movq(rbx, Operand(rbp, kReceiverOffset));
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &call_to_object);
+ __ Cmp(rbx, Factory::null_value());
+ __ j(equal, &use_global_receiver);
+ __ Cmp(rbx, Factory::undefined_value());
+ __ j(equal, &use_global_receiver);
+
+ // If given receiver is already a JavaScript object then there's no
+ // reason for converting it.
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(less, &call_to_object);
+ __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+ __ j(less_equal, &push_receiver);
+
+ // Convert the receiver to an object.
+ __ bind(&call_to_object);
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ jmp(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(rbx);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(rcx, Operand(rbp, kArgumentsOffset)); // load arguments
+ __ push(rcx);
+ __ push(rax);
+
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+
+ // Remove IC arguments from the stack and push the nth argument.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rax);
+
+ // Update the index on the stack and in register rax.
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ addq(rax, Immediate(Smi::FromInt(1)));
+ __ movq(Operand(rbp, kIndexOffset), rax);
+
+ __ bind(&entry);
+ __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ j(not_equal, &loop);
+
+ // Invoke the function.
+ ParameterCount actual(rax);
+ __ shr(rax, Immediate(kSmiTagSize));
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+
+ __ LeaveInternalFrame();
+ __ ret(3 * kPointerSize); // remove function, receiver, and arguments
}
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rax: number of arguments
+ // -- rdi: constructor function
+ // -----------------------------------
+
+ Label non_function_call;
+ // Check that function is not a smi.
+ __ testl(rdi, Immediate(kSmiTagMask));
+ __ j(zero, &non_function_call);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &non_function_call);
+
+ // Jump to the function-specific construct stub.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
+ __ jmp(rbx);
+
+  // rdi: called object
+  // rax: number of arguments
+ __ bind(&non_function_call);
+
+  // Set expected number of arguments to zero (not changing rax).
+ __ movq(rbx, Immediate(0));
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ // Enter a construct frame.
+ __ EnterConstructFrame();
+
+ // Store a smi-tagged arguments count on the stack.
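+  // (Informative note, not from the original source: with kSmiTag == 0 and
+  // kSmiTagSize == 1, a count of n is encoded as n << 1, i.e. 2 * n, which
+  // matches the ASSERT at the end of this stub.)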
+ __ shl(rax, Immediate(kSmiTagSize));
+ __ push(rax);
+
+ // Push the function to invoke on the stack.
+ __ push(rdi);
+
+  // Try to allocate the object without transitioning into C code. If any
+  // of the preconditions is not met, the code bails out to the runtime
+  // call.
+ Label rt_call, allocated;
+
+ // TODO(x64): Implement inlined allocation.
+
+ // Allocate the new receiver object using the runtime call.
+ // rdi: function (constructor)
+ __ bind(&rt_call);
+  // Must restore rdi (constructor) before calling runtime.
+ __ movq(rdi, Operand(rsp, 0));
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ movq(rbx, rax); // store result in rbx
+
+ // New object allocated.
+ // rbx: newly allocated object
+ __ bind(&allocated);
+ // Retrieve the function from the stack.
+ __ pop(rdi);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ movq(rax, Operand(rsp, 0));
+ __ shr(rax, Immediate(kSmiTagSize));
+
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(rbx);
+ __ push(rbx);
+
+  // Set up pointer to last argument.
+ __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
+
+ // Call the function.
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+
+ // Restore context from the frame.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
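+  // Illustrative JavaScript example (informative only, not generated code):
+  //   function F() { return {answer: 42}; }  // `new F()` yields that object.
+  //   function G() { return 42; }            // `new G()` yields the receiver.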
+ Label use_receiver, exit;
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(greater_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movq(rax, Operand(rsp, 0));
+
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count
+ __ LeaveConstructFrame();
+
+ // Remove caller arguments from the stack and return.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(rcx);
+ __ ret(0);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Expects five C++ function parameters.
@@ -254,7 +666,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ xor_(rcx, rcx); // Set loop variable to 0.
__ jmp(&entry);
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, kTimesPointerSize, 0));
+ __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
__ push(Operand(kScratchRegister, 0)); // dereference handle
__ addq(rcx, Immediate(1));
__ bind(&entry);
diff --git a/deps/v8/src/x64/codegen-x64-inl.h b/deps/v8/src/x64/codegen-x64-inl.h
index 733378de2e..6869fc9352 100644
--- a/deps/v8/src/x64/codegen-x64-inl.h
+++ b/deps/v8/src/x64/codegen-x64-inl.h
@@ -32,10 +32,12 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm_)
+
// Platform-specific inline functions.
-void DeferredCode::Jump() { UNIMPLEMENTED(); }
-void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); }
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
@@ -47,6 +49,7 @@ void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
GenerateFastMathOp(COS, args);
}
+#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index dc32227b44..fc196ce790 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -25,23 +25,54 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(X64): Remove stdio.h when compiler test is removed.
+#include <stdio.h>
#include "v8.h"
-#include "macro-assembler.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "ic-inl.h"
+#include "parser.h"
#include "register-allocator-inl.h"
-#include "codegen.h"
-// TEST
+#include "scopes.h"
+
+// TODO(X64): Remove compiler.h when compiler test is removed.
#include "compiler.h"
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm_)
+
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
-void DeferredCode::SaveRegisters() { UNIMPLEMENTED(); }
+void DeferredCode::SaveRegisters() {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ push(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+ __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
+ }
+ }
+}
+
+void DeferredCode::RestoreRegisters() {
+  // Restore registers in the reverse order they were pushed on the stack.
+ for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ pop(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore) {
+ action &= ~kSyncedFlag;
+ __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
+ }
+ }
+}
-void DeferredCode::RestoreRegisters() { UNIMPLEMENTED(); }
// -------------------------------------------------------------------------
// CodeGenState implementation.
@@ -91,17 +122,63 @@ CodeGenerator::CodeGenerator(int buffer_size,
in_spilled_code_(false) {
}
-#define __ ACCESS_MASM(masm_)
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals. The inevitable call
+ // will sync frame elements to memory anyway, so we do it eagerly to
+ // allow us to push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
- UNIMPLEMENTED();
+ __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(rsi); // The context is the second argument.
+ frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
}
+
void CodeGenerator::TestCodeGenerator() {
// Compile a function from a string, and run it.
+
+ // Set flags appropriately for this stage of implementation.
+ // TODO(X64): Make ic work, and stop disabling them.
+ // These settings stick - remove them when we don't want them anymore.
+#ifdef DEBUG
+ FLAG_print_builtin_source = true;
+ FLAG_print_builtin_ast = true;
+#endif
+ FLAG_use_ic = false;
+
+ // Read the file "test.js" from the current directory, compile, and run it.
+ // If the file is not there, use a simple script embedded here instead.
+ Handle<String> test_script;
+ FILE* file = fopen("test.js", "rb");
+ if (file == NULL) {
+ test_script = Factory::NewStringFromAscii(CStrVector(
+ "// Put all code in anonymous function to avoid global scope.\n"
+ "(function(){"
+ " var x = true ? 47 : 32;"
+ " return x;"
+ "})()"));
+ } else {
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size;) {
+ int read = fread(&chars[i], 1, size - i, file);
+ i += read;
+ }
+ fclose(file);
+ test_script = Factory::NewStringFromAscii(CStrVector(chars));
+ delete[] chars;
+ }
+
Handle<JSFunction> test_function = Compiler::Compile(
- Factory::NewStringFromAscii(CStrVector("42")),
+ test_script,
Factory::NewStringFromAscii(CStrVector("CodeGeneratorTestScript")),
0,
0,
@@ -128,15 +205,18 @@ void CodeGenerator::TestCodeGenerator() {
0,
NULL,
&pending_exceptions);
- CHECK(result->IsSmi());
- CHECK_EQ(42, Smi::cast(*result)->value());
+ // Function compiles and runs, but returns a JSFunction object.
+#ifdef DEBUG
+ PrintF("Result of test function: ");
+ result->Print();
+#endif
}
void CodeGenerator::GenCode(FunctionLiteral* function) {
// Record the position for debugging purposes.
CodeForFunctionPosition(function);
- // ZoneList<Statement*>* body = fun->body();
+ ZoneList<Statement*>* body = function->body();
// Initialize state.
ASSERT(scope_ == NULL);
@@ -169,19 +249,173 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
// Entry:
// Stack: receiver, arguments, return address.
- // ebp: caller's frame pointer
- // esp: stack pointer
- // edi: called JS function
- // esi: callee's context
+ // rbp: caller's frame pointer
+ // rsp: stack pointer
+ // rdi: called JS function
+ // rsi: callee's context
allocator_->Initialize();
frame_->Enter();
- Result return_register = allocator_->Allocate(rax);
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
+ // Allocate the local context if needed.
+ if (scope_->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+
+ // Update context local.
+ frame_->SaveContextRegister();
+
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+ __ Assert(equal, "Runtime::NewContext should end up in rsi");
+ }
+ }
+
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
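+    // Illustrative example (informative only): in JavaScript,
+    //   (function (x, y, x) { return x; })(1, 2, 3)
+    // returns 3, i.e. the last argument bound to the repeated name wins.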
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope_->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ movq(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ }
+ }
+ }
+
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
+
+ // Generate code to 'execute' declarations and initialize functions
+ // (source elements). In case of an illegal redeclaration we need to
+ // handle that instead of processing the declarations.
+ if (scope_->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ illegal redeclarations");
+ scope_->VisitIllegalRedeclaration(this);
+ } else {
+ Comment cmnt(masm_, "[ declarations");
+ ProcessDeclarations(scope_->declarations());
+ // Bail out if a stack-overflow exception occurred when processing
+ // declarations.
+ if (HasStackOverflow()) return;
+ }
+
+ if (FLAG_trace) {
+ frame_->CallRuntime(Runtime::kTraceEnter, 0);
+ // Ignore the return value.
+ }
+ CheckStack();
+
+ // Compile the body of the function in a vanilla state. Don't
+ // bother compiling all the code if the scope has an illegal
+ // redeclaration.
+ if (!scope_->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+ bool is_builtin = Bootstrapper::IsActive();
+ bool should_trace =
+ is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+ if (should_trace) {
+ frame_->CallRuntime(Runtime::kDebugTrace, 0);
+ // Ignore the return value.
+ }
+#endif
+ VisitStatements(body);
+
+ // Handle the return from the function.
+ if (has_valid_frame()) {
+ // If there is a valid frame, control flow can fall off the end of
+ // the body. In that case there is an implicit return statement.
+ ASSERT(!function_return_is_shadowed_);
+ CodeForReturnPosition(function);
+ frame_->PrepareForReturn();
+ Result undefined(Factory::undefined_value());
+ if (function_return_.is_bound()) {
+ function_return_.Jump(&undefined);
+ } else {
+ function_return_.Bind(&undefined);
+ GenerateReturnSequence(&undefined);
+ }
+ } else if (function_return_.is_linked()) {
+ // If the return target has dangling jumps to it, then we have not
+ // yet generated the return sequence. This can happen when (a)
+ // control does not flow off the end of the body so we did not
+ // compile an artificial return statement just above, and (b) there
+ // are return statements in the body but (c) they are all shadowed.
+ Result return_value;
+ function_return_.Bind(&return_value);
+ GenerateReturnSequence(&return_value);
+ }
+ }
+ }
+
+ // Adjust for function-level loop nesting.
+ loop_nesting_ -= function->loop_nesting();
- __ movq(return_register.reg(), Immediate(0x54)); // Smi 42
+ // Code generation state must be reset.
+ ASSERT(state_ == NULL);
+ ASSERT(loop_nesting() == 0);
+ ASSERT(!function_return_is_shadowed_);
+ function_return_.Unuse();
+ DeleteFrame();
- GenerateReturnSequence(&return_register);
+ // Process any deferred code using the register allocator.
+ if (!HasStackOverflow()) {
+ HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
+ JumpTarget::set_compiling_deferred_code(true);
+ ProcessDeferred();
+ JumpTarget::set_compiling_deferred_code(false);
}
+
+ // There is no need to delete the register allocator, it is a
+ // stack-allocated local.
+ allocator_ = NULL;
+ scope_ = NULL;
}
void CodeGenerator::GenerateReturnSequence(Result* return_value) {
@@ -191,7 +425,7 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// all registers).
if (FLAG_trace) {
frame_->Push(return_value);
- // *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+ *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
}
return_value->ToRegister(rax);
@@ -205,6 +439,8 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
DeleteFrame();
+ // TODO(x64): introduce kX64JSReturnSequenceLength and enable assert.
+
// Check that the size of the code used for returning matches what is
// expected by the debugger.
// ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
@@ -221,195 +457,2962 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
UNIMPLEMENTED();
}
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* a) {
- UNIMPLEMENTED();
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+ return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
+ && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
+ && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
+ && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
+ && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
+ && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
+ && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
+ && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
+ && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
+ && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
+ && (allocator()->count(r13) == (frame()->is_used(r13) ? 1 : 0))
+ && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
+#endif
-void CodeGenerator::VisitBlock(Block* a) {
- UNIMPLEMENTED();
+
+class DeferredStackCheck: public DeferredCode {
+ public:
+ DeferredStackCheck() {
+ set_comment("[ DeferredStackCheck");
+ }
+
+ virtual void Generate();
+};
+
+
+void DeferredStackCheck::Generate() {
+ StackCheckStub stub;
+ __ CallStub(&stub);
}
-void CodeGenerator::VisitDeclaration(Declaration* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::CheckStack() {
+ if (FLAG_check_stack) {
+ DeferredStackCheck* deferred = new DeferredStackCheck;
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ movq(kScratchRegister, stack_guard_limit);
+ __ cmpq(rsp, Operand(kScratchRegister, 0));
+ deferred->Branch(below);
+ deferred->BindExit();
+ }
}
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* a) {
- UNIMPLEMENTED();
+
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#ifdef DEBUG
+ void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+ // TODO(X64): No architecture specific code. Move to shared location.
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Visit(statement);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
}
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ VisitStatements(statements);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
}
-void CodeGenerator::VisitIfStatement(IfStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+ ASSERT(!in_spilled_code());
+ for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
+ Visit(statements->at(i));
+ }
}
-void CodeGenerator::VisitContinueStatement(ContinueStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitBlock(Block* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ Block");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ VisitStatements(node->statements());
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ node->break_target()->Unuse();
}
-void CodeGenerator::VisitBreakStatement(BreakStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+ Comment cmnt(masm_, "[ Declaration");
+ CodeForStatementPosition(node);
+ Variable* var = node->proxy()->var();
+ ASSERT(var != NULL); // must have been resolved
+ Slot* slot = var->slot();
+
+ // If it was not possible to allocate the variable at compile time,
+ // we need to "declare" it at runtime to make sure it actually
+ // exists in the local context.
+ if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Variables with a "LOOKUP" slot were introduced as non-locals
+ // during variable resolution and must have mode DYNAMIC.
+ ASSERT(var->is_dynamic());
+ // For now, just do a runtime call. Sync the virtual frame eagerly
+ // so we can simply push the arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+ PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+ frame_->EmitPush(Immediate(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (node->mode() == Variable::CONST) {
+ __ movq(kScratchRegister, Factory::the_hole_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ } else if (node->fun() != NULL) {
+ Load(node->fun());
+ } else {
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
+ }
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
+ // Ignore the return value (declarations are statements).
+ return;
+ }
+
+ ASSERT(!var->is_global());
+
+ // If we have a function or a constant, we need to initialize the variable.
+ Expression* val = NULL;
+ if (node->mode() == Variable::CONST) {
+ val = new Literal(Factory::the_hole_value());
+ } else {
+ val = node->fun(); // NULL if we don't have a function
+ }
+
+ if (val != NULL) {
+ {
+ // Set the initial value.
+ Reference target(this, node->proxy());
+ Load(val);
+ target.SetValue(NOT_CONST_INIT);
+ // The reference is removed from the stack (preserving TOS) when
+ // it goes out of scope.
+ }
+ // Get rid of the assigned value (declarations are statements).
+ frame_->Drop();
+ }
}
-void CodeGenerator::VisitReturnStatement(ReturnStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ CodeForStatementPosition(node);
+ Expression* expression = node->expression();
+ expression->MarkAsStatement();
+ Load(expression);
+ // Remove the lingering expression result from the top of stack.
+ frame_->Drop();
}
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "// EmptyStatement");
+ CodeForStatementPosition(node);
+ // nothing to do
}
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ IfStatement");
+ // Generate different code depending on which parts of the if statement
+ // are present or not.
+ bool has_then_stm = node->HasThenStatement();
+ bool has_else_stm = node->HasElseStatement();
+
+ CodeForStatementPosition(node);
+ JumpTarget exit;
+ if (has_then_stm && has_else_stm) {
+ JumpTarget then;
+ JumpTarget else_;
+ ControlDestination dest(&then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The else target was bound, so we compile the else part first.
+ Visit(node->else_statement());
+
+ // We may have dangling jumps to the then part.
+ if (then.is_linked()) {
+ if (has_valid_frame()) exit.Jump();
+ then.Bind();
+ Visit(node->then_statement());
+ }
+ } else {
+ // The then target was bound, so we compile the then part first.
+ Visit(node->then_statement());
+
+ if (else_.is_linked()) {
+ if (has_valid_frame()) exit.Jump();
+ else_.Bind();
+ Visit(node->else_statement());
+ }
+ }
+
+ } else if (has_then_stm) {
+ ASSERT(!has_else_stm);
+  // state in rcx, then jump around the unlink blocks if any.
+ ControlDestination dest(&then, &exit, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The exit label was bound. We may have dangling jumps to the
+ // then part.
+ if (then.is_linked()) {
+ exit.Unuse();
+ exit.Jump();
+ then.Bind();
+ Visit(node->then_statement());
+ }
+ } else {
+ // The then label was bound.
+ Visit(node->then_statement());
+ }
+
+ } else if (has_else_stm) {
+ ASSERT(!has_then_stm);
+ JumpTarget else_;
+ ControlDestination dest(&exit, &else_, false);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.true_was_fall_through()) {
+ // The exit label was bound. We may have dangling jumps to the
+ // else part.
+ if (else_.is_linked()) {
+ exit.Unuse();
+ exit.Jump();
+ else_.Bind();
+ Visit(node->else_statement());
+ }
+ } else {
+ // The else label was bound.
+ Visit(node->else_statement());
+ }
+
+ } else {
+ ASSERT(!has_then_stm && !has_else_stm);
+ // We only care about the condition's side effects (not its value
+ // or control flow effect). LoadCondition is called without
+ // forcing control flow.
+ ControlDestination dest(&exit, &exit, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+ if (!dest.is_used()) {
+ // We got a value on the frame rather than (or in addition to)
+ // control flow.
+ frame_->Drop();
+ }
+ }
+
+ if (exit.is_linked()) {
+ exit.Bind();
+ }
}
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ContinueStatement");
+ CodeForStatementPosition(node);
+ node->target()->continue_target()->Jump();
}
-void CodeGenerator::VisitLoopStatement(LoopStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ BreakStatement");
+ CodeForStatementPosition(node);
+ node->target()->break_target()->Jump();
}
-void CodeGenerator::VisitForInStatement(ForInStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ReturnStatement");
+
+ CodeForStatementPosition(node);
+ Load(node->expression());
+ Result return_value = frame_->Pop();
+ if (function_return_is_shadowed_) {
+ function_return_.Jump(&return_value);
+ } else {
+ frame_->PrepareForReturn();
+ if (function_return_.is_bound()) {
+ // If the function return label is already bound we reuse the
+ // code by jumping to the return site.
+ function_return_.Jump(&return_value);
+ } else {
+ function_return_.Bind(&return_value);
+ GenerateReturnSequence(&return_value);
+ }
+ }
}
-void CodeGenerator::VisitTryCatch(TryCatch* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ WithEnterStatement");
+ CodeForStatementPosition(node);
+ Load(node->expression());
+ Result context;
+ if (node->is_catch_block()) {
+ context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kPushContext, 1);
+ }
+
+ // Update context local.
+ frame_->SaveContextRegister();
+
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+    __ Assert(equal, "Runtime::PushContext should end up in rsi");
+ }
}
-void CodeGenerator::VisitTryFinally(TryFinally* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ WithExitStatement");
+ CodeForStatementPosition(node);
+ // Pop context.
+ __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
+ // Update context local.
+ frame_->SaveContextRegister();
}
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+ // TODO(X64): This code is completely generic and should be moved somewhere
+ // where it can be shared between architectures.
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ SwitchStatement");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ // Compile the switch value.
+ Load(node->tag());
+
+ ZoneList<CaseClause*>* cases = node->cases();
+ int length = cases->length();
+ CaseClause* default_clause = NULL;
+
+ JumpTarget next_test;
+ // Compile the case label expressions and comparisons. Exit early
+ // if a comparison is unconditionally true. The target next_test is
+ // bound before the loop in order to indicate control flow to the
+ // first comparison.
+ next_test.Bind();
+ for (int i = 0; i < length && !next_test.is_unused(); i++) {
+ CaseClause* clause = cases->at(i);
+ // The default is not a test, but remember it for later.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ // We recycle the same target next_test for each test. Bind it if
+ // the previous test has not done so and then unuse it for the
+ // loop.
+ if (next_test.is_linked()) {
+ next_test.Bind();
+ }
+ next_test.Unuse();
+
+ // Duplicate the switch value.
+ frame_->Dup();
+
+ // Compile the label expression.
+ Load(clause->label());
+
+ // Compare and branch to the body if true or the next test if
+ // false. Prefer the next test as a fall through.
+ ControlDestination dest(clause->body_target(), &next_test, false);
+ Comparison(equal, true, &dest);
+
+ // If the comparison fell through to the true target, jump to the
+ // actual body.
+ if (dest.true_was_fall_through()) {
+ clause->body_target()->Unuse();
+ clause->body_target()->Jump();
+ }
+ }
+
+ // If there was control flow to a next test from the last one
+ // compiled, compile a jump to the default or break target.
+ if (!next_test.is_unused()) {
+ if (next_test.is_linked()) {
+ next_test.Bind();
+ }
+ // Drop the switch value.
+ frame_->Drop();
+ if (default_clause != NULL) {
+ default_clause->body_target()->Jump();
+ } else {
+ node->break_target()->Jump();
+ }
+ }
+
+ // The last instruction emitted was a jump, either to the default
+ // clause or the break target, or else to a case body from the loop
+ // that compiles the tests.
+ ASSERT(!has_valid_frame());
+ // Compile case bodies as needed.
+ for (int i = 0; i < length; i++) {
+ CaseClause* clause = cases->at(i);
+
+ // There are two ways to reach the body: from the corresponding
+ // test or as the fall through of the previous body.
+ if (clause->body_target()->is_linked() || has_valid_frame()) {
+ if (clause->body_target()->is_linked()) {
+ if (has_valid_frame()) {
+ // If we have both a jump to the test and a fall through, put
+ // a jump on the fall through path to avoid the dropping of
+ // the switch value on the test path. The exception is the
+ // default which has already had the switch value dropped.
+ if (clause->is_default()) {
+ clause->body_target()->Bind();
+ } else {
+ JumpTarget body;
+ body.Jump();
+ clause->body_target()->Bind();
+ frame_->Drop();
+ body.Bind();
+ }
+ } else {
+ // No fall through to worry about.
+ clause->body_target()->Bind();
+ if (!clause->is_default()) {
+ frame_->Drop();
+ }
+ }
+ } else {
+ // Otherwise, we have only fall through.
+ ASSERT(has_valid_frame());
+ }
+
+ // We are now prepared to compile the body.
+ Comment cmnt(masm_, "[ Case body");
+ VisitStatements(clause->statements());
+ }
+ clause->body_target()->Unuse();
+ }
+
+ // We may not have a valid frame here so bind the break target only
+ // if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ node->break_target()->Unuse();
}
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ LoopStatement");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
+ // known result for the test expression, with no side effects.
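+  // For example (informative): `while (true)` and a `for` with no condition
+  // are ALWAYS_TRUE, `do { ... } while (false)` is ALWAYS_FALSE, and a
+  // non-literal condition such as `while (x < 10)` is DONT_KNOW.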
+ enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+ if (node->cond() == NULL) {
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ info = ALWAYS_TRUE;
+ } else {
+ Literal* lit = node->cond()->AsLiteral();
+ if (lit != NULL) {
+ if (lit->IsTrue()) {
+ info = ALWAYS_TRUE;
+ } else if (lit->IsFalse()) {
+ info = ALWAYS_FALSE;
+ }
+ }
+ }
+
+ switch (node->type()) {
+ case LoopStatement::DO_LOOP: {
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
+ IncrementLoopNesting();
+
+ // Label the top of the loop for the backward jump if necessary.
+ if (info == ALWAYS_TRUE) {
+ // Use the continue target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else if (info == ALWAYS_FALSE) {
+ // No need to label it.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else {
+ // Continue is the test, so use the backward body target.
+ ASSERT(info == DONT_KNOW);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ body.Bind();
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // Compile the test.
+ if (info == ALWAYS_TRUE) {
+ // If control flow can fall off the end of the body, jump back
+ // to the top and bind the break target at the exit.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+
+ } else if (info == ALWAYS_FALSE) {
+ // We may have had continues or breaks in the body.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+
+ } else {
+ ASSERT(info == DONT_KNOW);
+ // We have to compile the test expression if it can be reached by
+ // control flow falling out of the body or via continue.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ }
+ break;
+ }
+
+ case LoopStatement::WHILE_LOOP: {
+ // Do not duplicate conditions that may have function literal
+ // subexpressions. This can cause us to compile the function
+ // literal twice.
+ bool test_at_bottom = !node->may_have_function_literal();
+
+ IncrementLoopNesting();
+
+ // If the condition is always false and has no side effects, we
+ // do not need to compile anything.
+ if (info == ALWAYS_FALSE) break;
+
+ JumpTarget body;
+ if (test_at_bottom) {
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
+ }
+
+ // Based on the condition analysis, compile the test as necessary.
+ if (info == ALWAYS_TRUE) {
+ // We will not compile the test expression. Label the top of
+ // the loop with the continue target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ if (test_at_bottom) {
+ // Continue is the test at the bottom, no need to label the
+ // test at the top. The body is a backward target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else {
+ // Label the test at the top as the continue target. The
+ // body is a forward-only target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ }
+ // Compile the test with the body as the true target and
+ // preferred fall-through and with the break target as the
+ // false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // If we got the break target as fall-through, the test may
+ // have been unconditionally false (if there are no jumps to
+ // the body).
+ if (!body.is_linked()) break;
+
+ // Otherwise, jump around the body on the fall through and
+ // then bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // Based on the condition analysis, compile the backward jump as
+ // necessary.
+ if (info == ALWAYS_TRUE) {
+ // The loop body has been labeled with the continue target.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ if (test_at_bottom) {
+ // If we have chosen to recompile the test at the bottom,
+ // then it is the continue target.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ // The break target is the fall-through (body is a backward
+ // jump from here and thus an invalid fall-through).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ } else {
+ // If we have chosen not to recompile the test at the
+ // bottom, jump back to the one at the top.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ }
+ }
+
+ // The break target may be already bound (by the condition), or
+ // there may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ break;
+ }
+
+ case LoopStatement::FOR_LOOP: {
+ // Do not duplicate conditions that may have function literal
+ // subexpressions. This can cause us to compile the function
+ // literal twice.
+ bool test_at_bottom = !node->may_have_function_literal();
+
+ // Compile the init expression if present.
+ if (node->init() != NULL) {
+ Visit(node->init());
+ }
+
+ IncrementLoopNesting();
+
+ // If the condition is always false and has no side effects, we
+ // do not need to compile anything else.
+ if (info == ALWAYS_FALSE) break;
+
+ // Target for backward edge if no test at the bottom, otherwise
+ // unused.
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+ // Target for backward edge if there is a test at the bottom,
+ // otherwise used as target for test at the top.
+ JumpTarget body;
+ if (test_at_bottom) {
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
+ }
+
+ // Based on the condition analysis, compile the test as necessary.
+ if (info == ALWAYS_TRUE) {
+ // We will not compile the test expression. Label the top of
+ // the loop.
+ if (node->next() == NULL) {
+ // Use the continue target if there is no update expression.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ // Otherwise use the backward loop target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
+ } else {
+ ASSERT(info == DONT_KNOW);
+ if (test_at_bottom) {
+ // Continue is either the update expression or the test at
+ // the bottom, no need to label the test at the top.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else if (node->next() == NULL) {
+ // We are not recompiling the test at the bottom and there
+ // is no update expression.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ // We are not recompiling the test at the bottom and there
+ // is an update expression.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
+
+ // Compile the test with the body as the true target and
+ // preferred fall-through and with the break target as the
+ // false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // If we got the break target as fall-through, the test may
+ // have been unconditionally false (if there are no jumps to
+ // the body).
+ if (!body.is_linked()) break;
+
+ // Otherwise, jump around the body on the fall through and
+ // then bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // If there is an update expression, compile it if necessary.
+ if (node->next() != NULL) {
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+
+ // Control can reach the update by falling out of the body or
+ // by a continue.
+ if (has_valid_frame()) {
+ // Record the source position of the statement as this code
+ // which is after the code for the body actually belongs to
+ // the loop statement and not the body.
+ CodeForStatementPosition(node);
+ Visit(node->next());
+ }
+ }
+
+ // Based on the condition analysis, compile the backward jump as
+ // necessary.
+ if (info == ALWAYS_TRUE) {
+ if (has_valid_frame()) {
+ if (node->next() == NULL) {
+ node->continue_target()->Jump();
+ } else {
+ loop.Jump();
+ }
+ }
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ if (test_at_bottom) {
+ if (node->continue_target()->is_linked()) {
+ // We can have dangling jumps to the continue target if
+ // there was no update expression.
+ node->continue_target()->Bind();
+ }
+ // Control can reach the test at the bottom by falling out
+ // of the body, by a continue in the body, or from the
+ // update expression.
+ if (has_valid_frame()) {
+ // The break target is the fall-through (body is a
+ // backward jump from here).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ } else {
+ // Otherwise, jump back to the test at the top.
+ if (has_valid_frame()) {
+ if (node->next() == NULL) {
+ node->continue_target()->Jump();
+ } else {
+ loop.Jump();
+ }
+ }
+ }
+ }
+
+ // The break target may be already bound (by the condition), or
+ // there may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ break;
+ }
+ }
+
+ DecrementLoopNesting();
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ForInStatement");
+ CodeForStatementPosition(node);
+
+ JumpTarget primitive;
+ JumpTarget jsobject;
+ JumpTarget fixed_array;
+ JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check;
+ JumpTarget exit;
+
+ // Get the object to enumerate over (converted to JSObject).
+ LoadAndSpill(node->enumerable());
+
+ // Both SpiderMonkey and kjs ignore null and undefined in contrast
+ // to the specification. 12.6.4 mandates a call to ToObject.
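+  // Informative example: neither `for (var p in null) body` nor
+  // `for (var p in undefined) body` executes the body, so we exit early
+  // below instead of calling ToObject.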
+ frame_->EmitPop(rax);
+
+ // rax: value to be iterated over
+ __ Cmp(rax, Factory::undefined_value());
+ exit.Branch(equal);
+ __ Cmp(rax, Factory::null_value());
+ exit.Branch(equal);
+
+ // Stack layout in body:
+ // [iteration counter (smi)] <- slot 0
+ // [length of array] <- slot 1
+ // [FixedArray] <- slot 2
+ // [Map or 0] <- slot 3
+ // [Object] <- slot 4
+
+ // Check if enumerable is already a JSObject
+ // rax: value to be iterated over
+ __ testl(rax, Immediate(kSmiTagMask));
+ primitive.Branch(zero);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ jsobject.Branch(above_equal);
+
+ primitive.Bind();
+ frame_->EmitPush(rax);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
+ // function call returns the value in rax, which is where we want it below
+
+ jsobject.Bind();
+ // Get the set of properties (as a FixedArray or Map).
+ // rax: value to be iterated over
+ frame_->EmitPush(rax); // push the object being iterated over (slot 4)
+
+ frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
+ frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a Map, we can do a fast modification check.
+ // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // rax: map or fixed array (result from call to
+ // Runtime::kGetPropertyNamesFast)
+ __ movq(rdx, rax);
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ Cmp(rcx, Factory::meta_map());
+ fixed_array.Branch(not_equal);
+
+ // Get enum cache
+ // rax: map (result from call to Runtime::kGetPropertyNamesFast)
+ __ movq(rcx, rax);
+ __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
+ // Get the bridge array held in the enumeration index field.
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
+ // Get the cache from the bridge array.
+ __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ frame_->EmitPush(rax); // <- slot 3
+ frame_->EmitPush(rdx); // <- slot 2
+ __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ shl(rax, Immediate(kSmiTagSize));
+ frame_->EmitPush(rax); // <- slot 1
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
+ entry.Jump();
+
+ fixed_array.Bind();
+ // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
+ frame_->EmitPush(rax); // <- slot 2
+
+ // Push the length of the array and the initial index onto the stack.
+ __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ shl(rax, Immediate(kSmiTagSize));
+ frame_->EmitPush(rax); // <- slot 1
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
+
+ // Condition.
+ entry.Bind();
+ // Grab the current frame's height for the break and continue
+ // targets only after all the state is pushed on the frame.
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ __ movq(rax, frame_->ElementAt(0)); // load the current count
+ __ cmpq(rax, frame_->ElementAt(1)); // compare to the array length
+ node->break_target()->Branch(above_equal);
+
+ // Get the i'th entry of the array.
+ __ movq(rdx, frame_->ElementAt(2));
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ // Multiplier is times_4 since rax is already a Smi.
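+  // (Informative note: the smi index is the untagged index shifted left by
+  // one, so scaling it by 4 yields index * 8, i.e. index * kPointerSize.)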
+ __ movq(rbx, Operand(rdx, rax, times_4,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the expected map from the stack or a zero map in the
+ // permanent slow case rax: current iteration count rbx: i'th entry
+ // of the enum cache
+ __ movq(rdx, frame_->ElementAt(3));
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we have to filter the key.
+ // rax: current iteration count
+ // rbx: i'th entry of the enum cache
+ // rdx: expected map value
+ __ movq(rcx, frame_->ElementAt(4));
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpq(rcx, rdx);
+ end_del_check.Branch(equal);
+
+ // Convert the entry to a string (or null if it isn't a property anymore).
+ frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
+ frame_->EmitPush(rbx); // push entry
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
+ __ movq(rbx, rax);
+
+ // If the property has been removed while iterating, we just skip it.
+ __ Cmp(rbx, Factory::null_value());
+ node->continue_target()->Branch(equal);
+
+ end_del_check.Bind();
+ // Store the entry in the 'each' expression and take another spin in the
+  // loop. rbx: i'th entry of the enum cache (or string thereof)
+ frame_->EmitPush(rbx);
+ { Reference each(this, node->each());
+ // Loading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
+ if (!each.is_illegal()) {
+ if (each.size() > 0) {
+ frame_->EmitPush(frame_->ElementAt(each.size()));
+ }
+ // If the reference was to a slot we rely on the convenient property
+      // that it doesn't matter whether a value (eg, rbx pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ if (each.size() > 0) {
+ // It's safe to pop the value lying on top of the reference before
+ // unloading the reference itself (which preserves the top of stack,
+ // ie, now the topmost value of the non-zero sized reference), since
+ // we will discard the top of stack after unloading the reference
+ // anyway.
+ frame_->Drop();
+ }
+ }
+ }
+ // Unloading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
+
+ // Discard the i'th entry pushed above or else the remainder of the
+ // reference, whichever is currently on top of the stack.
+ frame_->Drop();
+
+ // Body.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
+
+ // Next. Reestablish a spilled frame in case we are coming here via
+ // a continue in the body.
+ node->continue_target()->Bind();
+ frame_->SpillAll();
+ frame_->EmitPop(rax);
+ __ addq(rax, Immediate(Smi::FromInt(1)));
+ frame_->EmitPush(rax);
+ entry.Jump();
+
+ // Cleanup. No need to spill because VirtualFrame::Drop is safe for
+ // any frame.
+ node->break_target()->Bind();
+ frame_->Drop(5);
+
+ // Exit.
+ exit.Bind();
+
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
+}
+
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ TryCatch");
+ CodeForStatementPosition(node);
+
+ JumpTarget try_block;
+ JumpTarget exit;
+
+ try_block.Call();
+ // --- Catch block ---
+ frame_->EmitPush(rax);
+
+ // Store the caught exception in the catch variable.
+ { Reference ref(this, node->catch_var());
+ ASSERT(ref.is_slot());
+ // Load the exception to the top of the stack. Here we make use of the
+ // convenient property that it doesn't matter whether a value is
+ // immediately on top of or underneath a zero-sized reference.
+ ref.SetValue(NOT_CONST_INIT);
+ }
+
+ // Remove the exception from the stack.
+ frame_->Drop();
+
+ VisitStatementsAndSpill(node->catch_block()->statements());
+ if (has_valid_frame()) {
+ exit.Jump();
+ }
+
+
+ // --- Try block ---
+ try_block.Bind();
+
+ frame_->PushTryHandler(TRY_CATCH_HANDLER);
+ int handler_height = frame_->height();
+
+ // Shadow the jump targets for all escapes from the try block, including
+ // returns. During shadowing, the original target is hidden as the
+ // ShadowTarget and operations on the original actually affect the
+ // shadowing target.
+ //
+ // We should probably try to unify the escaping targets and the return
+ // target.
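+  // Informative example: in `try { return x; } catch (e) { }` the `return`
+  // escapes the try block, so the function return target is shadowed while
+  // compiling the try body and the handler is unlinked before returning.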
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ VisitStatementsAndSpill(node->try_block()->statements());
+
+ // Stop the introduced shadowing and count the number of required unlinks.
+ // After shadowing stops, the original targets are unshadowed and the
+ // ShadowTargets represent the formerly shadowing targets.
+ bool has_unlinks = false;
+ for (int i = 0; i < shadows.length(); i++) {
+ shadows[i]->StopShadowing();
+ has_unlinks = has_unlinks || shadows[i]->is_linked();
+ }
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
+
+ // Make sure that there's nothing left on the stack above the
+ // handler structure.
+ if (FLAG_debug_code) {
+ __ movq(kScratchRegister, handler_address);
+ __ cmpq(rsp, Operand(kScratchRegister, 0));
+ __ Assert(equal, "stack pointer should point to top handler");
+ }
+
+ // If we can fall off the end of the try block, unlink from try chain.
+ if (has_valid_frame()) {
+ // The next handler address is on top of the frame. Unlink from
+ // the handler list and drop the rest of this handler from the
+ // frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+ if (has_unlinks) {
+ exit.Jump();
+ }
+ }
+
+ // Generate unlink code for the (formerly) shadowing targets that
+ // have been jumped to. Deallocate each shadow target.
+ Result return_value;
+ for (int i = 0; i < shadows.length(); i++) {
+ if (shadows[i]->is_linked()) {
+ // Unlink from try chain; be careful not to destroy the TOS if
+ // there is one.
+ if (i == kReturnShadowIndex) {
+ shadows[i]->Bind(&return_value);
+ return_value.ToRegister(rax);
+ } else {
+ shadows[i]->Bind();
+ }
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ frame_->SpillAll();
+
+ // Reload sp from the top handler, because some statements that we
+ // break from (eg, for...in) may have left stuff on the stack.
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ frame_->Forget(frame_->height() - handler_height);
+
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+ if (i == kReturnShadowIndex) {
+ if (!function_return_is_shadowed_) frame_->PrepareForReturn();
+ shadows[i]->other_target()->Jump(&return_value);
+ } else {
+ shadows[i]->other_target()->Jump();
+ }
+ }
+ }
+
+ exit.Bind();
+}
+
+
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ TryFinally");
+ CodeForStatementPosition(node);
+
+  // State: Used to keep track of the reason for entering the finally
+  // block. Should probably be extended to hold information for
+  // break/continue from within the try block.
+ enum { FALLING, THROWING, JUMPING };
+
+ JumpTarget try_block;
+ JumpTarget finally_block;
+
+ try_block.Call();
+
+ frame_->EmitPush(rax);
+ // In case of thrown exceptions, this is where we continue.
+ __ movq(rcx, Immediate(Smi::FromInt(THROWING)));
+ finally_block.Jump();
+
+ // --- Try block ---
+ try_block.Bind();
+
+ frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+ int handler_height = frame_->height();
+
+ // Shadow the jump targets for all escapes from the try block, including
+ // returns. During shadowing, the original target is hidden as the
+ // ShadowTarget and operations on the original actually affect the
+ // shadowing target.
+ //
+ // We should probably try to unify the escaping targets and the return
+ // target.
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ VisitStatementsAndSpill(node->try_block()->statements());
+
+ // Stop the introduced shadowing and count the number of required unlinks.
+ // After shadowing stops, the original targets are unshadowed and the
+ // ShadowTargets represent the formerly shadowing targets.
+ int nof_unlinks = 0;
+ for (int i = 0; i < shadows.length(); i++) {
+ shadows[i]->StopShadowing();
+ if (shadows[i]->is_linked()) nof_unlinks++;
+ }
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
+
+ // If we can fall off the end of the try block, unlink from the try
+ // chain and set the state on the frame to FALLING.
+ if (has_valid_frame()) {
+ // The next handler address is on top of the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+ // Fake a top of stack value (unneeded when FALLING) and set the
+ // state in rcx, then jump around the unlink blocks if any.
+ __ movq(kScratchRegister,
+ Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ __ movq(rcx, Immediate(Smi::FromInt(FALLING)));
+ if (nof_unlinks > 0) {
+ finally_block.Jump();
+ }
+ }
+
+ // Generate code to unlink and set the state for the (formerly)
+ // shadowing targets that have been jumped to.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (shadows[i]->is_linked()) {
+ // If we have come from the shadowed return, the return value is
+ // on the virtual frame. We must preserve it until it is
+ // pushed.
+ if (i == kReturnShadowIndex) {
+ Result return_value;
+ shadows[i]->Bind(&return_value);
+ return_value.ToRegister(rax);
+ } else {
+ shadows[i]->Bind();
+ }
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ frame_->SpillAll();
+
+ // Reload sp from the top handler, because some statements that
+ // we break from (e.g., for...in) may have left values on the
+ // stack.
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ frame_->Forget(frame_->height() - handler_height);
+
+ // Unlink this handler and drop it from the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+ if (i == kReturnShadowIndex) {
+ // If this target shadowed the function return, materialize
+ // the return value on the stack.
+ frame_->EmitPush(rax);
+ } else {
+ // Fake TOS for targets that shadowed breaks and continues.
+ __ movq(kScratchRegister,
+ Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ }
+ __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+ if (--nof_unlinks > 0) {
+ // If this is not the last unlink block, jump around the next.
+ finally_block.Jump();
+ }
+ }
+ }
+
+ // --- Finally block ---
+ finally_block.Bind();
+
+ // Push the state on the stack.
+ frame_->EmitPush(rcx);
+
+ // We keep two elements on the stack - the (possibly faked) result
+ // and the state - while evaluating the finally block.
+ //
+ // Generate code for the statements in the finally block.
+ VisitStatementsAndSpill(node->finally_block()->statements());
+
+ if (has_valid_frame()) {
+ // Restore state and return value or faked TOS.
+ frame_->EmitPop(rcx);
+ frame_->EmitPop(rax);
+ }
+
+ // Generate code to jump to the right destination for all used
+ // formerly shadowing targets. Deallocate each shadow target.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (has_valid_frame() && shadows[i]->is_bound()) {
+ BreakTarget* original = shadows[i]->other_target();
+ __ cmpq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+ if (i == kReturnShadowIndex) {
+ // The return value is (already) in rax.
+ Result return_value = allocator_->Allocate(rax);
+ ASSERT(return_value.is_valid());
+ if (function_return_is_shadowed_) {
+ original->Branch(equal, &return_value);
+ } else {
+ // Branch around the preparation for return which may emit
+ // code.
+ JumpTarget skip;
+ skip.Branch(not_equal);
+ frame_->PrepareForReturn();
+ original->Jump(&return_value);
+ skip.Bind();
+ }
+ } else {
+ original->Branch(equal);
+ }
+ }
+ }
+
+ if (has_valid_frame()) {
+ // Check if we need to rethrow the exception.
+ JumpTarget exit;
+ __ cmpq(rcx, Immediate(Smi::FromInt(THROWING)));
+ exit.Branch(not_equal);
+
+ // Rethrow exception.
+ frame_->EmitPush(rax); // undo pop from above
+ frame_->CallRuntime(Runtime::kReThrow, 1);
+
+ // Done.
+ exit.Bind();
+ }
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Spill everything, even constants, to the frame.
+ frame_->SpillAll();
+ frame_->CallRuntime(Runtime::kDebugBreak, 0);
+ // Ignore the return value.
+#endif
+}
+
+
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ // Call the runtime to instantiate the function boilerplate object.
+ // The inevitable call will sync frame elements to memory anyway, so
+ // we do it eagerly to allow us to push the arguments directly into
+ // place.
+ ASSERT(boilerplate->IsBoilerplate());
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ // Push the boilerplate on the stack.
+ __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+
+ // Create a new closure.
+ frame_->EmitPush(rsi);
+ Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ // Check for stack-overflow exception.
+ if (HasStackOverflow()) return;
+ InstantiateBoilerplate(boilerplate);
}
+
void CodeGenerator::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* a) {
- UNIMPLEMENTED();
+ FunctionBoilerplateLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+ InstantiateBoilerplate(node->boilerplate());
}
-void CodeGenerator::VisitConditional(Conditional* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+ Comment cmnt(masm_, "[ Conditional");
+ JumpTarget then;
+ JumpTarget else_;
+ JumpTarget exit;
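+ // Compile the condition with the then case preferred as the fall-through.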
+ ControlDestination dest(&then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The else target was bound, so we compile the else part first.
+ Load(node->else_expression(), typeof_state());
+
+ if (then.is_linked()) {
+ exit.Jump();
+ then.Bind();
+ Load(node->then_expression(), typeof_state());
+ }
+ } else {
+ // The then target was bound, so we compile the then part first.
+ Load(node->then_expression(), typeof_state());
+
+ if (else_.is_linked()) {
+ exit.Jump();
+ else_.Bind();
+ Load(node->else_expression(), typeof_state());
+ }
+ }
+
+ exit.Bind();
}
-void CodeGenerator::VisitSlot(Slot* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitSlot(Slot* node) {
+ Comment cmnt(masm_, "[ Slot");
+ LoadFromSlot(node, typeof_state());
}
-void CodeGenerator::VisitVariableProxy(VariableProxy* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ Variable* var = node->var();
+ Expression* expr = var->rewrite();
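+ // Non-global variables have been rewritten (e.g., to slot accesses);
+ // globals have no rewrite and are loaded through a named reference.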
+ if (expr != NULL) {
+ Visit(expr);
+ } else {
+ ASSERT(var->is_global());
+ Reference ref(this, node);
+ ref.GetValue(typeof_state());
+ }
}
-void CodeGenerator::VisitLiteral(Literal* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+ Comment cmnt(masm_, "[ Literal");
+ frame_->Push(node->handle());
}
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* a) {
- UNIMPLEMENTED();
+
+// Materialize the regexp literal 'node' in the literals array
+// 'literals' of the function. Leave the regexp boilerplate in
+// 'boilerplate'.
+class DeferredRegExpLiteral: public DeferredCode {
+ public:
+ DeferredRegExpLiteral(Register boilerplate,
+ Register literals,
+ RegExpLiteral* node)
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
+ set_comment("[ DeferredRegExpLiteral");
+ }
+
+ void Generate();
+
+ private:
+ Register boilerplate_;
+ Register literals_;
+ RegExpLiteral* node_;
+};
+
+
+void DeferredRegExpLiteral::Generate() {
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ // Literal array (0).
+ __ push(literals_);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ // RegExp pattern (2).
+ __ Push(node_->pattern());
+ // RegExp flags (3).
+ __ Push(node_->flags());
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
}
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+ Comment cmnt(masm_, "[ RegExp Literal");
+
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
+
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+ // Load the literal at the index saved in the AST node.
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+ // Check whether we need to materialize the RegExp object. If so,
+ // jump to the deferred code passing the literals array.
+ DeferredRegExpLiteral* deferred =
+ new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
+ __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ deferred->Branch(equal);
+ deferred->BindExit();
+ literals.Unuse();
+
+ // Push the boilerplate object.
+ frame_->Push(&boilerplate);
}
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* a) {
- UNIMPLEMENTED();
+
+// Materialize the object literal 'node' in the literals array
+// 'literals' of the function. Leave the object boilerplate in
+// 'boilerplate'.
+class DeferredObjectLiteral: public DeferredCode {
+ public:
+ DeferredObjectLiteral(Register boilerplate,
+ Register literals,
+ ObjectLiteral* node)
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
+ set_comment("[ DeferredObjectLiteral");
+ }
+
+ void Generate();
+
+ private:
+ Register boilerplate_;
+ Register literals_;
+ ObjectLiteral* node_;
+};
+
+
+void DeferredObjectLiteral::Generate() {
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ // Literal array (0).
+ __ push(literals_);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ // Constant properties (2).
+ __ Push(node_->constant_properties());
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
}
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
+
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+ // Load the literal at the index saved in the AST node.
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+ // Check whether we need to materialize the object literal boilerplate.
+ // If so, jump to the deferred code passing the literals array.
+ DeferredObjectLiteral* deferred =
+ new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
+ __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ deferred->Branch(equal);
+ deferred->BindExit();
+ literals.Unuse();
+
+ // Push the boilerplate object.
+ frame_->Push(&boilerplate);
+ // Clone the boilerplate object.
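+ // A literal with depth 1 contains no nested object or array literals,
+ // so a shallow clone of the boilerplate suffices.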
+ Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+ if (node->depth() == 1) {
+ clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ }
+ Result clone = frame_->CallRuntime(clone_function_id, 1);
+ // Push the newly cloned literal object as the result.
+ frame_->Push(&clone);
+
+ for (int i = 0; i < node->properties()->length(); i++) {
+ ObjectLiteral::Property* property = node->properties()->at(i);
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ break;
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+ // else fall through.
+ case ObjectLiteral::Property::COMPUTED: {
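+ // String (symbol) keys can be stored with the named-store IC; other
+ // keys fall through to the generic runtime SetProperty call below.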
+ Handle<Object> key(property->key()->handle());
+ if (key->IsSymbol()) {
+ // Duplicate the object as the IC receiver.
+ frame_->Dup();
+ Load(property->value());
+ frame_->Push(key);
+ Result ignored = frame_->CallStoreIC();
+ // Drop the duplicated receiver and ignore the result.
+ frame_->Drop();
+ break;
+ }
+ // Fall through
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+ // Ignore the result.
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
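+ // The smi 1 selects the setter in the kDefineAccessor runtime call.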
+ frame_->Push(Smi::FromInt(1));
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore the result.
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
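+ // The smi 0 selects the getter in the kDefineAccessor runtime call.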
+ frame_->Push(Smi::FromInt(0));
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore the result.
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ }
}
-void CodeGenerator::VisitAssignment(Assignment* a) {
- UNIMPLEMENTED();
+
+// Materialize the array literal 'node' in the literals array 'literals'
+// of the function. Leave the array boilerplate in 'boilerplate'.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+ DeferredArrayLiteral(Register boilerplate,
+ Register literals,
+ ArrayLiteral* node)
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
+ set_comment("[ DeferredArrayLiteral");
+ }
+
+ void Generate();
+
+ private:
+ Register boilerplate_;
+ Register literals_;
+ ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ // Literal array (0).
+ __ push(literals_);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ // Constant elements (2).
+ __ Push(node_->literals());
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+ if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
}
-void CodeGenerator::VisitThrow(Throw* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
+
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+ // Load the literal at the index saved in the AST node.
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+ // Check whether we need to materialize the array literal boilerplate.
+ // If so, jump to the deferred code passing the literals array.
+ DeferredArrayLiteral* deferred =
+ new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
+ __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ deferred->Branch(equal);
+ deferred->BindExit();
+ literals.Unuse();
+
+ // Push the resulting array literal boilerplate on the stack.
+ frame_->Push(&boilerplate);
+ // Clone the boilerplate object.
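+ // A literal with depth 1 contains no nested literals, so a shallow
+ // clone of the boilerplate suffices.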
+ Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+ if (node->depth() == 1) {
+ clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ }
+ Result clone = frame_->CallRuntime(clone_function_id, 1);
+ // Push the newly cloned literal object as the result.
+ frame_->Push(&clone);
+
+ // Generate code to set the elements in the array that are not
+ // literals.
+ for (int i = 0; i < node->values()->length(); i++) {
+ Expression* value = node->values()->at(i);
+
+ // If value is a literal the property value is already set in the
+ // boilerplate object.
+ if (value->AsLiteral() != NULL) continue;
+ // If value is a materialized literal the property value is already set
+ // in the boilerplate object if it is simple.
+ if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+
+ // The property must be set by generated code.
+ Load(value);
+
+ // Get the property value off the stack.
+ Result prop_value = frame_->Pop();
+ prop_value.ToRegister();
+
+ // Fetch the array literal while leaving a copy on the stack and
+ // use it to get the elements array.
+ frame_->Dup();
+ Result elements = frame_->Pop();
+ elements.ToRegister();
+ frame_->Spill(elements.reg());
+ // Get the elements array.
+ __ movq(elements.reg(),
+ FieldOperand(elements.reg(), JSObject::kElementsOffset));
+
+ // Write to the indexed properties array.
+ int offset = i * kPointerSize + Array::kHeaderSize;
+ __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
+
+ // Update the write barrier for the array address.
+ frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
+ }
}
-void CodeGenerator::VisitProperty(Property* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+ ASSERT(!in_spilled_code());
+ // Call runtime routine to allocate the catch extension object and
+ // assign the exception value to the catch variable.
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+ Load(node->key());
+ Load(node->value());
+ Result result =
+ frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ frame_->Push(&result);
}
-void CodeGenerator::VisitCall(Call* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+ Comment cmnt(masm_, "[ Assignment");
+ CodeForStatementPosition(node);
+
+ { Reference target(this, node->target());
+ if (target.is_illegal()) {
+ // Fool the virtual frame into thinking that we left the assignment's
+ // value on the frame.
+ frame_->Push(Smi::FromInt(0));
+ return;
+ }
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+
+ if (node->starts_initialization_block()) {
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ // Change to slow case in the beginning of an initialization
+ // block to avoid the quadratic behavior of repeatedly adding
+ // fast properties.
+
+ // The receiver is the argument to the runtime call. It is the
+ // first value pushed when the reference was loaded to the
+ // frame.
+ // TODO(X64): Enable this and the switch back to fast, once they work.
+ // frame_->PushElementAt(target.size() - 1);
+ // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
+ Load(node->value());
+
+ } else {
+ // Literal* literal = node->value()->AsLiteral();
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ // Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+ // There are two cases where the target is not read in the right hand
+ // side, that are easy to test for: the right hand side is a literal,
+ // or the right hand side is a different variable. TakeValue invalidates
+ // the target, with an implicit promise that it will be written to again
+ // before it is read.
+ // TODO(X64): Implement TakeValue optimization.
+ if (false) {
+ // if (literal != NULL || (right_var != NULL && right_var != var)) {
+ // target.TakeValue(NOT_INSIDE_TYPEOF);
+ } else {
+ target.GetValue(NOT_INSIDE_TYPEOF);
+ }
+ Load(node->value());
+ GenericBinaryOperation(node->binary_op(),
+ node->type(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ }
+
+ if (var != NULL &&
+ var->mode() == Variable::CONST &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
+ } else {
+ CodeForSourcePosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ target.SetValue(CONST_INIT);
+ } else {
+ target.SetValue(NOT_CONST_INIT);
+ }
+ if (node->ends_initialization_block()) {
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ // End of initialization block. Revert to fast case. The
+ // argument to the runtime call is the receiver, which is the
+ // first value pushed as part of the reference, which is below
+ // the lhs value.
+ // TODO(X64): Enable this once ToFastProperties works.
+ // frame_->PushElementAt(target.size());
+ // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+ }
+ }
}
-void CodeGenerator::VisitCallEval(CallEval* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitThrow(Throw* node) {
+ Comment cmnt(masm_, "[ Throw");
+ CodeForStatementPosition(node);
+
+ Load(node->exception());
+ Result result = frame_->CallRuntime(Runtime::kThrow, 1);
+ frame_->Push(&result);
}
-void CodeGenerator::VisitCallNew(CallNew* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitProperty(Property* node) {
+ Comment cmnt(masm_, "[ Property");
+ Reference property(this, node);
+ property.GetValue(typeof_state());
}
-void CodeGenerator::VisitCallRuntime(CallRuntime* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCall(Call* node) {
+ Comment cmnt(masm_, "[ Call");
+
+ ZoneList<Expression*>* args = node->arguments();
+
+ CodeForStatementPosition(node);
+
+ // Check if the function is a variable or a property.
+ Expression* function = node->expression();
+ Variable* var = function->AsVariableProxy()->AsVariable();
+ Property* property = function->AsProperty();
+
+ // ------------------------------------------------------------------------
+ // Fast-case: Use inline caching.
+ // ---
+ // According to ECMA-262, section 11.2.3, page 44, the function to call
+ // must be resolved after the arguments have been evaluated. The IC code
+ // automatically handles this by loading the arguments before the function
+ // is resolved in cache misses (this also holds for megamorphic calls).
+ // ------------------------------------------------------------------------
+
+ if (var != NULL && !var->is_this() && var->is_global()) {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is global
+ // ----------------------------------
+
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(var->name());
+
+ // Pass the global object as the receiver and let the IC stub
+ // patch the stack to use the global proxy as 'this' in the
+ // invoked function.
+ LoadGlobal();
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // TODO(X64): Enable calls of non-global functions.
+ UNIMPLEMENTED();
+ /*
+ // ----------------------------------
+ // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
+ // ----------------------------------
+
+ // Load the function from the context. Sync the frame so we can
+ // push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(Immediate(var->name()));
+ frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ // The runtime call returns a pair of values in rax and rdx. The
+ // looked-up function is in rax and the receiver is in rdx. These
+ // register references are not ref counted here. We spill them
+ // eagerly since they are arguments to an inevitable call (and are
+ // not sharable by the arguments).
+ ASSERT(!allocator()->is_used(rax));
+ frame_->EmitPush(rax);
+
+ // Load the receiver.
+ ASSERT(!allocator()->is_used(rdx));
+ frame_->EmitPush(rdx);
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ */
+ } else if (property != NULL) {
+ // Check if the key is a literal string.
+ Literal* literal = property->key()->AsLiteral();
+
+ if (literal != NULL && literal->handle()->IsSymbol()) {
+ // ------------------------------------------------------------------
+ // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+ // ------------------------------------------------------------------
+
+ // TODO(X64): Consider optimizing Function.prototype.apply calls
+ // with arguments object. Requires lazy arguments allocation;
+ // see http://codereview.chromium.org/147075.
+
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(literal->handle());
+ Load(property->obj());
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result =
+ frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+
+ } else {
+ // -------------------------------------------
+ // JavaScript example: 'array[index](1, 2, 3)'
+ // -------------------------------------------
+
+ // Load the function to call from the property through a reference.
+ Reference ref(this, property);
+ ref.GetValue(NOT_INSIDE_TYPEOF);
+
+ // Pass receiver to called function.
+ if (property->is_synthetic()) {
+ // Use global object as receiver.
+ LoadGlobalReceiver();
+ } else {
+ // The reference's size is non-negative.
+ frame_->PushElementAt(ref.size());
+ }
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ }
+ } else {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is not global
+ // ----------------------------------
+
+ // Load the function.
+ Load(function);
+
+ // Pass the global proxy as the receiver.
+ LoadGlobalReceiver();
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ }
}
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCallEval(CallEval* node) {
+ Comment cmnt(masm_, "[ CallEval");
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
+ // the function we need to call and the receiver of the call.
+ // Then we call the resolved function using the given arguments.
+
+ ZoneList<Expression*>* args = node->arguments();
+ Expression* function = node->expression();
+
+ CodeForStatementPosition(node);
+
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
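+ // Push copies of the function (which sits below the receiver and the
+ // arguments) and of the first argument, or undefined if there are none.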
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+
+ // Resolve the call.
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ movq(scratch.reg(),
+ FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+ __ movq(result.reg(),
+ FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
+ frame_->SetElementAt(arg_count, &result);
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
}
-void CodeGenerator::VisitCountOperation(CountOperation* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+ Comment cmnt(masm_, "[ CallNew");
+ CodeForStatementPosition(node);
+
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments. This is different from ordinary calls, where the
+ // actual function to call is resolved after the arguments have been
+ // evaluated.
+
+ // Compute function to call and use the global object as the
+ // receiver. There is no need to use the global proxy here because
+ // it will always be replaced with a newly allocated object.
+ Load(node->expression());
+ LoadGlobal();
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = node->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallConstructor(arg_count);
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
}
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+ if (CheckForInlineRuntimeCall(node)) {
+ return;
+ }
+
+ ZoneList<Expression*>* args = node->arguments();
+ Comment cmnt(masm_, "[ CallRuntime");
+ Runtime::Function* function = node->function();
+
+ if (function == NULL) {
+ // Prepare stack for calling JS runtime function.
+ frame_->Push(node->name());
+ // Push the builtins object found in the current global object.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), GlobalObject());
+ __ movq(temp.reg(),
+ FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+ frame_->Push(&temp);
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ if (function == NULL) {
+ // Call the JS runtime function.
+ Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting_);
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
+ } else {
+ // Call the C runtime function.
+ Result answer = frame_->CallRuntime(function, arg_count);
+ frame_->Push(&answer);
+ }
}
-void CodeGenerator::VisitCompareOperation(CompareOperation* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ // Note that because of NOT and an optimization in comparison of a typeof
+ // expression to a literal string, this function can fail to leave a value
+ // on top of the frame or in the cc register.
+ Comment cmnt(masm_, "[ UnaryOperation");
+
+ Token::Value op = node->op();
+
+ if (op == Token::NOT) {
+ // Swap the true and false targets but keep the same actual label
+ // as the fall through.
+ destination()->Invert();
+ LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+ // Swap the labels back.
+ destination()->Invert();
+
+ } else if (op == Token::DELETE) {
+ Property* property = node->expression()->AsProperty();
+ if (property != NULL) {
+ Load(property->obj());
+ Load(property->key());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+ }
+
+ Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+ if (variable != NULL) {
+ Slot* slot = variable->slot();
+ if (variable->is_global()) {
+ LoadGlobal();
+ frame_->Push(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Call the runtime to look up the context holding the named
+ // variable. Sync the virtual frame eagerly so we can push the
+ // arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(variable->name());
+ Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+ ASSERT(context.is_register());
+ frame_->EmitPush(context.reg());
+ context.Unuse();
+ frame_->EmitPush(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+ }
+
+ // Default: Result of deleting non-global, not dynamically
+ // introduced variables is false.
+ frame_->Push(Factory::false_value());
+
+ } else {
+ // Default: Result of deleting expressions is true.
+ Load(node->expression()); // may have side-effects
+ frame_->SetElementAt(0, Factory::true_value());
+ }
+
+ } else if (op == Token::TYPEOF) {
+ // Special case for loading the typeof expression; see comment on
+ // LoadTypeofExpression().
+ LoadTypeofExpression(node->expression());
+ Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+ frame_->Push(&answer);
+
+ } else if (op == Token::VOID) {
+ Expression* expression = node->expression();
+ if (expression && expression->AsLiteral() && (
+ expression->AsLiteral()->IsTrue() ||
+ expression->AsLiteral()->IsFalse() ||
+ expression->AsLiteral()->handle()->IsNumber() ||
+ expression->AsLiteral()->handle()->IsString() ||
+ expression->AsLiteral()->handle()->IsJSRegExp() ||
+ expression->AsLiteral()->IsNull())) {
+ // Omit evaluating the value of the primitive literal.
+ // It will be discarded anyway, and can have no side effect.
+ frame_->Push(Factory::undefined_value());
+ } else {
+ Load(node->expression());
+ frame_->SetElementAt(0, Factory::undefined_value());
+ }
+
+ } else {
+ Load(node->expression());
+ switch (op) {
+ case Token::NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ UNREACHABLE(); // handled above
+ break;
+
+ case Token::SUB: {
+ bool overwrite =
+ (node->AsBinaryOperation() != NULL &&
+ node->AsBinaryOperation()->ResultOverwriteAllowed());
+ UnarySubStub stub(overwrite);
+ // TODO(1222589): remove dependency of TOS being cached inside stub
+ Result operand = frame_->Pop();
+ Result answer = frame_->CallStub(&stub, &operand);
+ frame_->Push(&answer);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ // Smi check.
+ JumpTarget smi_label;
+ JumpTarget continue_label;
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ testl(operand.reg(), Immediate(kSmiTagMask));
+ smi_label.Branch(zero, &operand);
+
+ frame_->Push(&operand); // undo popping of TOS
+ Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
+ CALL_FUNCTION, 1);
+ continue_label.Jump(&answer);
+ smi_label.Bind(&answer);
+ answer.ToRegister();
+ frame_->Spill(answer.reg());
+ __ not_(answer.reg());
+ // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
+ __ xor_(answer.reg(), Immediate(kSmiTagMask));
+ continue_label.Bind(&answer);
+ frame_->Push(&answer);
+ break;
+ }
+
+ case Token::ADD: {
+ // Smi check.
+ JumpTarget continue_label;
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ testl(operand.reg(), Immediate(kSmiTagMask));
+ continue_label.Branch(zero, &operand, taken);
+
+ frame_->Push(&operand);
+ Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+ CALL_FUNCTION, 1);
+
+ continue_label.Bind(&answer);
+ frame_->Push(&answer);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ }
}
-void CodeGenerator::VisitThisFunction(ThisFunction* a) {
- UNIMPLEMENTED();
+
+// The value in dst was optimistically incremented or decremented.  The
+// result overflowed or was not smi tagged.  Undo the operation, call
+// into the runtime to convert the argument to a number, and use the
+// runtime to perform the addition or subtraction.  The result is left
+// in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
+ public:
+ DeferredPrefixCountOperation(Register dst, bool is_increment)
+ : dst_(dst), is_increment_(is_increment) {
+ set_comment("[ DeferredCountOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ bool is_increment_;
+};
+
+
+void DeferredPrefixCountOperation::Generate() {
+ // Undo the optimistic smi operation.
+ if (is_increment_) {
+ __ subq(dst_, Immediate(Smi::FromInt(1)));
+ } else {
+ __ addq(dst_, Immediate(Smi::FromInt(1)));
+ }
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ push(rax);
+ __ push(Immediate(Smi::FromInt(1)));
+ if (is_increment_) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ if (!dst_.is(rax)) __ movq(dst_, rax);
}
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+// The value in dst was optimistically incremented or decremented.  The
+// result overflowed or was not smi tagged.  Undo the operation and call
+// into the runtime to convert the argument to a number.  Record the
+// converted value in old, then use the runtime to perform the addition
+// or subtraction.  The result is left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+ DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
+ : dst_(dst), old_(old), is_increment_(is_increment) {
+ set_comment("[ DeferredCountOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Register old_;
+ bool is_increment_;
+};
+
+
+void DeferredPostfixCountOperation::Generate() {
+ // Undo the optimistic smi operation.
+ if (is_increment_) {
+ __ subq(dst_, Immediate(Smi::FromInt(1)));
+ } else {
+ __ addq(dst_, Immediate(Smi::FromInt(1)));
+ }
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+ // Save the result of ToNumber to use as the old value.
+ __ push(rax);
+
+ // Call the runtime for the addition or subtraction.
+ __ push(rax);
+ __ push(Immediate(Smi::FromInt(1)));
+ if (is_increment_) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ pop(old_);
}
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+ Comment cmnt(masm_, "[ CountOperation");
+
+ bool is_postfix = node->is_postfix();
+ bool is_increment = node->op() == Token::INC;
+
+ Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+ bool is_const = (var != NULL && var->mode() == Variable::CONST);
+
+ // Postfix operations need a stack slot under the reference to hold
+ // the old value while the new value is being stored. This is so that
+ // in the case that storing the new value requires a call, the old
+ // value will be in the frame to be spilled.
+ if (is_postfix) frame_->Push(Smi::FromInt(0));
+
+ { Reference target(this, node->expression());
+ if (target.is_illegal()) {
+ // Spoof the virtual frame to have the expected height (one higher
+ // than on entry).
+ if (!is_postfix) frame_->Push(Smi::FromInt(0));
+ return;
+ }
+ target.TakeValue(NOT_INSIDE_TYPEOF);
+
+ Result new_value = frame_->Pop();
+ new_value.ToRegister();
+
+ Result old_value; // Only allocated in the postfix case.
+ if (is_postfix) {
+ // Allocate a temporary to preserve the old value.
+ old_value = allocator_->Allocate();
+ ASSERT(old_value.is_valid());
+ __ movq(old_value.reg(), new_value.reg());
+ }
+ // Ensure the new value is writable.
+ frame_->Spill(new_value.reg());
+
+ // The overflow check and the smi tag check below are combined using
+ // setcc, which needs a byte register; on x64 kScratchRegister is used
+ // for this.  The temporary allocated here is not referenced by the
+ // emitted code and is simply released again after the checks.
+ //
+ // kScratchRegister is cleared before the count operation because
+ // clearing it with xor would also clear the overflow flag.
+ Result tmp = allocator_->AllocateWithoutSpilling();
+
+ __ xor_(kScratchRegister, kScratchRegister);
+
+ DeferredCode* deferred = NULL;
+ if (is_postfix) {
+ deferred = new DeferredPostfixCountOperation(new_value.reg(),
+ old_value.reg(),
+ is_increment);
+ } else {
+ deferred = new DeferredPrefixCountOperation(new_value.reg(),
+ is_increment);
+ }
+
+ if (is_increment) {
+ __ addq(new_value.reg(), Immediate(Smi::FromInt(1)));
+ } else {
+ __ subq(new_value.reg(), Immediate(Smi::FromInt(1)));
+ }
+
+ // If the count operation didn't overflow and the result is a valid
+ // smi, we're done. Otherwise, we jump to the deferred slow-case
+ // code.
+
+ // We combine the overflow and the smi tag check.
+ __ setcc(overflow, kScratchRegister);
+ __ or_(kScratchRegister, new_value.reg());
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ tmp.Unuse();
+ deferred->Branch(not_zero);
+
+ deferred->BindExit();
+
+ // Postfix: store the old value in the allocated slot under the
+ // reference.
+ if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
+
+ frame_->Push(&new_value);
+ // Non-constant: update the reference.
+ if (!is_const) target.SetValue(NOT_CONST_INIT);
+ }
+
+ // Postfix: drop the new value and use the old.
+ if (is_postfix) frame_->Drop();
}
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ // TODO(X64): This code was copied verbatim from codegen-ia32.
+ // Either find a reason to change it or move it to a shared location.
+
+ // Note that due to an optimization in comparison operations (typeof
+ // compared to a string literal), we can evaluate a binary expression such
+ // as AND or OR and not leave a value on the frame or in the cc register.
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = node->op();
+
+ // According to ECMA-262 section 11.11, page 58, the binary logical
+ // operators must yield the result of one of the two expressions
+ // before any ToBoolean() conversions. This means that the value
+ // produced by a && or || operator is not necessarily a boolean.
+
+ // NOTE: If the left hand side produces a materialized value (not
+ // control flow), we force the right hand side to do the same. This
+ // is necessary because we assume that if we get control flow on the
+ // last path out of an expression we got it on all paths.
+ if (op == Token::AND) {
+ JumpTarget is_true;
+ ControlDestination dest(&is_true, destination()->false_target(), true);
+ LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+ if (dest.false_was_fall_through()) {
+ // The current false target was used as the fall-through. If
+ // there are no dangling jumps to is_true then the left
+ // subexpression was unconditionally false. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_true.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current false target was a forward jump then we have a
+ // valid frame, we have just bound the false target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->false_target()->Unuse();
+ destination()->false_target()->Jump();
+ }
+ is_true.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ } else {
+ // We have actually just jumped to or bound the current false
+ // target but the current control destination is not marked as
+ // used.
+ destination()->Use(false);
+ }
+
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_true
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+ } else {
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_true
+ // from nested subexpressions.
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
+
+ // Avoid popping the result if it converts to 'false' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&pop_and_continue, &exit, true);
+ ToBoolean(&dest);
+
+ // Pop the result of evaluating the first part.
+ frame_->Drop();
+
+ // Compile right side expression.
+ is_true.Bind();
+ Load(node->right());
+
+ // Exit (always with a materialized value).
+ exit.Bind();
+ }
+
+ } else if (op == Token::OR) {
+ JumpTarget is_false;
+ ControlDestination dest(destination()->true_target(), &is_false, false);
+ LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+ if (dest.true_was_fall_through()) {
+ // The current true target was used as the fall-through. If
+ // there are no dangling jumps to is_false then the left
+ // subexpression was unconditionally true. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_false.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current true target was a forward jump then we have a
+ // valid frame, we have just bound the true target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->true_target()->Unuse();
+ destination()->true_target()->Jump();
+ }
+ is_false.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ } else {
+ // We have just jumped to or bound the current true target but
+ // the current control destination is not marked as used.
+ destination()->Use(true);
+ }
+
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_false
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+ } else {
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_false
+ // from nested subexpressions.
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
+
+ // Avoid popping the result if it converts to 'true' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&exit, &pop_and_continue, false);
+ ToBoolean(&dest);
+
+ // Pop the result of evaluating the first part.
+ frame_->Drop();
+
+ // Compile right side expression.
+ is_false.Bind();
+ Load(node->right());
+
+ // Exit (always with a materialized value).
+ exit.Bind();
+ }
+
+ } else {
+ // NOTE: The code below assumes that the slow cases (calls to runtime)
+ // never return a constant/immutable object.
+ OverwriteMode overwrite_mode = NO_OVERWRITE;
+ if (node->left()->AsBinaryOperation() != NULL &&
+ node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ overwrite_mode = OVERWRITE_LEFT;
+ } else if (node->right()->AsBinaryOperation() != NULL &&
+ node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ overwrite_mode = OVERWRITE_RIGHT;
+ }
+
+ Load(node->left());
+ Load(node->right());
+ GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
+ }
}
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+ Comment cmnt(masm_, "[ CompareOperation");
+
+ // Get the expressions from the node.
+ Expression* left = node->left();
+ Expression* right = node->right();
+ Token::Value op = node->op();
+ // To make typeof testing for natives implemented in JavaScript really
+ // efficient, we generate special code for expressions of the form:
+ // 'typeof <expression> == <string>'.
+ UnaryOperation* operation = left->AsUnaryOperation();
+ if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+ (operation != NULL && operation->op() == Token::TYPEOF) &&
+ (right->AsLiteral() != NULL &&
+ right->AsLiteral()->handle()->IsString())) {
+ Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
+
+ // Load the operand and move it to a register.
+ LoadTypeofExpression(operation->expression());
+ Result answer = frame_->Pop();
+ answer.ToRegister();
+
+ if (check->Equals(Heap::number_symbol())) {
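+ // A smi is a number; otherwise check for the heap number map.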
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->true_target()->Branch(zero);
+ frame_->Spill(answer.reg());
+ __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ Cmp(answer.reg(), Factory::heap_number_map());
+ answer.Unuse();
+ destination()->Split(equal);
+
+ } else if (check->Equals(Heap::string_symbol())) {
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+
+ // It can be an undetectable string object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
+ answer.Unuse();
+ destination()->Split(below); // Unsigned byte comparison needed.
+
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ Cmp(answer.reg(), Factory::true_value());
+ destination()->true_target()->Branch(equal);
+ __ Cmp(answer.reg(), Factory::false_value());
+ answer.Unuse();
+ destination()->Split(equal);
+
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ Cmp(answer.reg(), Factory::undefined_value());
+ destination()->true_target()->Branch(equal);
+
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+
+ // It can be an undetectable object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ answer.Unuse();
+ destination()->Split(not_zero);
+
+ } else if (check->Equals(Heap::function_symbol())) {
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ frame_->Spill(answer.reg());
+ __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+ answer.Unuse();
+ destination()->Split(equal);
+
+ } else if (check->Equals(Heap::object_symbol())) {
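+ // typeof null is "object"; undetectable objects report "undefined"
+ // and are rejected below.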
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ __ Cmp(answer.reg(), Factory::null_value());
+ destination()->true_target()->Branch(equal);
+
+ // It can be an undetectable object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ // Check that the instance type is within the JS object range.
+ __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
+ destination()->false_target()->Branch(below);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ answer.Unuse();
+ destination()->Split(below_equal);
+ } else {
+ // Uncommon case: typeof testing against a string literal that is
+ // never returned from the typeof operator.
+ answer.Unuse();
+ destination()->Goto(false);
+ }
+ return;
+ }
+
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (op) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ break;
+ case Token::LT:
+ cc = less;
+ break;
+ case Token::GT:
+ cc = greater;
+ break;
+ case Token::LTE:
+ cc = less_equal;
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ break;
+ case Token::IN: {
+ Load(left);
+ Load(right);
+ Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+ frame_->Push(&answer); // push the result
+ return;
+ }
+ case Token::INSTANCEOF: {
+ Load(left);
+ Load(right);
+ InstanceofStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
+ answer.ToRegister();
+ __ testq(answer.reg(), answer.reg());
+ answer.Unuse();
+ destination()->Split(zero);
+ return;
+ }
+ default:
+ UNREACHABLE();
+ }
+ Load(left);
+ Load(right);
+ Comparison(cc, strict, destination());
}
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+ frame_->PushFunction();
}
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in rdx and the formal
+ // parameter count in rax.
+ Load(args->at(0));
+ Result key = frame_->Pop();
+ // Explicitly create a constant result.
+ Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+ // Call the shared stub to get to arguments[key].
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ Result result = frame_->CallStub(&stub, &key, &count);
+ frame_->Push(&result);
}
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ testl(value.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
+ // It is a heap object - get map.
+ // Check if the object is a JS array or not.
+ __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(equal);
}
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ // ArgumentsAccessStub takes the parameter count as an input argument
+ // in register rax. Create a constant result for it.
+ Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+ // Call the shared stub to get to the arguments.length.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
+ Result result = frame_->CallStub(&stub, &count);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
+ // TODO(X64): Implement this function.
+ // Ignore arguments and return undefined, to signal failure.
+ frame_->Push(Factory::undefined_value());
}
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
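+  // A non-negative smi has both the smi tag bit and the sign bit clear.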
+ __ testl(value.reg(),
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
+ value.Unuse();
+ destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ testl(value.reg(), Immediate(kSmiTagMask));
+ value.Unuse();
+ destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (ShouldGenerateLog(args->at(0))) {
+ Load(args->at(1));
+ Load(args->at(2));
+ frame_->CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ Load(args->at(0));
+ Load(args->at(1));
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+ right.ToRegister();
+ left.ToRegister();
+ __ cmpq(right.reg(), left.reg());
+ right.Unuse();
+ left.Unuse();
+ destination()->Split(equal);
+}
+
+
+
void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
@@ -418,25 +3421,2426 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
UNIMPLEMENTED();
}
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ Load(args->at(1)); // Load the value.
+ Result value = frame_->Pop();
+ Result object = frame_->Pop();
+ value.ToRegister();
+ object.ToRegister();
+
+ // if (object->IsSmi()) return value.
+ __ testl(object.reg(), Immediate(kSmiTagMask));
+ leave.Branch(zero, &value);
+
+ // It is a heap object - get its map.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ // if (!object->IsJSValue()) return value.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+ leave.Branch(not_equal, &value);
+
+ // Store the value.
+ __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ Result duplicate_value = allocator_->Allocate();
+ ASSERT(duplicate_value.is_valid());
+ __ movq(duplicate_value.reg(), value.reg());
+ // The object register is also overwritten by the write barrier and
+ // possibly aliased in the frame.
+ frame_->Spill(object.reg());
+ __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+ scratch.reg());
+ object.Unuse();
+ scratch.Unuse();
+ duplicate_value.Unuse();
+
+ // Leave.
+ leave.Bind(&value);
+ frame_->Push(&value);
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ frame_->Dup();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ ASSERT(object.is_valid());
+ // if (object->IsSmi()) return object.
+ __ testl(object.reg(), Immediate(kSmiTagMask));
+ leave.Branch(zero);
+ // It is a heap object - get map.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ // if (!object->IsJSValue()) return object.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+ leave.Branch(not_equal);
+ __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+ object.Unuse();
+ frame_->SetElementAt(0, &temp);
+ leave.Bind();
+}
+
+
+// -----------------------------------------------------------------------------
+// CodeGenerator implementation of Expressions
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+ TypeofState typeof_state) {
+ // TODO(x64): No architecture specific code. Move to shared location.
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Load(expression, typeof_state);
+ frame_->SpillAll();
+ set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ JumpTarget true_target;
+ JumpTarget false_target;
+ ControlDestination dest(&true_target, &false_target, true);
+ LoadCondition(x, typeof_state, &dest, false);
+
+ if (dest.false_was_fall_through()) {
+ // The false target was just bound.
+ JumpTarget loaded;
+ frame_->Push(Factory::false_value());
+ // There may be dangling jumps to the true target.
+ if (true_target.is_linked()) {
+ loaded.Jump();
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ loaded.Bind();
+ }
+
+ } else if (dest.is_used()) {
+ // There is true, and possibly false, control flow (with true as
+ // the fall through).
+ JumpTarget loaded;
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ loaded.Bind();
+ }
+
+ } else {
+ // We have a valid value on top of the frame, but we still may
+ // have dangling jumps to the true and false targets from nested
+ // subexpressions (eg, the left subexpressions of the
+ // short-circuited boolean operators).
+ ASSERT(has_valid_frame());
+ if (true_target.is_linked() || false_target.is_linked()) {
+ JumpTarget loaded;
+ loaded.Jump(); // Don't lose the current TOS.
+ if (true_target.is_linked()) {
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ }
+ }
+ if (false_target.is_linked()) {
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ }
+ loaded.Bind();
+ }
+ }
+
+ ASSERT(has_valid_frame());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
+void CodeGenerator::LoadCondition(Expression* x,
+ TypeofState typeof_state,
+ ControlDestination* dest,
+ bool force_control) {
+ ASSERT(!in_spilled_code());
+ int original_height = frame_->height();
+
+ { CodeGenState new_state(this, typeof_state, dest);
+ Visit(x);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+ // state (eg, a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ !dest->is_used() &&
+ frame_->height() == original_height) {
+ dest->Goto(true);
+ }
+ }
+
+ if (force_control && !dest->is_used()) {
+ // Convert the TOS value into flow to the control destination.
+ // TODO(X64): Make control flow to control destinations work.
+ ToBoolean(dest);
+ }
+
+ ASSERT(!(force_control && !dest->is_used()));
+ ASSERT(dest->is_used() || frame_->height() == original_height + 1);
+}
+
+
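+// Stub used by ToBoolean below when none of the inline fast checks apply;
+// the caller tests the stub's result against zero.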
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
+ Comment cmnt(masm_, "[ ToBoolean");
+
+ // The value to convert should be popped from the frame.
+ Result value = frame_->Pop();
+ value.ToRegister();
+ // Fast case checks.
+
+ // 'false' => false.
+ __ Cmp(value.reg(), Factory::false_value());
+ dest->false_target()->Branch(equal);
+
+ // 'true' => true.
+ __ Cmp(value.reg(), Factory::true_value());
+ dest->true_target()->Branch(equal);
+
+ // 'undefined' => false.
+ __ Cmp(value.reg(), Factory::undefined_value());
+ dest->false_target()->Branch(equal);
+
+ // Smi => false iff zero.
+ ASSERT(kSmiTag == 0);
+ __ testq(value.reg(), value.reg());
+ dest->false_target()->Branch(zero);
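+  // Any other smi value is true; non-smis fall through to the stub call.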
+ __ testl(value.reg(), Immediate(kSmiTagMask));
+ dest->true_target()->Branch(zero);
+
+ // Call the stub for all other cases.
+ frame_->Push(&value); // Undo the Pop() from above.
+ ToBooleanStub stub;
+ Result temp = frame_->CallStub(&stub, 1);
+ // Convert the result to a condition code.
+ __ testq(temp.reg(), temp.reg());
+ temp.Unuse();
+ dest->Split(not_equal);
+}
+
+
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+ UNIMPLEMENTED();
+ // TODO(X64): Implement security policy for loads of smis.
+}
+
+
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+ return false;
+}
+
+// -----------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
+
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+ : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+ cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+ cgen_->UnloadReference(this);
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+ // References are loaded from both spilled and unspilled code. Set the
+ // state to unspilled to allow that (and explicitly spill after
+ // construction at the construction sites).
+ bool was_in_spilled_code = in_spilled_code_;
+ in_spilled_code_ = false;
+
+ Comment cmnt(masm_, "[ LoadReference");
+ Expression* e = ref->expression();
+ Property* property = e->AsProperty();
+ Variable* var = e->AsVariableProxy()->AsVariable();
+
+ if (property != NULL) {
+ // The expression is either a property or a variable proxy that rewrites
+ // to a property.
+ Load(property->obj());
+ // We use a named reference if the key is a literal symbol, unless it is
+ // a string that can be legally parsed as an integer. This is because
+ // otherwise we will not get into the slow case code that handles [] on
+ // String objects.
+ Literal* literal = property->key()->AsLiteral();
+ uint32_t dummy;
+ if (literal != NULL &&
+ literal->handle()->IsSymbol() &&
+ !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ ref->set_type(Reference::NAMED);
+ } else {
+ Load(property->key());
+ ref->set_type(Reference::KEYED);
+ }
+ } else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
+ if (var->is_global()) {
+ LoadGlobal();
+ ref->set_type(Reference::NAMED);
+ } else {
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
+ }
+ } else {
+ // Anything else is a runtime error.
+ Load(e);
+ // frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+
+ in_spilled_code_ = was_in_spilled_code;
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+ // Pop a reference from the stack while preserving TOS.
+ Comment cmnt(masm_, "[ UnloadReference");
+ frame_->Nip(ref->size());
+}
+
+
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return frame_->ParameterAt(index);
+
+ case Slot::LOCAL:
+ return frame_->LocalAt(index);
+
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(rsi)); // do not overwrite context register
+ Register context = rsi;
+ int chain_length = scope()->ContextChainLength(slot->var()->scope());
+ for (int i = 0; i < chain_length; i++) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+      // (In fact this mov may never be needed, since the scope analysis
+      // may not permit a direct context access in this case and thus we
+      // are always at a function context. However, it is safe to
+      // dereference because the function context of a function context
+      // is itself. Before deleting this mov we should try to create a
+      // counter-example first, though...)
+ __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return Operand(rsp, 0);
+ }
+}
+
+
+Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
+ Result tmp,
+ JumpTarget* slow) {
UNIMPLEMENTED();
+ return Operand(rsp, 0);
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ JumpTarget slow;
+ JumpTarget done;
+ Result value;
+
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+ // If there was no control flow to slow, we can exit early.
+ if (!slow.is_linked()) {
+ frame_->Push(&value);
+ return;
+ }
+
+ done.Jump(&value);
+
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ // Only generate the fast case for locals that rewrite to slots.
+ // This rules out argument loads.
+ if (potential_slot != NULL) {
+ // Allocate a fresh register to use as a temp in
+ // ContextSlotOperandCheckExtensions and to hold the result
+ // value.
+ value = allocator_->Allocate();
+ ASSERT(value.is_valid());
+ __ movq(value.reg(),
+ ContextSlotOperandCheckExtensions(potential_slot,
+ value,
+ &slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ Cmp(value.reg(), Factory::the_hole_value());
+ done.Branch(not_equal, &value);
+ __ movq(value.reg(), Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ }
+ // There is always control flow to slow from
+ // ContextSlotOperandCheckExtensions so we have to jump around
+ // it.
+ done.Jump(&value);
+ }
+ }
+
+ slow.Bind();
+ // A runtime call is inevitable. We eagerly sync frame elements
+ // to memory so that we can push the arguments directly into place
+ // on top of the frame.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ if (typeof_state == INSIDE_TYPEOF) {
+ value =
+ frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ } else {
+ value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ }
+
+ done.Bind(&value);
+ frame_->Push(&value);
+
+ } else if (slot->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't been
+ // initialized yet) which needs to be converted into the 'undefined'
+ // value.
+ //
+ // We currently spill the virtual frame because constants use the
+ // potentially unsafe direct-frame access of SlotOperand.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Load const");
+ JumpTarget exit;
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ Cmp(rcx, Factory::the_hole_value());
+ exit.Branch(not_equal);
+ __ movq(rcx, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
+ exit.Bind();
+ frame_->EmitPush(rcx);
+
+ } else if (slot->type() == Slot::PARAMETER) {
+ frame_->PushParameterAt(slot->index());
+
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(slot->index());
+
+ } else {
+ // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+ // here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because it will always be a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
+ frame_->Push(&temp);
+ }
}
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ // TODO(X64): Enable more types of slot.
+
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ // For now, just do a runtime call. Since the call is inevitable,
+ // we eagerly sync the virtual frame so we can directly push the
+ // arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(slot->var()->name());
+
+ Result value;
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize const
+ // properties (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the same
+ // time, because the const declaration may be at the end of the eval
+ // code (sigh...) and the const variable may have been used before
+ // (where its value is 'undefined'). Thus, we can only do the
+ // initialization when we actually encounter the expression and when
+ // the expression operands are defined and valid, and thus we need the
+ // split into 2 operations: declaration of the context slot followed
+ // by initialization.
+ value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling chained assignment
+ // expressions.
+ frame_->Push(&value);
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ //
+ // We spill the frame in the code below because the direct-frame
+ // access of SlotOperand is potentially unsafe with an unspilled
+ // frame.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Init const");
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ Cmp(rcx, Factory::the_hole_value());
+ exit.Branch(not_equal);
+ }
+
+ // We must execute the store. Storing a variable must keep the (new)
+ // value on the stack. This is necessary for compiling assignment
+ // expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will initialize
+ // consts to 'the hole' value and by doing so, end up calling this code.
+ if (slot->type() == Slot::PARAMETER) {
+ frame_->StoreToParameterAt(slot->index());
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->StoreToLocalAt(slot->index());
+ } else {
+ // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because the slot is a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ frame_->Dup();
+ Result value = frame_->Pop();
+ value.ToRegister();
+ Result start = allocator_->Allocate();
+ ASSERT(start.is_valid());
+ __ movq(SlotOperand(slot, start.reg()), value.reg());
+ // RecordWrite may destroy the value registers.
+ //
+ // TODO(204): Avoid actually spilling when the value is not
+ // needed (probably the common case).
+ frame_->Spill(value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+ // The results start, value, and temp are unused by going out of
+ // scope.
+ }
+
+ exit.Bind();
+ }
+}
+
+
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow) {
UNIMPLEMENTED();
+ return Result(rax);
+}
+
+
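+// Load the global object onto the frame, using a spilled push when
+// generating spilled code and an allocated register otherwise.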
+void CodeGenerator::LoadGlobal() {
+ if (in_spilled_code()) {
+ frame_->EmitPush(GlobalObject());
+ } else {
+ Result temp = allocator_->Allocate();
+ __ movq(temp.reg(), GlobalObject());
+ frame_->Push(&temp);
+ }
+}
+
+
+void CodeGenerator::LoadGlobalReceiver() {
+ Result temp = allocator_->Allocate();
+ Register reg = temp.reg();
+ __ movq(reg, GlobalObject());
+ __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+ frame_->Push(&temp);
+}
+
+
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
+ if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope_->arguments_shadow() != NULL);
+ // We don't want to do lazy arguments allocation for functions that
+  // have heap-allocated contexts, because it interferes with the
+ // uninitialized const tracking in the context objects.
+ return (scope_->num_heap_slots() > 0)
+ ? EAGER_ARGUMENTS_ALLOCATION
+ : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+ ArgumentsAllocationMode mode = ArgumentsMode();
+ ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+ Comment cmnt(masm_, "[ store arguments object");
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+ // When using lazy arguments allocation, we store the hole value
+ // as a sentinel indicating that the arguments object hasn't been
+ // allocated yet.
+ frame_->Push(Factory::the_hole_value());
+ } else {
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ frame_->PushFunction();
+ frame_->PushReceiverSlotAddress();
+ frame_->Push(Smi::FromInt(scope_->num_parameters()));
+ Result result = frame_->CallStub(&stub, 3);
+ frame_->Push(&result);
+ }
+
+ { Reference shadow_ref(this, scope_->arguments_shadow());
+ Reference arguments_ref(this, scope_->arguments());
+ ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
+    // Here we rely on the convenient property that references to slots
+ // take up zero space in the frame (ie, it doesn't matter that the
+ // stored value is actually below the reference on the frame).
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+      // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ Result arguments = frame_->Pop();
+ if (arguments.is_constant()) {
+ // We have to skip updating the arguments object if it has
+ // been assigned a proper value.
+ skip_arguments = !arguments.handle()->IsTheHole();
+ } else {
+ __ Cmp(arguments.reg(), Factory::the_hole_value());
+ arguments.Unuse();
+ done.Branch(not_equal);
+ }
+ }
+ if (!skip_arguments) {
+ arguments_ref.SetValue(NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ shadow_ref.SetValue(NOT_CONST_INIT);
+ }
+ return frame_->Pop();
+}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+ Variable* variable = x->AsVariableProxy()->AsVariable();
+ if (variable != NULL && !variable->is_this() && variable->is_global()) {
+ // NOTE: This is somewhat nasty. We force the compiler to load
+ // the variable as if through '<global>.<variable>' to make sure we
+ // do not get reference errors.
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+ Literal key(variable->name());
+ // TODO(1241834): Fetch the position from the variable instead of using
+ // no position.
+ Property property(&global, &key, RelocInfo::kNoPosition);
+ Load(&property);
+ } else {
+ Load(x, INSIDE_TYPEOF);
+ }
+}
+
+
+class CompareStub: public CodeStub {
+ public:
+ CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Condition cc_;
+ bool strict_;
+
+ Major MajorKey() { return Compare; }
+
+ int MinorKey() {
+    // Encode the two parameters in a unique 16 bit value.
+ ASSERT(static_cast<int>(cc_) < (1 << 15));
+ return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
+ }
+
+ // Branch to the label if the given object isn't a symbol.
+ void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object);
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("CompareStub (cc %d), (strict %s)\n",
+ static_cast<int>(cc_),
+ strict_ ? "true" : "false");
+ }
+#endif
+};
+
+
+void CodeGenerator::Comparison(Condition cc,
+ bool strict,
+ ControlDestination* dest) {
+ // Strict only makes sense for equality comparisons.
+ ASSERT(!strict || cc == equal);
+
+ Result left_side;
+ Result right_side;
+ // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
+ if (cc == greater || cc == less_equal) {
+ cc = ReverseCondition(cc);
+ left_side = frame_->Pop();
+ right_side = frame_->Pop();
+ } else {
+ right_side = frame_->Pop();
+ left_side = frame_->Pop();
+ }
+ ASSERT(cc == less || cc == equal || cc == greater_equal);
+
+ // If either side is a constant smi, optimize the comparison.
+ bool left_side_constant_smi =
+ left_side.is_constant() && left_side.handle()->IsSmi();
+ bool right_side_constant_smi =
+ right_side.is_constant() && right_side.handle()->IsSmi();
+ bool left_side_constant_null =
+ left_side.is_constant() && left_side.handle()->IsNull();
+ bool right_side_constant_null =
+ right_side.is_constant() && right_side.handle()->IsNull();
+
+ if (left_side_constant_smi || right_side_constant_smi) {
+ if (left_side_constant_smi && right_side_constant_smi) {
+ // Trivial case, comparing two constants.
+ int left_value = Smi::cast(*left_side.handle())->value();
+ int right_value = Smi::cast(*right_side.handle())->value();
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else { // Only one side is a constant Smi.
+ // If left side is a constant Smi, reverse the operands.
+ // Since one side is a constant Smi, conversion order does not matter.
+ if (left_side_constant_smi) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant Smi, inlining the case
+ // where both sides are Smis.
+ left_side.ToRegister();
+
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
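+      // Check whether the non-constant left operand is a smi and take
+      // the inlined fast path if it is.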
+ __ testl(left_side.reg(), Immediate(kSmiTagMask));
+ is_smi.Branch(zero, taken);
+
+      // Set up and call the compare stub.
+ CompareStub stub(cc, strict);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ testq(result.reg(), result.reg());
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_val);
+ // Test smi equality and comparison by signed int comparison.
+ if (IsUnsafeSmi(right_side.handle())) {
+ right_side.ToRegister();
+ __ cmpq(left_side.reg(), right_side.reg());
+ } else {
+ __ Cmp(left_side.reg(), right_side.handle());
+ }
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
+ }
+ } else if (cc == equal &&
+ (left_side_constant_null || right_side_constant_null)) {
+ // To make null checks efficient, we check if either the left side or
+ // the right side is the constant 'null'.
+ // If so, we optimize the code by inlining a null check instead of
+ // calling the (very) general runtime routine for checking equality.
+ Result operand = left_side_constant_null ? right_side : left_side;
+ right_side.Unuse();
+ left_side.Unuse();
+ operand.ToRegister();
+ __ Cmp(operand.reg(), Factory::null_value());
+ if (strict) {
+ operand.Unuse();
+ dest->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ dest->true_target()->Branch(equal);
+ __ Cmp(operand.reg(), Factory::undefined_value());
+ dest->true_target()->Branch(equal);
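+      // A smi is never loosely equal to null, so go to false for smis.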
+ __ testl(operand.reg(), Immediate(kSmiTagMask));
+ dest->false_target()->Branch(equal);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ dest->Split(not_zero);
+ }
+ } else { // Neither side is a constant Smi or null.
+ // If either side is a non-smi constant, skip the smi check.
+ bool known_non_smi =
+ (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+ (right_side.is_constant() && !right_side.handle()->IsSmi());
+ left_side.ToRegister();
+ right_side.ToRegister();
+
+ if (known_non_smi) {
+ // When non-smi, call out to the compare stub.
+ CompareStub stub(cc, strict);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Both zero and sign flag right.
+ answer.Unuse();
+ dest->Split(cc);
+ } else {
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Register right_reg = right_side.reg();
+
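+      // Or the operands together; the tag bits of the result are zero
+      // only if both operands are smis.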
+ __ movq(kScratchRegister, left_side.reg());
+ __ or_(kScratchRegister, right_side.reg());
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ is_smi.Branch(zero, taken);
+ // When non-smi, call out to the compare stub.
+ CompareStub stub(cc, strict);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
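+      // The sign and zero flags of the stub's result encode the
+      // comparison outcome, so compare it against zero and branch on cc.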
+ if (cc == equal) {
+ __ testq(answer.reg(), answer.reg());
+ } else {
+ __ cmpq(answer.reg(), Immediate(0));
+ }
+ answer.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_reg);
+ __ cmpq(left_side.reg(), right_side.reg());
+ right_side.Unuse();
+ left_side.Unuse();
+ dest->Split(cc);
+ }
+ }
+}
+
+
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+ SMI_CODE_IN_STUB,
+ SMI_CODE_INLINED
+};
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand in src register. Returns operand as floating point number
+ // in XMM register
+ static void LoadFloatOperand(MacroAssembler* masm,
+ Register src,
+ XMMRegister dst);
+ // Code pattern for loading floating point values. Input values must
+ // be either smi or heap number objects (fp values). Requirements:
+  // operand_1 on TOS+1, operand_2 on TOS+2; returns operands as
+ // floating point numbers in XMM registers.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2);
+
+ // Code pattern for loading floating point values onto the fp stack.
+ // Input values must be either smi or heap number objects (fp values).
+ // Requirements:
+ // Register version: operands in registers lhs and rhs.
+ // Stack version: operands on TOS+1 and TOS+2.
+ // Returns operands as floating point numbers on fp stack.
+ static void LoadFloatOperands(MacroAssembler* masm);
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+ // Code pattern for loading a floating point value and converting it
+ // to a 32 bit integer. Input value must be either a smi or a heap number
+ // object.
+  // Returns the operand as a 32-bit sign-extended integer in a
+  // general-purpose register.
+ static void LoadInt32Operand(MacroAssembler* masm,
+ const Operand& src,
+ Register dst);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in rax, operand_2 in rdx; falls through on float
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float);
+ // Allocate a heap number in new space with undefined value.
+ // Returns tagged pointer in result, or jumps to need_gc if new space is full.
+ static void AllocateHeapNumber(MacroAssembler* masm,
+ Label* need_gc,
+ Register scratch,
+ Register result);
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags)
+ : op_(op), mode_(mode), flags_(flags) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 13> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_);
+ }
+ void Generate(MacroAssembler* masm);
+};
+
+
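+// Call the generic binary operation stub from deferred code when the
+// inlined smi case fails; the result is left in the dst register.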
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+ DeferredInlineBinaryOperation(Token::Value op,
+ Register dst,
+ Register left,
+ Register right,
+ OverwriteMode mode)
+ : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+ set_comment("[ DeferredInlineBinaryOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Token::Value op_;
+ Register dst_;
+ Register left_;
+ Register right_;
+ OverwriteMode mode_;
+};
+
+
+void DeferredInlineBinaryOperation::Generate() {
+ __ push(left_);
+ __ push(right_);
+ GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
+ __ CallStub(&stub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+ SmiAnalysis* type,
+ OverwriteMode overwrite_mode) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Comment cmnt_token(masm_, Token::String(op));
+
+ if (op == Token::COMMA) {
+ // Simply discard left value.
+ frame_->Nip(1);
+ return;
+ }
+
+ // Set the flags based on the operation, type and loop nesting level.
+ GenericBinaryFlags flags;
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ flags = (loop_nesting() > 0)
+ ? SMI_CODE_INLINED
+ : SMI_CODE_IN_STUB;
+ break;
+
+ default:
+ // By default only inline the Smi check code for likely smis if this
+ // operation is part of a loop.
+ flags = ((loop_nesting() > 0) && type->IsLikelySmi())
+ ? SMI_CODE_INLINED
+ : SMI_CODE_IN_STUB;
+ break;
+ }
+
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+
+ if (op == Token::ADD) {
+ bool left_is_string = left.is_constant() && left.handle()->IsString();
+ bool right_is_string = right.is_constant() && right.handle()->IsString();
+ if (left_is_string || right_is_string) {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ Result answer;
+ if (left_is_string) {
+ if (right_is_string) {
+ // TODO(lrn): if both are constant strings
+ // -- do a compile time cons, if allocation during codegen is allowed.
+ answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+ } else {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+ }
+ } else if (right_is_string) {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+ }
+ frame_->Push(&answer);
+ return;
+ }
+ // Neither operand is known to be a string.
+ }
+
+ bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
+ bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
+ bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
+ bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+ bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
+
+ if (left_is_smi && right_is_smi) {
+ // Compute the constant result at compile time, and leave it on the frame.
+ int left_int = Smi::cast(*left.handle())->value();
+ int right_int = Smi::cast(*right.handle())->value();
+ if (FoldConstantSmis(op, left_int, right_int)) return;
+ }
+
+ if (left_is_non_smi || right_is_non_smi) {
+ // Set flag so that we go straight to the slow case, with no smi code.
+ generate_no_smi_code = true;
+ } else if (right_is_smi) {
+ ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
+ return;
+ } else if (left_is_smi) {
+ ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
+ return;
+ }
+
+ if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+ LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ // If we know the arguments aren't smis, use the binary operation stub
+ // that does not check for the fast smi case.
+ // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
+ if (generate_no_smi_code) {
+ flags = SMI_CODE_INLINED;
+ }
+ GenericBinaryOpStub stub(op, overwrite_mode, flags);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+ }
+}
+
+
+// Emit a LoadIC call to get the value from receiver and leave it in
+// dst. The receiver register is restored after the call.
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+ DeferredReferenceGetNamedValue(Register dst,
+ Register receiver,
+ Handle<String> name)
+ : dst_(dst), receiver_(receiver), name_(name) {
+ set_comment("[ DeferredReferenceGetNamedValue");
+ }
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Handle<String> name_;
+};
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+ __ push(receiver_);
+ __ Move(rcx, name_);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The call must be followed by a test rax instruction to indicate
+ // that the inobject property case was inlined.
+ //
+ // Store the delta to the map check instruction here in the test
+ // instruction. Use masm_-> instead of the __ macro since the
+ // latter can't return a value.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->testq(rax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ pop(receiver_);
+}
+
+
+
+
+// The result of src + value is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+ DeferredInlineSmiAdd(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAdd");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAdd::Generate() {
+ // Undo the optimistic add operation and call the shared stub.
+ __ subq(dst_, Immediate(value_));
+ __ push(dst_);
+ __ push(Immediate(value_));
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+ __ CallStub(&igostub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+// The result of value + src is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+ DeferredInlineSmiAddReversed(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAddReversed");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAddReversed::Generate() {
+ // Undo the optimistic add operation and call the shared stub.
+ __ subq(dst_, Immediate(value_));
+ __ push(Immediate(value_));
+ __ push(dst_);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+ __ CallStub(&igostub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+// The result of src - value is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative subtraction and call the
+// appropriate specialized stub for subtract. The result is left in
+// dst.
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+ DeferredInlineSmiSub(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSub");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiSub::Generate() {
+ // Undo the optimistic sub operation and call the shared stub.
+ __ addq(dst_, Immediate(value_));
+ __ push(dst_);
+ __ push(Immediate(value_));
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+ __ CallStub(&igostub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ SmiAnalysis* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
+ // NOTE: This is an attempt to inline (a bit) more of the code for
+ // some possible smi operations (like + and -) when (at least) one
+ // of the operands is a constant smi.
+ // Consumes the argument "operand".
+
+ // TODO(199): Optimize some special cases of operations involving a
+ // smi literal (multiply by 2, shift by 0, etc.).
+ if (IsUnsafeSmi(value)) {
+ Result unsafe_operand(value);
+ if (reversed) {
+ LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ overwrite_mode);
+ } else {
+ LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ overwrite_mode);
+ }
+ ASSERT(!operand->is_valid());
+ return;
+ }
+
+ // Get the literal value.
+ Smi* smi_value = Smi::cast(*value);
+
+ switch (op) {
+ case Token::ADD: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+
+ // Optimistically add. Call the specialized add stub if the
+ // result is not a smi or overflows.
+ DeferredCode* deferred = NULL;
+ if (reversed) {
+ deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ } else {
+ deferred = new DeferredInlineSmiAdd(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ }
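+      // Adding the tagged smi constant directly yields the tagged sum
+      // because the smi tag is zero; bail out to the deferred stub call
+      // on overflow or a non-smi result.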
+ __ movq(kScratchRegister, value, RelocInfo::NONE);
+ __ addl(operand->reg(), kScratchRegister);
+ deferred->Branch(overflow);
+ __ testl(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ deferred->BindExit();
+ frame_->Push(operand);
+ break;
+ }
+ // TODO(X64): Move other implementations from ia32 to here.
+ default: {
+ Result constant_operand(value);
+ if (reversed) {
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
+ }
+ break;
+ }
+ }
+ ASSERT(!operand->is_valid());
+}
+
+void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ // Special handling of div and mod because they use fixed registers.
+ if (op == Token::DIV || op == Token::MOD) {
+ // We need rax as the quotient register, rdx as the remainder
+ // register, neither left nor right in rax or rdx, and left copied
+ // to rax.
+ Result quotient;
+ Result remainder;
+ bool left_is_in_rax = false;
+ // Step 1: get rax for quotient.
+ if ((left->is_register() && left->reg().is(rax)) ||
+ (right->is_register() && right->reg().is(rax))) {
+ // One or both is in rax. Use a fresh non-rdx register for
+ // them.
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (fresh.reg().is(rdx)) {
+ remainder = fresh;
+ fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ }
+ if (left->is_register() && left->reg().is(rax)) {
+ quotient = *left;
+ *left = fresh;
+ left_is_in_rax = true;
+ }
+ if (right->is_register() && right->reg().is(rax)) {
+ quotient = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rax);
+ } else {
+ // Neither left nor right is in rax.
+ quotient = allocator_->Allocate(rax);
+ }
+ ASSERT(quotient.is_register() && quotient.reg().is(rax));
+ ASSERT(!(left->is_register() && left->reg().is(rax)));
+ ASSERT(!(right->is_register() && right->reg().is(rax)));
+
+ // Step 2: get rdx for remainder if necessary.
+ if (!remainder.is_valid()) {
+ if ((left->is_register() && left->reg().is(rdx)) ||
+ (right->is_register() && right->reg().is(rdx))) {
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (left->is_register() && left->reg().is(rdx)) {
+ remainder = *left;
+ *left = fresh;
+ }
+ if (right->is_register() && right->reg().is(rdx)) {
+ remainder = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rdx);
+ } else {
+ // Neither left nor right is in rdx.
+ remainder = allocator_->Allocate(rdx);
+ }
+ }
+ ASSERT(remainder.is_register() && remainder.reg().is(rdx));
+ ASSERT(!(left->is_register() && left->reg().is(rdx)));
+ ASSERT(!(right->is_register() && right->reg().is(rdx)));
+
+ left->ToRegister();
+ right->ToRegister();
+ frame_->Spill(rax);
+ frame_->Spill(rdx);
+
+ // Check that left and right are smi tagged.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ (op == Token::DIV) ? rax : rdx,
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ if (left->reg().is(right->reg())) {
+ __ testl(left->reg(), Immediate(kSmiTagMask));
+ } else {
+ // Use the quotient register as a scratch for the tag check.
+ if (!left_is_in_rax) __ movq(rax, left->reg());
+ left_is_in_rax = false; // About to destroy the value in rax.
+ __ or_(rax, right->reg());
+ ASSERT(kSmiTag == 0); // Adjust test if not the case.
+ __ testl(rax, Immediate(kSmiTagMask));
+ }
+ deferred->Branch(not_zero);
+
+ if (!left_is_in_rax) __ movq(rax, left->reg());
+ // Sign extend rax into rdx:rax.
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(right->reg(), right->reg());
+ deferred->Branch(zero);
+ // Divide rdx:rax by the right operand.
+ __ idiv(right->reg());
+
+ // Complete the operation.
+ if (op == Token::DIV) {
+ // Check for negative zero result. If result is zero, and divisor
+ // is negative, return a floating point negative zero. The
+ // virtual frame is unchanged in this block, so local control flow
+ // can use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ testq(left->reg(), left->reg());
+ __ j(not_zero, &non_zero_result);
+ __ testq(right->reg(), right->reg());
+ deferred->Branch(negative);
+ __ bind(&non_zero_result);
+ // Check for the corner case of dividing the most negative smi by
+      // -1. We cannot use the overflow flag, since it is not set by
+      // the idiv instruction.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmpq(rax, Immediate(0x40000000));
+ deferred->Branch(equal);
+ // Check that the remainder is zero.
+ __ testq(rdx, rdx);
+ deferred->Branch(not_zero);
+ // Tag the result and store it in the quotient register.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&quotient);
+ } else {
+ ASSERT(op == Token::MOD);
+ // Check for a negative zero result. If the result is zero, and
+ // the dividend is negative, return a floating point negative
+ // zero. The frame is unchanged in this block, so local control
+ // flow can use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ testq(rdx, rdx);
+ __ j(not_zero, &non_zero_result);
+ __ testq(left->reg(), left->reg());
+ deferred->Branch(negative);
+ __ bind(&non_zero_result);
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&remainder);
+ }
+ return;
+ }
+
+ // Special handling of shift operations because they use fixed
+ // registers.
+ if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+ // Move left out of rcx if necessary.
+ if (left->is_register() && left->reg().is(rcx)) {
+ *left = allocator_->Allocate();
+ ASSERT(left->is_valid());
+ __ movq(left->reg(), rcx);
+ }
+ right->ToRegister(rcx);
+ left->ToRegister();
+ ASSERT(left->is_register() && !left->reg().is(rcx));
+ ASSERT(right->is_register() && right->reg().is(rcx));
+
+ // We will modify right, it must be spilled.
+ frame_->Spill(rcx);
+
+ // Use a fresh answer register to avoid spilling the left operand.
+ Result answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+ // Check that both operands are smis using the answer register as a
+ // temporary.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ rcx,
+ overwrite_mode);
+ __ movq(answer.reg(), left->reg());
+ __ or_(answer.reg(), rcx);
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Untag both operands.
+ __ movq(answer.reg(), left->reg());
+ __ sar(answer.reg(), Immediate(kSmiTagSize));
+ __ sar(rcx, Immediate(kSmiTagSize));
+ // Perform the operation.
+ switch (op) {
+ case Token::SAR:
+ __ sar(answer.reg());
+ // No checks of result necessary
+ break;
+ case Token::SHR: {
+ Label result_ok;
+ __ shr(answer.reg());
+ // Check that the *unsigned* result fits in a smi. Neither of
+ // the two high-order bits can be set:
+ // * 0x80000000: high bit would be lost when smi tagging.
+ // * 0x40000000: this number would convert to negative when smi
+ // tagging.
+ // These two cases can only happen with shifts by 0 or 1 when
+ // handed a valid smi. If the answer cannot be represented by a
+ // smi, restore the left and right arguments, and jump to slow
+ // case. The low bit of the left argument may be lost, but only
+ // in a case where it is dropped anyway.
+ __ testl(answer.reg(), Immediate(0xc0000000));
+ __ j(zero, &result_ok);
+ ASSERT(kSmiTag == 0);
+ __ shl(rcx, Immediate(kSmiTagSize));
+ deferred->Jump();
+ __ bind(&result_ok);
+ break;
+ }
+ case Token::SHL: {
+ Label result_ok;
+ __ shl(answer.reg());
+ // Check that the *signed* result fits in a smi.
+ __ cmpq(answer.reg(), Immediate(0xc0000000));
+ __ j(positive, &result_ok);
+ ASSERT(kSmiTag == 0);
+ __ shl(rcx, Immediate(kSmiTagSize));
+ deferred->Jump();
+ __ bind(&result_ok);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ // Smi-tag the result in answer.
+ ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
+ __ lea(answer.reg(),
+ Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&answer);
+ return;
+ }
+
+ // Handle the other binary operations.
+ left->ToRegister();
+ right->ToRegister();
+ // A newly allocated register answer is used to hold the answer. The
+ // registers containing left and right are not modified so they don't
+ // need to be spilled in the fast case.
+ Result answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+
+ // Perform the smi tag check.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ if (left->reg().is(right->reg())) {
+ __ testl(left->reg(), Immediate(kSmiTagMask));
+ } else {
+ __ movq(answer.reg(), left->reg());
+ __ or_(answer.reg(), right->reg());
+ ASSERT(kSmiTag == 0); // Adjust test if not the case.
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ }
+ deferred->Branch(not_zero);
+ __ movq(answer.reg(), left->reg());
+ switch (op) {
+ case Token::ADD:
+ __ addl(answer.reg(), right->reg()); // Add optimistically.
+ deferred->Branch(overflow);
+ break;
+
+ case Token::SUB:
+ __ subl(answer.reg(), right->reg()); // Subtract optimistically.
+ deferred->Branch(overflow);
+ break;
+
+ case Token::MUL: {
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // Remove smi tag from the left operand (but keep sign).
+ // Left-hand operand has been copied into answer.
+ __ sar(answer.reg(), Immediate(kSmiTagSize));
+ // Do multiplication of smis, leaving result in answer.
+ __ imull(answer.reg(), right->reg());
+ // Go slow on overflows.
+ deferred->Branch(overflow);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case. The frame is unchanged
+ // in this block, so local control flow can use a Label rather
+ // than a JumpTarget.
+ Label non_zero_result;
+ __ testq(answer.reg(), answer.reg());
+ __ j(not_zero, &non_zero_result);
+ __ movq(answer.reg(), left->reg());
+ __ or_(answer.reg(), right->reg());
+ deferred->Branch(negative);
+ __ xor_(answer.reg(), answer.reg()); // Positive 0 is correct.
+ __ bind(&non_zero_result);
+ break;
+ }
+
+ case Token::BIT_OR:
+ __ or_(answer.reg(), right->reg());
+ break;
+
+ case Token::BIT_AND:
+ __ and_(answer.reg(), right->reg());
+ break;
+
+ case Token::BIT_XOR:
+ __ xor_(answer.reg(), right->reg());
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&answer);
}
+
#undef __
+#define __ ACCESS_MASM(masm)
+
+
+Handle<String> Reference::GetName() {
+ ASSERT(type_ == NAMED);
+ Property* property = expression_->AsProperty();
+ if (property == NULL) {
+ // Global variable reference treated as a named property reference.
+ VariableProxy* proxy = expression_->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+ return proxy->name();
+ } else {
+ Literal* raw_name = property->key()->AsLiteral();
+ ASSERT(raw_name != NULL);
+ return Handle<String>(String::cast(*raw_name->handle()));
+ }
+}
+
+
+void Reference::GetValue(TypeofState typeof_state) {
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_illegal());
+ MacroAssembler* masm = cgen_->masm();
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Load from Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->LoadFromSlot(slot, typeof_state);
+ break;
+ }
+
+ case NAMED: {
+ // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a
+ // typeof. If there is a chance that reference errors can be
+ // thrown below, we must distinguish between the two kinds of
+ // loads (typeof expression loads must not throw a reference
+ // error).
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ bool is_global = var != NULL;
+ ASSERT(!is_global || var->is_global());
+
+ // Do not inline the inobject property case for loads from the global
+ // object. Also do not inline for unoptimized code. This saves time
+ // in the code generator. Unoptimized code is toplevel code or code
+ // that is not in a loop.
+ if (is_global ||
+ cgen_->scope()->is_global_scope() ||
+ cgen_->loop_nesting() == 0) {
+ Comment cmnt(masm, "[ Load from named Property");
+ cgen_->frame()->Push(GetName());
+
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = cgen_->frame()->CallLoadIC(mode);
+ // A test rax instruction following the call signals that the
+ // inobject property case was inlined. Ensure that there is not
+ // a test rax instruction here.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ } else {
+ // Inline the inobject property case.
+ Comment cmnt(masm, "[ Inlined named property load");
+ Result receiver = cgen_->frame()->Pop();
+ receiver.ToRegister();
+
+ Result value = cgen_->allocator()->Allocate();
+ ASSERT(value.is_valid());
+ DeferredReferenceGetNamedValue* deferred =
+ new DeferredReferenceGetNamedValue(value.reg(),
+ receiver.reg(),
+ GetName());
+
+ // Check that the receiver is a heap object.
+ __ testl(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+
+ __ bind(deferred->patch_site());
+ // This is the map check instruction that will be patched (so we can't
+ // use the double underscore macro that may insert instructions).
+ // Initially use an invalid map to force a failure.
+ masm->Move(kScratchRegister, Factory::null_value());
+ masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ // This branch is always a forwards branch so it's always a fixed
+ // size which allows the assert below to succeed and patching to work.
+ deferred->Branch(not_equal);
+
+ // The delta from the patch label to the load offset must be
+ // statically known.
+ ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+ LoadIC::kOffsetToLoadInstruction);
+ // The initial (invalid) offset has to be large enough to force
+ // a 32-bit instruction encoding to allow patching with an
+ // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
+
+ __ IncrementCounter(&Counters::named_load_inline, 1);
+ deferred->BindExit();
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&value);
+ }
+ break;
+ }
+
+ case KEYED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof.
+ Comment cmnt(masm, "[ Load from keyed Property");
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ bool is_global = var != NULL;
+ ASSERT(!is_global || var->is_global());
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+
+ // TODO(x64): Implement inlined loads for keyed properties.
+ // Comment cmnt(masm, "[ Load from keyed Property");
+
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
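
The inlined named load emits a map check against a deliberately invalid map and a field load at a deliberately invalid offset; the IC machinery later patches both in place, which is why the distance to the load (kOffsetToLoadInstruction) is asserted to be fixed. A rough, hypothetical C++ model of that monomorphic cache shape, not the actual patching machinery:

    #include <cstdint>

    struct Map;                      // Hypothetical stand-ins for V8 types.
    struct HeapObject { Map* map; };

    struct InlineNamedLoad {
      Map* expected_map;   // Starts out as an impossible map value.
      int field_offset;    // Starts out as an impossible offset.

      // Fast path: if the receiver's map matches the patched-in map, read
      // the field directly; otherwise fall back to the generic load.
      intptr_t Load(HeapObject* receiver,
                    intptr_t (*slow_path)(HeapObject*)) {
        if (receiver->map != expected_map) return slow_path(receiver);
        char* raw = reinterpret_cast<char*>(receiver);
        return *reinterpret_cast<intptr_t*>(raw + field_offset);
      }
    };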
+
+
+void Reference::TakeValue(TypeofState typeof_state) {
+ // TODO(X64): This function is completely architecture independent. Move
+ // it somewhere shared.
+
+ // For non-constant frame-allocated slots, we invalidate the value in the
+ // slot. For all others, we fall back on GetValue.
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(!is_illegal());
+ if (type_ != SLOT) {
+ GetValue(typeof_state);
+ return;
+ }
+
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP ||
+ slot->type() == Slot::CONTEXT ||
+ slot->var()->mode() == Variable::CONST) {
+ GetValue(typeof_state);
+ return;
+ }
+
+ // Only non-constant, frame-allocated parameters and locals can reach
+ // here.
+ if (slot->type() == Slot::PARAMETER) {
+ cgen_->frame()->TakeParameterAt(slot->index());
+ } else {
+ ASSERT(slot->type() == Slot::LOCAL);
+ cgen_->frame()->TakeLocalAt(slot->index());
+ }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_illegal());
+ MacroAssembler* masm = cgen_->masm();
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Store to Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->StoreToSlot(slot, init_state);
+ break;
+ }
+
+ case NAMED: {
+ Comment cmnt(masm, "[ Store to named Property");
+ cgen_->frame()->Push(GetName());
+ Result answer = cgen_->frame()->CallStoreIC();
+ cgen_->frame()->Push(&answer);
+ break;
+ }
+
+ case KEYED: {
+ Comment cmnt(masm, "[ Store to keyed Property");
+
+ // TODO(x64): Implement inlined version of keyed stores.
+
+ Result answer = cgen_->frame()->CallKeyedStoreIC();
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed store.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ Cmp(rax, Factory::null_value());
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
+ __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+ __ j(above_equal, &not_string);
+ __ and_(rcx, Immediate(kStringSizeMask));
+ __ cmpq(rcx, Immediate(kShortStringTag));
+ __ j(not_equal, &true_result); // Empty string is always short.
+ __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ shr(rdx, Immediate(String::kShortLengthShift));
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ Cmp(rdx, Factory::heap_number_map());
+ __ j(not_equal, &true_result);
+ // TODO(x64): Don't use fp stack, use MMX registers?
+ __ fldz(); // Load zero onto fp stack
+ // Load heap-number double value onto fp stack
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ fucompp(); // Compare and pop both values.
+ __ movq(kScratchRegister, rax);
+ __ fnstsw_ax(); // Store fp status word in ax, no checking for exceptions.
+ __ testb(rax, Immediate(0x08)); // Test FP condition flag C3.
+ __ movq(rax, kScratchRegister);
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
+
+ // Return 1/0 for true/false in rax.
+ __ bind(&true_result);
+ __ movq(rax, Immediate(1));
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ xor_(rax, rax);
+ __ ret(1 * kPointerSize);
+}
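
The stub's heap-number leg returns false exactly for +0, -0 and NaN, which the fldz/fucompp sequence tests on the FPU stack. The same predicate in portable C++, for reference:

    #include <cmath>

    // ToBoolean for an untagged heap-number value: false iff +0, -0 or NaN.
    bool HeapNumberToBoolean(double value) {
      return value != 0.0 && !std::isnan(value);
    }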
+
+
+bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+ // TODO(X64): This method is identical to the ia32 version.
+ // Either find a reason to change it, or move it somewhere where it can be
+ // shared. (Notice: It assumes that a Smi can fit in an int).
+
+ Object* answer_object = Heap::undefined_value();
+ switch (op) {
+ case Token::ADD:
+ if (Smi::IsValid(left + right)) {
+ answer_object = Smi::FromInt(left + right);
+ }
+ break;
+ case Token::SUB:
+ if (Smi::IsValid(left - right)) {
+ answer_object = Smi::FromInt(left - right);
+ }
+ break;
+ case Token::MUL: {
+ double answer = static_cast<double>(left) * right;
+ if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
+ // If the product is zero and the non-zero factor is negative,
+ // the spec requires us to return floating point negative zero.
+ if (answer != 0 || (left >= 0 && right >= 0)) {
+ answer_object = Smi::FromInt(static_cast<int>(answer));
+ }
+ }
+ }
+ break;
+ case Token::DIV:
+ case Token::MOD:
+ break;
+ case Token::BIT_OR:
+ answer_object = Smi::FromInt(left | right);
+ break;
+ case Token::BIT_AND:
+ answer_object = Smi::FromInt(left & right);
+ break;
+ case Token::BIT_XOR:
+ answer_object = Smi::FromInt(left ^ right);
+ break;
+
+ case Token::SHL: {
+ int shift_amount = right & 0x1F;
+ if (Smi::IsValid(left << shift_amount)) {
+ answer_object = Smi::FromInt(left << shift_amount);
+ }
+ break;
+ }
+ case Token::SHR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ unsigned_left >>= shift_amount;
+ if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
+ answer_object = Smi::FromInt(unsigned_left);
+ }
+ break;
+ }
+ case Token::SAR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ if (left < 0) {
+ // Perform arithmetic shift of a negative number by
+ // complementing number, logical shifting, complementing again.
+ unsigned_left = ~unsigned_left;
+ unsigned_left >>= shift_amount;
+ unsigned_left = ~unsigned_left;
+ } else {
+ unsigned_left >>= shift_amount;
+ }
+ ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed.
+ answer_object = Smi::FromInt(unsigned_left); // Converted to signed.
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (answer_object == Heap::undefined_value()) {
+ return false;
+ }
+ frame_->Push(Handle<Object>(answer_object));
+ return true;
+}
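
The SAR case avoids right-shifting a negative int directly (implementation-defined in the C++ of this era) by complementing, shifting logically, and complementing again. A standalone sketch of that trick:

    #include <cstdint>

    // Arithmetic right shift emulated with logical (unsigned) shifts only.
    int32_t ArithmeticShiftRight(int32_t value, int shift_amount) {
      shift_amount &= 0x1F;  // Same masking as the constant folder above.
      uint32_t bits = static_cast<uint32_t>(value);
      if (value < 0) {
        // Complementing, shifting in zeros, and complementing back shifts
        // in ones, which is what an arithmetic shift of a negative does.
        return static_cast<int32_t>(~(~bits >> shift_amount));
      }
      return static_cast<int32_t>(bits >> shift_amount);
    }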
+
+
+
+
// End of CodeGenerator implementation.
-// -----------------------------------------------------------------------------
-// Implementation of stubs.
+void UnarySubStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ Label call_builtin, done;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ if (cc_ == equal) { // Both strict and non-strict.
+ Label slow; // Fallthrough label.
+ // Equality is almost reflexive (everything but NaN), so start by testing
+ // for "identity and not NaN".
+ {
+ Label not_identical;
+ __ cmpq(rax, rdx);
+ __ j(not_equal, &not_identical);
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+
+ Label return_equal;
+ Label heap_number;
+ // If it's not a heap number, then return equal.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ __ bind(&return_equal);
+ __ xor_(rax, rax);
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read double representation into rax.
+ __ movq(rbx, 0x7ff0000000000000, RelocInfo::NONE);
+ __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Test that exponent bits are all set.
+ __ or_(rbx, rax);
+ __ cmpq(rbx, rax);
+ __ j(not_equal, &return_equal);
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ shl(rax, Immediate(12));
+ // If all bits in the mantissa are zero the number is Infinity, and
+ // we return zero. Otherwise it is a NaN, and we return non-zero.
+ // So just return rax.
+ __ ret(0);
+
+ __ bind(&not_identical);
+ }
+
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ if (strict_) {
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ {
+ Label not_smis;
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ movq(rcx, Immediate(kSmiTagMask));
+ __ and_(rcx, rax);
+ __ testq(rcx, rdx);
+ __ j(not_zero, &not_smis);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ ASSERT_EQ(1, kSmiTagMask);
+ // rcx still holds rax & kSmiTag, which is either zero or one.
+ __ decq(rcx); // If rax is a smi, all 1s, else all 0s.
+ __ movq(rbx, rdx);
+ __ xor_(rbx, rax);
+ __ and_(rbx, rcx); // rbx holds either 0 or rax ^ rdx.
+ __ xor_(rbx, rax);
+ // if rax was smi, rbx is now rdx, else rax.
+
+ // Check if the non-smi operand is a heap number.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+        // Return non-equal (rbx is not zero)
+ __ movq(rax, rbx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ }
+
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different
+ // There is no test for undetectability in strict equality.
+
+ // If the first object is a JS object, we have done pointer comparison.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object);
+ // Return non-zero (rax is not zero)
+ Label return_not_equal;
+ ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ }
+ __ bind(&slow);
+ }
+
+ // Push arguments below the return address to prepare jump to builtin.
+ __ pop(rcx);
+ __ push(rax);
+ __ push(rdx);
+ __ push(rcx);
+
+ // Inlined floating point compare.
+ // Call builtin if operands are not floating point or smi.
+ Label check_for_symbols;
+ // Push arguments on stack, for helper functions.
+ FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols);
+ FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
+ __ FCmp();
+
+ // Jump to builtin for NaN.
+ __ j(parity_even, &call_builtin);
+
+ // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
+ Label below_lbl, above_lbl;
+ // use rdx, rax to convert unsigned to signed comparison
+ __ j(below, &below_lbl);
+ __ j(above, &above_lbl);
+
+ __ xor_(rax, rax); // equal
+ __ ret(2 * kPointerSize);
+
+ __ bind(&below_lbl);
+ __ movq(rax, Immediate(-1));
+ __ ret(2 * kPointerSize);
+
+ __ bind(&above_lbl);
+ __ movq(rax, Immediate(1));
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+
+ // Fast negative check for symbol-to-symbol equality.
+ __ bind(&check_for_symbols);
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &call_builtin, rax);
+ BranchIfNonSymbol(masm, &call_builtin, rdx);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register rax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(2 * kPointerSize);
+ }
+
+ __ bind(&call_builtin);
+ // must swap argument order
+ __ pop(rcx);
+ __ pop(rdx);
+ __ pop(rax);
+ __ push(rdx);
+ __ push(rax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc_ == less || cc_ == less_equal) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
+ ncr = LESS;
+ }
+ __ push(Immediate(Smi::FromInt(ncr)));
+ }
+
+ // Restore return address on the stack.
+ __ push(rcx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
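
The identity path classifies a heap number as NaN by checking that all eleven exponent bits are set and the 52-bit mantissa is non-zero; the generated code does this with an or/cmp against 0x7ff0000000000000 followed by a 12-bit left shift. An equivalent check in plain C++:

    #include <cstdint>
    #include <cstring>

    // NaN iff the exponent bits (62..52) are all set and the mantissa is
    // non-zero; shifting out the sign and exponent isolates the mantissa.
    bool IsNaNBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bool exponent_all_ones =
          (bits & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
      bool mantissa_non_zero = (bits << 12) != 0;
      return exponent_all_ones && mantissa_non_zero;
    }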
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object) {
+ __ testl(object, Immediate(kSmiTagMask));
+ __ j(zero, label);
+ __ movq(kScratchRegister, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ and_(kScratchRegister, Immediate(kIsSymbolMask | kIsNotStringMask));
+ __ cmpb(kScratchRegister, Immediate(kSymbolTag | kStringTag));
+ __ j(not_equal, label);
+}
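
BranchIfNonSymbol folds two questions into one masked compare on the instance-type byte: the value must be in the string range and carry the symbol bit. A sketch with illustrative bit values (the real masks and tags come from the V8 object headers):

    #include <cstdint>

    // Illustrative flag layout; actual bit positions live in objects.h.
    const uint8_t kIsNotStringMask = 0x80;  // Clear for strings.
    const uint8_t kStringTag       = 0x00;
    const uint8_t kIsSymbolMask    = 0x40;  // Set for symbols.
    const uint8_t kSymbolTag       = 0x40;

    // True iff the instance type denotes a symbol: a string with the
    // symbol bit set, checked with a single masked compare.
    bool IsSymbolType(uint8_t instance_type) {
      return (instance_type & (kIsSymbolMask | kIsNotStringMask)) ==
             (kSymbolTag | kStringTag);
    }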
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ int position) {
+ // Push the arguments ("left-to-right") on the stack.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Record the position for debugging purposes.
+ CodeForSourcePosition(position);
+
+ // Use the shared code stub to call the function.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ Result answer = frame_->CallStub(&call_function, arg_count + 1);
+ // Restore context and replace function on the stack with the
+ // result of the stub invocation.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ j(not_equal, &runtime);
+ // Value in rcx is Smi encoded.
+
+ // Patch the arguments.length and the parameters pointer.
+ __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ lea(rdx, Operand(rdx, rcx, times_4, kDisplacement));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in rdx and the parameter count is in rax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
+ __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register rax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmpq(rdx, rax);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ // Shifting code depends on SmiEncoding being equivalent to left shift:
+ // we multiply by four to get pointer alignment.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ lea(rbx, Operand(rbp, rax, times_4, 0));
+ __ neg(rdx);
+ __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpq(rdx, rcx);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ // Shifting code depends on SmiEncoding being equivalent to left shift:
+ // we multiply by four to get pointer alignment.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ lea(rbx, Operand(rbx, rcx, times_4, 0));
+ __ neg(rdx);
+ __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(rbx); // Return address.
+ __ push(rdx);
+ __ push(rbx);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+}
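
Both lookups above scale smi-encoded counts with times_4; since a smi here is the value shifted left by one, that works out to value * 8, one pointer per argument on x64. A sketch of the resulting address arithmetic, with hypothetical parameter names:

    #include <cstdint>

    // A pointer is 8 bytes and a smi is the value shifted left by 1, so
    // scaling a *tagged* count by 4 (the 'times_4' in the lea above)
    // yields value * 8 == value * kPointerSize.
    const int kSmiScale = 4;

    // Address of argument 'key' counted back from the end of the argument
    // area: base + count * 8 - key * 8 + displacement, on tagged values.
    uintptr_t ArgumentAddress(uintptr_t frame_base,
                              intptr_t smi_count,  // tagged argument count
                              intptr_t smi_key,    // tagged index
                              int displacement) {
      uintptr_t arguments_end = frame_base + smi_count * kSmiScale;
      return arguments_end - smi_key * kSmiScale + displacement;
    }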
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ j(equal, &adaptor);
+
+ // Nothing to do: The formal number of parameters has already been
+ // passed in register rax by calling function. Just return it.
+ __ ret(0);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame and return it.
+ __ bind(&adaptor);
+ __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ret(0);
+}
-// Stub classes have public member named masm, not masm_.
-#define __ ACCESS_MASM(masm)
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Check that stack should contain frame pointer, code pointer, state and
+ // Check that stack should contain next handler, frame pointer, state and
// return address in that order.
ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
StackHandlerConstants::kStateOffset);
@@ -445,13 +5849,11 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
ExternalReference handler_address(Top::k_handler_address);
__ movq(kScratchRegister, handler_address);
- __ movq(rdx, Operand(kScratchRegister, 0));
+ __ movq(rsp, Operand(kScratchRegister, 0));
// get next in chain
- __ movq(rcx, Operand(rdx, 0));
+ __ pop(rcx);
__ movq(Operand(kScratchRegister, 0), rcx);
- __ movq(rsp, rdx);
__ pop(rbp); // pop frame pointer
- __ pop(rdx); // remove code pointer
__ pop(rdx); // remove state
// Before returning we restore the context from the frame pointer if not NULL.
@@ -462,12 +5864,10 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ j(equal, &skip);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
-
__ ret(0);
}
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_out_of_memory_exception,
@@ -534,7 +5934,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label retry;
// If the returned exception is RETRY_AFTER_GC continue at retry label
ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testq(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry);
Label continue_exception;
@@ -616,6 +6016,34 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
}
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ testl(rdi, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+  // Go to the slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
@@ -781,6 +6209,591 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
+// -----------------------------------------------------------------------------
+// Implementation of stubs.
+
+// Stub classes have public member named masm, not masm_.
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(rax);
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(rax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
+}
+
+
+void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
+ Label* need_gc,
+ Register scratch,
+ Register result) {
+ ExternalReference allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ ExternalReference allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ __ movq(scratch, allocation_top); // scratch: address of allocation top.
+ __ movq(result, Operand(scratch, 0));
+ __ addq(result, Immediate(HeapNumber::kSize)); // New top.
+ __ movq(kScratchRegister, allocation_limit);
+ __ cmpq(result, Operand(kScratchRegister, 0));
+ __ j(above, need_gc);
+
+ __ movq(Operand(scratch, 0), result); // store new top
+ __ addq(result, Immediate(kHeapObjectTag - HeapNumber::kSize));
+ __ movq(kScratchRegister,
+ Factory::heap_number_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ // Tag old top and use as result.
+}
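
AllocateHeapNumber is a bump allocation against the new-space top and limit: advance the top by the object size and fall back to the GC path if the limit is exceeded. A hypothetical C++ sketch of that allocation pattern:

    #include <cstddef>

    // Illustrative bump allocator; 'top' and 'limit' stand in for the
    // new-space allocation top/limit addresses used by the stub.
    char* BumpAllocate(char** top, char* limit, size_t size, bool* needs_gc) {
      char* result = *top;
      char* new_top = result + size;
      if (new_top > limit) {     // Out of linear space: caller must GC.
        *needs_gc = true;
        return nullptr;
      }
      *top = new_top;            // Commit the new top.
      *needs_gc = false;
      return result;             // Untagged start of the new object.
    }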
+
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register src,
+ XMMRegister dst) {
+ Label load_smi, done;
+
+ __ testl(src, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi);
+ __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi);
+ __ sar(src, Immediate(kSmiTagSize));
+ __ cvtlsi2sd(dst, src);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2) {
+ __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
+ LoadFloatOperand(masm, kScratchRegister, dst1);
+ __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ LoadFloatOperand(masm, kScratchRegister, dst2);
+}
+
+
+void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
+ const Operand& src,
+ Register dst) {
+ // TODO(X64): Convert number operands to int32 values.
+ // Don't convert a Smi to a double first.
+ UNIMPLEMENTED();
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
+ Label load_smi_1, load_smi_2, done_load_1, done;
+ __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_1);
+ __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+ __ bind(&done_load_1);
+
+ __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_2);
+ __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_1);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ push(kScratchRegister);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(kScratchRegister);
+ __ jmp(&done_load_1);
+
+ __ bind(&load_smi_2);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ push(kScratchRegister);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(kScratchRegister);
+
+ __ bind(&done);
+}
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
+ __ testl(lhs, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_lhs);
+ __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
+ __ bind(&done_load_lhs);
+
+ __ testl(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_rhs);
+ __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_lhs);
+ ASSERT(kSmiTagSize == 1);
+ ASSERT(kSmiTag == 0);
+ __ lea(kScratchRegister, Operand(lhs, lhs, times_1, 0));
+ __ push(kScratchRegister);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(kScratchRegister);
+ __ jmp(&done_load_lhs);
+
+ __ bind(&load_smi_rhs);
+ __ movq(kScratchRegister, rhs);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ push(kScratchRegister);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(kScratchRegister);
+
+ __ bind(&done);
+}
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float) {
+ Label test_other, done;
+  // Test that both operands are numbers (smis or heap numbers);
+  // otherwise jump to the non_float label.
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &test_other); // argument in rdx is OK
+ __ movq(kScratchRegister,
+ Factory::heap_number_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(kScratchRegister, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ j(not_equal, non_float); // argument in rdx is not a number -> NaN
+
+ __ bind(&test_other);
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &done); // argument in rax is OK
+ __ movq(kScratchRegister,
+ Factory::heap_number_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(kScratchRegister, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, non_float); // argument in rax is not a number -> NaN
+
+ // Fall-through: Both operands are numbers.
+ __ bind(&done);
+}
+
+
+const char* GenericBinaryOpStub::GetName() {
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
+}
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // Perform fast-case smi code for the operation (rax <op> rbx) and
+ // leave result in register rax.
+
+ // Prepare the smi check of both operands by or'ing them together
+ // before checking against the smi mask.
+ __ movq(rcx, rbx);
+ __ or_(rcx, rax);
+
+ switch (op_) {
+ case Token::ADD:
+ __ addl(rax, rbx); // add optimistically
+ __ j(overflow, slow);
+ __ movsxlq(rax, rax); // Sign extend eax into rax.
+ break;
+
+ case Token::SUB:
+ __ subl(rax, rbx); // subtract optimistically
+ __ j(overflow, slow);
+ __ movsxlq(rax, rax); // Sign extend eax into rax.
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Sign extend rax into rdx:rax
+ // (also sign extends eax into edx if eax is Smi).
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(rbx, rbx);
+ __ j(zero, slow);
+ break;
+
+ default:
+ // Fall-through to smi check.
+ break;
+ }
+
+ // Perform the actual smi check.
+ ASSERT(kSmiTag == 0); // adjust zero check if not the case
+ __ testl(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, slow);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ // Do nothing here.
+ break;
+
+ case Token::MUL:
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ ASSERT(kSmiTag == 0); // adjust code below if not the case
+ // Remove tag from one of the operands (but keep sign).
+ __ sar(rax, Immediate(kSmiTagSize));
+ // Do multiplication.
+ __ imull(rax, rbx); // multiplication of smis; result in eax
+ // Go slow on overflows.
+ __ j(overflow, slow);
+ // Check for negative zero result.
+ __ movsxlq(rax, rax); // Sign extend eax into rax.
+ __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y
+ break;
+
+ case Token::DIV:
+ // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
+ __ idiv(rbx);
+ // Check that the remainder is zero.
+ __ testq(rdx, rdx);
+ __ j(not_zero, slow);
+ // Check for the corner case of dividing the most negative smi
+ // by -1. We cannot use the overflow flag, since it is not set
+ // by idiv instruction.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ // TODO(X64): TODO(Smi): Smi implementation dependent constant.
+      // The value 0x40000000 is the result of Smi::kMinValue / -1,
+      // which is 1 << 30 and does not fit in a smi.
+ __ cmpq(rax, Immediate(0x40000000));
+ __ j(equal, slow);
+ // Check for negative zero result.
+      __ NegativeZeroTest(rax, rcx, slow);  // use rcx = x | y
+ // Tag the result and store it in register rax.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ break;
+
+ case Token::MOD:
+ // Divide rdx:rax by rbx.
+ __ idiv(rbx);
+ // Check for negative zero result.
+      __ NegativeZeroTest(rdx, rcx, slow);  // use rcx = x | y
+ // Move remainder to register rax.
+ __ movq(rax, rdx);
+ break;
+
+ case Token::BIT_OR:
+ __ or_(rax, rbx);
+ break;
+
+ case Token::BIT_AND:
+ __ and_(rax, rbx);
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT_EQ(0, kSmiTag);
+ __ xor_(rax, rbx);
+ break;
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+      // Move the second operand into register rcx.
+ __ movq(rcx, rbx);
+ // Remove tags from operands (but keep sign).
+ __ sar(rax, Immediate(kSmiTagSize));
+ __ sar(rcx, Immediate(kSmiTagSize));
+ // Perform the operation.
+ switch (op_) {
+ case Token::SAR:
+ __ sar(rax);
+ // No checks of result necessary
+ break;
+ case Token::SHR:
+ __ shrl(rax); // rcx is implicit shift register
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+          // - 0x40000000: this number would convert to negative when
+          //   smi tagging.
+          // These two cases can only happen with shifts by 0 or 1 when
+          // handed a valid smi.
+ __ testq(rax, Immediate(0xc0000000));
+ __ j(not_zero, slow);
+ break;
+ case Token::SHL:
+ __ shll(rax);
+ // TODO(Smi): Significant change if Smi changes.
+ // Check that the *signed* result fits in a smi.
+ // It does, if the 30th and 31st bits are equal, since then
+ // shifting the SmiTag in at the bottom doesn't change the sign.
+ ASSERT(kSmiTagSize == 1);
+ __ cmpl(rax, Immediate(0xc0000000));
+ __ j(sign, slow);
+ __ movsxlq(rax, rax); // Extend new sign of eax into rax.
+ break;
+ default:
+ UNREACHABLE();
+ }
+      // Tag the result and store it in register rax.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
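
The DIV case guards against one overflow the CPU flags cannot report: the most negative smi divided by -1 produces 2^30, one past the largest smi, hence the explicit compare against 0x40000000 before tagging. A quick standalone check under the 31-bit smi range assumed here:

    #include <cstdint>

    const int32_t kSmiMaxValue = 0x3fffffff;         // 2^30 - 1
    const int32_t kSmiMinValue = -kSmiMaxValue - 1;  // -2^30

    // True if the quotient of two smi payloads is itself a valid smi.
    // The only non-representable case for exact division is
    // kSmiMinValue / -1, which yields 2^30.
    bool SmiDivideFits(int32_t left, int32_t right) {
      if (right == 0) return false;  // Handled by the slow path.
      if (left == kSmiMinValue && right == -1) return false;
      return true;
    }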
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (flags_ == SMI_CODE_IN_STUB) {
+ // The fast case smi code wasn't inlined in the stub caller
+ // code. Generate it here to speed up common operations.
+ Label slow;
+ __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
+ __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
+ GenerateSmiCode(masm, &slow);
+ __ ret(2 * kPointerSize); // remove both operands
+
+ // Too bad. The fast case smi code didn't succeed.
+ __ bind(&slow);
+ }
+
+ // Setup registers.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize)); // get y
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // get x
+
+ // Floating point case.
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ // rax: y
+ // rdx: x
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+ // Fast-case: Both operands are numbers.
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ __ movq(rax, rdx);
+ // Fall through!
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm,
+ &call_runtime,
+ rcx,
+ rax);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // xmm4 and xmm5 are volatile XMM registers.
+ FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm4, xmm5); break;
+ case Token::SUB: __ subsd(xmm4, xmm5); break;
+ case Token::MUL: __ mulsd(xmm4, xmm5); break;
+ case Token::DIV: __ divsd(xmm4, xmm5); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
+ __ ret(2 * kPointerSize);
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+ // TODO(X64): Don't convert a Smi to float and then back to int32
+ // afterwards.
+ FloatingPointHelper::LoadFloatOperands(masm);
+
+ Label skip_allocation, non_smi_result, operand_conversion_failure;
+
+ // Reserve space for converted numbers.
+ __ subq(rsp, Immediate(2 * kPointerSize));
+
+ bool use_sse3 = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+ if (use_sse3) {
+ // Truncate the operands to 32-bit integers and check for
+ // exceptions in doing so.
+ CpuFeatures::Scope scope(CpuFeatures::SSE3);
+ __ fisttp_s(Operand(rsp, 0 * kPointerSize));
+ __ fisttp_s(Operand(rsp, 1 * kPointerSize));
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(1));
+ __ j(not_zero, &operand_conversion_failure);
+ } else {
+ // Check if right operand is int32.
+ __ fist_s(Operand(rsp, 0 * kPointerSize));
+ __ fild_s(Operand(rsp, 0 * kPointerSize));
+ __ fucompp();
+ __ fnstsw_ax();
+ __ sahf(); // TODO(X64): Not available.
+ __ j(not_zero, &operand_conversion_failure);
+ __ j(parity_even, &operand_conversion_failure);
+
+ // Check if left operand is int32.
+ __ fist_s(Operand(rsp, 1 * kPointerSize));
+ __ fild_s(Operand(rsp, 1 * kPointerSize));
+ __ fucompp();
+ __ fnstsw_ax();
+ __ sahf(); // TODO(X64): Not available. Test bits in ax directly
+ __ j(not_zero, &operand_conversion_failure);
+ __ j(parity_even, &operand_conversion_failure);
+ }
+
+ // Get int32 operands and perform bitop.
+ __ pop(rcx);
+ __ pop(rax);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(rax, rcx); break;
+ case Token::BIT_AND: __ and_(rax, rcx); break;
+ case Token::BIT_XOR: __ xor_(rax, rcx); break;
+ case Token::SAR: __ sar(rax); break;
+ case Token::SHL: __ shl(rax); break;
+ case Token::SHR: __ shr(rax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ testl(rax, Immediate(0xc0000000));
+ __ j(not_zero, &non_smi_result);
+ } else {
+ // Check if result fits in a smi.
+ __ cmpl(rax, Immediate(0xc0000000));
+ __ j(negative, &non_smi_result);
+ }
+ // Tag smi result and return.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ __ ret(2 * kPointerSize);
+
+ // All ops except SHR return a signed int32 that we load in a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
+ rcx, rax);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ fild_s(Operand(rsp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ ret(2 * kPointerSize);
+ }
+
+ // Clear the FPU exception flag and reset the stack before calling
+ // the runtime system.
+ __ bind(&operand_conversion_failure);
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ if (use_sse3) {
+ // If we've used the SSE3 instructions for truncating the
+ // floating point values to integers and it failed, we have a
+ // pending #IA exception. Clear it.
+ __ fnclex();
+ } else {
+ // The non-SSE3 variant does early bailout if the right
+ // operand isn't a 32-bit integer, so we may have a single
+ // value on the FPU stack we need to get rid of.
+ __ ffree(0);
+ }
+
+ // SHR should return uint32 - go to runtime for non-smi/negative result.
+ if (op_ == Token::SHR) {
+ __ bind(&non_smi_result);
+ }
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result.
+ __ bind(&call_runtime);
+ // Disable builtin-calls until JS builtins can compile and run.
+ __ Abort("Disabled until builtins compile and run.");
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 19ad8a3667..af82de8ff1 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -273,6 +273,14 @@ class CodeGenState BASE_EMBEDDED {
};
+// -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+ NO_ARGUMENTS_ALLOCATION,
+ EAGER_ARGUMENTS_ALLOCATION,
+ LAZY_ARGUMENTS_ALLOCATION
+};
// -------------------------------------------------------------------------
@@ -383,6 +391,12 @@ class CodeGenerator: public AstVisitor {
// target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value);
+ // Returns the arguments allocation mode.
+ ArgumentsAllocationMode ArgumentsMode() const;
+
+ // Store the arguments object and allocate it if necessary.
+ Result StoreArgumentsObject(bool initial);
+
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index cb97ff60af..fe224ad998 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -32,40 +32,81 @@
namespace v8 {
namespace internal {
-StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
- StackFrame::State* b) {
- // TODO(X64): UNIMPLEMENTED
- return NONE;
-}
-int JavaScriptFrame::GetProvidedParametersCount() const {
- UNIMPLEMENTED();
- return 0;
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
+ }
+ // The marker and function offsets overlap. If the marker isn't a
+ // smi then the frame is a JavaScript frame -- and the marker is
+ // really the function.
+ const int offset = StandardFrameConstants::kMarkerOffset;
+ Object* marker = Memory::Object_at(state->fp + offset);
+ if (!marker->IsSmi()) return JAVA_SCRIPT;
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
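
ComputeType exploits the overlap of the marker and function slots: a JavaScript frame stores the function there (a tagged heap pointer), while internal frames store a smi whose payload is the frame type. A small sketch of that discrimination under the tag-bit-0 smi encoding, with illustrative names:

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // Smis have the low bit clear.

    enum FrameKind { JAVA_SCRIPT_FRAME, INTERNAL_FRAME };

    // The overlapping slot holds either a heap-object pointer (the
    // JSFunction of a JavaScript frame) or a smi marker whose payload
    // encodes the exact internal frame type.
    FrameKind ClassifyMarkerSlot(intptr_t slot_value) {
      if ((slot_value & kSmiTagMask) != 0) return JAVA_SCRIPT_FRAME;
      return INTERNAL_FRAME;
    }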
-StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
- UNIMPLEMENTED();
- return NONE;
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+ if (fp == 0) return NONE;
+ // Compute the stack pointer.
+ Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+ // Fill in the state.
+ state->fp = fp;
+ state->sp = sp;
+ state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ // Determine frame type.
+ if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+ return EXIT_DEBUG;
+ } else {
+ return EXIT;
+ }
}
-byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED();
- return NULL;
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ return ComputeParametersCount();
}
void ExitFrame::Iterate(ObjectVisitor* a) const {
- UNIMPLEMENTED();
+ // Exit frames on X64 do not contain any pointers. The arguments
+ // are traversed as part of the expression stack of the calling
+ // frame.
}
byte* InternalFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED();
- return NULL;
+ // Internal frames have no arguments. The stack pointer of the
+ // caller is at a fixed offset from the frame pointer.
+ return fp() + StandardFrameConstants::kCallerSPOffset;
}
byte* JavaScriptFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED();
- return NULL;
+ int arguments;
+ if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
+ // The arguments for cooked frames are traversed as if they were
+ // expression stack elements of the calling frame. The reason for
+ // this rather strange decision is that we cannot access the
+ // function during mark-compact GCs when the stack is cooked.
+ // In fact accessing heap objects (like function->shared() below)
+ // at all during GC is problematic.
+ arguments = 0;
+ } else {
+ // Compute the number of arguments by getting the number of formal
+ // parameters of the function. We must remember to take the
+ // receiver into account (+1).
+ JSFunction* function = JSFunction::cast(this->function());
+ arguments = function->shared()->formal_parameter_count() + 1;
+ }
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments * kPointerSize);
+}
+
+
+byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ const int arguments = Smi::cast(GetExpression(0))->value();
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments + 1) * kPointerSize;
}
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 31d8a2d9e2..d4ab2c62e7 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -59,12 +59,12 @@ class StackHandlerConstants : public AllStatic {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerFPOffset = -6 * kPointerSize;
- static const int kFunctionArgOffset = 1 * kPointerSize;
- static const int kReceiverArgOffset = 2 * kPointerSize;
- static const int kArgcOffset = 3 * kPointerSize;
- static const int kArgvOffset = 4 * kPointerSize;
+ static const int kFunctionArgOffset = +3 * kPointerSize;
+ static const int kReceiverArgOffset = +4 * kPointerSize;
+ static const int kArgcOffset = +5 * kPointerSize;
+ static const int kArgvOffset = +6 * kPointerSize;
};
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 858f3a28d7..abaffb338d 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -35,6 +35,12 @@
namespace v8 {
namespace internal {
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
void KeyedLoadIC::ClearInlinedVersion(Address address) {
UNIMPLEMENTED();
@@ -48,17 +54,37 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
UNIMPLEMENTED();
}
+
void KeyedLoadIC::Generate(MacroAssembler* masm,
ExternalReference const& f) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+ // Move the return address below the arguments.
+ __ pop(rbx);
+ __ push(rcx);
+ __ push(rax);
+ __ push(rbx);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(f, 2);
}
+
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC0AB)); // Debugging aid.
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC1AB)); // Debugging aid.
}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
@@ -118,15 +144,32 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
}
void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
+ // -- rsp[16] : receiver
+ // -----------------------------------
+
+ // Move the return address below the arguments.
+ __ pop(rcx);
+ __ push(Operand(rsp, 1 * kPointerSize));
+ __ push(Operand(rsp, 1 * kPointerSize));
+ __ push(rax);
+ __ push(rcx);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(f, 3);
}
void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC2AB)); // Debugging aid.
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC3AB)); // Debugging aid.
}
Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
@@ -137,36 +180,121 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
return NULL;
}
+
+void CallIC::Generate(MacroAssembler* masm,
+ int argc,
+ ExternalReference const& f) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ // Get the name of the function to call from the stack.
+ // 2 ~ receiver, return address.
+ __ movq(rbx, Operand(rsp, (argc + 2) * kPointerSize));
+
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push the receiver and the name of the function.
+ __ push(rdx);
+ __ push(rbx);
+
+ // Call the entry.
+ CEntryStub stub;
+ __ movq(rax, Immediate(2));
+ __ movq(rbx, f);
+ __ CallStub(&stub);
+
+ // Move result to rdi and exit the internal frame.
+ __ movq(rdi, rax);
+ __ LeaveInternalFrame();
+
+ // Check if the receiver is a global object of some sort.
+ Label invoke, global;
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &invoke);
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movzxbq(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
+ __ cmpq(rcx, Immediate(static_cast<int8_t>(JS_GLOBAL_OBJECT_TYPE)));
+ __ j(equal, &global);
+ __ cmpq(rcx, Immediate(static_cast<int8_t>(JS_BUILTINS_OBJECT_TYPE)));
+ __ j(not_equal, &invoke);
+
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ bind(&invoke);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+}
+
+void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) {
+ UNIMPLEMENTED();
+}
+
+void CallIC::GenerateNormal(MacroAssembler* a, int b) {
+ UNIMPLEMENTED();
+}
+
+
+const int LoadIC::kOffsetToLoadInstruction = 20;
+
+
void LoadIC::ClearInlinedVersion(Address address) {
UNIMPLEMENTED();
}
+
void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ // Move the return address below the arguments.
+ __ pop(rbx);
+ __ push(rax);
+ __ push(rcx);
+ __ push(rbx);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(f, 2);
}
+
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC4AB)); // Debugging aid.
}
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC5AB)); // Debugging aid.
}
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC6AB)); // Debugging aid.
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC7AB)); // Debugging aid.
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC8AB)); // Debugging aid.
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC9AB)); // Debugging aid.
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
@@ -175,15 +303,35 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
}
void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ // Move the return address below the arguments.
+ __ pop(rbx);
+ __ push(Operand(rsp, 0));
+ __ push(rcx);
+ __ push(rax);
+ __ push(rbx);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(f, 3);
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xCAAB)); // Debugging aid.
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xCBAB)); // Debugging aid.
}
+
+#undef __
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/jump-target-x64.cc b/deps/v8/src/x64/jump-target-x64.cc
index 209aa2d307..b804044ec7 100644
--- a/deps/v8/src/x64/jump-target-x64.cc
+++ b/deps/v8/src/x64/jump-target-x64.cc
@@ -25,3 +25,344 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+ ASSERT(cgen()->has_valid_frame());
+ // Live non-frame registers are not allowed at unconditional jumps
+ // because we have no way of invalidating the corresponding results
+ // which are still live in the C++ code.
+ ASSERT(cgen()->HasValidEntryRegisters());
+
+ if (is_bound()) {
+ // Backward jump. There is an expected frame to merge to.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ } else if (entry_frame_ != NULL) {
+ // Forward jump with a preconfigured entry frame. Assert the
+ // current frame matches the expected one and jump to the block.
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ } else {
+ // Forward jump. Remember the current frame and emit a jump to
+ // its merge code.
+ AddReachingFrame(cgen()->frame());
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ __ jmp(&merge_labels_.last());
+ }
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint b) {
+ ASSERT(cgen() != NULL);
+ ASSERT(cgen()->has_valid_frame());
+
+ if (is_bound()) {
+ ASSERT(direction_ == BIDIRECTIONAL);
+ // Backward branch. We have an expected frame to merge to on the
+ // backward edge.
+
+ // Swap the current frame for a copy (we do the swapping to get
+ // the off-frame registers off the fall through) to use for the
+ // branch.
+ VirtualFrame* fall_through_frame = cgen()->frame();
+ VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
+ RegisterFile non_frame_registers;
+ cgen()->SetFrame(branch_frame, &non_frame_registers);
+
+ // Check if we can avoid merge code.
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ if (cgen()->frame()->Equals(entry_frame_)) {
+ // Branch right in to the block.
+ cgen()->DeleteFrame();
+ __ j(cc, &entry_label_);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ return;
+ }
+
+ // Check if we can reuse existing merge code.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (reaching_frames_[i] != NULL &&
+ cgen()->frame()->Equals(reaching_frames_[i])) {
+ // Branch to the merge code.
+ cgen()->DeleteFrame();
+ __ j(cc, &merge_labels_[i]);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ return;
+ }
+ }
+
+ // To emit the merge code here, we negate the condition and branch
+ // around the merge code on the fall through path.
+ Label original_fall_through;
+ __ j(NegateCondition(cc), &original_fall_through);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ __ bind(&original_fall_through);
+
+ } else if (entry_frame_ != NULL) {
+ // Forward branch with a preconfigured entry frame. Assert the
+ // current frame matches the expected one and branch to the block.
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ // Explicitly use the macro assembler instead of __ as forward
+ // branches are expected to be a fixed size (no inserted
+ // coverage-checking instructions please). This is used in
+ // Reference::GetValue.
+ cgen()->masm()->j(cc, &entry_label_);
+
+ } else {
+ // Forward branch. A copy of the current frame is remembered and
+ // a branch to the merge code is emitted. Explicitly use the
+ // macro assembler instead of __ as forward branches are expected
+ // to be a fixed size (no inserted coverage-checking instructions
+ // please). This is used in Reference::GetValue.
+ AddReachingFrame(new VirtualFrame(cgen()->frame()));
+ cgen()->masm()->j(cc, &merge_labels_.last());
+ }
+}
+
+
+void JumpTarget::Call() {
+ // Call is used to push the address of the catch block on the stack as
+ // a return address when compiling try/catch and try/finally. We
+ // fully spill the frame before making the call. The expected frame
+ // at the label (which should be the only one) is the spilled current
+ // frame plus an in-memory return address. The "fall-through" frame
+ // at the return site is the spilled current frame.
+ ASSERT(cgen() != NULL);
+ ASSERT(cgen()->has_valid_frame());
+ // There are no non-frame references across the call.
+ ASSERT(cgen()->HasValidEntryRegisters());
+ ASSERT(!is_linked());
+
+ cgen()->frame()->SpillAll();
+ VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
+ target_frame->Adjust(1);
+ // We do not expect a call with a preconfigured entry frame.
+ ASSERT(entry_frame_ == NULL);
+ AddReachingFrame(target_frame);
+ __ call(&merge_labels_.last());
+}
+
+
+void JumpTarget::DoBind() {
+ ASSERT(cgen() != NULL);
+ ASSERT(!is_bound());
+
+ // Live non-frame registers are not allowed at the start of a basic
+ // block.
+ ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+ // Fast case: the jump target was manually configured with an entry
+ // frame to use.
+ if (entry_frame_ != NULL) {
+ // Assert no reaching frames to deal with.
+ ASSERT(reaching_frames_.is_empty());
+ ASSERT(!cgen()->has_valid_frame());
+
+ RegisterFile empty;
+ if (direction_ == BIDIRECTIONAL) {
+ // Copy the entry frame so the original can be used for a
+ // possible backward jump.
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+ } else {
+ // Take ownership of the entry frame.
+ cgen()->SetFrame(entry_frame_, &empty);
+ entry_frame_ = NULL;
+ }
+ __ bind(&entry_label_);
+ return;
+ }
+
+ if (!is_linked()) {
+ ASSERT(cgen()->has_valid_frame());
+ if (direction_ == FORWARD_ONLY) {
+ // Fast case: no forward jumps and no possible backward jumps.
+ // The stack pointer can be floating above the top of the
+ // virtual frame before the bind. Afterward, it should not.
+ VirtualFrame* frame = cgen()->frame();
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+ if (difference > 0) {
+ frame->stack_pointer_ -= difference;
+ __ addq(rsp, Immediate(difference * kPointerSize));
+ }
+ } else {
+ ASSERT(direction_ == BIDIRECTIONAL);
+ // Fast case: no forward jumps, possible backward ones. Remove
+ // constants and copies above the watermark on the fall-through
+ // frame and use it as the entry frame.
+ cgen()->frame()->MakeMergable();
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ }
+ __ bind(&entry_label_);
+ return;
+ }
+
+ if (direction_ == FORWARD_ONLY &&
+ !cgen()->has_valid_frame() &&
+ reaching_frames_.length() == 1) {
+ // Fast case: no fall-through, a single forward jump, and no
+ // possible backward jumps. Pick up the only reaching frame, take
+ // ownership of it, and use it for the block about to be emitted.
+ VirtualFrame* frame = reaching_frames_[0];
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[0] = NULL;
+ __ bind(&merge_labels_[0]);
+
+ // The stack pointer can be floating above the top of the
+ // virtual frame before the bind. Afterward, it should not.
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+ if (difference > 0) {
+ frame->stack_pointer_ -= difference;
+ __ addq(rsp, Immediate(difference * kPointerSize));
+ }
+
+ __ bind(&entry_label_);
+ return;
+ }
+
+ // If there is a current frame, record it as the fall-through. It
+ // is owned by the reaching frames for now.
+ bool had_fall_through = false;
+ if (cgen()->has_valid_frame()) {
+ had_fall_through = true;
+ AddReachingFrame(cgen()->frame()); // Return value ignored.
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ }
+
+ // Compute the frame to use for entry to the block.
+ ComputeEntryFrame();
+
+ // Some moves required to merge to an expected frame require purely
+ // frame state changes, and do not require any code generation.
+ // Perform those first to increase the possibility of finding equal
+ // frames below.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (reaching_frames_[i] != NULL) {
+ reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+ }
+ }
+
+ if (is_linked()) {
+ // There were forward jumps. Handle merging the reaching frames
+ // to the entry frame.
+
+ // Loop over the (non-null) reaching frames and process any that
+ // need merge code. Iterate backwards through the list to handle
+ // the fall-through frame first. Set frames that will be
+ // processed after 'i' to NULL if we want to avoid processing
+ // them.
+ for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
+ VirtualFrame* frame = reaching_frames_[i];
+
+ if (frame != NULL) {
+ // Does the frame (probably) need merge code?
+ if (!frame->Equals(entry_frame_)) {
+ // We could have a valid frame as the fall through to the
+ // binding site or as the fall through from a previous merge
+ // code block. Jump around the code we are about to
+ // generate.
+ if (cgen()->has_valid_frame()) {
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ }
+ // Pick up the frame for this block. Assume ownership if
+ // there cannot be backward jumps.
+ RegisterFile empty;
+ if (direction_ == BIDIRECTIONAL) {
+ cgen()->SetFrame(new VirtualFrame(frame), &empty);
+ } else {
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[i] = NULL;
+ }
+ __ bind(&merge_labels_[i]);
+
+ // Loop over the remaining (non-null) reaching frames,
+ // looking for any that can share merge code with this one.
+ for (int j = 0; j < i; j++) {
+ VirtualFrame* other = reaching_frames_[j];
+ if (other != NULL && other->Equals(cgen()->frame())) {
+ // Set the reaching frame element to null to avoid
+ // processing it later, and then bind its entry label.
+ reaching_frames_[j] = NULL;
+ __ bind(&merge_labels_[j]);
+ }
+ }
+
+ // Emit the merge code.
+ cgen()->frame()->MergeTo(entry_frame_);
+ } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
+ // If this is the fall through frame, and it didn't need
+ // merge code, we need to pick up the frame so we can jump
+ // around subsequent merge blocks if necessary.
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[i] = NULL;
+ }
+ }
+ }
+
+ // The code generator may not have a current frame if there was no
+ // fall through and none of the reaching frames needed merging.
+ // In that case, clone the entry frame as the current frame.
+ if (!cgen()->has_valid_frame()) {
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+ }
+
+ // There may be unprocessed reaching frames that did not need
+ // merge code. They will have unbound merge labels. Bind their
+ // merge labels to be the same as the entry label and deallocate
+ // them.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (!merge_labels_[i].is_bound()) {
+ reaching_frames_[i] = NULL;
+ __ bind(&merge_labels_[i]);
+ }
+ }
+
+ // There are non-NULL reaching frames with bound labels for each
+ // merge block, but only on backward targets.
+ } else {
+ // There were no forward jumps. There must be a current frame and
+ // this must be a bidirectional target.
+ ASSERT(reaching_frames_.length() == 1);
+ ASSERT(reaching_frames_[0] != NULL);
+ ASSERT(direction_ == BIDIRECTIONAL);
+
+ // Use a copy of the reaching frame so the original can be saved
+ // for possible reuse as a backward merge block.
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
+ __ bind(&merge_labels_[0]);
+ cgen()->frame()->MergeTo(entry_frame_);
+ }
+
+ __ bind(&entry_label_);
+}
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index c47bfccf13..f6a5ffecc7 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -45,6 +45,14 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
}
+// TODO(x64): For now, the write barrier is disabled on x64 and we
+// therefore generate no code. This should be fixed when the write
+// barrier is enabled.
+void MacroAssembler::RecordWrite(Register object, int offset,
+ Register value, Register scratch) {
+}
+
+
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (FLAG_debug_code) Check(cc, msg);
}
@@ -59,6 +67,18 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
}
+void MacroAssembler::NegativeZeroTest(Register result,
+ Register op,
+ Label* then_label) {
+ Label ok;
+ testq(result, result);
+ j(not_zero, &ok);
+ testq(op, op);
+ j(sign, then_label);
+ bind(&ok);
+}
+
+
void MacroAssembler::ConstructAndTestJSFunction() {
const int initial_buffer_size = 4 * KB;
char* buffer = new char[initial_buffer_size];
@@ -194,6 +214,44 @@ void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
}
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ bool resolved;
+ Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+ const char* name = Builtins::GetName(id);
+ int argc = Builtins::GetArgumentsCount(id);
+
+ movq(target, code, RelocInfo::EXTERNAL_REFERENCE); // Is external reference?
+ if (!resolved) {
+ uint32_t flags =
+ Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+ Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
+ Bootstrapper::FixupFlagsUseCodeObject::encode(true);
+ Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
+ unresolved_.Add(entry);
+ }
+ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+ bool* resolved) {
+ // Move the builtin function into the temporary function slot by
+ // reading it from the builtins object. NOTE: We should be able to
+ // reduce this to two instructions by putting the function table in
+ // the global object instead of the "builtins" object and by using a
+ // real register for the function.
+ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset));
+ int builtins_offset =
+ JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+ movq(rdi, FieldOperand(rdx, builtins_offset));
+
+
+ return Builtins::GetCode(id, resolved);
+}
+
+
void MacroAssembler::Set(Register dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(x));
@@ -217,6 +275,52 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
+bool MacroAssembler::IsUnsafeSmi(Smi* value) {
+ return false;
+}
+
+void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
+ UNIMPLEMENTED();
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> source) {
+ if (source->IsSmi()) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(dst, source);
+ } else {
+ movq(dst, source, RelocInfo::NONE);
+ }
+ } else {
+ movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+ }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
+ Move(kScratchRegister, source);
+ movq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
+ Move(kScratchRegister, source);
+ cmpq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
+ Move(kScratchRegister, source);
+ cmpq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::Push(Handle<Object> source) {
+ Move(kScratchRegister, source);
+ push(kScratchRegister);
+}
+
+
void MacroAssembler::Jump(ExternalReference ext) {
movq(kScratchRegister, ext);
jmp(kScratchRegister);
@@ -229,6 +333,22 @@ void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
}
+void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
+ WriteRecordedPositions();
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ movq(kScratchRegister, code_object, rmode);
+#ifdef DEBUG
+ Label target;
+ bind(&target);
+#endif
+ jmp(kScratchRegister);
+#ifdef DEBUG
+ ASSERT_EQ(kTargetAddrToReturnAddrDist,
+ SizeOfCodeGeneratedSince(&target) + kPointerSize);
+#endif
+}
+
+
void MacroAssembler::Call(ExternalReference ext) {
movq(kScratchRegister, ext);
call(kScratchRegister);
@@ -241,6 +361,22 @@ void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
}
+void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+ WriteRecordedPositions();
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ movq(kScratchRegister, code_object, rmode);
+#ifdef DEBUG
+ Label target;
+ bind(&target);
+#endif
+ call(kScratchRegister);
+#ifdef DEBUG
+ ASSERT_EQ(kTargetAddrToReturnAddrDist,
+ SizeOfCodeGeneratedSince(&target) + kPointerSize);
+#endif
+}
+
+
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
// Adjust this code if not the case.
@@ -284,6 +420,16 @@ void MacroAssembler::Ret() {
}
+void MacroAssembler::FCmp() {
+ fcompp();
+ push(rax);
+ fnstsw_ax();
+ // TODO(X64): Check that sahf is safe to use, using CPUProbe.
+ sahf();
+ pop(rax);
+}
+
+
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
@@ -422,6 +568,36 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
#endif // ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+ bool resolved;
+ Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+ // Calls are not allowed in some stubs.
+ ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+
+ // Rely on the assertion to check that the number of provided
+ // arguments match the expected number of arguments. Fake a
+ // parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ InvokeCode(Handle<Code>(code), expected, expected,
+ RelocInfo::CODE_TARGET, flag);
+
+ const char* name = Builtins::GetName(id);
+ int argc = Builtins::GetArgumentsCount(id);
+ // The target address for the jump is stored as an immediate at offset
+ // kInvokeCodeAddressOffset.
+ if (!resolved) {
+ uint32_t flags =
+ Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+ Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+ Bootstrapper::FixupFlagsUseCodeObject::encode(false);
+ Unresolved entry =
+ { pc_offset() - kTargetAddrToReturnAddrDist, flags, name };
+ unresolved_.Add(entry);
+ }
+}
+
+
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
@@ -488,8 +664,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-
-
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -514,12 +688,11 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
Label done;
Register dummy = rax;
InvokePrologue(expected, actual, code, dummy, &done, flag);
- movq(kScratchRegister, code, rmode);
if (flag == CALL_FUNCTION) {
- call(kScratchRegister);
+ Call(code, rmode);
} else {
ASSERT(flag == JUMP_FUNCTION);
- jmp(kScratchRegister);
+ Jump(code, rmode);
}
bind(&done);
}
@@ -531,9 +704,10 @@ void MacroAssembler::InvokeFunction(Register function,
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movl(rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ movsxlq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- // Advances rdx to the end of the Code object headers, to the start of
+ // Advances rdx to the end of the Code object header, to the start of
// the executable code.
lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
@@ -599,7 +773,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, rdi, kTimesPointerSize, offset));
+ lea(r15, Operand(rbp, rdi, times_pointer_size, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 51e998c318..3ae78ba63a 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -158,11 +158,31 @@ class MacroAssembler: public Assembler {
void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x);
+ // Handle support
+ bool IsUnsafeSmi(Smi* value);
+ bool IsUnsafeSmi(Handle<Object> value) {
+ return IsUnsafeSmi(Smi::cast(*value));
+ }
+
+ void LoadUnsafeSmi(Register dst, Smi* source);
+ void LoadUnsafeSmi(Register dst, Handle<Object> source) {
+ LoadUnsafeSmi(dst, Smi::cast(*source));
+ }
+
+ void Move(Register dst, Handle<Object> source);
+ void Move(const Operand& dst, Handle<Object> source);
+ void Cmp(Register dst, Handle<Object> source);
+ void Cmp(const Operand& dst, Handle<Object> source);
+ void Push(Handle<Object> source);
+
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
+ void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
+
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
+ void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
diff --git a/deps/v8/src/x64/register-allocator-x64.cc b/deps/v8/src/x64/register-allocator-x64.cc
index 3aba60fb2d..deb2318564 100644
--- a/deps/v8/src/x64/register-allocator-x64.cc
+++ b/deps/v8/src/x64/register-allocator-x64.cc
@@ -39,19 +39,11 @@ namespace internal {
void Result::ToRegister() {
ASSERT(is_valid());
if (is_constant()) {
- // TODO(X64): Handle constant results.
- /*
Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
ASSERT(fresh.is_valid());
- if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
- } else {
- CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
- Immediate(handle()));
- }
+ CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
// This result becomes a copy of the fresh one.
*this = fresh;
- */
}
ASSERT(is_register());
}
@@ -66,15 +58,7 @@ void Result::ToRegister(Register target) {
CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg());
} else {
ASSERT(is_constant());
- /*
- TODO(X64): Handle constant results.
- if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
- } else {
- CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
- Immediate(handle()));
- }
- */
+ CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
}
*this = fresh;
} else if (is_register() && reg().is(target)) {
@@ -87,4 +71,14 @@ void Result::ToRegister(Register target) {
}
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+ // This function is not used in 64-bit code.
+ UNREACHABLE();
+ return Result();
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 209aa2d307..fdfa67fcc8 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -25,3 +25,130 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+#include "macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM((&masm_))
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* a,
+ JSObject* b,
+ JSFunction* c,
+ StubCompiler::CheckType d,
+ Code::Flags flags) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* CallStubCompiler::CompileCallField(Object* a,
+ JSObject* b,
+ int c,
+ String* d,
+ Code::Flags flags) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* a,
+ JSObject* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
+ JSObject* b,
+ AccessorInfo* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
+ JSObject* b,
+ Object* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* a,
+ JSObject* b,
+ int c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
+ JSObject* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
+ AccessorInfo* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* a,
+ int b,
+ Map* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+// TODO(1241006): Avoid having lazy compile stubs specialized by the
+// number of arguments. It is not needed anymore.
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+
+ __ push(rdi); // function is also the parameter to the runtime call
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ pop(rdi);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rcx);
+
+ return GetCodeWithFlags(flags, "LazyCompileStub");
+}
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index e6975fa434..888fdc20c8 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -65,7 +65,7 @@ void VirtualFrame::Enter() {
#ifdef DEBUG
// Verify that rdi contains a JS function. The following code
// relies on rax being available for use.
- __ testq(rdi, Immediate(kSmiTagMask));
+ __ testl(rdi, Immediate(kSmiTagMask));
__ Check(not_zero,
"VirtualFrame::Enter - rdi is not a function (smi check).");
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
@@ -85,7 +85,7 @@ void VirtualFrame::Enter() {
// Store the function in the frame. The frame owns the register
// reference now (ie, it can keep it in rdi or spill it later).
Push(rdi);
- // SyncElementAt(element_count() - 1);
+ SyncElementAt(element_count() - 1);
cgen()->allocator()->Unuse(rdi);
}
@@ -99,7 +99,7 @@ void VirtualFrame::Exit() {
// Avoid using the leave instruction here, because it is too
   // short. We need the return sequence to be at least the size of a
// call instruction to support patching the exit code in the
- // debugger. See VisitReturnStatement for the full return sequence.
+ // debugger. See GenerateReturnSequence for the full return sequence.
// TODO(X64): A patched call will be very long now. Make sure we
// have enough room.
__ movq(rsp, rbp);
@@ -115,6 +115,48 @@ void VirtualFrame::Exit() {
}
+void VirtualFrame::AllocateStackSlots() {
+ int count = local_count();
+ if (count > 0) {
+ Comment cmnt(masm(), "[ Allocate space for locals");
+ // The locals are initialized to a constant (the undefined value), but
+ // we sync them with the actual frame to allocate space for spilling
+ // them later. First sync everything above the stack pointer so we can
+ // use pushes to allocate and initialize the locals.
+ SyncRange(stack_pointer_ + 1, element_count() - 1);
+ Handle<Object> undefined = Factory::undefined_value();
+ FrameElement initial_value =
+ FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
+ __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ for (int i = 0; i < count; i++) {
+ elements_.Add(initial_value);
+ stack_pointer_++;
+ __ push(kScratchRegister);
+ }
+ }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+ ASSERT(elements_[context_index()].is_memory());
+ __ movq(Operand(rbp, fp_relative(context_index())), rsi);
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+ ASSERT(elements_[context_index()].is_memory());
+ __ movq(rsi, Operand(rbp, fp_relative(context_index())));
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ lea(temp.reg(), ParameterAt(-1));
+ Push(&temp);
+}
+
+
void VirtualFrame::EmitPop(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
@@ -155,39 +197,853 @@ void VirtualFrame::EmitPush(Immediate immediate) {
}
-void VirtualFrame::Drop(int a) {
- UNIMPLEMENTED();
+void VirtualFrame::EmitPush(Handle<Object> value) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ Push(value);
+}
+
+
+void VirtualFrame::Drop(int count) {
+ ASSERT(height() >= count);
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+ // Emit code to lower the stack pointer if necessary.
+ if (num_virtual_elements < count) {
+ int num_dropped = count - num_virtual_elements;
+ stack_pointer_ -= num_dropped;
+ __ addq(rsp, Immediate(num_dropped * kPointerSize));
+ }
+
+ // Discard elements from the virtual frame and free any registers.
+ for (int i = 0; i < count; i++) {
+ FrameElement dropped = elements_.RemoveLast();
+ if (dropped.is_register()) {
+ Unuse(dropped.reg());
+ }
+ }
+}
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+ FrameElement original = elements_[index];
+
+ // Is this element the backing store of any copies?
+ int new_backing_index = kIllegalIndex;
+ if (original.is_copied()) {
+ // Verify it is copied, and find first copy.
+ for (int i = index + 1; i < element_count(); i++) {
+ if (elements_[i].is_copy() && elements_[i].index() == index) {
+ new_backing_index = i;
+ break;
+ }
+ }
+ }
+
+ if (new_backing_index == kIllegalIndex) {
+ // No copies found, return kIllegalIndex.
+ if (original.is_register()) {
+ Unuse(original.reg());
+ }
+ elements_[index] = FrameElement::InvalidElement();
+ return kIllegalIndex;
+ }
+
+ // This is the backing store of copies.
+ Register backing_reg;
+ if (original.is_memory()) {
+ Result fresh = cgen()->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ Use(fresh.reg(), new_backing_index);
+ backing_reg = fresh.reg();
+ __ movq(backing_reg, Operand(rbp, fp_relative(index)));
+ } else {
+ // The original was in a register.
+ backing_reg = original.reg();
+ set_register_location(backing_reg, new_backing_index);
+ }
+ // Invalidate the element at index.
+ elements_[index] = FrameElement::InvalidElement();
+ // Set the new backing element.
+ if (elements_[new_backing_index].is_synced()) {
+ elements_[new_backing_index] =
+ FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+ } else {
+ elements_[new_backing_index] =
+ FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+ }
+ // Update the other copies.
+ for (int i = new_backing_index + 1; i < element_count(); i++) {
+ if (elements_[i].is_copy() && elements_[i].index() == index) {
+ elements_[i].set_index(new_backing_index);
+ elements_[new_backing_index].set_copied();
+ }
+ }
+ return new_backing_index;
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index <= element_count());
+ FrameElement original = elements_[index];
+ int new_backing_store_index = InvalidateFrameSlotAt(index);
+ if (new_backing_store_index != kIllegalIndex) {
+ elements_.Add(CopyElementAt(new_backing_store_index));
+ return;
+ }
+
+ switch (original.type()) {
+ case FrameElement::MEMORY: {
+ // Emit code to load the original element's data into a register.
+ // Push that register as a FrameElement on top of the frame.
+ Result fresh = cgen()->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ FrameElement new_element =
+ FrameElement::RegisterElement(fresh.reg(),
+ FrameElement::NOT_SYNCED);
+ Use(fresh.reg(), element_count());
+ elements_.Add(new_element);
+ __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
+ break;
+ }
+ case FrameElement::REGISTER:
+ Use(original.reg(), element_count());
+ // Fall through.
+ case FrameElement::CONSTANT:
+ case FrameElement::COPY:
+ original.clear_sync();
+ elements_.Add(original);
+ break;
+ case FrameElement::INVALID:
+ UNREACHABLE();
+ break;
+ }
}
-int VirtualFrame::InvalidateFrameSlotAt(int a) {
- UNIMPLEMENTED();
- return -1;
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+ // Store the value on top of the frame to the virtual frame slot at
+ // a given index. The value on top of the frame is left in place.
+ // This is a duplicating operation, so it can create copies.
+ ASSERT(index >= 0);
+ ASSERT(index < element_count());
+
+ int top_index = element_count() - 1;
+ FrameElement top = elements_[top_index];
+ FrameElement original = elements_[index];
+ if (top.is_copy() && top.index() == index) return;
+ ASSERT(top.is_valid());
+
+ InvalidateFrameSlotAt(index);
+
+ // InvalidateFrameSlotAt can potentially change any frame element, due
+ // to spilling registers to allocate temporaries in order to preserve
+ // the copy-on-write semantics of aliased elements. Reload top from
+ // the frame.
+ top = elements_[top_index];
+
+ if (top.is_copy()) {
+ // There are two cases based on the relative positions of the
+ // stored-to slot and the backing slot of the top element.
+ int backing_index = top.index();
+ ASSERT(backing_index != index);
+ if (backing_index < index) {
+ // 1. The top element is a copy of a slot below the stored-to
+ // slot. The stored-to slot becomes an unsynced copy of that
+ // same backing slot.
+ elements_[index] = CopyElementAt(backing_index);
+ } else {
+ // 2. The top element is a copy of a slot above the stored-to
+ // slot. The stored-to slot becomes the new (unsynced) backing
+ // slot and both the top element and the element at the former
+ // backing slot become copies of it. The sync state of the top
+ // and former backing elements is preserved.
+ FrameElement backing_element = elements_[backing_index];
+ ASSERT(backing_element.is_memory() || backing_element.is_register());
+ if (backing_element.is_memory()) {
+ // Because sets of copies are canonicalized to be backed by
+ // their lowest frame element, and because memory frame
+ // elements are backed by the corresponding stack address, we
+ // have to move the actual value down in the stack.
+ //
+        // TODO(209): consider allocating the stored-to slot to the
+ // temp register. Alternatively, allow copies to appear in
+ // any order in the frame and lazily move the value down to
+ // the slot.
+ __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
+ __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+ } else {
+ set_register_location(backing_element.reg(), index);
+ if (backing_element.is_synced()) {
+ // If the element is a register, we will not actually move
+ // anything on the stack but only update the virtual frame
+ // element.
+ backing_element.clear_sync();
+ }
+ }
+ elements_[index] = backing_element;
+
+ // The old backing element becomes a copy of the new backing
+ // element.
+ FrameElement new_element = CopyElementAt(index);
+ elements_[backing_index] = new_element;
+ if (backing_element.is_synced()) {
+ elements_[backing_index].set_sync();
+ }
+
+ // All the copies of the old backing element (including the top
+ // element) become copies of the new backing element.
+ for (int i = backing_index + 1; i < element_count(); i++) {
+ if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
+ elements_[i].set_index(index);
+ }
+ }
+ }
+ return;
+ }
+
+ // Move the top element to the stored-to slot and replace it (the
+ // top element) with a copy.
+ elements_[index] = top;
+ if (top.is_memory()) {
+ // TODO(209): consider allocating the stored-to slot to the temp
+ // register. Alternatively, allow copies to appear in any order
+ // in the frame and lazily move the value down to the slot.
+ FrameElement new_top = CopyElementAt(index);
+ new_top.set_sync();
+ elements_[top_index] = new_top;
+
+ // The sync state of the former top element is correct (synced).
+ // Emit code to move the value down in the frame.
+ __ movq(kScratchRegister, Operand(rsp, 0));
+ __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+ } else if (top.is_register()) {
+ set_register_location(top.reg(), index);
+ // The stored-to slot has the (unsynced) register reference and
+ // the top element becomes a copy. The sync state of the top is
+ // preserved.
+ FrameElement new_top = CopyElementAt(index);
+ if (top.is_synced()) {
+ new_top.set_sync();
+ elements_[index].clear_sync();
+ }
+ elements_[top_index] = new_top;
+ } else {
+ // The stored-to slot holds the same value as the top but
+ // unsynced. (We do not have copies of constants yet.)
+ ASSERT(top.is_constant());
+ elements_[index].clear_sync();
+ }
+}
+
+
+void VirtualFrame::MakeMergable() {
+ for (int i = 0; i < element_count(); i++) {
+ FrameElement element = elements_[i];
+
+ if (element.is_constant() || element.is_copy()) {
+ if (element.is_synced()) {
+ // Just spill.
+ elements_[i] = FrameElement::MemoryElement();
+ } else {
+ // Allocate to a register.
+ FrameElement backing_element; // Invalid if not a copy.
+ if (element.is_copy()) {
+ backing_element = elements_[element.index()];
+ }
+ Result fresh = cgen()->allocator()->Allocate();
+ ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
+ elements_[i] =
+ FrameElement::RegisterElement(fresh.reg(),
+ FrameElement::NOT_SYNCED);
+ Use(fresh.reg(), i);
+
+ // Emit a move.
+ if (element.is_constant()) {
+ __ Move(fresh.reg(), element.handle());
+ } else {
+ ASSERT(element.is_copy());
+ // Copies are only backed by register or memory locations.
+ if (backing_element.is_register()) {
+ // The backing store may have been spilled by allocating,
+ // but that's OK. If it was, the value is right where we
+ // want it.
+ if (!fresh.reg().is(backing_element.reg())) {
+ __ movq(fresh.reg(), backing_element.reg());
+ }
+ } else {
+ ASSERT(backing_element.is_memory());
+ __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
+ }
+ }
+ }
+ // No need to set the copied flag --- there are no copies.
+ } else {
+ // Clear the copy flag of non-constant, non-copy elements.
+ // They cannot be copied because copies are not allowed.
+ // The copy flag is not relied on before the end of this loop,
+ // including when registers are spilled.
+ elements_[i].clear_copied();
+ }
+ }
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+ Comment cmnt(masm(), "[ Merge frame");
+ // We should always be merging the code generator's current frame to an
+ // expected frame.
+ ASSERT(cgen()->frame() == this);
+
+ // Adjust the stack pointer upward (toward the top of the virtual
+ // frame) if necessary.
+ if (stack_pointer_ < expected->stack_pointer_) {
+ int difference = expected->stack_pointer_ - stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ subq(rsp, Immediate(difference * kPointerSize));
+ }
+
+ MergeMoveRegistersToMemory(expected);
+ MergeMoveRegistersToRegisters(expected);
+ MergeMoveMemoryToRegisters(expected);
+
+ // Adjust the stack pointer downward if necessary.
+ if (stack_pointer_ > expected->stack_pointer_) {
+ int difference = stack_pointer_ - expected->stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ addq(rsp, Immediate(difference * kPointerSize));
+ }
+
+ // At this point, the frames should be identical.
+ ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+ ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+ // Move registers, constants, and copies to memory. Perform moves
+ // from the top downward in the frame in order to leave the backing
+ // stores of copies in registers.
+ for (int i = element_count() - 1; i >= 0; i--) {
+ FrameElement target = expected->elements_[i];
+ if (target.is_register()) continue; // Handle registers later.
+ if (target.is_memory()) {
+ FrameElement source = elements_[i];
+ switch (source.type()) {
+ case FrameElement::INVALID:
+ // Not a legal merge move.
+ UNREACHABLE();
+ break;
+
+ case FrameElement::MEMORY:
+ // Already in place.
+ break;
+
+ case FrameElement::REGISTER:
+ Unuse(source.reg());
+ if (!source.is_synced()) {
+ __ movq(Operand(rbp, fp_relative(i)), source.reg());
+ }
+ break;
+
+ case FrameElement::CONSTANT:
+ if (!source.is_synced()) {
+ __ Move(Operand(rbp, fp_relative(i)), source.handle());
+ }
+ break;
+
+ case FrameElement::COPY:
+ if (!source.is_synced()) {
+ int backing_index = source.index();
+ FrameElement backing_element = elements_[backing_index];
+ if (backing_element.is_memory()) {
+ __ movq(kScratchRegister,
+ Operand(rbp, fp_relative(backing_index)));
+ __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
+ } else {
+ ASSERT(backing_element.is_register());
+ __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
+ }
+ }
+ break;
+ }
+ }
+ elements_[i] = target;
+ }
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+ // We have already done X-to-memory moves.
+ ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ // Move the right value into register i if it is currently in a register.
+ int index = expected->register_location(i);
+ int use_index = register_location(i);
+ // Skip if register i is unused in the target or else if source is
+ // not a register (this is not a register-to-register move).
+ if (index == kIllegalIndex || !elements_[index].is_register()) continue;
+
+ Register target = RegisterAllocator::ToRegister(i);
+ Register source = elements_[index].reg();
+ if (index != use_index) {
+ if (use_index == kIllegalIndex) { // Target is currently unused.
+        // Copy the contents of source to target.
+ // Set frame element register to target.
+ Use(target, index);
+ Unuse(source);
+ __ movq(target, source);
+ } else {
+ // Exchange contents of registers source and target.
+ // Nothing except the register backing use_index has changed.
+ elements_[use_index].set_reg(source);
+ set_register_location(target, index);
+ set_register_location(source, use_index);
+ __ xchg(source, target);
+ }
+ }
+
+ if (!elements_[index].is_synced() &&
+ expected->elements_[index].is_synced()) {
+ __ movq(Operand(rbp, fp_relative(index)), target);
+ }
+ elements_[index] = expected->elements_[index];
+ }
}
-void VirtualFrame::MergeTo(VirtualFrame* a) {
- UNIMPLEMENTED();
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+ // Move memory, constants, and copies to registers. This is the
+ // final step and since it is not done from the bottom up, but in
+ // register code order, we have special code to ensure that the backing
+ // elements of copies are in their correct locations when we
+ // encounter the copies.
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int index = expected->register_location(i);
+ if (index != kIllegalIndex) {
+ FrameElement source = elements_[index];
+ FrameElement target = expected->elements_[index];
+ Register target_reg = RegisterAllocator::ToRegister(i);
+ ASSERT(target.reg().is(target_reg));
+ switch (source.type()) {
+ case FrameElement::INVALID: // Fall through.
+ UNREACHABLE();
+ break;
+ case FrameElement::REGISTER:
+ ASSERT(source.Equals(target));
+ // Go to next iteration. Skips Use(target_reg) and syncing
+ // below. It is safe to skip syncing because a target
+ // register frame element would only be synced if all source
+ // elements were.
+ continue;
+ break;
+ case FrameElement::MEMORY:
+ ASSERT(index <= stack_pointer_);
+ __ movq(target_reg, Operand(rbp, fp_relative(index)));
+ break;
+
+ case FrameElement::CONSTANT:
+ __ Move(target_reg, source.handle());
+ break;
+
+ case FrameElement::COPY: {
+ int backing_index = source.index();
+ FrameElement backing = elements_[backing_index];
+ ASSERT(backing.is_memory() || backing.is_register());
+ if (backing.is_memory()) {
+ ASSERT(backing_index <= stack_pointer_);
+ // Code optimization if backing store should also move
+ // to a register: move backing store to its register first.
+ if (expected->elements_[backing_index].is_register()) {
+ FrameElement new_backing = expected->elements_[backing_index];
+ Register new_backing_reg = new_backing.reg();
+ ASSERT(!is_used(new_backing_reg));
+ elements_[backing_index] = new_backing;
+ Use(new_backing_reg, backing_index);
+ __ movq(new_backing_reg,
+ Operand(rbp, fp_relative(backing_index)));
+ __ movq(target_reg, new_backing_reg);
+ } else {
+ __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
+ }
+ } else {
+ __ movq(target_reg, backing.reg());
+ }
+ }
+ }
+ // Ensure the proper sync state.
+ if (target.is_synced() && !source.is_synced()) {
+ __ movq(Operand(rbp, fp_relative(index)), target_reg);
+ }
+ Use(target_reg, index);
+ elements_[index] = target;
+ }
+ }
}
+
Result VirtualFrame::Pop() {
- UNIMPLEMENTED();
- return Result(NULL);
+ FrameElement element = elements_.RemoveLast();
+ int index = element_count();
+ ASSERT(element.is_valid());
+
+ bool pop_needed = (stack_pointer_ == index);
+ if (pop_needed) {
+ stack_pointer_--;
+ if (element.is_memory()) {
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ pop(temp.reg());
+ return temp;
+ }
+
+ __ addq(rsp, Immediate(kPointerSize));
+ }
+ ASSERT(!element.is_memory());
+
+ // The top element is a register, constant, or a copy. Unuse
+ // registers and follow copies to their backing store.
+ if (element.is_register()) {
+ Unuse(element.reg());
+ } else if (element.is_copy()) {
+ ASSERT(element.index() < index);
+ index = element.index();
+ element = elements_[index];
+ }
+ ASSERT(!element.is_copy());
+
+ // The element is memory, a register, or a constant.
+ if (element.is_memory()) {
+ // Memory elements could only be the backing store of a copy.
+ // Allocate the original to a register.
+ ASSERT(index <= stack_pointer_);
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ Use(temp.reg(), index);
+ FrameElement new_element =
+ FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+ // Preserve the copy flag on the element.
+ if (element.is_copied()) new_element.set_copied();
+ elements_[index] = new_element;
+ __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
+ return Result(temp.reg());
+ } else if (element.is_register()) {
+ return Result(element.reg());
+ } else {
+ ASSERT(element.is_constant());
+ return Result(element.handle());
+ }
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallStub(stub);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+ PrepareForCall(0, 0);
+ arg->ToRegister(rax);
+ arg->Unuse();
+ return RawCallStub(stub);
}
-Result VirtualFrame::RawCallStub(CodeStub* a) {
- UNIMPLEMENTED();
- return Result(NULL);
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+ PrepareForCall(0, 0);
+
+ if (arg0->is_register() && arg0->reg().is(rax)) {
+ if (arg1->is_register() && arg1->reg().is(rdx)) {
+ // Wrong registers.
+ __ xchg(rax, rdx);
+ } else {
+ // Register rdx is free for arg0, which frees rax for arg1.
+ arg0->ToRegister(rdx);
+ arg1->ToRegister(rax);
+ }
+ } else {
+ // Register rax is free for arg1, which guarantees rdx is free for
+ // arg0.
+ arg1->ToRegister(rax);
+ arg0->ToRegister(rdx);
+ }
+
+ arg0->Unuse();
+ arg1->Unuse();
+ return RawCallStub(stub);
}
-void VirtualFrame::SyncElementBelowStackPointer(int a) {
- UNIMPLEMENTED();
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+ // Emit code to write elements below the stack pointer to their
+ // (already allocated) stack address.
+ ASSERT(index <= stack_pointer_);
+ FrameElement element = elements_[index];
+ ASSERT(!element.is_synced());
+ switch (element.type()) {
+ case FrameElement::INVALID:
+ break;
+
+ case FrameElement::MEMORY:
+ // This function should not be called with synced elements.
+ // (memory elements are always synced).
+ UNREACHABLE();
+ break;
+
+ case FrameElement::REGISTER:
+ __ movq(Operand(rbp, fp_relative(index)), element.reg());
+ break;
+
+ case FrameElement::CONSTANT:
+ __ Move(Operand(rbp, fp_relative(index)), element.handle());
+ break;
+
+ case FrameElement::COPY: {
+ int backing_index = element.index();
+ FrameElement backing_element = elements_[backing_index];
+ if (backing_element.is_memory()) {
+ __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
+ __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+ } else {
+ ASSERT(backing_element.is_register());
+ __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
+ }
+ break;
+ }
+ }
+ elements_[index].set_sync();
}
-void VirtualFrame::SyncElementByPushing(int a) {
- UNIMPLEMENTED();
+
+void VirtualFrame::SyncElementByPushing(int index) {
+ // Sync an element of the frame that is just above the stack pointer
+ // by pushing it.
+ ASSERT(index == stack_pointer_ + 1);
+ stack_pointer_++;
+ FrameElement element = elements_[index];
+
+ switch (element.type()) {
+ case FrameElement::INVALID:
+ __ push(Immediate(Smi::FromInt(0)));
+ break;
+
+ case FrameElement::MEMORY:
+ // No memory elements exist above the stack pointer.
+ UNREACHABLE();
+ break;
+
+ case FrameElement::REGISTER:
+ __ push(element.reg());
+ break;
+
+ case FrameElement::CONSTANT:
+ __ Move(kScratchRegister, element.handle());
+ __ push(kScratchRegister);
+ break;
+
+ case FrameElement::COPY: {
+ int backing_index = element.index();
+ FrameElement backing = elements_[backing_index];
+ ASSERT(backing.is_memory() || backing.is_register());
+ if (backing.is_memory()) {
+ __ push(Operand(rbp, fp_relative(backing_index)));
+ } else {
+ __ push(backing.reg());
+ }
+ break;
+ }
+ }
+ elements_[index].set_sync();
}
-void VirtualFrame::SyncRange(int a, int b) {
- UNIMPLEMENTED();
+
+// Clear the dirty bits for the range of elements in
+// [min(stack_pointer_ + 1, begin), end].
+void VirtualFrame::SyncRange(int begin, int end) {
+ ASSERT(begin >= 0);
+ ASSERT(end < element_count());
+ // Sync elements below the range if they have not been materialized
+ // on the stack.
+ int start = Min(begin, stack_pointer_ + 1);
+
+ // If positive we have to adjust the stack pointer.
+ int delta = end - stack_pointer_;
+ if (delta > 0) {
+ stack_pointer_ = end;
+ __ subq(rsp, Immediate(delta * kPointerSize));
+ }
+
+ for (int i = start; i <= end; i++) {
+ if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+ }
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ InvokeBuiltin(id, flag);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+//------------------------------------------------------------------------------
+// Virtual frame stub and IC calling functions.
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallRuntime(f, arg_count);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallRuntime(id, arg_count);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
+ // Name and receiver are on the top of the frame. The IC expects
+ // name in rcx and receiver on the stack. It does not drop the
+ // receiver.
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Result name = Pop();
+ PrepareForCall(1, 0); // One stack arg, not callee-dropped.
+ name.ToRegister(rcx);
+ name.Unuse();
+ return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
+ // Key and receiver are on top of the frame. The IC expects them on
+ // the stack. It does not drop them.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
+ return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallKeyedStoreIC() {
+ // Value, key, and receiver are on the top of the frame. The IC
+ // expects value in rax and key and receiver on the stack. It does
+ // not drop the key and receiver.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ // TODO(1222589): Make the IC grab the values from the stack.
+ Result value = Pop();
+ PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
+ value.ToRegister(rax);
+ value.Unuse();
+ return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
+ int arg_count,
+ int loop_nesting) {
+ // Arguments, receiver, and function name are on top of the frame.
+ // The IC expects them on the stack. It does not drop the function
+ // name slot (but it does drop the rest).
+ InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
+ // Spill args, receiver, and function. The call will drop args and
+ // receiver.
+ PrepareForCall(arg_count + 2, arg_count + 1);
+ return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallConstructor(int arg_count) {
+ // Arguments, receiver, and function are on top of the frame. The
+ // IC expects arg count in rax, function in rdi, and the arguments
+ // and receiver on the stack.
+ Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+ // Duplicate the function before preparing the frame.
+ PushElementAt(arg_count + 1);
+ Result function = Pop();
+ PrepareForCall(arg_count + 1, arg_count + 1); // Spill args and receiver.
+ function.ToRegister(rdi);
+
+ // Constructors are called with the number of arguments in register
+  // rax for now. Another option would be to have separate construct
+  // call trampolines per argument count encountered.
+ Result num_args = cgen()->allocator()->Allocate(rax);
+ ASSERT(num_args.is_valid());
+ __ movq(num_args.reg(), Immediate(arg_count));
+
+ function.Unuse();
+ num_args.Unuse();
+ return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
+}
+
+
+Result VirtualFrame::CallStoreIC() {
+ // Name, value, and receiver are on top of the frame. The IC
+ // expects name in rcx, value in rax, and receiver on the stack. It
+ // does not drop the receiver.
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Result name = Pop();
+ Result value = Pop();
+ PrepareForCall(1, 0); // One stack arg, not callee-dropped.
+
+ if (value.is_register() && value.reg().is(rcx)) {
+ if (name.is_register() && name.reg().is(rax)) {
+ // Wrong registers.
+ __ xchg(rax, rcx);
+ } else {
+ // Register rax is free for value, which frees rcx for name.
+ value.ToRegister(rax);
+ name.ToRegister(rcx);
+ }
+ } else {
+ // Register rcx is free for name, which guarantees rax is free for
+ // value.
+ name.ToRegister(rcx);
+ value.ToRegister(rax);
+ }
+
+ name.Unuse();
+ value.Unuse();
+ return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ // Grow the expression stack by handler size less one (the return
+ // address is already pushed by a call instruction).
+ Adjust(kHandlerSize - 1);
+ __ PushTryHandler(IN_JAVASCRIPT, type);
}
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index 2d3bf3022d..577a18bd47 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -153,11 +153,8 @@ class VirtualFrame : public ZoneObject {
void SyncRange(int begin, int end);
// Make this frame so that an arbitrary frame of the same height can
- // be merged to it. Copies and constants are removed from the
- // topmost mergable_elements elements of the frame. A
- // mergable_elements of JumpTarget::kAllElements indicates constants
- // and copies are should be removed from the entire frame.
- void MakeMergable(int mergable_elements);
+ // be merged to it. Copies and constants are removed from the frame.
+ void MakeMergable();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
@@ -310,8 +307,8 @@ class VirtualFrame : public ZoneObject {
// even a register. The argument is consumed by the call.
Result CallStub(CodeStub* stub, Result* arg);
- // Call stub that takes a pair of arguments passed in edx (arg0) and
- // eax (arg1). The arguments are given as results which do not have
+ // Call stub that takes a pair of arguments passed in rdx (arg0) and
+ // rax (arg1). The arguments are given as results which do not have
// to be in the proper registers or even in registers. The
// arguments are consumed by the call.
Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
@@ -379,9 +376,11 @@ class VirtualFrame : public ZoneObject {
void EmitPush(Register reg);
void EmitPush(const Operand& operand);
void EmitPush(Immediate immediate);
+ // Uses kScratchRegister, emits appropriate relocation info.
+ void EmitPush(Handle<Object> value);
// Push an element on the virtual frame.
- void Push(Register reg, StaticType static_type = StaticType());
+ void Push(Register reg);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
@@ -389,7 +388,7 @@ class VirtualFrame : public ZoneObject {
// frame).
void Push(Result* result) {
if (result->is_register()) {
- Push(result->reg(), result->static_type());
+ Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index a8c2180166..68aabb5165 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -30,6 +30,10 @@ prefix cctest
# BUG(281): This test fails on some Linuxes.
test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
+# BUG(382): Weird test. Can't guarantee that it never times out.
+test-api/ApplyInterruption: PASS || TIMEOUT
+
+
[ $arch == arm ]
test-debug: SKIP
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index b1c1d4019d..a884d773b1 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -5257,3 +5257,40 @@ TEST(ExceptionMessageWhenMessageHandlerIsReset) {
CHECK_EQ(1, exception_event_count);
}
+
+
+// Tests that the after-compile event is sent when there are provisional
+// breakpoints outside the script's line range.
+TEST(ProvisionalBreakpointOnLineOutOfRange) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+ const char* script = "function f() {};";
+ const char* resource_name = "test_resource";
+
+ // Set a couple of provisional breakpoints on lines outside the script's
+ // line range.
+ int sbp1 = SetScriptBreakPointByNameFromJS(resource_name, 3,
+ -1 /* no column */);
+ int sbp2 = SetScriptBreakPointByNameFromJS(resource_name, 5, 5);
+
+ after_compile_message_count = 0;
+ v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
+
+ v8::ScriptOrigin origin(
+ v8::String::New(resource_name),
+ v8::Integer::New(10),
+ v8::Integer::New(1));
+ // Compile a script whose first line number is greater than the breakpoints'
+ // lines.
+ v8::Script::Compile(v8::String::New(script), &origin)->Run();
+
+ // If the script is compiled successfully there is exactly one after-compile
+ // event. If an exception occurs in the debugger code, no after-compile event
+ // is sent.
+ CHECK_EQ(1, after_compile_message_count);
+
+ ClearBreakPointFromJS(sbp1);
+ ClearBreakPointFromJS(sbp2);
+ v8::Debug::SetMessageHandler2(NULL);
+}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 1bfc8834ab..28e8649f9f 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -251,3 +251,17 @@ TEST(MultipleFuncsInLiteral) {
CheckFunctionName(script, "return 1", "MyClass.method1");
CheckFunctionName(script, "return 2", "MyClass.method1");
}
+
+
+// See http://code.google.com/p/v8/issues/detail?id=380
+TEST(Issue380) {
+ InitializeVM();
+ v8::HandleScope scope;
+
+ v8::Handle<v8::Script> script = Compile(
+ "function a() {\n"
+ "var result = function(p,a,c,k,e,d)"
+ "{return p}(\"if blah blah\",62,1976,\'a|b\'.split(\'|\'),0,{})\n"
+ "}");
+ CheckFunctionName(script, "return p", "");
+}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 515657f712..396bcc50b6 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -208,7 +208,7 @@ TEST(GarbageCollection) {
v8::HandleScope sc;
// check GC when heap is empty
- int free_bytes = Heap::MaxHeapObjectSize();
+ int free_bytes = Heap::MaxObjectSizeInPagedSpace();
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
// allocate a function and keep it in global object's property
@@ -782,7 +782,7 @@ TEST(Iteration) {
Factory::NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
- int large_size = Heap::MaxHeapObjectSize() + 1;
+ int large_size = Heap::MaxObjectSizeInPagedSpace() + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 53cff688af..8db7339b54 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -86,8 +86,8 @@ TEST(Promotion) {
v8::HandleScope sc;
// Allocate a fixed array in the new space.
- int array_size =
- (Heap::MaxHeapObjectSize() - Array::kHeaderSize) / (kPointerSize * 4);
+ int array_size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
+ (kPointerSize * 4);
Object* obj = Heap::AllocateFixedArray(array_size);
CHECK(!obj->IsFailure());
@@ -118,7 +118,8 @@ TEST(NoPromotion) {
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
// Allocate a big Fixed array in the new space.
- int size = (Heap::MaxHeapObjectSize() - Array::kHeaderSize) / kPointerSize;
+ int size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
+ kPointerSize;
Object* obj = Heap::AllocateFixedArray(size);
Handle<FixedArray> array(FixedArray::cast(obj));
diff --git a/deps/v8/test/message/overwritten-builtins.js b/deps/v8/test/message/overwritten-builtins.js
new file mode 100644
index 0000000000..8a838de1dd
--- /dev/null
+++ b/deps/v8/test/message/overwritten-builtins.js
@@ -0,0 +1,31 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+String.prototype.split = function() { return "SPLIT ERROR"; };
+Array.prototype.join = function() { return []; };
+
+undefined.x
diff --git a/deps/v8/test/message/overwritten-builtins.out b/deps/v8/test/message/overwritten-builtins.out
new file mode 100644
index 0000000000..ccf292463b
--- /dev/null
+++ b/deps/v8/test/message/overwritten-builtins.out
@@ -0,0 +1,30 @@
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*%(basename)s:31: TypeError: Cannot read property 'x' of undefined
+undefined.x
+ ^
diff --git a/deps/v8/test/mjsunit/arguments-apply.js b/deps/v8/test/mjsunit/arguments-apply.js
new file mode 100644
index 0000000000..5a9122859b
--- /dev/null
+++ b/deps/v8/test/mjsunit/arguments-apply.js
@@ -0,0 +1,134 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function ReturnArguments() {
+ return arguments;
+}
+
+function ReturnReceiver() {
+ return this;
+}
+
+
+function Global() {
+ return ReturnArguments.apply(this, arguments);
+}
+
+assertEquals(0, Global().length);
+assertEquals(1, Global(1).length);
+assertEquals(2, Global(2)[0]);
+assertEquals(2, Global(3, 4).length);
+assertEquals(3, Global(3, 4)[0]);
+assertEquals(4, Global(3, 4)[1]);
+
+
+function Local() {
+ var object = { f: ReturnArguments };
+ return object.f.apply(this, arguments);
+}
+
+assertEquals(0, Local().length);
+assertEquals(1, Local(1).length);
+assertEquals(2, Local(2)[0]);
+assertEquals(2, Local(3, 4).length);
+assertEquals(3, Local(3, 4)[0]);
+assertEquals(4, Local(3, 4)[1]);
+
+
+function ShadowArguments() {
+ var arguments = [3, 4];
+ return ReturnArguments.apply(this, arguments);
+}
+
+assertEquals(2, ShadowArguments().length);
+assertEquals(3, ShadowArguments()[0]);
+assertEquals(4, ShadowArguments()[1]);
+
+
+function NonObjectReceiver(receiver) {
+ return ReturnReceiver.apply(receiver, arguments);
+}
+
+assertEquals(42, NonObjectReceiver(42));
+assertEquals("object", typeof NonObjectReceiver(42));
+assertTrue(NonObjectReceiver(42) instanceof Number);
+assertTrue(this === NonObjectReceiver(null));
+assertTrue(this === NonObjectReceiver(void 0));
+
+
+function FunctionReceiver() {
+ return ReturnReceiver.apply(Object, arguments);
+}
+
+assertTrue(Object === FunctionReceiver());
+
+
+function ShadowApply() {
+ function f() { return 42; }
+ f.apply = function() { return 87; }
+ return f.apply(this, arguments);
+}
+
+assertEquals(87, ShadowApply());
+assertEquals(87, ShadowApply(1));
+assertEquals(87, ShadowApply(1, 2));
+
+
+function CallNonFunction() {
+ var object = { apply: Function.prototype.apply };
+ return object.apply(this, arguments);
+}
+
+assertThrows(CallNonFunction, TypeError);
+
+
+// Make sure that the stack after the apply optimization is
+// in a valid state.
+function SimpleStackCheck() {
+ var sentinel = 42;
+ var result = ReturnArguments.apply(this, arguments);
+ assertTrue(result != null);
+ assertEquals(42, sentinel);
+}
+
+SimpleStackCheck();
+
+
+function ShadowArgumentsWithConstant() {
+ var arguments = null;
+ return ReturnArguments.apply(this, arguments);
+}
+
+assertEquals(0, ShadowArgumentsWithConstant().length);
+assertEquals(0, ShadowArgumentsWithConstant(1).length);
+assertEquals(0, ShadowArgumentsWithConstant(1, 2).length);
+
+
+// Make sure we can deal with unfolding lots of arguments on the
+// stack even in the presence of the apply optimizations.
+var array = new Array(2048);
+assertEquals(2048, Global.apply(this, array).length);
diff --git a/deps/v8/test/mjsunit/arguments-lazy.js b/deps/v8/test/mjsunit/arguments-lazy.js
new file mode 100644
index 0000000000..794afc36b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/arguments-lazy.js
@@ -0,0 +1,47 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Make sure we don't allocate the arguments object over and
+// over again.
+function SharedLazyArguments() {
+ return arguments === arguments;
+}
+
+assertTrue(SharedLazyArguments());
+
+
+// Make sure that accessing arguments doesn't clobber any
+// local variables called arguments.
+function ArgumentsOverride(x) {
+ var arguments = 42;
+ x = x ? x : 0;
+ return x + arguments;
+}
+
+assertEquals(42, ArgumentsOverride());
+assertEquals(43, ArgumentsOverride(1));
+assertEquals(44, ArgumentsOverride(2,3));
diff --git a/deps/v8/test/mjsunit/date-parse.js b/deps/v8/test/mjsunit/date-parse.js
index 446472730f..56ceba3841 100644
--- a/deps/v8/test/mjsunit/date-parse.js
+++ b/deps/v8/test/mjsunit/date-parse.js
@@ -254,7 +254,7 @@ testCasesMisc.forEach(testDateParseMisc);
for (var i = 0; i < 24 * 365 * 100; i += 95) {
var ms = i * (3600 * 1000);
var s = (new Date(ms)).toString();
- assertEquals(ms, Date.parse(s), s);
+ assertEquals(ms, Date.parse(s), "parse own: " + s);
}
// Negative tests.
diff --git a/deps/v8/test/mjsunit/debug-sourceinfo.js b/deps/v8/test/mjsunit/debug-sourceinfo.js
index b62a742481..ddf80dc51c 100644
--- a/deps/v8/test/mjsunit/debug-sourceinfo.js
+++ b/deps/v8/test/mjsunit/debug-sourceinfo.js
@@ -64,12 +64,12 @@ var comment_lines = 29;
// This is the last position in the entire file (note: this equals
// file size of <debug-sourceinfo.js> - 1, since starting at 0).
-var last_position = 14072;
+var last_position = 14312;
// This is the last line of entire file (note: starting at 0).
-var last_line = 345;
+var last_line = 351;
// This is the last column of last line (note: starting at 0 and +2, due
// to trailing <CR><LF>).
-var last_column = 48;
+var last_column = 2;
// This magic number is the length or the first line comment (actually number
// of characters before 'function a(...'.
@@ -344,3 +344,9 @@ assertEquals(' c(tru', location.sourceText());
location = script.locationFromLine(1, 0, start_b);
location.restrict(7, 6);
assertEquals(' c(tru', location.sourceText());
+
+// Test that script.sourceLine(line) works.
+for (line = 0; line < num_lines_d; line++) {
+ var line_content_regexp = new RegExp(" x = " + (line + 1));
+ assertTrue(line_content_regexp.test(script.sourceLine(start_line_d + line)));
+}
diff --git a/deps/v8/test/mjsunit/regexp-captures.js b/deps/v8/test/mjsunit/regexp-captures.js
new file mode 100644
index 0000000000..91548d643b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-captures.js
@@ -0,0 +1,31 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var re = /^(((N({)?)|(R)|(U)|(V)|(B)|(H)|(n((n)|(r)|(v)|(h))?)|(r(r)?)|(v)|(b((n)|(b))?)|(h))|((Y)|(A)|(E)|(o(u)?)|(p(u)?)|(q(u)?)|(s)|(t)|(u)|(w)|(x(u)?)|(y)|(z)|(a((T)|(A)|(L))?)|(c)|(e)|(f(u)?)|(g(u)?)|(i)|(j)|(l)|(m(u)?)))+/;
+var r = new RegExp(re)
+var str = "Avtnennan gunzvmu pubExnY nEvln vaTxh rmuhguhaTxnY"
+assertTrue(r.test(str));
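
The regression above only asserts that the nested, quantified alternation matches at all. For illustration, a smaller standard-ECMAScript case (not tied to this particular bug) of what capture handling under repetition means: captures inside a quantified group are reset on each iteration, so a group that did not take part in the final iteration comes back undefined.

// Illustrative only: group 1 matched in an earlier iteration and is cleared,
// group 2 matched in the final iteration and is kept.
var m = /^(?:(a)|(b))+$/.exec("ab");
assertEquals("ab", m[0]);
assertEquals(undefined, m[1]);
assertEquals("b", m[2]);
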
diff --git a/deps/v8/test/mjsunit/regress/regress-1919169.js b/deps/v8/test/mjsunit/regress/regress-1919169.js
new file mode 100644
index 0000000000..774f26558d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1919169.js
@@ -0,0 +1,40 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+function test() {
+ var s2 = "s2";
+ for (var i = 0; i < 2; i++) {
+ // Crashes in round i==1 with IllegalAccess in %StringAdd(x,y)
+ var res = 1 + s2;
+ s2 = 2;
+ }
+}
+
+// Crash does not occur when code is run at the top level.
+test();
+
diff --git a/deps/v8/test/mjsunit/regress/regress-386.js b/deps/v8/test/mjsunit/regress/regress-386.js
new file mode 100644
index 0000000000..06e4b8edfe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-386.js
@@ -0,0 +1,47 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Test for http://code.google.com/p/v8/issues/detail?id=386
+// This test creates enough properties in A so that adding i as
+// a constant function, in the first call to the constructor, leaves
+// the object's map in the fast case and adds a constant function map
+// transition.
+// Adding i in the second call to the constructor creates a real property,
+// and simultaneously converts the object from fast case to slow case
+// and changes i from a map transition to a real property. There was
+// a flaw in the code that handled this combination of events.
+
+function A() {
+ for (var i = 0; i < 13; i++) {
+ this['a' + i] = i;
+ }
+ this.i = function(){};
+};
+
+new A();
+new A();
diff --git a/deps/v8/test/mjsunit/regress/regress-392.js b/deps/v8/test/mjsunit/regress/regress-392.js
new file mode 100644
index 0000000000..3cabcacf11
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-392.js
@@ -0,0 +1,34 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for issue 392 reported by nth10sd; see
+// http://code.google.com/p/v8/issues/detail?id=392
+
+assertTrue(isNaN((function(){return arguments++})()));
+assertTrue(isNaN((function(){return ++arguments})()));
+assertTrue(isNaN((function(){return arguments--})()));
+assertTrue(isNaN((function(){return --arguments})()));
diff --git a/deps/v8/test/mjsunit/regress/regress-6-9-regexp.js b/deps/v8/test/mjsunit/regress/regress-6-9-regexp.js
new file mode 100644
index 0000000000..c73b37d669
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6-9-regexp.js
@@ -0,0 +1,30 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that the perfect mask check isn't overly optimistic.
+
+assertFalse(/[6-9]/.test('2'));
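
A few neighbouring checks in the same spirit, illustrative only and assuming the mask fast path is exercised by small contiguous digit classes: characters just outside the range must be rejected and the boundaries accepted.

assertFalse(/[6-9]/.test('5'));
assertFalse(/[6-9]/.test(':'));  // ':' is the code point right after '9'
assertTrue(/[6-9]/.test('6'));
assertTrue(/[6-9]/.test('9'));
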
diff --git a/deps/v8/test/mjsunit/toint32.js b/deps/v8/test/mjsunit/toint32.js
index a5582956bf..9dad9c9dc1 100644
--- a/deps/v8/test/mjsunit/toint32.js
+++ b/deps/v8/test/mjsunit/toint32.js
@@ -29,19 +29,19 @@ function toInt32(x) {
return x | 0;
}
-assertEquals(0, toInt32(Infinity));
-assertEquals(0, toInt32(-Infinity));
-assertEquals(0, toInt32(NaN));
-assertEquals(0, toInt32(0.0));
-assertEquals(0, toInt32(-0.0));
+assertEquals(0, toInt32(Infinity), "Inf");
+assertEquals(0, toInt32(-Infinity), "-Inf");
+assertEquals(0, toInt32(NaN), "NaN");
+assertEquals(0, toInt32(0.0), "zero");
+assertEquals(0, toInt32(-0.0), "-zero");
assertEquals(0, toInt32(Number.MIN_VALUE));
assertEquals(0, toInt32(-Number.MIN_VALUE));
assertEquals(0, toInt32(0.1));
assertEquals(0, toInt32(-0.1));
-assertEquals(1, toInt32(1));
-assertEquals(1, toInt32(1.1));
-assertEquals(-1, toInt32(-1));
+assertEquals(1, toInt32(1), "one");
+assertEquals(1, toInt32(1.1), "onepointone");
+assertEquals(-1, toInt32(-1), "-one");
assertEquals(0, toInt32(0.6), "truncate positive (0.6)");
assertEquals(1, toInt32(1.6), "truncate positive (1.6)");
assertEquals(0, toInt32(-0.6), "truncate negative (-0.6)");
diff --git a/deps/v8/test/mjsunit/tools/logreader.js b/deps/v8/test/mjsunit/tools/logreader.js
new file mode 100644
index 0000000000..dfd7f9f54e
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/logreader.js
@@ -0,0 +1,82 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Load CSV Parser and Log Reader implementations from <project root>/tools.
+// Files: tools/csvparser.js tools/logreader.js
+
+
+(function testAddressParser() {
+ var reader = new devtools.profiler.LogReader({});
+ var parser = reader.createAddressParser('test');
+
+ // Test that 0x values are parsed, and prevAddresses_ is left untouched.
+ assertFalse('test' in reader.prevAddresses_);
+ assertEquals(0, parser('0x0'));
+ assertFalse('test' in reader.prevAddresses_);
+ assertEquals(0x100, parser('0x100'));
+ assertFalse('test' in reader.prevAddresses_);
+ assertEquals(0xffffffff, parser('0xffffffff'));
+ assertFalse('test' in reader.prevAddresses_);
+
+ // Test that values that have no '+' or '-' prefix are parsed
+ // and saved to prevAddresses_.
+ assertEquals(0, parser('0'));
+ assertEquals(0, reader.prevAddresses_.test);
+ assertEquals(0x100, parser('100'));
+ assertEquals(0x100, reader.prevAddresses_.test);
+ assertEquals(0xffffffff, parser('ffffffff'));
+ assertEquals(0xffffffff, reader.prevAddresses_.test);
+
+ // Test that values prefixed with '+' or '-' are treated as deltas,
+ // and prevAddresses_ is updated.
+ // Set base value.
+ assertEquals(0x100, parser('100'));
+ assertEquals(0x100, reader.prevAddresses_.test);
+ assertEquals(0x200, parser('+100'));
+ assertEquals(0x200, reader.prevAddresses_.test);
+ assertEquals(0x100, parser('-100'));
+ assertEquals(0x100, reader.prevAddresses_.test);
+})();
+
+
+(function testStackProcessing() {
+ var reader = new devtools.profiler.LogReader({});
+
+ assertEquals([0x10000000, 0x10001000, 0xffff000, 0x10000000],
+ reader.processStack(0x10000000, ['overflow',
+ '+1000', '-2000', '+1000']));
+})();
+
+
+(function testExpandBackRef() {
+ var reader = new devtools.profiler.LogReader({});
+
+ assertEquals('aaaaaaaa', reader.expandBackRef_('aaaaaaaa'));
+ assertEquals('aaaaaaaa', reader.expandBackRef_('#1'));
+ assertEquals('bbbbaaaa', reader.expandBackRef_('bbbb#2:4'));
+ assertEquals('"#1:1"', reader.expandBackRef_('"#1:1"'));
+})();
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 97182f3b17..760ed4100c 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -88,17 +88,18 @@ js1_5/GC/regress-348532: SLOW
##################### FLAKY TESTS #####################
# These tests time out in debug mode but pass in product mode
+js1_5/Regress/regress-360969-03: PASS || TIMEOUT if $mode == debug
+js1_5/Regress/regress-360969-04: PASS || TIMEOUT if $mode == debug
+js1_5/Regress/regress-360969-05: PASS || TIMEOUT if $mode == debug
+js1_5/Regress/regress-360969-06: PASS || TIMEOUT if $mode == debug
+js1_5/extensions/regress-365527: PASS || TIMEOUT if $mode == debug
+
js1_5/Regress/regress-280769-3: PASS || FAIL if $mode == debug
js1_5/Regress/regress-203278-1: PASS || FAIL if $mode == debug
js1_5/GC/regress-203278-2: PASS || FAIL if $mode == debug
js1_5/Regress/regress-244470: PASS || FAIL if $mode == debug
ecma_3/RegExp/regress-209067: PASS || FAIL if $mode == debug
js1_5/GC/regress-278725: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-360969-03: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-360969-04: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-360969-05: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-360969-06: PASS || FAIL if $mode == debug
-js1_5/extensions/regress-365527: PASS || FAIL if $mode == debug
# http://b/issue?id=1206983
js1_5/Regress/regress-367561-03: PASS || FAIL if $mode == debug
ecma/Date/15.9.5.10-2: PASS || FAIL if $mode == debug
@@ -148,7 +149,7 @@ js1_5/String/regress-322772: PASS || FAIL
js1_5/Array/regress-99120-01: PASS || FAIL
js1_5/Array/regress-99120-02: PASS || FAIL
js1_5/Regress/regress-347306-01: PASS || FAIL
-js1_5/Regress/regress-416628: PASS || FAIL
+js1_5/Regress/regress-416628: PASS || FAIL || TIMEOUT if $mode == debug
# The following two tests assume that daylight savings time starts first Sunday
@@ -203,7 +204,7 @@ ecma/String/15.5.4.12-4: FAIL_OK
ecma/String/15.5.4.12-5: FAIL_OK
# Creates a linked list of arrays until we run out of memory or timeout.
-js1_5/Regress/regress-312588: FAIL_OK
+js1_5/Regress/regress-312588: FAIL || TIMEOUT
# Runs out of memory because it compiles huge functions.
@@ -247,14 +248,14 @@ js1_5/extensions/regress-459606: PASS || FAIL_OK
# PCRE's match limit is reached. SpiderMonkey hangs on the first one,
# JSC returns true somehow. Maybe they up the match limit? There is
# an open V8 bug 676063 about this.
-ecma_3/RegExp/regress-330684: FAIL_OK
+ecma_3/RegExp/regress-330684: TIMEOUT
# This test contains a regexp that runs exponentially long. Spidermonkey
# standalone will hang, though apparently inside Firefox it will trigger a
# long-running-script timeout. JSCRE passes by hitting the matchLimit and
# just pretending that an exhaustive search found no match.
-ecma_3/RegExp/regress-307456: PASS || FAIL_OK
+ecma_3/RegExp/regress-307456: PASS || TIMEOUT
# We do not detect overflow in bounds for back references and {}
@@ -594,7 +595,7 @@ js1_5/Regress/regress-306633: FAIL
# This test seems designed to fail (it produces a 700Mbyte string).
# We fail on out of memory. The important thing is not to crash.
-js1_5/Regress/regress-303213: FAIL
+js1_5/Regress/regress-303213: FAIL || TIMEOUT if $mode == debug
# Bug 1202592: New ecma_3/String/15.5.4.11 is failing.
@@ -630,7 +631,6 @@ js1_5/extensions/regress-313803: FAIL_OK
js1_5/extensions/regress-314874: FAIL_OK
js1_5/extensions/regress-322957: FAIL_OK
js1_5/extensions/regress-328556: FAIL_OK
-js1_5/extensions/regress-330569: FAIL_OK
js1_5/extensions/regress-333541: FAIL_OK
js1_5/extensions/regress-335700: FAIL_OK
js1_5/extensions/regress-336409-1: FAIL_OK
@@ -640,7 +640,6 @@ js1_5/extensions/regress-336410-2: FAIL_OK
js1_5/extensions/regress-341956-01: FAIL_OK
js1_5/extensions/regress-341956-02: FAIL_OK
js1_5/extensions/regress-341956-03: FAIL_OK
-js1_5/extensions/regress-342960: FAIL_OK
js1_5/extensions/regress-345967: FAIL_OK
js1_5/extensions/regress-346494-01: FAIL_OK
js1_5/extensions/regress-346494: FAIL_OK
@@ -653,7 +652,6 @@ js1_5/extensions/regress-350531: FAIL_OK
js1_5/extensions/regress-351102-01: FAIL_OK
js1_5/extensions/regress-351102-02: FAIL_OK
js1_5/extensions/regress-351102-06: FAIL_OK
-js1_5/extensions/regress-351448: FAIL_OK
js1_5/extensions/regress-351973: FAIL_OK
js1_5/extensions/regress-352060: FAIL_OK
js1_5/extensions/regress-352094: FAIL_OK
@@ -716,6 +714,10 @@ js1_5/extensions/scope-001: FAIL_OK
js1_5/extensions/toLocaleFormat-01: FAIL_OK
js1_5/extensions/toLocaleFormat-02: FAIL_OK
+js1_5/extensions/regress-330569: TIMEOUT
+js1_5/extensions/regress-351448: TIMEOUT
+js1_5/extensions/regress-342960: FAIL_OK || TIMEOUT if $mode == debug
+
##################### DECOMPILATION TESTS #####################
@@ -776,13 +778,11 @@ js1_5/decompilation/regress-383721: PASS || FAIL
js1_5/decompilation/regress-406555: PASS || FAIL
-[ $FAST == yes ]
-
# These tests take an unreasonable amount of time so we skip them
# in fast mode.
-js1_5/Regress/regress-312588: SKIP
-js1_5/Regress/regress-271716-n: SKIP
+js1_5/Regress/regress-312588: TIMEOUT || SKIP if $FAST == yes
+js1_5/Regress/regress-271716-n: PASS || SKIP if $FAST == yes
[ $FAST == yes && $ARCH == arm ]
diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js
index 3766db0481..d6df7fa969 100644
--- a/deps/v8/tools/codemap.js
+++ b/deps/v8/tools/codemap.js
@@ -126,7 +126,7 @@ devtools.profiler.CodeMap.prototype.addStaticCode = function(
devtools.profiler.CodeMap.prototype.markPages_ = function(start, end) {
for (var addr = start; addr <= end;
addr += devtools.profiler.CodeMap.PAGE_SIZE) {
- this.pages_[addr >> devtools.profiler.CodeMap.PAGE_ALIGNMENT] = 1;
+ this.pages_[addr >>> devtools.profiler.CodeMap.PAGE_ALIGNMENT] = 1;
}
};
@@ -155,7 +155,7 @@ devtools.profiler.CodeMap.prototype.findInTree_ = function(tree, addr) {
* @param {number} addr Address.
*/
devtools.profiler.CodeMap.prototype.findEntry = function(addr) {
- var pageAddr = addr >> devtools.profiler.CodeMap.PAGE_ALIGNMENT;
+ var pageAddr = addr >>> devtools.profiler.CodeMap.PAGE_ALIGNMENT;
if (pageAddr in this.pages_) {
return this.findInTree_(this.statics_, addr);
}
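
The switch from '>>' to '>>>' changes how page keys are derived for addresses at or above 2^31: the signed shift yields a negative key, the unsigned shift the non-negative page index. A small numeric illustration, where 12 stands in for CodeMap.PAGE_ALIGNMENT (an assumed value; the real constant lives elsewhere in codemap.js):

var PAGE_ALIGNMENT = 12;                    // assumed: 4K pages, 12 alignment bits
var addr = 0xF0000000;                      // a code address above 2^31
var signedKey = addr >> PAGE_ALIGNMENT;     // -65536: the sign bit leaks into the key
var unsignedKey = addr >>> PAGE_ALIGNMENT;  // 983040: the non-negative page index

Since markPages_ and findEntry are changed together, both sides keep using the same key scheme.
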
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 66e1bb66d9..8815456d79 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -164,6 +164,7 @@
'../../src/list-inl.h',
'../../src/list.h',
'../../src/log.cc',
+ '../../src/log-inl.h',
'../../src/log.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor
index 968c24129f..c5130ff125 100644
--- a/deps/v8/tools/linux-tick-processor
+++ b/deps/v8/tools/linux-tick-processor
@@ -1,15 +1,23 @@
#!/bin/sh
tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+ d8_public=`which d8`
+ if [ $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
[ "$D8_PATH" ] || D8_PATH=$tools_path/..
d8_exec=$D8_PATH/d8
+if [ "$1" == "--no-build" ]; then
+ shift
+else
# compile d8 if it doesn't exist, assuming this script
# resides in the repository.
-[ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
+ [ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
+fi
# nm spits out 'no symbols found' messages to stderr.
$d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
$tools_path/csvparser.js $tools_path/consarray.js \
$tools_path/profile.js $tools_path/profile_view.js \
- $tools_path/tickprocessor.js -- $@ 2>/dev/null
+ $tools_path/logreader.js $tools_path/tickprocessor.js -- $@ 2>/dev/null
diff --git a/deps/v8/tools/logreader.js b/deps/v8/tools/logreader.js
new file mode 100644
index 0000000000..78085a451e
--- /dev/null
+++ b/deps/v8/tools/logreader.js
@@ -0,0 +1,317 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * @fileoverview Log Reader is used to process log files produced by V8.
+ */
+
+// Initialize namespaces
+var devtools = devtools || {};
+devtools.profiler = devtools.profiler || {};
+
+
+/**
+ * Base class for processing log files.
+ *
+ * @param {Array.<Object>} dispatchTable A table used for parsing and processing
+ * log records.
+ * @constructor
+ */
+devtools.profiler.LogReader = function(dispatchTable) {
+ /**
+ * @type {Array.<Object>}
+ */
+ this.dispatchTable_ = dispatchTable;
+ this.dispatchTable_['alias'] =
+ { parsers: [null, null], processor: this.processAlias_ };
+ this.dispatchTable_['repeat'] =
+ { parsers: [parseInt, 'var-args'], processor: this.processRepeat_,
+ backrefs: true };
+
+ /**
+ * A key-value map for aliases. Translates short name -> full name.
+ * @type {Object}
+ */
+ this.aliases_ = {};
+
+ /**
+ * A key-value map for previous address values.
+ * @type {Object}
+ */
+ this.prevAddresses_ = {};
+
+ /**
+ * A key-value map for events that can be backreference-compressed.
+ * @type {Object}
+ */
+ this.backRefsCommands_ = {};
+ this.initBackRefsCommands_();
+
+ /**
+ * Back references for decompression.
+ * @type {Array.<string>}
+ */
+ this.backRefs_ = [];
+};
+
+
+/**
+ * Creates a parser for an address entry.
+ *
+ * @param {string} addressTag Address tag to perform offset decoding.
+ * @return {function(string):number} Address parser.
+ */
+devtools.profiler.LogReader.prototype.createAddressParser = function(
+ addressTag) {
+ var self = this;
+ return (function (str) {
+ var value = parseInt(str, 16);
+ var firstChar = str.charAt(0);
+ if (firstChar == '+' || firstChar == '-') {
+ var addr = self.prevAddresses_[addressTag];
+ addr += value;
+ self.prevAddresses_[addressTag] = addr;
+ return addr;
+ } else if (firstChar != '0' || str.charAt(1) != 'x') {
+ self.prevAddresses_[addressTag] = value;
+ }
+ return value;
+ });
+};
+
+
+/**
+ * Expands an alias symbol, if applicable.
+ *
+ * @param {string} symbol Symbol to expand.
+ * @return {string} Expanded symbol, or the input symbol itself.
+ */
+devtools.profiler.LogReader.prototype.expandAlias = function(symbol) {
+ return symbol in this.aliases_ ? this.aliases_[symbol] : symbol;
+};
+
+
+/**
+ * Used for printing error messages.
+ *
+ * @param {string} str Error message.
+ */
+devtools.profiler.LogReader.prototype.printError = function(str) {
+ // Do nothing.
+};
+
+
+/**
+ * Processes a portion of V8 profiler event log.
+ *
+ * @param {string} chunk A portion of log.
+ */
+devtools.profiler.LogReader.prototype.processLogChunk = function(chunk) {
+ this.processLog_(chunk.split('\n'));
+};
+
+
+/**
+ * Processes stack record.
+ *
+ * @param {number} pc Program counter.
+ * @param {Array.<string>} stack String representation of a stack.
+ * @return {Array.<number>} Processed stack.
+ */
+devtools.profiler.LogReader.prototype.processStack = function(pc, stack) {
+ var fullStack = [pc];
+ var prevFrame = pc;
+ for (var i = 0, n = stack.length; i < n; ++i) {
+ var frame = stack[i];
+ var firstChar = frame.charAt(0);
+ if (firstChar == '+' || firstChar == '-') {
+ // An offset from the previous frame.
+ prevFrame += parseInt(frame, 16);
+ fullStack.push(prevFrame);
+ // Filter out possible 'overflow' string.
+ } else if (firstChar != 'o') {
+ fullStack.push(parseInt(frame, 16));
+ }
+ }
+ return fullStack;
+};
+
+
+/**
+ * Returns whether a particular dispatch must be skipped.
+ *
+ * @param {!Object} dispatch Dispatch record.
+ * @return {boolean} True if dispatch must be skipped.
+ */
+devtools.profiler.LogReader.prototype.skipDispatch = function(dispatch) {
+ return false;
+};
+
+
+/**
+ * Dispatches a log record.
+ *
+ * @param {Array.<string>} fields Log record.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.dispatchLogRow_ = function(fields) {
+ // Obtain the dispatch.
+ var command = fields[0];
+ if (!(command in this.dispatchTable_)) {
+ throw new Error('unknown command: ' + command);
+ }
+ var dispatch = this.dispatchTable_[command];
+
+ if (dispatch === null || this.skipDispatch(dispatch)) {
+ return;
+ }
+
+ // Parse fields.
+ var parsedFields = [];
+ for (var i = 0; i < dispatch.parsers.length; ++i) {
+ var parser = dispatch.parsers[i];
+ if (parser === null) {
+ parsedFields.push(fields[1 + i]);
+ } else if (typeof parser == 'function') {
+ parsedFields.push(parser(fields[1 + i]));
+ } else {
+ // var-args
+ parsedFields.push(fields.slice(1 + i));
+ break;
+ }
+ }
+
+ // Run the processor.
+ dispatch.processor.apply(this, parsedFields);
+};
+
+
+/**
+ * Decompresses a line if it was backreference-compressed.
+ *
+ * @param {string} line Possibly compressed line.
+ * @return {string} Decompressed line.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.expandBackRef_ = function(line) {
+ var backRefPos;
+ // Filter out the case when a regexp is created containing '#'.
+ if (line.charAt(line.length - 1) != '"'
+ && (backRefPos = line.lastIndexOf('#')) != -1) {
+ var backRef = line.substr(backRefPos + 1);
+ var backRefIdx = parseInt(backRef, 10) - 1;
+ var colonPos = backRef.indexOf(':');
+ var backRefStart =
+ colonPos != -1 ? parseInt(backRef.substr(colonPos + 1), 10) : 0;
+ line = line.substr(0, backRefPos) +
+ this.backRefs_[backRefIdx].substr(backRefStart);
+ }
+ this.backRefs_.unshift(line);
+ if (this.backRefs_.length > 10) {
+ this.backRefs_.length = 10;
+ }
+ return line;
+};
+
+
+/**
+ * Initializes the map of backward reference compressible commands.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.initBackRefsCommands_ = function() {
+ for (var event in this.dispatchTable_) {
+ var dispatch = this.dispatchTable_[event];
+ if (dispatch && dispatch.backrefs) {
+ this.backRefsCommands_[event] = true;
+ }
+ }
+};
+
+
+/**
+ * Processes alias log record. Adds an alias to a corresponding map.
+ *
+ * @param {string} symbol Short name.
+ * @param {string} expansion Long name.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.processAlias_ = function(
+ symbol, expansion) {
+ if (expansion in this.dispatchTable_) {
+ this.dispatchTable_[symbol] = this.dispatchTable_[expansion];
+ if (expansion in this.backRefsCommands_) {
+ this.backRefsCommands_[symbol] = true;
+ }
+ } else {
+ this.aliases_[symbol] = expansion;
+ }
+};
+
+
+/**
+ * Processes log lines.
+ *
+ * @param {Array.<string>} lines Log lines.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.processLog_ = function(lines) {
+ var csvParser = new devtools.profiler.CsvParser();
+ try {
+ for (var i = 0, n = lines.length; i < n; ++i) {
+ var line = lines[i];
+ if (!line) {
+ continue;
+ }
+ if (line.charAt(0) == '#' ||
+ line.substr(0, line.indexOf(',')) in this.backRefsCommands_) {
+ line = this.expandBackRef_(line);
+ }
+ var fields = csvParser.parseLine(line);
+ this.dispatchLogRow_(fields);
+ }
+ } catch (e) {
+ this.printError('line ' + (i + 1) + ': ' + (e.message || e));
+ throw e;
+ }
+};
+
+
+/**
+ * Processes a repeat log record. Expands it according to the repeat count
+ * and invokes processing for each expanded record.
+ *
+ * @param {number} count Count.
+ * @param {Array.<string>} cmd Parsed command.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.processRepeat_ = function(count, cmd) {
+ // Replace the repeat-prefixed command in the backrefs list with the non-prefixed one.
+ this.backRefs_[0] = cmd.join(',');
+ for (var i = 0; i < count; ++i) {
+ this.dispatchLogRow_(cmd);
+ }
+};
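
A minimal usage sketch of the reader defined above, assuming csvparser.js is loaded alongside it (the mjsunit test earlier pulls both in through its Files: directive). The 'my-event' record type, its field layout, and the collecting handler are made up for illustration; real consumers such as the tick processor below subclass LogReader and register their own dispatch table.

var seen = [];
var reader = new devtools.profiler.LogReader({
  'my-event': {
    parsers: [null, parseInt, 'var-args'],  // raw string, number, then the row tail
    processor: function(name, count, rest) {
      seen.push(name + ' x' + count + ' [' + rest.join(',') + ']');
    },
    backrefs: true                          // rows of this type may carry #N refs
  }
});
reader.processLogChunk('my-event,foo,2,a,b\n' +
                       'my-event,bar,3,c\n');
// seen == ['foo x2 [a,b]', 'bar x3 [c]']

Back-reference expansion ('#N' and '#N:M' tokens) and the built-in 'alias' and 'repeat' records come from the base class for free.
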
diff --git a/deps/v8/tools/oprofile/annotate b/deps/v8/tools/oprofile/annotate
new file mode 100644
index 0000000000..a6a8545bd6
--- /dev/null
+++ b/deps/v8/tools/oprofile/annotate
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+opannotate --assembly --session-dir="$OPROFILE_SESSION_DIR" "$shell_exec" "$@"
+
diff --git a/deps/v8/tools/oprofile/common b/deps/v8/tools/oprofile/common
new file mode 100644
index 0000000000..fd00207ab0
--- /dev/null
+++ b/deps/v8/tools/oprofile/common
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# Determine the session directory to use for oprofile.
+[ "$OPROFILE_SESSION_DIR" ] || OPROFILE_SESSION_DIR=/tmp/oprofv8
+
+# If no executable passed as the first parameter assume V8 release mode shell.
+if [[ -x $1 ]]
+then
+ shell_exec=`readlink -f "$1"`
+ # Any additional parameters are for the oprofile command.
+ shift
+else
+ oprofile_tools_path=`cd $(dirname "$0");pwd`
+ [ "$V8_SHELL_DIR" ] || V8_SHELL_DIR=$oprofile_tools_path/../..
+ shell_exec=$V8_SHELL_DIR/shell
+fi
+
+alias sudo_opcontrol='sudo opcontrol --session-dir="$OPROFILE_SESSION_DIR"'
+
diff --git a/deps/v8/tools/oprofile/dump b/deps/v8/tools/oprofile/dump
new file mode 100644
index 0000000000..17bb0a1b08
--- /dev/null
+++ b/deps/v8/tools/oprofile/dump
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+sudo_opcontrol --dump "$@"
+
diff --git a/deps/v8/tools/oprofile/report b/deps/v8/tools/oprofile/report
new file mode 100644
index 0000000000..b7f28b9c45
--- /dev/null
+++ b/deps/v8/tools/oprofile/report
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+opreport --symbols --session-dir="$OPROFILE_SESSION_DIR" "$shell_exec" "$@"
+
diff --git a/deps/v8/tools/oprofile/reset b/deps/v8/tools/oprofile/reset
new file mode 100644
index 0000000000..edb707110f
--- /dev/null
+++ b/deps/v8/tools/oprofile/reset
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+sudo_opcontrol --reset "$@"
+
diff --git a/deps/v8/tools/oprofile/run b/deps/v8/tools/oprofile/run
new file mode 100644
index 0000000000..0a92470a01
--- /dev/null
+++ b/deps/v8/tools/oprofile/run
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+# Reset oprofile samples.
+sudo_opcontrol --reset
+
+# Run the executable to profile with the correct arguments.
+"$shell_exec" --oprofile "$@"
+
+# Flush oprofile data including the generated code into ELF binaries.
+sudo_opcontrol --dump
+
diff --git a/deps/v8/tools/oprofile/shutdown b/deps/v8/tools/oprofile/shutdown
new file mode 100644
index 0000000000..8ebb72f06b
--- /dev/null
+++ b/deps/v8/tools/oprofile/shutdown
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+sudo_opcontrol --shutdown "$@"
+
diff --git a/deps/v8/tools/oprofile/start b/deps/v8/tools/oprofile/start
new file mode 100644
index 0000000000..059e4b84c1
--- /dev/null
+++ b/deps/v8/tools/oprofile/start
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+sudo_opcontrol --start --no-vmlinux "$@"
+
diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py
index 6bd536b388..f701ceb778 100755
--- a/deps/v8/tools/test.py
+++ b/deps/v8/tools/test.py
@@ -372,6 +372,8 @@ class TestOutput(object):
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
+ elif self.HasTimedOut():
+ outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
@@ -390,7 +392,7 @@ class TestOutput(object):
def HasTimedOut(self):
return self.output.timed_out;
-
+
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index cce579bb5f..4afc69f613 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -52,8 +52,35 @@ function readFile(fileName) {
}
+function inherits(childCtor, parentCtor) {
+ function tempCtor() {};
+ tempCtor.prototype = parentCtor.prototype;
+ childCtor.prototype = new tempCtor();
+};
+
+
function TickProcessor(
cppEntriesProvider, separateIc, ignoreUnknown, stateFilter) {
+ devtools.profiler.LogReader.call(this, {
+ 'shared-library': { parsers: [null, parseInt, parseInt],
+ processor: this.processSharedLibrary },
+ 'code-creation': {
+ parsers: [null, this.createAddressParser('code'), parseInt, null],
+ processor: this.processCodeCreation, backrefs: true },
+ 'code-move': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('code-move-to')],
+ processor: this.processCodeMove, backrefs: true },
+ 'code-delete': { parsers: [this.createAddressParser('code')],
+ processor: this.processCodeDelete, backrefs: true },
+ 'tick': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('stack'), parseInt, 'var-args'],
+ processor: this.processTick, backrefs: true },
+ 'profiler': null,
+ // Obsolete row types.
+ 'code-allocate': null,
+ 'begin-code-region': null,
+ 'end-code-region': null });
+
this.cppEntriesProvider_ = cppEntriesProvider;
this.ignoreUnknown_ = ignoreUnknown;
this.stateFilter_ = stateFilter;
@@ -86,8 +113,8 @@ function TickProcessor(
// Count each tick as a time unit.
this.viewBuilder_ = new devtools.profiler.ViewBuilder(1);
this.lastLogFileName_ = null;
- this.aliases_ = {};
};
+inherits(TickProcessor, devtools.profiler.LogReader);
TickProcessor.VmStates = {
@@ -107,27 +134,17 @@ TickProcessor.CodeTypes = {
// codeTypes_ map because there can be zillions of them.
-TickProcessor.RecordsDispatch = {
- 'shared-library': { parsers: [null, parseInt, parseInt],
- processor: 'processSharedLibrary' },
- 'code-creation': { parsers: [null, parseInt, parseInt, null],
- processor: 'processCodeCreation' },
- 'code-move': { parsers: [parseInt, parseInt],
- processor: 'processCodeMove' },
- 'code-delete': { parsers: [parseInt], processor: 'processCodeDelete' },
- 'tick': { parsers: [parseInt, parseInt, parseInt, 'var-args'],
- processor: 'processTick' },
- 'alias': { parsers: [null, null], processor: 'processAlias' },
- 'profiler': null,
- // Obsolete row types.
- 'code-allocate': null,
- 'begin-code-region': null,
- 'end-code-region': null
-};
-
TickProcessor.CALL_PROFILE_CUTOFF_PCT = 2.0;
+/**
+ * @override
+ */
+TickProcessor.prototype.printError = function(str) {
+ print(str);
+};
+
+
TickProcessor.prototype.setCodeType = function(name, type) {
this.codeTypes_[name] = TickProcessor.CodeTypes[type];
};
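
With the hand-rolled RecordsDispatch table gone, the subclass-specific piece left in this hunk is the printError() override, which the shared reader is assumed to call when a row fails to parse. A small sketch of that hook-override pattern (hypothetical base class, not logreader.js itself):

    // The base class funnels problems through printError(); a subclass
    // replaces the no-op default with real output.
    function BaseReader() {}
    BaseReader.prototype.printError = function(str) {};  // default: drop
    BaseReader.prototype.fail = function(msg) { this.printError('error: ' + msg); };

    function VerboseReader() {}
    VerboseReader.prototype = new BaseReader();
    VerboseReader.prototype.printError = function(str) { print(str); };

    new VerboseReader().fail('bad row');  // prints "error: bad row"
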
@@ -151,57 +168,7 @@ TickProcessor.prototype.isJsCode = function(name) {
TickProcessor.prototype.processLogFile = function(fileName) {
this.lastLogFileName_ = fileName;
var contents = readFile(fileName);
- this.processLog(contents.split('\n'));
-};
-
-
-TickProcessor.prototype.processLog = function(lines) {
- var csvParser = new devtools.profiler.CsvParser();
- try {
- for (var i = 0, n = lines.length; i < n; ++i) {
- var line = lines[i];
- if (!line) {
- continue;
- }
- var fields = csvParser.parseLine(line);
- this.dispatchLogRow(fields);
- }
- } catch (e) {
- print('line ' + (i + 1) + ': ' + (e.message || e));
- throw e;
- }
-};
-
-
-TickProcessor.prototype.dispatchLogRow = function(fields) {
- // Obtain the dispatch.
- var command = fields[0];
- if (!(command in TickProcessor.RecordsDispatch)) {
- throw new Error('unknown command: ' + command);
- }
- var dispatch = TickProcessor.RecordsDispatch[command];
-
- if (dispatch === null) {
- return;
- }
-
- // Parse fields.
- var parsedFields = [];
- for (var i = 0; i < dispatch.parsers.length; ++i) {
- var parser = dispatch.parsers[i];
- if (parser === null) {
- parsedFields.push(fields[1 + i]);
- } else if (typeof parser == 'function') {
- parsedFields.push(parser(fields[1 + i]));
- } else {
- // var-args
- parsedFields.push(fields.slice(1 + i));
- break;
- }
- }
-
- // Run the processor.
- this[dispatch.processor].apply(this, parsedFields);
+ this.processLogChunk(contents);
};
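
processLog() and dispatchLogRow() move into the shared logreader.js; processLogChunk() is assumed to do the same work on a whole file's contents: split on newlines, skip blank rows, and report the offending line number on failure. A hypothetical stand-alone equivalent:

    // Hypothetical equivalent of a processLogChunk-style entry point.
    function processChunk(chunk, processLine, printError) {
      var lines = chunk.split('\n');
      for (var i = 0; i < lines.length; ++i) {
        try {
          if (lines[i]) processLine(lines[i]);  // skip empty rows
        } catch (e) {
          printError('line ' + (i + 1) + ': ' + (e.message || e));
          throw e;
        }
      }
    }

    processChunk('tick,1\n\ntick,2\n', function(row) { print(row); }, print);
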
@@ -219,22 +186,10 @@ TickProcessor.prototype.processSharedLibrary = function(
};
-TickProcessor.prototype.processAlias = function(symbol, expansion) {
- if (expansion in TickProcessor.RecordsDispatch) {
- TickProcessor.RecordsDispatch[symbol] =
- TickProcessor.RecordsDispatch[expansion];
- } else {
- this.aliases_[symbol] = expansion;
- }
-};
-
-
TickProcessor.prototype.processCodeCreation = function(
type, start, size, name) {
- if (type in this.aliases_) {
- type = this.aliases_[type];
- }
- var entry = this.profile_.addCode(type, name, start, size);
+ var entry = this.profile_.addCode(
+ this.expandAlias(type), name, start, size);
};
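
processAlias() and the aliases_ map are gone as well; based on the removed code, the base class's expandAlias() is assumed to be a plain lookup that falls back to the unaliased name:

    // Assumed shape of the alias expansion now provided by the base class.
    var aliases = { 'lic': 'LoadIC', 'sic': 'StoreIC' };  // illustrative entries
    function expandAlias(name) {
      return name in aliases ? aliases[name] : name;
    }
    print(expandAlias('lic'));      // "LoadIC"
    print(expandAlias('Builtin'));  // "Builtin"
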
@@ -261,21 +216,7 @@ TickProcessor.prototype.processTick = function(pc, sp, vmState, stack) {
return;
}
- var fullStack = [pc];
- var prevFrame = pc;
- for (var i = 0, n = stack.length; i < n; ++i) {
- var frame = stack[i];
- var firstChar = frame.charAt(0);
- // Leave only numbers starting with 0x. Filter possible 'overflow' string.
- if (firstChar == '0') {
- fullStack.push(parseInt(frame, 16));
- } else if (firstChar == '+' || firstChar == '-') {
- // An offset from the previous frame.
- prevFrame += parseInt(frame, 16);
- fullStack.push(prevFrame);
- }
- }
- this.profile_.recordTick(fullStack);
+ this.profile_.recordTick(this.processStack(pc, stack));
};
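
The frame-walking loop deleted above now lives in the base class as processStack(); restated as a stand-alone function, the removed logic keeps absolute frames and resolves '+'/'-' entries as signed hex offsets from the previous frame:

    // Restatement of the loop removed above (assumed to match processStack):
    // keep absolute frames, chain '+'/'-' offsets, drop anything else
    // (e.g. the possible "overflow" marker).
    function expandStack(pc, stack) {
      var fullStack = [pc];
      var prevFrame = pc;
      for (var i = 0, n = stack.length; i < n; ++i) {
        var frame = stack[i];
        var firstChar = frame.charAt(0);
        if (firstChar == '0') {                // absolute address, e.g. "0x2b3f0a00"
          fullStack.push(parseInt(frame, 16));
        } else if (firstChar == '+' || firstChar == '-') {
          prevFrame += parseInt(frame, 16);    // offset from the previous frame
          fullStack.push(prevFrame);
        }
      }
      return fullStack;
    }

    print(expandStack(0x1000, ['+0x10', '-0x8']));  // 4096,4112,4104
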
@@ -438,7 +379,9 @@ CppEntriesProvider.prototype.parseVmSymbols = function(
function addPrevEntry(end) {
// Several functions can be mapped onto the same address. To avoid
// creating zero-sized entries, skip such duplicates.
- if (prevEntry && prevEntry.start != end) {
+ // Also double-check that function belongs to the library address space.
+ if (prevEntry && prevEntry.start < end &&
+ prevEntry.start >= libStart && end <= libEnd) {
processorFunc(prevEntry.name, prevEntry.start, end);
}
}
@@ -469,29 +412,28 @@ CppEntriesProvider.prototype.parseNextLine = function() {
};
-function inherits(childCtor, parentCtor) {
- function tempCtor() {};
- tempCtor.prototype = parentCtor.prototype;
- childCtor.prototype = new tempCtor();
-};
-
-
-function UnixCppEntriesProvider() {
+function UnixCppEntriesProvider(nmExec) {
this.symbols = [];
this.parsePos = 0;
+ this.nmExec = nmExec;
};
inherits(UnixCppEntriesProvider, CppEntriesProvider);
-UnixCppEntriesProvider.FUNC_RE = /^([0-9a-fA-F]{8}) . (.*)$/;
+UnixCppEntriesProvider.FUNC_RE = /^([0-9a-fA-F]{8}) [tTwW] (.*)$/;
UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
- this.symbols = [
- os.system('nm', ['-C', '-n', libName], -1, -1),
- os.system('nm', ['-C', '-n', '-D', libName], -1, -1)
- ];
this.parsePos = 0;
+ try {
+ this.symbols = [
+ os.system(this.nmExec, ['-C', '-n', libName], -1, -1),
+ os.system(this.nmExec, ['-C', '-n', '-D', libName], -1, -1)
+ ];
+ } catch (e) {
+ // If the library cannot be found on this system let's not panic.
+ this.symbols = ['', ''];
+ }
};
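
UnixCppEntriesProvider now takes the nm binary as a constructor argument (wired up via --nm below) and no longer aborts when a library's symbols cannot be read; the tightened FUNC_RE also keeps only text (t/T) and weak (w/W) symbols from the `nm -C -n` listing. For example (sample nm lines are made up):

    // The new symbol-type filter applied to made-up `nm -C -n` output:
    // text (t/T) and weak (w/W) symbols pass, data symbols do not.
    var FUNC_RE = /^([0-9a-fA-F]{8}) [tTwW] (.*)$/;
    var sample = [
      '08048400 T v8::internal::Heap::CollectGarbage(int)',
      '0804a000 d some_static_data',
      '08049a00 W operator new(unsigned int)'
    ];
    for (var i = 0; i < sample.length; ++i) {
      var match = FUNC_RE.exec(sample[i]);
      if (match) print(match[2] + ' at 0x' + match[1]);  // T and W entries only
    }
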
@@ -584,7 +526,8 @@ function processArguments(args) {
platform: 'unix',
stateFilter: null,
ignoreUnknown: false,
- separateIc: false
+ separateIc: false,
+ nm: 'nm'
};
var argsDispatch = {
'-j': ['stateFilter', TickProcessor.VmStates.JS,
@@ -604,7 +547,9 @@ function processArguments(args) {
'--unix': ['platform', 'unix',
'Specify that we are running on *nix platform'],
'--windows': ['platform', 'windows',
- 'Specify that we are running on Windows platform']
+ 'Specify that we are running on Windows platform'],
+ '--nm': ['nm', 'nm',
+ 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)']
};
argsDispatch['--js'] = argsDispatch['-j'];
argsDispatch['--gc'] = argsDispatch['-g'];
@@ -636,9 +581,15 @@ function processArguments(args) {
break;
}
args.shift();
+ var userValue = null;
+ var eqPos = arg.indexOf('=');
+ if (eqPos != -1) {
+ userValue = arg.substr(eqPos + 1);
+ arg = arg.substr(0, eqPos);
+ }
if (arg in argsDispatch) {
var dispatch = argsDispatch[arg];
- result[dispatch[0]] = dispatch[1];
+ result[dispatch[0]] = userValue == null ? dispatch[1] : userValue;
} else {
printUsageAndExit();
}
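
The option loop above now splits a single --flag=value argument at the first '=', falling back to the dispatch table's built-in value when none is given; this is what lets --nm=/my_dir/nm reach UnixCppEntriesProvider. Restated on its own:

    // Restatement of the new "--flag=value" handling: split at the first '=',
    // keep the dispatch default when no explicit value is supplied.
    function splitFlag(arg, defaultValue) {
      var userValue = null;
      var eqPos = arg.indexOf('=');
      if (eqPos != -1) {
        userValue = arg.substr(eqPos + 1);
        arg = arg.substr(0, eqPos);
      }
      return { flag: arg, value: userValue == null ? defaultValue : userValue };
    }

    print(splitFlag('--nm=/usr/local/bin/nm', 'nm').value);  // "/usr/local/bin/nm"
    print(splitFlag('--windows', 'windows').value);          // "windows"
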
@@ -653,7 +604,7 @@ function processArguments(args) {
var params = processArguments(arguments);
var tickProcessor = new TickProcessor(
- params.platform == 'unix' ? new UnixCppEntriesProvider() :
+ params.platform == 'unix' ? new UnixCppEntriesProvider(params.nm) :
new WindowsCppEntriesProvider(),
params.separateIc,
params.ignoreUnknown,
diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj
index 2a7cb2db5a..6e3d2765e5 100755
--- a/deps/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj
@@ -273,6 +273,7 @@
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
+ 22A76C900FF259E600FDC694 /* log-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-inl.h"; sourceTree = "<group>"; };
58242A1E0FA1F14D00BD6F59 /* json-delay.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = "json-delay.js"; sourceTree = "<group>"; };
58950D4E0F55514900F3E8BA /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-arm.cc"; path = "arm/jump-target-arm.cc"; sourceTree = "<group>"; };
58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-ia32.cc"; path = "ia32/jump-target-ia32.cc"; sourceTree = "<group>"; };
@@ -622,6 +623,7 @@
897FF0D70E719AB300D62E90 /* C++ */ = {
isa = PBXGroup;
children = (
+ 22A76C900FF259E600FDC694 /* log-inl.h */,
897FF0F60E719B8F00D62E90 /* accessors.cc */,
897FF0F70E719B8F00D62E90 /* accessors.h */,
897FF0F80E719B8F00D62E90 /* allocation.cc */,
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index afd73f4cb0..bfdcec922c 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -541,6 +541,10 @@
>
</File>
<File
+ RelativePath="..\..\src\log-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\log.h"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index ca0a2daeb0..8ebe386c39 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -537,6 +537,10 @@
>
</File>
<File
+ RelativePath="..\..\src\log-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\log.h"
>
</File>
diff --git a/deps/v8/tools/windows-tick-processor.bat b/deps/v8/tools/windows-tick-processor.bat
index 52454e34b3..67cbe98d7c 100644
--- a/deps/v8/tools/windows-tick-processor.bat
+++ b/deps/v8/tools/windows-tick-processor.bat
@@ -2,4 +2,4 @@
SET tools_dir=%~dp0
-%tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%tickprocessor.js -- --windows %*
+%tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js -- --windows %*
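
The batch file must now load logreader.js ahead of tickprocessor.js, since TickProcessor inherits from devtools.profiler.LogReader defined there. The same ordering applies when driving d8 by hand, e.g. (relative paths are illustrative):

    // Manual equivalent of the script list the .bat file passes to d8,
    // run from the tools directory.
    load('splaytree.js');
    load('codemap.js');
    load('csvparser.js');
    load('consarray.js');
    load('profile.js');
    load('profile_view.js');
    load('logreader.js');      // must come before tickprocessor.js
    load('tickprocessor.js');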