author     Ryan Dahl <ry@tinyclouds.org>  2009-10-07 11:53:03 +0200
committer  Ryan Dahl <ry@tinyclouds.org>  2009-10-07 11:53:45 +0200
commit     1f31a7dbfe792fa6eee8a9cdcdfd662aad5cde06 (patch)
tree       c83b724056517e4bf71f203b74ad1d832d0ca7f0 /deps/v8
parent     1a2762b78e496dac4cc9fd0fb4ffb1d4f036692b (diff)
Upgrade v8 to 1.3.14
Diffstat (limited to 'deps/v8')
-rw-r--r--  deps/v8/ChangeLog | 43
-rw-r--r--  deps/v8/LICENSE | 7
-rw-r--r--  deps/v8/SConstruct | 1
-rw-r--r--  deps/v8/include/v8.h | 79
-rwxr-xr-x  deps/v8/src/SConscript | 61
-rw-r--r--  deps/v8/src/api.cc | 84
-rw-r--r--  deps/v8/src/api.h | 85
-rw-r--r--  deps/v8/src/arguments.h | 26
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 8
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 4
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 413
-rw-r--r--  deps/v8/src/arm/cfg-arm.cc | 301
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 134
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 2
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 77
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 44
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 37
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 12
-rw-r--r--  deps/v8/src/array.js | 4
-rw-r--r--  deps/v8/src/assembler.h | 1
-rw-r--r--  deps/v8/src/ast.cc | 1
-rw-r--r--  deps/v8/src/ast.h | 49
-rw-r--r--  deps/v8/src/bootstrapper.cc | 19
-rw-r--r--  deps/v8/src/bootstrapper.h | 6
-rw-r--r--  deps/v8/src/builtins.cc | 4
-rw-r--r--  deps/v8/src/cfg.cc | 763
-rw-r--r--  deps/v8/src/cfg.h | 871
-rw-r--r--  deps/v8/src/codegen.cc | 40
-rw-r--r--  deps/v8/src/compiler.cc | 21
-rw-r--r--  deps/v8/src/debug-agent.cc | 5
-rw-r--r--  deps/v8/src/debug-agent.h | 5
-rw-r--r--  deps/v8/src/debug-delay.js | 24
-rw-r--r--  deps/v8/src/debug.cc | 5
-rw-r--r--  deps/v8/src/debug.h | 4
-rw-r--r--  deps/v8/src/execution.cc | 114
-rw-r--r--  deps/v8/src/execution.h | 64
-rw-r--r--  deps/v8/src/factory.cc | 5
-rw-r--r--  deps/v8/src/factory.h | 2
-rw-r--r--  deps/v8/src/flag-definitions.h | 2
-rw-r--r--  deps/v8/src/handles.cc | 30
-rw-r--r--  deps/v8/src/heap-profiler.cc | 210
-rw-r--r--  deps/v8/src/heap-profiler.h | 21
-rw-r--r--  deps/v8/src/heap.cc | 74
-rw-r--r--  deps/v8/src/heap.h | 3
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 12
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 90
-rw-r--r--  deps/v8/src/ia32/cfg-ia32.cc | 315
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 141
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 2
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 17
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 46
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 64
-rw-r--r--  deps/v8/src/ia32/simulator-ia32.h | 19
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 12
-rw-r--r--  deps/v8/src/list.h | 7
-rw-r--r--  deps/v8/src/log-utils.cc | 2
-rw-r--r--  deps/v8/src/macro-assembler.h | 2
-rw-r--r--  deps/v8/src/mark-compact.cc | 10
-rw-r--r--  deps/v8/src/memory.h | 4
-rw-r--r--  deps/v8/src/messages.js | 17
-rw-r--r--  deps/v8/src/mirror-delay.js | 2
-rw-r--r--  deps/v8/src/objects.cc | 157
-rw-r--r--  deps/v8/src/objects.h | 59
-rw-r--r--  deps/v8/src/parser.cc | 183
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 14
-rw-r--r--  deps/v8/src/platform-macos.cc | 16
-rw-r--r--  deps/v8/src/prettyprinter.cc | 10
-rw-r--r--  deps/v8/src/regexp-stack.cc | 8
-rw-r--r--  deps/v8/src/regexp-stack.h | 2
-rw-r--r--  deps/v8/src/rewriter.cc | 12
-rw-r--r--  deps/v8/src/runtime.cc | 40
-rw-r--r--  deps/v8/src/serialize.cc | 32
-rw-r--r--  deps/v8/src/spaces.cc | 136
-rw-r--r--  deps/v8/src/spaces.h | 74
-rw-r--r--  deps/v8/src/string.js | 78
-rw-r--r--  deps/v8/src/stub-cache.cc | 41
-rw-r--r--  deps/v8/src/top.cc | 3
-rw-r--r--  deps/v8/src/top.h | 7
-rw-r--r--  deps/v8/src/uri.js | 10
-rw-r--r--  deps/v8/src/usage-analyzer.cc | 53
-rw-r--r--  deps/v8/src/utils.cc | 2
-rw-r--r--  deps/v8/src/v8.cc | 16
-rw-r--r--  deps/v8/src/v8.h | 2
-rw-r--r--  deps/v8/src/v8threads.cc | 34
-rw-r--r--  deps/v8/src/v8threads.h | 1
-rw-r--r--  deps/v8/src/variables.h | 8
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 65
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 39
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 45
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 449
-rw-r--r--  deps/v8/src/x64/cfg-x64.cc | 324
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 142
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 2
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 84
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 109
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 74
-rw-r--r--  deps/v8/src/x64/simulator-x64.h | 19
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 13
-rw-r--r--  deps/v8/test/cctest/test-alloc.cc | 69
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 128
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 5
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 84
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-sockets.cc | 1
-rw-r--r--  deps/v8/test/mjsunit/class-of-builtins.js | 2
-rw-r--r--  deps/v8/test/mjsunit/debug-compile-event.js | 4
-rw-r--r--  deps/v8/test/mjsunit/invalid-lhs.js | 11
-rw-r--r--  deps/v8/test/mjsunit/mirror-script.js | 16
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-220.js | 2
-rw-r--r--  deps/v8/test/mjsunit/switch.js | 4
-rw-r--r--  deps/v8/test/mjsunit/third_party/object-keys.js | 2
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 7
-rwxr-xr-x  deps/v8/tools/js2c.py | 20
-rw-r--r--  deps/v8/tools/jsmin.py | 496
-rw-r--r--  deps/v8/tools/visual_studio/v8_base.vcproj | 12
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_arm.vcproj | 12
-rw-r--r--  deps/v8/tools/visual_studio/v8_base_x64.vcproj | 12
119 files changed, 3441 insertions, 4348 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 8c7459168d..88c34f9f11 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,46 @@
+2009-10-07: Version 1.3.14
+
+ Added GetRealNamedProperty to the API to look up real properties
+ located on the object or in the prototype chain, skipping any
+ interceptors.
+
+ Fixed the stack limit setting API to work correctly with threads. The
+ stack limit now needs to be set for each thread that is used with V8.
+
+ Removed the high-priority flag from IdleNotification().
+
+ Ensure V8 is initialized before locking and unlocking threads.
+
+ Implemented a new JavaScript minifier for compressing the source of
+ the built-in JavaScript. This removes non-Open Source code by Douglas
+ Crockford from the project.
+
+ Added a missing optimization in StringCharAt.
+
+ Fixed some flaky socket tests.
+
+ Change by Alexander Botero-Lowry to fix profiler sampling on FreeBSD
+ in 64-bit mode.
+
+ Fixed memory leaks in the thread management code.
+
+ Fixed the result of assignment to a pixel array. The assigned value
+ is now the result.
+
+ Error reporting for invalid left-hand sides in for-in statements, pre-
+ and postfix count expressions, and assignments now matches the JSC
+ behavior in Safari 4.
+
+ Follow the spec in disallowing function declarations without a name.
+
+ Always allocate code objects within a 2 GB range. On x64 architecture
+ this allows the use of near calls (32-bit displacement) in Code objects.
+
+ Optimized array construction ported to x64 and ARM architectures.
+
+ [ES5] Changed Object.keys to return strings for element indices.
+
+
2009-09-23: Version 1.3.13
Fixed uninitialized memory problem.
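For context, a minimal embedder-side sketch of the two threading-related changes above (per-thread stack limits and the flagless IdleNotification). The thread setup, the 512KB figure, and the function names are illustrative assumptions, not code from this commit:

    #include <v8.h>

    // Each thread that runs V8 now needs its own stack limit, set while
    // holding the V8 lock (see the ResourceConstraints comment in v8.h).
    static void SetThreadStackLimit() {
      v8::Locker locker;  // hold the lock while setting the limit
      uint32_t here;      // approximates this thread's current stack position
      v8::ResourceConstraints constraints;
      // Permit roughly 512KB of stack growth below this frame (illustrative).
      constraints.set_stack_limit(&here - (512 * 1024) / sizeof(uint32_t));
      v8::SetResourceConstraints(&constraints);
    }

    // IdleNotification() lost its is_high_priority flag; it returns true
    // once V8 has done as much cleanup as it can.
    static void NotifyIdle() {
      while (!v8::V8::IdleNotification()) {
        // V8 freed something; keep notifying while we are still idle.
      }
    }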
diff --git a/deps/v8/LICENSE b/deps/v8/LICENSE
index d2862b4ee8..e3ed242d42 100644
--- a/deps/v8/LICENSE
+++ b/deps/v8/LICENSE
@@ -21,13 +21,6 @@ are:
This code is copyrighted by Sun Microsystems Inc. and released
under a 3-clause BSD license.
- - JSMin JavaScript minifier, located at tools/jsmin.py. This code is
- copyrighted by Douglas Crockford and Baruch Even and released under
- an MIT license.
-
- - Valgrind client API header, located at third_party/valgrind/valgrind.h
- This is released under the BSD license.
-
- Valgrind client API header, located at third_party/valgrind/valgrind.h
 This is released under the BSD license.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index e1a37f34f5..b5aa7abadb 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -238,6 +238,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
+ '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 1a3177bb0d..adb9f43176 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -130,6 +130,7 @@ class Data;
namespace internal {
class Object;
+class Arguments;
}
@@ -1205,7 +1206,14 @@ class V8EXPORT Object : public Value {
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- Handle<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+ Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+
+ /**
+ * If result.IsEmpty() no real property was located on the object or
+ * in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ Local<Value> GetRealNamedProperty(Handle<String> key);
/** Tests for a named lookup interceptor.*/
bool HasNamedLookupInterceptor();
@@ -1401,17 +1409,13 @@ class V8EXPORT Arguments {
*/
class V8EXPORT AccessorInfo {
public:
- inline AccessorInfo(Local<Object> self,
- Local<Value> data,
- Local<Object> holder)
- : self_(self), data_(data), holder_(holder) { }
+ inline AccessorInfo(internal::Object** args)
+ : args_(args) { }
inline Local<Value> Data() const;
inline Local<Object> This() const;
inline Local<Object> Holder() const;
private:
- Local<Object> self_;
- Local<Value> data_;
- Local<Object> holder_;
+ internal::Object** args_;
};
@@ -1567,7 +1571,10 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> host,
/**
* A FunctionTemplate is used to create functions at runtime. There
* can only be one function created from a FunctionTemplate in a
- * context.
+ * context. The lifetime of the created function is equal to the
+ * lifetime of the context, so if the embedder needs to create
+ * temporary functions that can be collected, using Scripts is
+ * preferred.
*
* A FunctionTemplate can have properties, these properties are added to the
* function object when it is created.
@@ -1974,8 +1981,13 @@ Handle<Boolean> V8EXPORT False();
/**
- * A set of constraints that specifies the limits of the runtime's
- * memory use.
+ * A set of constraints that specifies the limits of the runtime's memory use.
+ * You must set the heap size before initializing the VM - the size cannot be
+ * adjusted after the VM is initialized.
+ *
+ * If you are using threads then you should hold the V8::Locker lock while
+ * setting the stack limit and you must set a non-default stack limit separately
+ * for each thread.
*/
class V8EXPORT ResourceConstraints {
public:
@@ -1985,6 +1997,7 @@ class V8EXPORT ResourceConstraints {
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
uint32_t* stack_limit() const { return stack_limit_; }
+ // Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
private:
int max_young_space_size_;
@@ -2192,7 +2205,8 @@ class V8EXPORT V8 {
/**
* Initializes from snapshot if possible. Otherwise, attempts to
- * initialize from scratch.
+ * initialize from scratch. This function is called implicitly if
+ * you use the API without calling it first.
*/
static bool Initialize();
@@ -2335,12 +2349,11 @@ class V8EXPORT V8 {
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
* This call can be used repeatedly if the embedder remains idle.
- * \param is_high_priority tells whether the embedder is high priority.
* Returns true if the embedder should stop calling IdleNotification
* until real work has been done. This indicates that V8 has done
* as much cleanup as it will be able to do.
*/
- static bool IdleNotification(bool is_high_priority);
+ static bool IdleNotification();
/**
* Optional notification that the system is running low on memory.
@@ -2742,15 +2755,15 @@ class Internals {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
-
+
static inline bool HasSmiTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
}
-
+
static inline int SmiValue(internal::Object* value) {
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
}
-
+
static inline bool IsExternalTwoByteString(int instance_type) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
@@ -2863,21 +2876,6 @@ int Arguments::Length() const {
}
-Local<Value> AccessorInfo::Data() const {
- return data_;
-}
-
-
-Local<Object> AccessorInfo::This() const {
- return self_;
-}
-
-
-Local<Object> AccessorInfo::Holder() const {
- return holder_;
-}
-
-
template <class T>
Local<T> HandleScope::Close(Handle<T> value) {
internal::Object** before = reinterpret_cast<internal::Object**>(*value);
@@ -3075,6 +3073,21 @@ External* External::Cast(v8::Value* value) {
}
+Local<Value> AccessorInfo::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&args_[-3]));
+}
+
+
+Local<Object> AccessorInfo::This() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[0]));
+}
+
+
+Local<Object> AccessorInfo::Holder() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[-1]));
+}
+
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
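A short usage sketch for the Object::GetRealNamedProperty() API added above, assuming an entered Context and illustrative property names:

    // Looks on the object itself and then up the prototype chain, but
    // never invokes named interceptors; an empty handle means no real
    // property exists.
    v8::HandleScope scope;
    v8::Local<v8::Object> obj = v8::Object::New();
    obj->Set(v8::String::New("x"), v8::Integer::New(42));

    v8::Local<v8::Value> real = obj->GetRealNamedProperty(v8::String::New("x"));
    if (!real.IsEmpty()) {
      // real->Int32Value() == 42 here. By contrast,
      // GetRealNamedPropertyInPrototypeChain() skips the object itself
      // and starts the lookup at its prototype.
    }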
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 423064782a..b6c2b4d266 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -36,49 +36,48 @@ Import('context')
SOURCES = {
'all': [
'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
- 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'cfg.cc',
- 'code-stubs.cc', 'codegen.cc', 'compilation-cache.cc', 'compiler.cc',
- 'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc',
- 'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc',
- 'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc',
- 'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc',
- 'hashmap.cc', 'heap.cc', 'heap-profiler.cc', 'ic.cc',
- 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc',
- 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
- 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
- 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
- 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc',
- 'runtime.cc', 'scanner.cc', 'scopeinfo.cc', 'scopes.cc',
- 'serialize.cc', 'snapshot-common.cc', 'spaces.cc',
- 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
+ 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
+ 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
+ 'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
+ 'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
+ 'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
+ 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
+ 'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
+ 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc',
+ 'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc',
+ 'property.cc', 'regexp-macro-assembler.cc',
+ 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+ 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+ 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
+ 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
- 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc',
- 'arm/codegen-arm.cc', 'arm/constants-arm.cc', 'arm/cpu-arm.cc',
- 'arm/disasm-arm.cc', 'arm/debug-arm.cc', 'arm/frames-arm.cc',
- 'arm/ic-arm.cc', 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
- 'arm/regexp-macro-assembler-arm.cc',
- 'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
- 'arm/virtual-frame-arm.cc'
+ 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc',
+ 'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
+ 'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
+ 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
+ 'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc',
+ 'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc'
],
'arch:ia32': [
- 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
+ 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
- 'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc',
- 'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc'
+ 'ia32/regexp-macro-assembler-ia32.cc',
+ 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
+ 'ia32/virtual-frame-ia32.cc'
],
'arch:x64': [
- 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
- 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
- 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
- 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
- 'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc',
- 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
+ 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc',
+ 'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc',
+ 'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc',
+ 'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc',
+ 'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc',
+ 'x64/virtual-frame-x64.cc'
],
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index eaa4f5a45e..00f1e0b7e1 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "debug.h"
@@ -71,7 +72,7 @@ namespace v8 {
thread_local.DecrementCallDepth(); \
if (has_pending_exception) { \
if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \
- if (!thread_local.IgnoreOutOfMemory()) \
+ if (!thread_local.ignore_out_of_memory()) \
i::V8::FatalProcessOutOfMemory(NULL); \
} \
bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
@@ -341,9 +342,12 @@ ResourceConstraints::ResourceConstraints()
bool SetResourceConstraints(ResourceConstraints* constraints) {
- bool result = i::Heap::ConfigureHeap(constraints->max_young_space_size(),
- constraints->max_old_space_size());
- if (!result) return false;
+ int semispace_size = constraints->max_young_space_size();
+ int old_gen_size = constraints->max_old_space_size();
+ if (semispace_size != 0 || old_gen_size != 0) {
+ bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+ if (!result) return false;
+ }
if (constraints->stack_limit() != NULL) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
i::StackGuard::SetStackLimit(limit);
@@ -1898,6 +1902,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::Set()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -1918,6 +1923,7 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::ForceSet()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -1936,6 +1942,7 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
bool v8::Object::ForceDelete(v8::Handle<Value> key) {
ON_BAILOUT("v8::Object::ForceDelete()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE();
@@ -2121,7 +2128,7 @@ bool v8::Object::HasIndexedLookupInterceptor() {
}
-Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
+Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Handle<String> key) {
ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
return Local<Value>());
@@ -2142,12 +2149,32 @@ Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
}
+Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
+ ON_BAILOUT("v8::Object::GetRealNamedProperty()", return Local<Value>());
+ ENTER_V8;
+ i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ i::LookupResult lookup;
+ self_obj->LookupRealNamedProperty(*key_obj, &lookup);
+ if (lookup.IsValid()) {
+ PropertyAttributes attributes;
+ i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
+ &lookup,
+ *key_obj,
+ &attributes));
+ return Utils::ToLocal(result);
+ }
+ return Local<Value>(); // No real property was found in prototype chain.
+}
+
+
// Turns on access checks by copying the map and setting the check flag.
// Because the object gets a new map, existing inline cache caching
// the old map of this object will fail.
void v8::Object::TurnOnAccessCheck() {
ON_BAILOUT("v8::Object::TurnOnAccessCheck()", return);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Map> new_map =
@@ -2177,6 +2204,7 @@ Local<v8::Object> v8::Object::Clone() {
int v8::Object::GetIdentityHash() {
ON_BAILOUT("v8::Object::GetIdentityHash()", return 0);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> hash_symbol = i::Factory::identity_hash_symbol();
@@ -2206,6 +2234,7 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
ON_BAILOUT("v8::Object::SetHiddenValue()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
@@ -2245,6 +2274,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ON_BAILOUT("v8::DeleteHiddenValue()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
if (hidden_props->IsUndefined()) {
@@ -2259,6 +2289,7 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT("v8::SetElementsToPixelData()", return);
ENTER_V8;
+ HandleScope scope;
if (!ApiCheck(i::Smi::IsValid(length),
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
@@ -2419,20 +2450,14 @@ int String::Write(uint16_t* buffer, int start, int length) const {
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlattenIfNotFlat();
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
if (end < 0) return 0;
- write_input_buffer.Reset(start, *str);
- int i;
- for (i = 0; i < end; i++)
- buffer[i] = write_input_buffer.GetNext();
- if (length == -1 || i < length)
- buffer[i] = '\0';
- return i;
+ i::String::WriteToFlat(*str, buffer, start, end);
+ if (length == -1 || end < length)
+ buffer[end] = '\0';
+ return end;
}
@@ -2577,9 +2602,11 @@ bool v8::V8::Dispose() {
}
-bool v8::V8::IdleNotification(bool is_high_priority) {
- if (!i::V8::IsRunning()) return false;
- return i::V8::IdleNotification(is_high_priority);
+bool v8::V8::IdleNotification() {
+ // Returning true tells the caller that it need not
+ // continue to call IdleNotification.
+ if (!i::V8::IsRunning()) return true;
+ return i::V8::IdleNotification();
}
@@ -2740,7 +2767,9 @@ v8::Local<v8::Context> Context::GetCurrent() {
v8::Local<v8::Context> Context::GetCalling() {
if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
- i::Handle<i::Context> context(i::Top::GetCallingGlobalContext());
+ i::Handle<i::Object> calling = i::Top::GetCallingGlobalContext();
+ if (calling.is_null()) return Local<Context>();
+ i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
}
@@ -3187,7 +3216,7 @@ Local<Integer> v8::Integer::New(int32_t value) {
void V8::IgnoreOutOfMemoryException() {
- thread_local.SetIgnoreOutOfMemory(true);
+ thread_local.set_ignore_out_of_memory(true);
}
@@ -3669,6 +3698,11 @@ HandleScopeImplementer* HandleScopeImplementer::instance() {
}
+void HandleScopeImplementer::FreeThreadResources() {
+ thread_local.Free();
+}
+
+
char* HandleScopeImplementer::ArchiveThread(char* storage) {
return thread_local.ArchiveThreadHelper(storage);
}
@@ -3680,7 +3714,7 @@ char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
handle_scope_data_ = *current;
memcpy(storage, this, sizeof(*this));
- Initialize();
+ ResetAfterArchive();
current->Initialize();
return storage + ArchiveSpacePerThread();
@@ -3706,14 +3740,14 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
// Iterate over all handles in the blocks except for the last.
- for (int i = Blocks()->length() - 2; i >= 0; --i) {
- Object** block = Blocks()->at(i);
+ for (int i = blocks()->length() - 2; i >= 0; --i) {
+ Object** block = blocks()->at(i);
v->VisitPointers(block, &block[kHandleBlockSize]);
}
// Iterate over live handles in the last block (if any).
- if (!Blocks()->is_empty()) {
- v->VisitPointers(Blocks()->last(), handle_scope_data_.next);
+ if (!blocks()->is_empty()) {
+ v->VisitPointers(blocks()->last(), handle_scope_data_.next);
}
if (!saved_contexts_.is_empty()) {
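The reworked SetResourceConstraints() above only calls Heap::ConfigureHeap() when a heap size was explicitly set. A sketch of what that enables; ComputeThreadStackLimit() is a hypothetical helper, not part of this commit:

    // With default-constructed (zero) space sizes, only the stack limit
    // is applied; the heap configuration is left untouched.
    v8::ResourceConstraints constraints;
    uint32_t* limit = ComputeThreadStackLimit();  // hypothetical helper
    constraints.set_stack_limit(limit);
    bool ok = v8::SetResourceConstraints(&constraints);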
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 9ae6307b4d..1221f352cc 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -311,20 +311,12 @@ class HandleScopeImplementer {
public:
HandleScopeImplementer()
- : blocks(0),
+ : blocks_(0),
entered_contexts_(0),
- saved_contexts_(0) {
- Initialize();
- }
-
- void Initialize() {
- blocks.Initialize(0);
- entered_contexts_.Initialize(0);
- saved_contexts_.Initialize(0);
- spare = NULL;
- ignore_out_of_memory = false;
- call_depth = 0;
- }
+ saved_contexts_(0),
+ spare_(NULL),
+ ignore_out_of_memory_(false),
+ call_depth_(0) { }
static HandleScopeImplementer* instance();
@@ -332,6 +324,7 @@ class HandleScopeImplementer {
static int ArchiveSpacePerThread();
static char* RestoreThread(char* from);
static char* ArchiveThread(char* to);
+ static void FreeThreadResources();
// Garbage collection support.
static void Iterate(v8::internal::ObjectVisitor* v);
@@ -341,9 +334,9 @@ class HandleScopeImplementer {
inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(int extensions);
- inline void IncrementCallDepth() {call_depth++;}
- inline void DecrementCallDepth() {call_depth--;}
- inline bool CallDepthIsZero() { return call_depth == 0; }
+ inline void IncrementCallDepth() {call_depth_++;}
+ inline void DecrementCallDepth() {call_depth_--;}
+ inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Object> context);
inline bool LeaveLastContext();
@@ -356,20 +349,44 @@ class HandleScopeImplementer {
inline Context* RestoreContext();
inline bool HasSavedContexts();
- inline List<internal::Object**>* Blocks() { return &blocks; }
-
- inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
- inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
+ inline List<internal::Object**>* blocks() { return &blocks_; }
+ inline bool ignore_out_of_memory() { return ignore_out_of_memory_; }
+ inline void set_ignore_out_of_memory(bool value) {
+ ignore_out_of_memory_ = value;
+ }
private:
- List<internal::Object**> blocks;
- Object** spare;
- int call_depth;
+ void ResetAfterArchive() {
+ blocks_.Initialize(0);
+ entered_contexts_.Initialize(0);
+ saved_contexts_.Initialize(0);
+ spare_ = NULL;
+ ignore_out_of_memory_ = false;
+ call_depth_ = 0;
+ }
+
+ void Free() {
+ ASSERT(blocks_.length() == 0);
+ ASSERT(entered_contexts_.length() == 0);
+ ASSERT(saved_contexts_.length() == 0);
+ blocks_.Free();
+ entered_contexts_.Free();
+ saved_contexts_.Free();
+ if (spare_ != NULL) {
+ DeleteArray(spare_);
+ spare_ = NULL;
+ }
+ ASSERT(call_depth_ == 0);
+ }
+
+ List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
List<Handle<Object> > entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
- bool ignore_out_of_memory;
+ Object** spare_;
+ bool ignore_out_of_memory_;
+ int call_depth_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
@@ -419,32 +436,32 @@ Handle<Object> HandleScopeImplementer::LastEnteredContext() {
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
- internal::Object** block = (spare != NULL) ?
- spare :
+ internal::Object** block = (spare_ != NULL) ?
+ spare_ :
NewArray<internal::Object*>(kHandleBlockSize);
- spare = NULL;
+ spare_ = NULL;
return block;
}
void HandleScopeImplementer::DeleteExtensions(int extensions) {
- if (spare != NULL) {
- DeleteArray(spare);
- spare = NULL;
+ if (spare_ != NULL) {
+ DeleteArray(spare_);
+ spare_ = NULL;
}
for (int i = extensions; i > 1; --i) {
- internal::Object** block = blocks.RemoveLast();
+ internal::Object** block = blocks_.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(block,
&block[kHandleBlockSize]);
#endif
DeleteArray(block);
}
- spare = blocks.RemoveLast();
+ spare_ = blocks_.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(
- spare,
- &spare[kHandleBlockSize]);
+ spare_,
+ &spare_[kHandleBlockSize]);
#endif
}
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 80f90063ba..d2f1bfce54 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -45,6 +45,9 @@ namespace internal {
class Arguments BASE_EMBEDDED {
public:
+ Arguments(int length, Object** arguments)
+ : length_(length), arguments_(arguments) { }
+
Object*& operator[] (int index) {
ASSERT(0 <= index && index < length_);
return arguments_[-index];
@@ -61,11 +64,34 @@ class Arguments BASE_EMBEDDED {
// Get the total number of arguments including the receiver.
int length() const { return length_; }
+ Object** arguments() { return arguments_; }
+
private:
int length_;
Object** arguments_;
};
+
+// Custom arguments replicate a small segment of stack that can be
+// accessed through an Arguments object the same way the actual stack
+// can.
+class CustomArguments : public Relocatable {
+ public:
+ inline CustomArguments(Object *data,
+ JSObject *self,
+ JSObject *holder) {
+ values_[3] = self;
+ values_[2] = holder;
+ values_[1] = Smi::FromInt(0);
+ values_[0] = data;
+ }
+ void IterateInstance(ObjectVisitor* v);
+ Object** end() { return values_ + 3; }
+ private:
+ Object* values_[4];
+};
+
+
} } // namespace v8::internal
#endif // V8_ARGUMENTS_H_
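As a reading aid (not part of the commit), the CustomArguments layout above lines up with the AccessorInfo pointer arithmetic added in include/v8.h:

    // CustomArguments::end() returns values_ + 3, which is the pointer
    // AccessorInfo wraps as args_. The negative indices then resolve to:
    //
    //   values_[3] == args_[0]   -> self   (AccessorInfo::This)
    //   values_[2] == args_[-1]  -> holder (AccessorInfo::Holder)
    //   values_[1] == args_[-2]  -> Smi::FromInt(0), padding
    //   values_[0] == args_[-3]  -> data   (AccessorInfo::Data)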
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index cd5a1bbfd7..5417ed7d36 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -81,7 +81,13 @@ void RelocInfo::set_target_address(Address target) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 7e43f2e5da..d1df08c571 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -645,8 +645,8 @@ class Assembler : public Malloced {
str(src, MemOperand(sp, 4, NegPreIndex), cond);
}
- void pop(Register dst) {
- ldr(dst, MemOperand(sp, 4, PostIndex), al);
+ void pop(Register dst, Condition cond = al) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), cond);
}
void pop() {
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index cdea1cbf6d..d7afb37af1 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -44,15 +44,379 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
__ str(r1, MemOperand(ip, 0));
// The actual argument count has already been loaded into register
- // r0, but JumpToBuiltin expects r0 to contain the number of
+ // r0, but JumpToRuntime expects r0 to contain the number of
// arguments including the receiver.
__ add(r0, r0, Operand(1));
- __ JumpToBuiltin(ExternalReference(id));
+ __ JumpToRuntime(ExternalReference(id));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+
+ __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements and
+// if JSArray::kPreallocatedArrayElements is changed, the handling of loop
+// unfolding below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity > 0);
+ // Load the initial map from the array function.
+ __ ldr(scratch1, FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ __ AllocateInNewSpace(size / kPointerSize,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(scratch3, Operand(0));
+ __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, MemOperand(result, JSArray::kSize));
+ __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array (untagged)
+ // scratch2: start of next object
+ __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ __ mov(scratch3, Operand(initial_capacity));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+
+ // Fill the FixedArray with the hole value.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < initial_capacity; i++) {
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ }
+}
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array_storage and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true, the allocated elements backing store is
+// filled with the hole values; otherwise it is left uninitialized. When the
+// backing store is filled, the register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array_storage,
+ Register elements_array_end,
+ Register scratch1,
+ Register scratch2,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ ldr(elements_array_storage,
+ FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ tst(array_size, array_size);
+ __ b(nz, &not_empty);
+
+ // If an empty array is requested, allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize +
+ FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size / kPointerSize,
+ result,
+ elements_array_end,
+ scratch1,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested number of elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ mov(elements_array_end,
+ Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
+ __ add(elements_array_end,
+ elements_array_end,
+ Operand(array_size, ASR, kSmiTagSize));
+ __ AllocateInNewSpace(elements_array_end,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array_storage: initial map
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+ __ str(elements_array_storage,
+ FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // array_size: size of array (smi)
+ __ add(elements_array_storage, result, Operand(JSArray::kSize));
+ __ str(elements_array_storage,
+ FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ and_(elements_array_storage,
+ elements_array_storage,
+ Operand(~kHeapObjectTagMask));
+ // Initialize the fixed array and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // elements_array_storage: elements array (untagged)
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
+ // Convert array_size from smi to value.
+ __ mov(array_size,
+ Operand(array_size, ASR, kSmiTagSize));
+ __ tst(array_size, array_size);
+ // Length of the FixedArray is the number of pre-allocated elements if
+ // the actual JSArray has length 0 and the size of the JSArray for non-empty
+ // JSArrays. The length of a FixedArray is not stored as a smi.
+ __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ str(array_size,
+ MemOperand(elements_array_storage, kPointerSize, PostIndex));
+
+ // Calculate elements array and elements array end.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // array_size: size of elements array
+ __ add(elements_array_end,
+ elements_array_storage,
+ Operand(array_size, LSL, kPointerSizeLog2));
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // elements_array_end: start of next object
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ str(scratch1,
+ MemOperand(elements_array_storage, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(elements_array_storage, elements_array_end);
+ __ b(lt, &loop);
+ }
+}
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code, the runtime is called. This
+// function assumes the following state:
+// r0: argc
+// r1: constructor (built-in Array function)
+// lr: return address
+// sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in r1 needs to be preserved for
+// entering the generic code. In both cases argc in r0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label *call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments or one.
+ __ cmp(r0, Operand(0));
+ __ b(ne, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ JSArray::kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+ // Setup return value, remove receiver from stack and return.
+ __ mov(r0, r2);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ Jump(lr);
+
+ // Check for one argument. Bail out if argument is not smi or if it is
+ // negative.
+ __ bind(&argc_one_or_more);
+ __ cmp(r0, Operand(1));
+ __ b(ne, &argc_two_or_more);
+ ASSERT(kSmiTag == 0);
+ __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
+ __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
+ __ b(ne, call_generic_code);
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is too large to actually allocate an elements array.
+ ASSERT(kSmiTag == 0);
+ __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+ __ b(ge, call_generic_code);
+
+ // r0: argc
+ // r1: constructor
+ // r2: array_size (smi)
+ // sp[0]: argument
+ AllocateJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ true,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+ // Setup return value, remove receiver and argument from stack and return.
+ __ mov(r0, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Jump(lr);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convert argc to a smi.
+
+ // r0: argc
+ // r1: constructor
+ // r2: array_size (smi)
+ // sp[0]: last argument
+ AllocateJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ false,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+
+ // Fill arguments as array elements. Copy from the top of the stack (last
+ // element) to the array backing store filling it backwards. Note:
+ // elements_array_end points after the backing store therefore PreIndex is
+ // used when filling the backing store.
+ // r0: argc
+ // r3: JSArray
+ // r4: elements_array storage start (untagged)
+ // r5: elements_array_end (untagged)
+ // sp[0]: last argument
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+ __ bind(&entry);
+ __ cmp(r4, r5);
+ __ b(lt, &loop);
+
+ // Remove caller arguments and receiver from the stack, setup return value and
+ // return.
+ // r0: argc
+ // r3: JSArray
+ // sp[0]: receiver
+ __ add(sp, sp, Operand(kPointerSize));
+ __ mov(r0, r3);
+ __ Jump(lr);
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // Just jump to the generic array code.
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, r1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
__ Jump(array_code, RelocInfo::CODE_TARGET);
@@ -60,7 +424,34 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // Just jump to the generic construct code.
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function, which
+ // always has a map.
+ GenerateLoadArrayFunction(masm, r2);
+ __ cmp(r1, r2);
+ __ Assert(eq, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -149,7 +540,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
@@ -220,12 +611,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// r5: start of next object
// r7: undefined
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateObjectInNewSpace(r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(r0,
+ r5,
+ r6,
+ r2,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// r1: constructor
diff --git a/deps/v8/src/arm/cfg-arm.cc b/deps/v8/src/arm/cfg-arm.cc
deleted file mode 100644
index e0e563cd87..0000000000
--- a/deps/v8/src/arm/cfg-arm.cc
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-arm.h" // Include after codegen-inl.h.
-#include "macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize));
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(ip);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- StackCheckStub stub;
- __ CallStub(&stub);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ add(sp, sp, Operand((count + 1) * kPointerSize));
- __ Jump(lr);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
-
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Discard key and receiver.
- __ add(sp, sp, Operand(2 * kPointerSize));
- } else {
- key()->Get(masm, r2);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ pop(); // Discard receiver.
- }
- location()->Set(masm, r0);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Move left to r1 and right to r0.
- left()->Get(masm, r1);
- right()->Get(masm, r0);
- GenericBinaryOpStub stub(op(), mode);
- __ CallStub(&stub);
- location()->Set(masm, r0);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value()->Get(masm, r0);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, Operand(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ mov(ip, Operand(handle_));
- __ push(ip);
-}
-
-
-static MemOperand ToMemOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return MemOperand(fp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return MemOperand(r0);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ mov(ip, Operand(handle_));
- __ str(ip, ToMemOperand(loc));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ ldr(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ str(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ ldr(ip, ToMemOperand(this));
- __ push(ip); // Push will not destroy ip.
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // Double dispatch.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ ldr(ip, ToMemOperand(this));
- __ str(ip, ToMemOperand(loc));
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(r0)) __ mov(reg, r0);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(r0)) __ mov(r0, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(r0);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, r0);
- break;
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ str(r0, ToMemOperand(loc));
- break;
- case STACK:
- __ pop(ip);
- __ str(ip, ToMemOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 477ea0519b..cdd32f30f8 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1188,7 +1188,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
@@ -2811,7 +2810,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
@@ -2909,13 +2907,11 @@ void CodeGenerator::VisitCall(Call* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Call");
+ Expression* function = node->expression();
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
// Standard function call.
-
// Check if the function is a variable or a property.
- Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
Property* property = function->AsProperty();
@@ -2928,7 +2924,56 @@ void CodeGenerator::VisitCall(Call* node) {
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ // Prepare stack for call to resolved function.
+ LoadAndSpill(function);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ frame_->EmitPush(r2); // Slot for receiver
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // Prepare stack for call to ResolvePossiblyDirectEval.
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+ frame_->EmitPush(r1);
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ frame_->EmitPush(r1);
+ } else {
+ frame_->EmitPush(r2);
+ }
+
+ // Resolve the call.
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up stack with the right values for the function and the receiver.
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
+ __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+ __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ frame_->CallStub(&call_function, arg_count + 1);
+
+ __ ldr(cp, frame_->Context());
+ // Remove the function from the stack.
+ frame_->Drop();
+ frame_->EmitPush(r0);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
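A sketch of the stack during the eval sequence above, reconstructed from its loads and pushes (n = arg_count; the runtime call consumes its two arguments, and the pair layout of its result is inferred from the two FieldMemOperand reads at kHeaderSize and kHeaderSize + kPointerSize):

    // Just before CallRuntime(Runtime::kResolvePossiblyDirectEval, 2):
    //   [sp]                           arg 0 copy, or undefined if n == 0
    //   [sp + kPointerSize]            function copy (runtime argument)
    //   [sp + 2 * kPointerSize] ...    arg n-1 up to arg 0
    //   [sp + (n + 2) * kPointerSize]  receiver slot (undefined)
    //   [sp + (n + 3) * kPointerSize]  function
    // After the call, element 0 of the result overwrites the function slot,
    // element 1 overwrites the receiver slot, and CallFunctionStub is then
    // invoked on the n + 1 values above sp.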
@@ -3053,72 +3098,12 @@ void CodeGenerator::VisitCall(Call* node) {
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare stack for call to resolved function.
- LoadAndSpill(function);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r2); // Slot for receiver
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
- }
-
- // Prepare stack for call to ResolvePossiblyDirectEval.
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
- frame_->EmitPush(r1);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
-
- // Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up stack with the right values for the function and the receiver.
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
- __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Call the function.
- CodeForSourcePosition(node->position());
-
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- frame_->CallStub(&call_function, arg_count + 1);
-
- __ ldr(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->Drop();
- frame_->EmitPush(r0);
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -4960,12 +4945,12 @@ static void AllocateHeapNumber(
Register scratch2) { // Another scratch register.
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
// Get heap number map and store it in the allocated object.
__ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
@@ -5076,11 +5061,14 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// r5: Address of heap number for result.
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
+ __ AlignStack(0);
// Call C routine that may not cause GC or other trouble.
__ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
__ Call(r5);
+ __ pop(r4); // Address of heap number.
+ __ cmp(r4, Operand(Smi::FromInt(0)));
+ __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
// Store answer in the overwritable heap number.
- __ pop(r4);
#if !defined(USE_ARM_EABI)
// Double returned in fp coprocessor register 0 and 1, encoded as register
// cr8. Offsets must be divisible by 4 for coprocessor so we need to
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index b28e96594f..1eb0932eb6 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -370,7 +370,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 6dd9b8faab..45c6540eeb 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -291,27 +291,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Align the stack at this point. After this point we have 5 pushes,
// so in fact we have to unalign here! See also the assert on the
- // alignment immediately below.
-#if defined(V8_HOST_ARCH_ARM)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one ARM
- // platform for another ARM platform with a different alignment.
- int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so we will always align at
- // this point here.
- int activation_frame_alignment = 2 * kPointerSize;
-#endif // defined(V8_HOST_ARCH_ARM)
- if (activation_frame_alignment != kPointerSize) {
- // This code needs to be made more general if this assert doesn't hold.
- ASSERT(activation_frame_alignment == 2 * kPointerSize);
- mov(r7, Operand(Smi::FromInt(0)));
- tst(sp, Operand(activation_frame_alignment - 1));
- push(r7, eq); // Conditional push instruction.
- }
+ // alignment in AlignStack.
+ AlignStack(1);
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
@@ -343,6 +324,30 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
+void MacroAssembler::AlignStack(int offset) {
+#if defined(V8_HOST_ARCH_ARM)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one ARM
+ // platform for another ARM platform with a different alignment.
+ int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_ARM)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so we will always align at
+ // this point here.
+ int activation_frame_alignment = 2 * kPointerSize;
+#endif // defined(V8_HOST_ARCH_ARM)
+ if (activation_frame_alignment != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(activation_frame_alignment == 2 * kPointerSize);
+ mov(r7, Operand(Smi::FromInt(0)));
+ tst(sp, Operand(activation_frame_alignment - offset));
+ push(r7, eq); // Conditional push instruction.
+ }
+}
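The two call sites in this patch illustrate the new helper. EnterExitFrame passes offset 1 because, as its comment notes, an odd number of pushes (five) follows, so the stack has to be left deliberately unaligned at that point. HandleBinaryOpSlowCases in codegen-arm.cc passes offset 0 just before calling into C, and afterwards detects whether a filler word was pushed by comparing the popped word with Smi::FromInt(0); a tagged heap-number address always has its low bit set, so it can never equal the Smi zero.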
+
+
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
@@ -763,12 +768,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
@@ -813,12 +818,12 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
@@ -1001,11 +1006,11 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
- JumpToBuiltin(ext);
+ JumpToRuntime(ext);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
@@ -1046,7 +1051,6 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
int argc = Builtins::GetArgumentsCount(id);
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry = { pc_offset() - kInstrSize, flags, name };
unresolved_.Add(entry);
@@ -1064,7 +1068,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
int argc = Builtins::GetArgumentsCount(id);
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - kInstrSize, flags, name };
unresolved_.Add(entry);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 03aa4d0c2b..ee9d70d310 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -96,6 +96,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(StackFrame::Type type);
+ // Align the stack by optionally pushing a Smi zero.
+ void AlignStack(int offset);
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -171,18 +173,18 @@ class MacroAssembler: public Assembler {
// bytes). If the new space is exhausted, control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as a heap object.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
@@ -257,14 +259,14 @@ class MacroAssembler: public Assembler {
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of parameters.
void TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& builtin);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -329,8 +331,16 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+ // at runtime, and puts it in the register r1.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 70dfcd2a9d..22bec82201 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -409,7 +409,7 @@ void Simulator::Initialize() {
Simulator::Simulator() {
- ASSERT(initialized_);
+ Initialize();
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -501,6 +501,7 @@ void* Simulator::RedirectExternalReference(void* external_function,
// Get the active Simulator for the current thread.
Simulator* Simulator::current() {
+ Initialize();
Simulator* sim = reinterpret_cast<Simulator*>(
v8::internal::Thread::GetThreadLocal(simulator_key));
if (sim == NULL) {
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 3917d6a5af..ff6bbf4302 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -36,18 +36,23 @@
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
+#include "allocation.h"
+
#if defined(__arm__)
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-// Calculates the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) - limit)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on arm uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
@@ -64,12 +69,6 @@
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (assembler::arm::Simulator::current()->StackLimit())
-
-
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
@@ -219,6 +218,20 @@ class Simulator {
} } // namespace assembler::arm
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. Setting the c_limit to indicate a very small
+// stack will not cause stack overflow errors, since the simulator ignores
+// the input.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return assembler::arm::Simulator::current()->StackLimit();
+ }
+};
+
+
#endif // defined(__arm__)
#endif // V8_ARM_SIMULATOR_ARM_H_
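A sketch of how the two SimulatorStack variants are presumably consumed; the caller lives in execution.cc (also changed by this commit, not shown here), and SetStackLimitForReal is a hypothetical name:

    // Hypothetical caller; the name and surrounding code are assumptions.
    void SetStackLimitForReal(uintptr_t c_limit) {
      // On hardware this is just c_limit; under the simulator it is the
      // simulator's own stack limit, whatever c_limit says.
      uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(c_limit);
      // ... install js_limit as the JavaScript stack guard limit ...
    }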
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 9e44cfa510..8282655f7a 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1390,12 +1390,12 @@ Object* ConstructStubCompiler::CompileConstructStub(
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateObjectInNewSpace(r3,
- r4,
- r5,
- r6,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(r3,
+ r4,
+ r5,
+ r6,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index eb69f97c18..f8e63d084b 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -709,6 +709,8 @@ function ArraySort(comparefn) {
QuickSort(a, high_start, to);
}
+ var length;
+
// Copies elements in the range 0..length from obj's prototype chain
// to obj itself, if obj has holes. Returns one more than the maximal index
// of a prototype property.
@@ -826,7 +828,7 @@ function ArraySort(comparefn) {
return first_undefined;
}
- var length = ToUint32(this.length);
+ length = ToUint32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 827389a1b6..323e06aff5 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -191,6 +191,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Address target_address());
INLINE(void set_target_address(Address target));
INLINE(Object* target_object());
+ INLINE(Handle<Object> target_object_handle(Assembler* origin));
INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target));
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 2b6074200f..692bec01df 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -40,7 +40,6 @@ VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
Call Call::sentinel_(NULL, NULL, 0);
-CallEval CallEval::sentinel_(NULL, NULL, 0);
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index ea83712137..6a1cdf51c6 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -85,7 +85,6 @@ namespace internal {
V(Throw) \
V(Property) \
V(Call) \
- V(CallEval) \
V(CallNew) \
V(CallRuntime) \
V(UnaryOperation) \
@@ -116,7 +115,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
class AstNode: public ZoneObject {
public:
- AstNode(): statement_pos_(RelocInfo::kNoPosition) { }
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
@@ -140,21 +138,23 @@ class AstNode: public ZoneObject {
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
-
- void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
- int statement_pos() const { return statement_pos_; }
-
- private:
- int statement_pos_;
};
class Statement: public AstNode {
public:
+ Statement() : statement_pos_(RelocInfo::kNoPosition) {}
+
virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; }
bool IsEmpty() { return AsEmptyStatement() != NULL; }
+
+ void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
+ int statement_pos() const { return statement_pos_; }
+
+ private:
+ int statement_pos_;
};
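Moving statement_pos_ from AstNode down to Statement is what lets CodeForStatementPosition narrow its parameter from AstNode* to Statement* in codegen-arm.h above; the CodeForStatementPosition calls deleted from the visitors for non-statement nodes (Declaration, Assignment, Call, CallNew) fall out of the same change.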
@@ -954,12 +954,8 @@ class Property: public Expression {
class Call: public Expression {
public:
- Call(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos)
- : expression_(expression),
- arguments_(arguments),
- pos_(pos) { }
+ Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+ : expression_(expression), arguments_(arguments), pos_(pos) { }
virtual void Accept(AstVisitor* v);
@@ -981,30 +977,21 @@ class Call: public Expression {
};
-class CallNew: public Call {
+class CallNew: public Expression {
public:
CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : Call(expression, arguments, pos) { }
-
- virtual void Accept(AstVisitor* v);
-};
-
-
-// The CallEval class represents a call of the form 'eval(...)' where eval
-// cannot be seen to be overwritten at compile time. It is potentially a
-// direct (i.e. not aliased) eval call. The real nature of the call is
-// determined at runtime.
-class CallEval: public Call {
- public:
- CallEval(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : Call(expression, arguments, pos) { }
+ : expression_(expression), arguments_(arguments), pos_(pos) { }
virtual void Accept(AstVisitor* v);
- static CallEval* sentinel() { return &sentinel_; }
+ Expression* expression() const { return expression_; }
+ ZoneList<Expression*>* arguments() const { return arguments_; }
+ int position() { return pos_; }
private:
- static CallEval sentinel_;
+ Expression* expression_;
+ ZoneList<Expression*>* arguments_;
+ int pos_;
};
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 5f38485ed7..43aa1a3b89 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -201,20 +201,13 @@ bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
}
Code* code = Code::cast(code_[i]);
Address pc = code->instruction_start() + pc_[i];
- bool is_pc_relative = Bootstrapper::FixupFlagsIsPCRelative::decode(flags);
+ RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
-
if (use_code_object) {
- if (is_pc_relative) {
- Assembler::set_target_address_at(
- pc, reinterpret_cast<Address>(f->code()));
- } else {
- *reinterpret_cast<Object**>(pc) = f->code();
- }
+ target.set_target_object(f->code());
} else {
- Assembler::set_target_address_at(pc, f->code()->instruction_start());
+ target.set_target_address(f->code()->instruction_start());
}
-
LOG(StringEvent("resolved", name));
}
Clear();
@@ -1586,6 +1579,12 @@ char* Bootstrapper::RestoreState(char* from) {
}
+// Called when the top-level V8 mutex is destroyed.
+void Bootstrapper::FreeThreadResources() {
+ ASSERT(Genesis::current() == NULL);
+}
+
+
// Reserve space for statics needing saving and restoring.
int Genesis::ArchiveSpacePerThread() {
return sizeof(current_);
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 0d743e388f..15fc88dc06 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -66,14 +66,14 @@ class Bootstrapper : public AllStatic {
static bool IsActive();
// Encoding/decoding support for fixup flags.
- class FixupFlagsIsPCRelative: public BitField<bool, 0, 1> {};
- class FixupFlagsUseCodeObject: public BitField<bool, 1, 1> {};
- class FixupFlagsArgumentsCount: public BitField<uint32_t, 2, 32-2> {};
+ class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {};
+ class FixupFlagsArgumentsCount: public BitField<uint32_t, 1, 32-1> {};
// Support for thread preemption.
static int ArchiveSpacePerThread();
static char* ArchiveState(char* to);
static char* RestoreState(char* from);
+ static void FreeThreadResources();
};
}} // namespace v8::internal
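With FixupFlagsIsPCRelative gone, the two remaining fields repack into bit 0 and bits 1..31. A small round-trip sketch, using only the BitField encode/decode interface already exercised in macro-assembler-arm.cc above:

    uint32_t flags =
        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
    // Later, while processing pending fixups:
    uint32_t argc_again = Bootstrapper::FixupFlagsArgumentsCount::decode(flags);
    bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);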
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 5fe4ba9a02..afb54275e6 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -172,7 +172,9 @@ BUILTIN(ArrayCodeGeneric) {
}
// Optimize the case where there are no parameters passed.
- if (args.length() == 1) return array->Initialize(4);
+ if (args.length() == 1) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
// Take the arguments as elements.
int number_of_elements = args.length() - 1;
diff --git a/deps/v8/src/cfg.cc b/deps/v8/src/cfg.cc
deleted file mode 100644
index d2dff522b5..0000000000
--- a/deps/v8/src/cfg.cc
+++ /dev/null
@@ -1,763 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "cfg.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-CfgGlobals* CfgGlobals::top_ = NULL;
-
-
-CfgGlobals::CfgGlobals(FunctionLiteral* fun)
- : global_fun_(fun),
- global_exit_(new ExitNode()),
- nowhere_(new Nowhere()),
-#ifdef DEBUG
- node_counter_(0),
- temp_counter_(0),
-#endif
- previous_(top_) {
- top_ = this;
-}
-
-
-#define BAILOUT(reason) \
- do { return NULL; } while (false)
-
-Cfg* Cfg::Build() {
- FunctionLiteral* fun = CfgGlobals::current()->fun();
- if (fun->scope()->num_heap_slots() > 0) {
- BAILOUT("function has context slots");
- }
- if (fun->scope()->num_stack_slots() > kBitsPerPointer) {
- BAILOUT("function has too many locals");
- }
- if (fun->scope()->num_parameters() > kBitsPerPointer - 1) {
- BAILOUT("function has too many parameters");
- }
- if (fun->scope()->arguments() != NULL) {
- BAILOUT("function uses .arguments");
- }
-
- ZoneList<Statement*>* body = fun->body();
- if (body->is_empty()) {
- BAILOUT("empty function body");
- }
-
- StatementCfgBuilder builder;
- builder.VisitStatements(body);
- Cfg* graph = builder.graph();
- if (graph == NULL) {
- BAILOUT("unsupported statement type");
- }
- if (graph->is_empty()) {
- BAILOUT("function body produces empty cfg");
- }
- if (graph->has_exit()) {
- BAILOUT("control path without explicit return");
- }
- graph->PrependEntryNode();
- return graph;
-}
-
-#undef BAILOUT
-
-
-void Cfg::PrependEntryNode() {
- ASSERT(!is_empty());
- entry_ = new EntryNode(InstructionBlock::cast(entry()));
-}
-
-
-void Cfg::Append(Instruction* instr) {
- ASSERT(is_empty() || has_exit());
- if (is_empty()) {
- entry_ = exit_ = new InstructionBlock();
- }
- InstructionBlock::cast(exit_)->Append(instr);
-}
-
-
-void Cfg::AppendReturnInstruction(Value* value) {
- Append(new ReturnInstr(value));
- ExitNode* global_exit = CfgGlobals::current()->exit();
- InstructionBlock::cast(exit_)->set_successor(global_exit);
- exit_ = NULL;
-}
-
-
-void Cfg::Concatenate(Cfg* other) {
- ASSERT(is_empty() || has_exit());
- if (other->is_empty()) return;
-
- if (is_empty()) {
- entry_ = other->entry();
- exit_ = other->exit();
- } else {
- // We have a pair of nonempty fragments and this has an available exit.
- // Destructively glue the fragments together.
- InstructionBlock* first = InstructionBlock::cast(exit_);
- InstructionBlock* second = InstructionBlock::cast(other->entry());
- first->instructions()->AddAll(*second->instructions());
- if (second->successor() != NULL) {
- first->set_successor(second->successor());
- exit_ = other->exit();
- }
- }
-}
-
-
-void InstructionBlock::Unmark() {
- if (is_marked_) {
- is_marked_ = false;
- successor_->Unmark();
- }
-}
-
-
-void EntryNode::Unmark() {
- if (is_marked_) {
- is_marked_ = false;
- successor_->Unmark();
- }
-}
-
-
-void ExitNode::Unmark() {
- is_marked_ = false;
-}
-
-
-Handle<Code> Cfg::Compile(Handle<Script> script) {
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler* masm = new MacroAssembler(NULL, kInitialBufferSize);
- entry()->Compile(masm);
- entry()->Unmark();
- CodeDesc desc;
- masm->GetCode(&desc);
- FunctionLiteral* fun = CfgGlobals::current()->fun();
- ZoneScopeInfo info(fun->scope());
- InLoopFlag in_loop = fun->loop_nesting() ? IN_LOOP : NOT_IN_LOOP;
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
- Handle<Code> code = Factory::NewCode(desc, &info, flags, masm->CodeObject());
-
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(*code, masm);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code) {
- // Print the source code if available.
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(fun->start_position());
- // fun->end_position() points to the last character in the
- // stream. We need to compensate by adding one to calculate the
- // length.
- int source_len = fun->end_position() - fun->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
- }
- PrintF("\n\n");
- }
- PrintF("--- Code ---\n");
- code->Disassemble(*fun->name()->ToCString());
- }
-#endif
-
- return code;
-}
-
-
-void ZeroOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where(TempLocation::STACK);
-}
-
-
-void OneOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where((temp == value_)
- ? TempLocation::ACCUMULATOR
- : TempLocation::STACK);
-}
-
-
-void TwoOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where((temp == value0_ || temp == value1_)
- ? TempLocation::ACCUMULATOR
- : TempLocation::STACK);
-}
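A worked example of the allocation rule implemented by the three overloads above, for the instruction pair produced from 'return a + b;':

    // BinaryOp  Temp0 <- ADD a, b     (Temp0 is the result location)
    // Return    Nowhere <- Temp0
    // Return is a one-operand instruction whose operand is Temp0, so its
    // FastAllocate(Temp0) chooses ACCUMULATOR (r0); had the following
    // instruction not consumed Temp0, the temporary would go to STACK.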
-
-
-void PositionInstr::Compile(MacroAssembler* masm) {
- if (FLAG_debug_info && pos_ != RelocInfo::kNoPosition) {
- masm->RecordStatementPosition(pos_);
- masm->RecordPosition(pos_);
- }
-}
-
-
-void MoveInstr::Compile(MacroAssembler* masm) {
- location()->Move(masm, value());
-}
-
-
-// The expression builder should not be used for declarations or statements.
-void ExpressionCfgBuilder::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-#define DEFINE_VISIT(type) \
- void ExpressionCfgBuilder::Visit##type(type* stmt) { UNREACHABLE(); }
-STATEMENT_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-
-// Macros (temporarily) handling unsupported expression types.
-#define BAILOUT(reason) \
- do { \
- graph_ = NULL; \
- return; \
- } while (false)
-
-void ExpressionCfgBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- BAILOUT("FunctionLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* expr) {
- BAILOUT("FunctionBoilerplateLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitConditional(Conditional* expr) {
- BAILOUT("Conditional");
-}
-
-
-void ExpressionCfgBuilder::VisitSlot(Slot* expr) {
- BAILOUT("Slot");
-}
-
-
-void ExpressionCfgBuilder::VisitVariableProxy(VariableProxy* expr) {
- Expression* rewrite = expr->var()->rewrite();
- if (rewrite == NULL || rewrite->AsSlot() == NULL) {
- BAILOUT("unsupported variable (not a slot)");
- }
- Slot* slot = rewrite->AsSlot();
- if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
- BAILOUT("unsupported slot type (not a parameter or local)");
- }
- // Ignore the passed destination.
- value_ = new SlotLocation(slot->type(), slot->index());
-}
-
-
-void ExpressionCfgBuilder::VisitLiteral(Literal* expr) {
- // Ignore the passed destination.
- value_ = new Constant(expr->handle());
-}
-
-
-void ExpressionCfgBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- BAILOUT("RegExpLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- BAILOUT("ObjectLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- BAILOUT("ArrayLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
-}
-
-
-void ExpressionCfgBuilder::VisitAssignment(Assignment* expr) {
- if (expr->op() != Token::ASSIGN && expr->op() != Token::INIT_VAR) {
- BAILOUT("unsupported compound assignment");
- }
- Expression* lhs = expr->target();
- if (lhs->AsProperty() != NULL) {
- BAILOUT("unsupported property assignment");
- }
-
- Variable* var = lhs->AsVariableProxy()->AsVariable();
- if (var == NULL) {
- BAILOUT("unsupported invalid left-hand side");
- }
- if (var->is_global()) {
- BAILOUT("unsupported global variable");
- }
- Slot* slot = var->slot();
- ASSERT(slot != NULL);
- if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
- BAILOUT("unsupported slot lhs (not a parameter or local)");
- }
-
- // Parameter and local slot assignments.
- ExpressionCfgBuilder builder;
- SlotLocation* loc = new SlotLocation(slot->type(), slot->index());
- builder.Build(expr->value(), loc);
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in assignment");
- }
- // If the expression did not come back in the slot location, append
- // a move to the CFG.
- graph_ = builder.graph();
- if (builder.value() != loc) {
- graph()->Append(new MoveInstr(loc, builder.value()));
- }
- // Record the assignment.
- assigned_vars_.AddElement(loc);
- // Ignore the destination passed to us.
- value_ = loc;
-}
-
-
-void ExpressionCfgBuilder::VisitThrow(Throw* expr) {
- BAILOUT("Throw");
-}
-
-
-void ExpressionCfgBuilder::VisitProperty(Property* expr) {
- ExpressionCfgBuilder object, key;
- object.Build(expr->obj(), NULL);
- if (object.graph() == NULL) {
- BAILOUT("unsupported object subexpression in propload");
- }
- key.Build(expr->key(), NULL);
- if (key.graph() == NULL) {
- BAILOUT("unsupported key subexpression in propload");
- }
-
- if (destination_ == NULL) destination_ = new TempLocation();
-
- graph_ = object.graph();
- // Insert a move to a fresh temporary if the object value is in a slot
- // that's assigned in the key.
- Location* temp = NULL;
- if (object.value()->is_slot() &&
- key.assigned_vars()->Contains(SlotLocation::cast(object.value()))) {
- temp = new TempLocation();
- graph()->Append(new MoveInstr(temp, object.value()));
- }
- graph()->Concatenate(key.graph());
- graph()->Append(new PropLoadInstr(destination_,
- temp == NULL ? object.value() : temp,
- key.value()));
-
- assigned_vars_ = *object.assigned_vars();
- assigned_vars()->Union(key.assigned_vars());
-
- value_ = destination_;
-}
-
-
-void ExpressionCfgBuilder::VisitCall(Call* expr) {
- BAILOUT("Call");
-}
-
-
-void ExpressionCfgBuilder::VisitCallEval(CallEval* expr) {
- BAILOUT("CallEval");
-}
-
-
-void ExpressionCfgBuilder::VisitCallNew(CallNew* expr) {
- BAILOUT("CallNew");
-}
-
-
-void ExpressionCfgBuilder::VisitCallRuntime(CallRuntime* expr) {
- BAILOUT("CallRuntime");
-}
-
-
-void ExpressionCfgBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- BAILOUT("UnaryOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitCountOperation(CountOperation* expr) {
- BAILOUT("CountOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- Token::Value op = expr->op();
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- BAILOUT("unsupported binary operation");
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- ExpressionCfgBuilder left, right;
- left.Build(expr->left(), NULL);
- if (left.graph() == NULL) {
- BAILOUT("unsupported left subexpression in binop");
- }
- right.Build(expr->right(), NULL);
- if (right.graph() == NULL) {
- BAILOUT("unsupported right subexpression in binop");
- }
-
- if (destination_ == NULL) destination_ = new TempLocation();
-
- graph_ = left.graph();
- // Insert a move to a fresh temporary if the left value is in a
- // slot that's assigned on the right.
- Location* temp = NULL;
- if (left.value()->is_slot() &&
- right.assigned_vars()->Contains(SlotLocation::cast(left.value()))) {
- temp = new TempLocation();
- graph()->Append(new MoveInstr(temp, left.value()));
- }
- graph()->Concatenate(right.graph());
- graph()->Append(new BinaryOpInstr(destination_, op,
- temp == NULL ? left.value() : temp,
- right.value()));
-
- assigned_vars_ = *left.assigned_vars();
- assigned_vars()->Union(right.assigned_vars());
-
- value_ = destination_;
- return;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void ExpressionCfgBuilder::VisitCompareOperation(CompareOperation* expr) {
- BAILOUT("CompareOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
-}
-
-#undef BAILOUT
-
-
-// Macros (temporarily) handling unsupported statement types.
-#define BAILOUT(reason) \
- do { \
- graph_ = NULL; \
- return; \
- } while (false)
-
-#define CHECK_BAILOUT() \
- if (graph() == NULL) { return; } else {}
-
-void StatementCfgBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT();
- if (!graph()->has_exit()) return;
- }
-}
-
-
-// The statement builder should not be used for declarations or expressions.
-void StatementCfgBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
-
-#define DEFINE_VISIT(type) \
- void StatementCfgBuilder::Visit##type(type* expr) { UNREACHABLE(); }
-EXPRESSION_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-
-void StatementCfgBuilder::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void StatementCfgBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
- ExpressionCfgBuilder builder;
- builder.Build(stmt->expression(), CfgGlobals::current()->nowhere());
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in expression statement");
- }
- graph()->Append(new PositionInstr(stmt->statement_pos()));
- graph()->Concatenate(builder.graph());
-}
-
-
-void StatementCfgBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
- // Nothing to do.
-}
-
-
-void StatementCfgBuilder::VisitIfStatement(IfStatement* stmt) {
- BAILOUT("IfStatement");
-}
-
-
-void StatementCfgBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- BAILOUT("ContinueStatement");
-}
-
-
-void StatementCfgBuilder::VisitBreakStatement(BreakStatement* stmt) {
- BAILOUT("BreakStatement");
-}
-
-
-void StatementCfgBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- ExpressionCfgBuilder builder;
- builder.Build(stmt->expression(), NULL);
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in return statement");
- }
-
- graph()->Append(new PositionInstr(stmt->statement_pos()));
- graph()->Concatenate(builder.graph());
- graph()->AppendReturnInstruction(builder.value());
-}
-
-
-void StatementCfgBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- BAILOUT("WithEnterStatement");
-}
-
-
-void StatementCfgBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- BAILOUT("WithExitStatement");
-}
-
-
-void StatementCfgBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void StatementCfgBuilder::VisitLoopStatement(LoopStatement* stmt) {
- BAILOUT("LoopStatement");
-}
-
-
-void StatementCfgBuilder::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void StatementCfgBuilder::VisitTryCatch(TryCatch* stmt) {
- BAILOUT("TryCatch");
-}
-
-
-void StatementCfgBuilder::VisitTryFinally(TryFinally* stmt) {
- BAILOUT("TryFinally");
-}
-
-
-void StatementCfgBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
-}
-
-
-#ifdef DEBUG
-// CFG printing support (via depth-first, preorder block traversal).
-
-void Cfg::Print() {
- entry_->Print();
- entry_->Unmark();
-}
-
-
-void Constant::Print() {
- PrintF("Constant ");
- handle_->Print();
-}
-
-
-void Nowhere::Print() {
- PrintF("Nowhere");
-}
-
-
-void SlotLocation::Print() {
- PrintF("Slot ");
- switch (type_) {
- case Slot::PARAMETER:
- PrintF("(PARAMETER, %d)", index_);
- break;
- case Slot::LOCAL:
- PrintF("(LOCAL, %d)", index_);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Print() {
- PrintF("Temp %d", number());
-}
-
-
-void OneOperandInstruction::Print() {
- PrintF("(");
- location()->Print();
- PrintF(", ");
- value_->Print();
- PrintF(")");
-}
-
-
-void TwoOperandInstruction::Print() {
- PrintF("(");
- location()->Print();
- PrintF(", ");
- value0_->Print();
- PrintF(", ");
- value1_->Print();
- PrintF(")");
-}
-
-
-void MoveInstr::Print() {
- PrintF("Move ");
- OneOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void PropLoadInstr::Print() {
- PrintF("PropLoad ");
- TwoOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void BinaryOpInstr::Print() {
- switch (op()) {
- case Token::OR:
- // Two character operand.
- PrintF("BinaryOp[OR] ");
- break;
- case Token::AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Three character operands.
- PrintF("BinaryOp[%s] ", Token::Name(op()));
- break;
- case Token::COMMA:
- // Five character operand.
- PrintF("BinaryOp[COMMA] ");
- break;
- case Token::BIT_OR:
- // Six character operand.
- PrintF("BinaryOp[BIT_OR] ");
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Seven character operands.
- PrintF("BinaryOp[%s] ", Token::Name(op()));
- break;
- default:
- UNREACHABLE();
- }
- TwoOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void ReturnInstr::Print() {
- PrintF("Return ");
- OneOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void InstructionBlock::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- PrintF("L%d:\n", number());
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- instructions_[i]->Print();
- }
- PrintF("Goto L%d\n\n", successor_->number());
- successor_->Print();
- }
-}
-
-
-void EntryNode::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- successor_->Print();
- }
-}
-
-
-void ExitNode::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- PrintF("L%d:\nExit\n\n", number());
- }
-}
-
-#endif // DEBUG
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/cfg.h b/deps/v8/src/cfg.h
deleted file mode 100644
index 0eb0f929df..0000000000
--- a/deps/v8/src/cfg.h
+++ /dev/null
@@ -1,871 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CFG_H_
-#define V8_CFG_H_
-
-#include "ast.h"
-
-namespace v8 {
-namespace internal {
-
-class ExitNode;
-class Location;
-
-// Translate a source AST into a control-flow graph (CFG). The CFG contains
-// single-entry, single-exit blocks of straight-line instructions and
-// administrative nodes.
-//
-// Instructions are described by the following grammar.
-//
-// <Instruction> ::=
-// Move <Location> <Value>
-// | PropLoad <Location> <Value> <Value>
-// | BinaryOp <Location> Token::Value <Value> <Value>
-// | Return Nowhere <Value>
-// | Position <Int>
-//
-// Values are trivial expressions:
-//
-// <Value> ::= Constant | <Location>
-//
-// Locations are storable values ('lvalues'). They can be slots,
-// compiler-generated temporaries, or the special location 'Nowhere'
-// indicating that no value is needed.
-//
-// <Location> ::=
-// SlotLocation Slot::Type <Index>
-// | TempLocation
-// | Nowhere
-
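A hand-derived example under this grammar (Position instructions elided): the body 'return 1 + x;' of a one-parameter function lowers to roughly

    BinaryOp Temp0 ADD Constant(1) Slot(PARAMETER, 0)
    Return Nowhere Temp0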
-
-// Administrative nodes: There are several types of 'administrative' nodes
-// that do not contain instructions and do not necessarily have a single
-// predecessor and a single successor.
-//
-// EntryNode: there is a distinguished entry node that has no predecessors
-// and a single successor.
-//
-// ExitNode: there is a distinguished exit node that has arbitrarily many
-// predecessors and no successor.
-//
-// JoinNode: join nodes have multiple predecessors and a single successor.
-//
-// BranchNode: branch nodes have a single predecessor and multiple
-// successors.
-
-
-// A convenient class to keep 'global' values when building a CFG. Since
-// CFG construction can be invoked recursively, CFG globals are stacked.
-class CfgGlobals BASE_EMBEDDED {
- public:
- explicit CfgGlobals(FunctionLiteral* fun);
-
- ~CfgGlobals() { top_ = previous_; }
-
- static CfgGlobals* current() {
- ASSERT(top_ != NULL);
- return top_;
- }
-
- // The function currently being compiled.
- FunctionLiteral* fun() { return global_fun_; }
-
- // The shared global exit node for all exits from the function.
- ExitNode* exit() { return global_exit_; }
-
- // A singleton.
- Location* nowhere() { return nowhere_; }
-
-#ifdef DEBUG
- int next_node_number() { return node_counter_++; }
- int next_temp_number() { return temp_counter_++; }
-#endif
-
- private:
- static CfgGlobals* top_;
- FunctionLiteral* global_fun_;
- ExitNode* global_exit_;
- Location* nowhere_;
-
-#ifdef DEBUG
- // Used to number nodes and temporaries when printing.
- int node_counter_;
- int temp_counter_;
-#endif
-
- CfgGlobals* previous_;
-};
-
-
-class SlotLocation;
-
-// Values represent trivial source expressions: ones with no side effects
-// and that do not require code to be generated.
-class Value : public ZoneObject {
- public:
- virtual ~Value() {}
-
- // Predicates:
-
- virtual bool is_temporary() { return false; }
- virtual bool is_slot() { return false; }
- virtual bool is_constant() { return false; }
-
- // True if the value is a temporary allocated to the stack in
- // fast-compilation mode.
- virtual bool is_on_stack() { return false; }
-
- // Support for fast-compilation mode:
-
- // Move the value into a register.
- virtual void Get(MacroAssembler* masm, Register reg) = 0;
-
- // Push the value on the stack.
- virtual void Push(MacroAssembler* masm) = 0;
-
- // Move the value into a slot location.
- virtual void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-};
-
-
-// A compile-time constant that appeared as a literal in the source AST.
-class Constant : public Value {
- public:
- explicit Constant(Handle<Object> handle) : handle_(handle) {}
-
- // Cast accessor.
- static Constant* cast(Value* value) {
- ASSERT(value->is_constant());
- return reinterpret_cast<Constant*>(value);
- }
-
- // Accessors.
- Handle<Object> handle() { return handle_; }
-
- // Predicates.
- bool is_constant() { return true; }
-
- // Support for fast-compilation mode.
- void Get(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Handle<Object> handle_;
-};
-
-
-// Locations are values that can be stored into ('lvalues').
-class Location : public Value {
- public:
- virtual ~Location() {}
-
- // Static factory function returning the singleton nowhere location.
- static Location* Nowhere() {
- return CfgGlobals::current()->nowhere();
- }
-
- // Support for fast-compilation mode:
-
- // Assumes temporaries have been allocated.
- virtual void Get(MacroAssembler* masm, Register reg) = 0;
-
- // Store the value in a register to the location. Assumes temporaries
- // have been allocated.
- virtual void Set(MacroAssembler* masm, Register reg) = 0;
-
- // Assumes temporaries have been allocated, and if the value is a
- // temporary it was not allocated to the stack.
- virtual void Push(MacroAssembler* masm) = 0;
-
- // Emit code to move a value into this location.
- virtual void Move(MacroAssembler* masm, Value* value) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-};
-
-
-// Nowhere is a special (singleton) location that indicates the value of a
-// computation is not needed (though its side effects are).
-class Nowhere : public Location {
- public:
- // We should not try to emit code to read Nowhere.
- void Get(MacroAssembler* masm, Register reg) { UNREACHABLE(); }
- void Push(MacroAssembler* masm) { UNREACHABLE(); }
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) { UNREACHABLE(); }
-
- // Setting Nowhere is ignored.
- void Set(MacroAssembler* masm, Register reg) {}
- void Move(MacroAssembler* masm, Value* value) {}
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Nowhere() {}
-
- friend class CfgGlobals;
-};
-
-
-// SlotLocations represent parameters and stack-allocated (i.e.,
-// non-context) local variables.
-class SlotLocation : public Location {
- public:
- SlotLocation(Slot::Type type, int index) : type_(type), index_(index) {}
-
- // Cast accessor.
- static SlotLocation* cast(Value* value) {
- ASSERT(value->is_slot());
- return reinterpret_cast<SlotLocation*>(value);
- }
-
- // Accessors.
- Slot::Type type() { return type_; }
- int index() { return index_; }
-
- // Predicates.
- bool is_slot() { return true; }
-
- // Support for fast-compilation mode.
- void Get(MacroAssembler* masm, Register reg);
- void Set(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void Move(MacroAssembler* masm, Value* value);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Slot::Type type_;
- int index_;
-};
-
-
-// TempLocations represent compiler generated temporaries. They are
-// allocated to registers or memory either before code generation (in the
-// optimized-for-speed compiler) or on the fly during code generation (in
-// the optimized-for-space compiler).
-class TempLocation : public Location {
- public:
- // Fast-compilation mode allocation decisions.
- enum Where {
- NOT_ALLOCATED, // Not yet allocated.
- ACCUMULATOR, // Allocated to the dedicated accumulator register.
- STACK // Allocated to the stack.
- };
-
- TempLocation() : where_(NOT_ALLOCATED) {
-#ifdef DEBUG
- number_ = -1;
-#endif
- }
-
- // Cast accessor.
- static TempLocation* cast(Value* value) {
- ASSERT(value->is_temporary());
- return reinterpret_cast<TempLocation*>(value);
- }
-
- // Accessors.
- Where where() { return where_; }
- void set_where(Where where) {
- ASSERT(where_ == TempLocation::NOT_ALLOCATED);
- where_ = where;
- }
-
- // Predicates.
- bool is_on_stack() { return where_ == STACK; }
- bool is_temporary() { return true; }
-
- // Support for fast-compilation mode. Assume the temp has been allocated.
- void Get(MacroAssembler* masm, Register reg);
- void Set(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void Move(MacroAssembler* masm, Value* value);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- int number() {
- if (number_ == -1) number_ = CfgGlobals::current()->next_temp_number();
- return number_;
- }
-
- void Print();
-#endif
-
- private:
- Where where_;
-
-#ifdef DEBUG
- int number_;
-#endif
-};
-
-
-// Instructions are computations. They represent non-trivial source
-// expressions: typically ones that have side effects and require code to
-// be generated.
-class Instruction : public ZoneObject {
- public:
- // Accessors.
- Location* location() { return location_; }
- void set_location(Location* location) { location_ = location; }
-
- // Support for fast-compilation mode:
-
- // Emit code to perform the instruction.
- virtual void Compile(MacroAssembler* masm) = 0;
-
- // Allocate a temporary which is the result of the immediate predecessor
- // instruction. It is allocated to the accumulator register if it is used
- // as an operand to this instruction, otherwise to the stack.
- virtual void FastAllocate(TempLocation* temp) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- protected:
- // Every instruction has a location where its result is stored (which may
- // be Nowhere).
- explicit Instruction(Location* location) : location_(location) {}
-
- virtual ~Instruction() {}
-
- Location* location_;
-};
-
-
-// Base class of instructions that have no input operands.
-class ZeroOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands (nothing).
- virtual void Print() {}
-#endif
-
- protected:
- explicit ZeroOperandInstruction(Location* loc) : Instruction(loc) {}
-};
-
-
-// Base class of instructions that have a single input operand.
-class OneOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands.
- virtual void Print();
-#endif
-
- protected:
- OneOperandInstruction(Location* loc, Value* value)
- : Instruction(loc), value_(value) {
- }
-
- Value* value_;
-};
-
-
-// Base class of instructions that have two input operands.
-class TwoOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands.
- virtual void Print();
-#endif
-
- protected:
- TwoOperandInstruction(Location* loc, Value* value0, Value* value1)
- : Instruction(loc), value0_(value0), value1_(value1) {
- }
-
- Value* value0_;
- Value* value1_;
-};
-
-
-// A phantom instruction that indicates the start of a statement. It
-// causes the statement position to be recorded in the relocation
-// information but generates no code.
-class PositionInstr : public ZeroOperandInstruction {
- public:
- explicit PositionInstr(int pos)
- : ZeroOperandInstruction(CfgGlobals::current()->nowhere()), pos_(pos) {
- }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
- // This should not be called. The last instruction of the previous
- // statement should not have a temporary as its location.
- void FastAllocate(TempLocation* temp) { UNREACHABLE(); }
-
-#ifdef DEBUG
- // Printing support. Print nothing.
- void Print() {}
-#endif
-
- private:
- int pos_;
-};
-
-
-// Move a value to a location.
-class MoveInstr : public OneOperandInstruction {
- public:
- MoveInstr(Location* loc, Value* value)
- : OneOperandInstruction(loc, value) {
- }
-
- // Accessors.
- Value* value() { return value_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- // Printing support.
- void Print();
-#endif
-};
-
-
-// Load a property from a receiver, leaving the result in a location.
-class PropLoadInstr : public TwoOperandInstruction {
- public:
- PropLoadInstr(Location* loc, Value* object, Value* key)
- : TwoOperandInstruction(loc, object, key) {
- }
-
- // Accessors.
- Value* object() { return value0_; }
- Value* key() { return value1_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// Perform a (non-short-circuited) binary operation on a pair of values,
-// leaving the result in a location.
-class BinaryOpInstr : public TwoOperandInstruction {
- public:
- BinaryOpInstr(Location* loc, Token::Value op, Value* left, Value* right)
- : TwoOperandInstruction(loc, left, right), op_(op) {
- }
-
- // Accessors.
- Value* left() { return value0_; }
- Value* right() { return value1_; }
- Token::Value op() { return op_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Token::Value op_;
-};
-
-
-// Return a value. Has the side effect of moving its value into the return
-// value register. Can only occur as the last instruction in an instruction
-// block, and implies that the block is closed (cannot have instructions
-// appended or graph fragments concatenated to the end) and that the block's
-// successor is the global exit node for the current function.
-class ReturnInstr : public OneOperandInstruction {
- public:
- explicit ReturnInstr(Value* value)
- : OneOperandInstruction(CfgGlobals::current()->nowhere(), value) {
- }
-
- virtual ~ReturnInstr() {}
-
- // Accessors.
- Value* value() { return value_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// Nodes make up control-flow graphs.
-class CfgNode : public ZoneObject {
- public:
- CfgNode() : is_marked_(false) {
-#ifdef DEBUG
- number_ = -1;
-#endif
- }
-
- virtual ~CfgNode() {}
-
- // Because CFGs contain cycles, nodes support marking during traversal
- // (e.g., for printing or compilation). The traversal functions will mark
- // unmarked nodes and backtrack if they encounter a marked one. After a
- // traversal, the graph should be explicitly unmarked by calling Unmark on
- // the entry node.
- bool is_marked() { return is_marked_; }
- virtual void Unmark() = 0;
-
- // Predicates:
-
- // True if the node is an instruction block.
- virtual bool is_block() { return false; }
-
- // Support for fast-compilation mode. Emit the instructions or control
- // flow represented by the node.
- virtual void Compile(MacroAssembler* masm) = 0;
-
-#ifdef DEBUG
- int number() {
- if (number_ == -1) number_ = CfgGlobals::current()->next_node_number();
- return number_;
- }
-
- virtual void Print() = 0;
-#endif
-
- protected:
- bool is_marked_;
-
-#ifdef DEBUG
- int number_;
-#endif
-};
-
-
-// A block is a single-entry, single-exit block of instructions.
-class InstructionBlock : public CfgNode {
- public:
- InstructionBlock() : successor_(NULL), instructions_(4) {}
-
- virtual ~InstructionBlock() {}
-
- void Unmark();
-
- // Cast accessor.
- static InstructionBlock* cast(CfgNode* node) {
- ASSERT(node->is_block());
- return reinterpret_cast<InstructionBlock*>(node);
- }
-
- bool is_block() { return true; }
-
- // Accessors.
- CfgNode* successor() { return successor_; }
-
- void set_successor(CfgNode* succ) {
- ASSERT(successor_ == NULL);
- successor_ = succ;
- }
-
- ZoneList<Instruction*>* instructions() { return &instructions_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
- // Add an instruction to the end of the block.
- void Append(Instruction* instr) { instructions_.Add(instr); }
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- CfgNode* successor_;
- ZoneList<Instruction*> instructions_;
-};
-
-
-// An entry node (one per function).
-class EntryNode : public CfgNode {
- public:
- explicit EntryNode(InstructionBlock* succ) : successor_(succ) {}
-
- virtual ~EntryNode() {}
-
- void Unmark();
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- InstructionBlock* successor_;
-};
-
-
-// An exit node (one per function).
-class ExitNode : public CfgNode {
- public:
- ExitNode() {}
-
- virtual ~ExitNode() {}
-
- void Unmark();
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// A CFG consists of a linked structure of nodes. Nodes are linked by
-// pointing to their successors, always beginning with a (single) entry node
-// (not necessarily of type EntryNode). If it is still possible to add
-// nodes to the end of the graph (i.e., there is a (single) path that does
-// not end with the global exit node), then the CFG has an exit node as
-// well.
-//
-// The empty CFG is represented by a NULL entry and a NULL exit.
-//
-// We use the term 'open fragment' to mean a CFG whose entry and exits are
-// both instruction blocks. It is always possible to add instructions and
-// nodes to the beginning or end of an open fragment.
-//
-// We use the term 'closed fragment' to mean a CFG whose entry is an
-// instruction block and whose exit is NULL (all paths go to the global
-// exit).
-//
-// We use the term 'fragment' to refer to a CFG that is known to be an open
-// or closed fragment.
-class Cfg : public ZoneObject {
- public:
- // Create an empty CFG fragment.
- Cfg() : entry_(NULL), exit_(NULL) {}
-
- // Build the CFG for a function. The returned CFG begins with an
- // EntryNode and all paths end with the ExitNode.
- static Cfg* Build();
-
- // The entry and exit nodes of the CFG (not necessarily EntryNode and
- // ExitNode).
- CfgNode* entry() { return entry_; }
- CfgNode* exit() { return exit_; }
-
- // True if the CFG has no nodes.
- bool is_empty() { return entry_ == NULL; }
-
- // True if the CFG has an available exit node (i.e., it can be appended or
- // concatenated to).
- bool has_exit() { return exit_ != NULL; }
-
- // Add an EntryNode to a CFG fragment. It is no longer a fragment
- // (instructions can no longer be prepended).
- void PrependEntryNode();
-
- // Append an instruction to the end of an open fragment.
- void Append(Instruction* instr);
-
- // Append a return instruction to the end of an open fragment and make
- // it a closed fragment (the exit's successor becomes the global exit node).
- void AppendReturnInstruction(Value* value);
-
- // Glue another CFG fragment to the end of this (open) fragment.
- void Concatenate(Cfg* other);
-
- // Support for compilation. Compile the entire CFG.
- Handle<Code> Compile(Handle<Script> script);
-
-#ifdef DEBUG
- // Support for printing.
- void Print();
-#endif
-
- private:
- // Entry and exit nodes.
- CfgNode* entry_;
- CfgNode* exit_;
-};
-
-
-// An implementation of a set of locations (currently slot locations); most
-// of the operations are destructive.
-class LocationSet BASE_EMBEDDED {
- public:
- // Construct an empty location set.
- LocationSet() : parameters_(0), locals_(0) {}
-
- // Raw accessors.
- uintptr_t parameters() { return parameters_; }
- uintptr_t locals() { return locals_; }
-
- // Make this the empty set.
- void Empty() {
- parameters_ = locals_ = 0;
- }
-
- // Insert an element.
- void AddElement(SlotLocation* location) {
- if (location->type() == Slot::PARAMETER) {
- // Parameter indexes begin with -1 ('this').
- ASSERT(location->index() < kBitsPerPointer - 1);
- parameters_ |= (1 << (location->index() + 1));
- } else {
- ASSERT(location->type() == Slot::LOCAL);
- ASSERT(location->index() < kBitsPerPointer);
- locals_ |= (1 << location->index());
- }
- }
-
- // (Destructively) compute the union with another set.
- void Union(LocationSet* other) {
- parameters_ |= other->parameters();
- locals_ |= other->locals();
- }
-
- bool Contains(SlotLocation* location) {
- if (location->type() == Slot::PARAMETER) {
- ASSERT(location->index() < kBitsPerPointer - 1);
- return (parameters_ & (1 << (location->index() + 1)));
- } else {
- ASSERT(location->type() == Slot::LOCAL);
- ASSERT(location->index() < kBitsPerPointer);
- return (locals_ & (1 << location->index()));
- }
- }
-
- private:
- uintptr_t parameters_;
- uintptr_t locals_;
-};
-
-
-// An ExpressionCfgBuilder traverses an expression and returns an open CFG
-// fragment (currently a possibly empty list of instructions represented by
-// a singleton instruction block) and the expression's value.
-//
-// Failure to build the CFG is indicated by a NULL CFG.
-class ExpressionCfgBuilder : public AstVisitor {
- public:
- ExpressionCfgBuilder() : destination_(NULL), value_(NULL), graph_(NULL) {}
-
- // Result accessors.
- Value* value() { return value_; }
- Cfg* graph() { return graph_; }
- LocationSet* assigned_vars() { return &assigned_vars_; }
-
- // Build the CFG for an expression and remember its value. The
- // destination is a 'hint' for where the value should go, which may be
- // ignored; NULL indicates no preference.
- //
- // Concretely, if the expression needs to generate a temporary for its
- // value, it should use the passed destination or generate one if NULL.
- void Build(Expression* expr, Location* destination) {
- value_ = NULL;
- graph_ = new Cfg();
- assigned_vars_.Empty();
- destination_ = destination;
- Visit(expr);
- }
-
- // AST node visitors.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- // State for the visitor. Input parameter:
- Location* destination_;
-
- // Output parameters:
- Value* value_;
- Cfg* graph_;
- LocationSet assigned_vars_;
-};
-
-
-// A StatementCfgBuilder maintains a CFG fragment accumulator. When it
-// visits a statement, it concatenates the CFG for the statement to the end
-// of the accumulator.
-class StatementCfgBuilder : public AstVisitor {
- public:
- StatementCfgBuilder() : graph_(new Cfg()) {}
-
- Cfg* graph() { return graph_; }
-
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visitors.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- // State for the visitor. Input/output parameter:
- Cfg* graph_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CFG_H_
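
The header removed above defined the whole multipass vocabulary: Values
(constants, slots, temporaries), Locations, Instructions, CfgNodes, and
open/closed Cfg fragments. As a hedged sketch of how those pieces composed,
based only on the declarations above ('expr' stands for any parsed
Expression; error handling is elided), building and closing a function-body
fragment looked roughly like this:

    // Hypothetical sketch against the removed cfg.h API.
    Cfg* BuildFunctionBody(Expression* expr) {
      ExpressionCfgBuilder builder;
      builder.Build(expr, NULL);          // NULL: no destination preference.
      Cfg* graph = builder.graph();       // Open fragment, or NULL on failure.
      if (graph == NULL) return NULL;
      graph->AppendReturnInstruction(builder.value());  // Close the fragment.
      graph->PrependEntryNode();          // All paths now run entry -> exit.
      return graph;
    }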
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 9a00ae2b65..a18fa0fec7 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -469,44 +469,32 @@ bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
}
-void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- int pos = fun->start_position();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
+static inline void RecordPositions(CodeGenerator* cgen, int pos) {
+ if (pos != RelocInfo::kNoPosition) {
+ cgen->masm()->RecordStatementPosition(pos);
+ cgen->masm()->RecordPosition(pos);
}
}
+void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
+ if (FLAG_debug_info) RecordPositions(this, fun->start_position());
+}
+
+
void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- int pos = fun->end_position();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
- }
+ if (FLAG_debug_info) RecordPositions(this, fun->end_position());
}
-void CodeGenerator::CodeForStatementPosition(AstNode* node) {
- if (FLAG_debug_info) {
- int pos = node->statement_pos();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
- }
+void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
+ if (FLAG_debug_info) RecordPositions(this, stmt->statement_pos());
}
void CodeGenerator::CodeForSourcePosition(int pos) {
- if (FLAG_debug_info) {
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordPosition(pos);
- }
+ if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+ masm()->RecordPosition(pos);
}
}
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 15f64794db..6ba7a9a9d1 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -28,7 +28,6 @@
#include "v8.h"
#include "bootstrapper.h"
-#include "cfg.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "compiler.h"
@@ -79,22 +78,6 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
return Handle<Code>::null();
}
- if (FLAG_multipass) {
- CfgGlobals scope(literal);
- Cfg* cfg = Cfg::Build();
-#ifdef DEBUG
- if (FLAG_print_cfg && cfg != NULL) {
- SmartPointer<char> name = literal->name()->ToCString();
- PrintF("Function \"%s\":\n", *name);
- cfg->Print();
- PrintF("\n");
- }
-#endif
- if (cfg != NULL) {
- return cfg->Compile(script);
- }
- }
-
// Generate code and return it.
Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
return result;
@@ -121,8 +104,6 @@ static Handle<JSFunction> MakeFunction(bool is_global,
ScriptDataImpl* pre_data) {
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- // Make sure we have an initial stack limit.
- StackGuard guard;
PostponeInterruptsScope postpone;
ASSERT(!i::Top::global_context().is_null());
@@ -351,8 +332,6 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
- // Make sure we have an initial stack limit.
- StackGuard guard;
PostponeInterruptsScope postpone;
// Compute name, source code and script data.
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index 3dba53a435..9d5cace03b 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -65,6 +65,7 @@ void DebuggerAgent::Run() {
// Accept connections on the bound port.
while (!terminate_) {
bool ok = server_->Listen(1);
+ listening_->Signal();
if (ok) {
// Accept the new connection.
Socket* client = server_->Accept();
@@ -93,6 +94,10 @@ void DebuggerAgent::Shutdown() {
}
+void DebuggerAgent::WaitUntilListening() {
+ listening_->Wait();
+}
+
void DebuggerAgent::CreateSession(Socket* client) {
ScopedLock with(session_access_);
diff --git a/deps/v8/src/debug-agent.h b/deps/v8/src/debug-agent.h
index 04f883f40e..3647994364 100644
--- a/deps/v8/src/debug-agent.h
+++ b/deps/v8/src/debug-agent.h
@@ -47,7 +47,8 @@ class DebuggerAgent: public Thread {
: name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
- terminate_now_(OS::CreateSemaphore(0)) {
+ terminate_now_(OS::CreateSemaphore(0)),
+ listening_(OS::CreateSemaphore(0)) {
ASSERT(instance_ == NULL);
instance_ = this;
}
@@ -57,6 +58,7 @@ class DebuggerAgent: public Thread {
}
void Shutdown();
+ void WaitUntilListening();
private:
void Run();
@@ -72,6 +74,7 @@ class DebuggerAgent: public Thread {
Mutex* session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
Semaphore* terminate_now_; // Semaphore to signal termination.
+ Semaphore* listening_;
static DebuggerAgent* instance_;
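
The new listening_ semaphore closes a startup race: an embedder could
previously try to connect before the agent's socket was actually listening.
A condensed sketch of the handshake implied by the two hunks above (Start()
is assumed from the Thread base class; error handling omitted):

    // Agent thread (DebuggerAgent::Run): signal readiness on each pass.
    while (!terminate_) {
      bool ok = server_->Listen(1);
      listening_->Signal();            // Unblocks WaitUntilListening().
      if (ok) { /* accept and serve the client connection */ }
    }

    // Embedder thread: block until the agent is actually listening.
    agent->Start();
    agent->WaitUntilListening();       // Returns once Signal() has run.
    // A debugger client can connect safely from here on.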
diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js
index ce70c75b4e..cb789beb95 100644
--- a/deps/v8/src/debug-delay.js
+++ b/deps/v8/src/debug-delay.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// jsminify this file, js2c: jsmin
-
// Default number of frames to include in the response to backtrace request.
const kDefaultBacktraceLength = 10;
@@ -35,7 +33,7 @@ const Debug = {};
// Regular expression to skip "crud" at the beginning of a source line which is
// not really code. Currently the regular expression matches whitespace and
// comments.
-const sourceLineBeginningSkip = /^(?:[ \v\h]*(?:\/\*.*?\*\/)*)*/;
+const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
// Debug events which can occur in the V8 JavaScript engine. These originate
// from the API include file debug.h.
@@ -350,7 +348,7 @@ ScriptBreakPoint.prototype.set = function (script) {
if (!script.sourceColumnStart_) {
script.sourceColumnStart_ = new Array(script.lineCount());
}
-
+
// Fill cache if needed and get column where the actual source starts.
if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
script.sourceColumnStart_[line] =
@@ -361,11 +359,11 @@ ScriptBreakPoint.prototype.set = function (script) {
// Convert the line and column into an absolute position within the script.
var pos = Debug.findScriptSourcePosition(script, this.line(), column);
-
+
// If the position is not found in the script (the script might be shorter
// than it used to be) just ignore it.
if (pos === null) return;
-
+
// Create a break point object and set the break point.
break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
break_point.setIgnoreCount(this.ignoreCount());
@@ -492,7 +490,7 @@ Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
// Returns the character position in a script based on a line number and an
// optional position within that line.
Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
- var location = script.locationFromLine(opt_line, opt_column);
+ var location = script.locationFromLine(opt_line, opt_column);
return location ? location.position : null;
}
@@ -944,7 +942,7 @@ ExceptionEvent.prototype.toJSONProtocol = function() {
o.body = { uncaught: this.uncaught_,
exception: MakeMirror(this.exception_)
};
-
+
// Exceptions might happen without any JavaScript frames.
if (this.exec_state_.frameCount() > 0) {
o.body.sourceLine = this.sourceLine();
@@ -1097,7 +1095,7 @@ DebugCommandProcessor.prototype.processDebugRequest = function (request) {
function ProtocolMessage(request) {
// Update sequence number.
this.seq = next_response_seq++;
-
+
if (request) {
// If message is based on a request this is a response. Fill the initial
// response from the request.
@@ -1487,7 +1485,7 @@ DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request,
response.failed('Missing argument "groupId"');
return;
}
-
+
var cleared_break_points = [];
var new_script_break_points = [];
for (var i = 0; i < script_break_points.length; i++) {
@@ -1603,7 +1601,7 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
if (index < 0 || this.exec_state_.frameCount() <= index) {
return response.failed('Invalid frame number');
}
-
+
this.exec_state_.setSelectedFrame(request.arguments.number);
}
response.body = this.exec_state_.frame();
@@ -1633,7 +1631,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
// Get the frame for which the scopes are requested.
var frame = this.frameForScopeRequest_(request);
-
+
// Fill all scopes for this frame.
var total_scopes = frame.scopeCount();
var scopes = [];
@@ -1750,7 +1748,7 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
includeSource = %ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
-
+
// Lookup handles.
var mirrors = {};
for (var i = 0; i < handles.length; i++) {
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 3c2bfa80c6..ec658d68f0 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -2498,6 +2498,11 @@ void Debugger::StopAgent() {
}
+void Debugger::WaitForAgent() {
+ if (agent_ != NULL)
+ agent_->WaitUntilListening();
+}
+
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index d6b2c088d3..29c2bc2036 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -354,6 +354,7 @@ class Debug {
static char* ArchiveDebug(char* to);
static char* RestoreDebug(char* from);
static int ArchiveSpacePerThread();
+ static void FreeThreadResources() { }
// Mirror cache handling.
static void ClearMirrorCache();
@@ -645,6 +646,9 @@ class Debugger {
// Stop the debugger agent.
static void StopAgent();
+ // Blocks until the agent has started listening for connections.
+ static void WaitForAgent();
+
// Unload the debugger if possible. Only called when no debugger is currently
// active.
static void UnloadDebugger();
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 04ec9059f3..8bc6b74e14 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -61,9 +61,6 @@ static Handle<Object> Invoke(bool construct,
// Entering JavaScript.
VMState state(JS);
- // Guard the stack against too much recursion.
- StackGuard guard;
-
// Placeholder for return value.
Object* value = reinterpret_cast<Object*>(kZapValue);
@@ -217,55 +214,6 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
StackGuard::ThreadLocal StackGuard::thread_local_;
-StackGuard::StackGuard() {
- // NOTE: Overall the StackGuard code assumes that the stack grows towards
- // lower addresses.
- ExecutionAccess access;
- if (thread_local_.nesting_++ == 0) {
- // Initial StackGuard is being set. We will set the stack limits based on
- // the current stack pointer allowing the stack to grow kLimitSize from
- // here.
-
- // Ensure that either the stack limits are unset (kIllegalLimit) or that
- // they indicate a pending interruption. The interrupt limit will be
- // temporarily reset through the code below and reestablished if the
- // interrupt flags indicate that an interrupt is pending.
- ASSERT(thread_local_.jslimit_ == kIllegalLimit ||
- (thread_local_.jslimit_ == kInterruptLimit &&
- thread_local_.interrupt_flags_ != 0));
- ASSERT(thread_local_.climit_ == kIllegalLimit ||
- (thread_local_.climit_ == kInterruptLimit &&
- thread_local_.interrupt_flags_ != 0));
-
- uintptr_t limit = GENERATED_CODE_STACK_LIMIT(kLimitSize);
- thread_local_.initial_jslimit_ = thread_local_.jslimit_ = limit;
- Heap::SetStackLimit(limit);
- // NOTE: The check for overflow is not safe as there is no guarantee that
- // the running thread has its stack in all memory up to address 0x00000000.
- thread_local_.initial_climit_ = thread_local_.climit_ =
- reinterpret_cast<uintptr_t>(this) >= kLimitSize ?
- reinterpret_cast<uintptr_t>(this) - kLimitSize : 0;
-
- if (thread_local_.interrupt_flags_ != 0) {
- set_limits(kInterruptLimit, access);
- }
- }
- // Ensure that proper limits have been set.
- ASSERT(thread_local_.jslimit_ != kIllegalLimit &&
- thread_local_.climit_ != kIllegalLimit);
- ASSERT(thread_local_.initial_jslimit_ != kIllegalLimit &&
- thread_local_.initial_climit_ != kIllegalLimit);
-}
-
-
-StackGuard::~StackGuard() {
- ExecutionAccess access;
- if (--thread_local_.nesting_ == 0) {
- set_limits(kIllegalLimit, access);
- }
-}
-
-
bool StackGuard::IsStackOverflow() {
ExecutionAccess access;
return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -285,15 +233,16 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
ExecutionAccess access;
// If the current limits are special (e.g. due to a pending interrupt) then
// leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
- thread_local_.jslimit_ = limit;
- Heap::SetStackLimit(limit);
+ thread_local_.jslimit_ = jslimit;
+ Heap::SetStackLimit(jslimit);
}
if (thread_local_.climit_ == thread_local_.initial_climit_) {
thread_local_.climit_ = limit;
}
thread_local_.initial_climit_ = limit;
- thread_local_.initial_jslimit_ = limit;
+ thread_local_.initial_jslimit_ = jslimit;
}
@@ -407,6 +356,61 @@ char* StackGuard::RestoreStackGuard(char* from) {
}
+static internal::Thread::LocalStorageKey stack_limit_key =
+ internal::Thread::CreateThreadLocalKey();
+
+
+void StackGuard::FreeThreadResources() {
+ Thread::SetThreadLocal(
+ stack_limit_key,
+ reinterpret_cast<void*>(thread_local_.initial_climit_));
+}
+
+
+void StackGuard::ThreadLocal::Clear() {
+ initial_jslimit_ = kIllegalLimit;
+ jslimit_ = kIllegalLimit;
+ initial_climit_ = kIllegalLimit;
+ climit_ = kIllegalLimit;
+ nesting_ = 0;
+ postpone_interrupts_nesting_ = 0;
+ interrupt_flags_ = 0;
+ Heap::SetStackLimit(kIllegalLimit);
+}
+
+
+void StackGuard::ThreadLocal::Initialize() {
+ if (initial_climit_ == kIllegalLimit) {
+ // Takes the address of the limit variable in order to find out where
+ // the top of stack is right now.
+ intptr_t limit = reinterpret_cast<intptr_t>(&limit) - kLimitSize;
+ initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ initial_climit_ = limit;
+ climit_ = limit;
+ Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit));
+ }
+ nesting_ = 0;
+ postpone_interrupts_nesting_ = 0;
+ interrupt_flags_ = 0;
+}
+
+
+void StackGuard::ClearThread(const ExecutionAccess& lock) {
+ thread_local_.Clear();
+}
+
+
+void StackGuard::InitThread(const ExecutionAccess& lock) {
+ thread_local_.Initialize();
+ void* stored_limit = Thread::GetThreadLocal(stack_limit_key);
+ // You should hold the ExecutionAccess lock when you call this.
+ if (stored_limit != NULL) {
+ StackGuard::SetStackLimit(reinterpret_cast<intptr_t>(stored_limit));
+ }
+}
+
+
// --- C a l l s t o n a t i v e s ---
#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
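
The replacement scheme above derives the initial limit lazily in
ThreadLocal::Initialize by taking the address of a local variable as an
approximation of the current stack top. The idiom in isolation, as a
minimal sketch assuming a downward-growing stack (the guard size
corresponds to kLimitSize in execution.h):

    // The address of a fresh local lives near the top of the stack, so
    // subtracting the guard size yields the lowest address the stack may
    // reach before a stack check fails.
    intptr_t ApproximateStackLimit(intptr_t guard_size) {
      intptr_t marker;  // Its address approximates the stack top.
      return reinterpret_cast<intptr_t>(&marker) - guard_size;
    }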
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 4cdfd2be6a..55307f71fd 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -141,14 +141,13 @@ class Execution : public AllStatic {
class ExecutionAccess;
-// Stack guards are used to limit the number of nested invocations of
-// JavaScript and the stack size used in each invocation.
-class StackGuard BASE_EMBEDDED {
+// StackGuard contains the handling of the limits that are used to limit the
+// number of nested invocations of JavaScript and the stack size used in each
+// invocation.
+class StackGuard : public AllStatic {
public:
- StackGuard();
-
- ~StackGuard();
-
+ // Pass the address beyond which the stack should not grow. The stack
+ // is assumed to grow downwards.
static void SetStackLimit(uintptr_t limit);
static Address address_of_jslimit() {
@@ -159,6 +158,13 @@ class StackGuard BASE_EMBEDDED {
static char* ArchiveStackGuard(char* to);
static char* RestoreStackGuard(char* from);
static int ArchiveSpacePerThread();
+ static void FreeThreadResources();
+ // Sets up the default stack guard for this thread if it has not
+ // already been set up.
+ static void InitThread(const ExecutionAccess& lock);
+ // Clears the stack guard for this thread so it does not look as if
+ // it has been set up.
+ static void ClearThread(const ExecutionAccess& lock);
static bool IsStackOverflow();
static bool IsPreempted();
@@ -175,6 +181,13 @@ class StackGuard BASE_EMBEDDED {
#endif
static void Continue(InterruptFlag after_what);
+ // This provides an asynchronous read of the stack limit for the current
+ // thread. There are no locks protecting this, but it is assumed that you
+ // have the global V8 lock if you are using multiple V8 threads.
+ static uintptr_t climit() {
+ return thread_local_.climit_;
+ }
+
static uintptr_t jslimit() {
return thread_local_.jslimit_;
}
@@ -183,13 +196,6 @@ class StackGuard BASE_EMBEDDED {
// You should hold the ExecutionAccess lock when calling this method.
static bool IsSet(const ExecutionAccess& lock);
- // This provides an asynchronous read of the stack limit for the current
- // thread. There are no locks protecting this, but it is assumed that you
- // have the global V8 lock if you are using multiple V8 threads.
- static uintptr_t climit() {
- return thread_local_.climit_;
- }
-
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
Heap::SetStackLimit(value);
@@ -200,14 +206,9 @@ class StackGuard BASE_EMBEDDED {
// Reset limits to initial values. For example after handling interrupt.
// You should hold the ExecutionAccess lock when calling this method.
static void reset_limits(const ExecutionAccess& lock) {
- if (thread_local_.nesting_ == 0) {
- // No limits have been set yet.
- set_limits(kIllegalLimit, lock);
- } else {
- thread_local_.jslimit_ = thread_local_.initial_jslimit_;
- Heap::SetStackLimit(thread_local_.jslimit_);
- thread_local_.climit_ = thread_local_.initial_climit_;
- }
+ thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+ Heap::SetStackLimit(thread_local_.jslimit_);
+ thread_local_.climit_ = thread_local_.initial_climit_;
}
// Enable or disable interrupts.
@@ -217,24 +218,19 @@ class StackGuard BASE_EMBEDDED {
static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
#ifdef V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
- static const uintptr_t kIllegalLimit = V8_UINT64_C(0xffffffffffffffff);
+ static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
static const uintptr_t kInterruptLimit = 0xfffffffe;
- static const uintptr_t kIllegalLimit = 0xffffffff;
+ static const uintptr_t kIllegalLimit = 0xfffffff8;
#endif
class ThreadLocal {
public:
- ThreadLocal()
- : initial_jslimit_(kIllegalLimit),
- jslimit_(kIllegalLimit),
- initial_climit_(kIllegalLimit),
- climit_(kIllegalLimit),
- nesting_(0),
- postpone_interrupts_nesting_(0),
- interrupt_flags_(0) {
- Heap::SetStackLimit(kIllegalLimit);
- }
+ ThreadLocal() { Clear(); }
+ // You should hold the ExecutionAccess lock when you call Initialize or
+ // Clear.
+ void Initialize();
+ void Clear();
uintptr_t initial_jslimit_;
uintptr_t jslimit_;
uintptr_t initial_climit_;
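
The kInterruptLimit/reset_limits machinery above implements interrupts by
reusing the stack check: forcing both limits to kInterruptLimit makes the
next generated-code stack check fail, diverting execution into the runtime,
which services the pending flags and restores the real limits. A hedged
sketch of that flow (these members are internal to StackGuard, so the real
call sites live inside execution.cc; the lock is an ExecutionAccess):

    // Request: trip every stack check until the interrupt is serviced.
    // Caller holds the ExecutionAccess lock, as the comments require.
    set_limits(kInterruptLimit, lock);

    // Service: once the runtime has handled the interrupt flags, restore
    // the per-thread initial limits.
    reset_limits(lock);   // Back to initial_jslimit_ / initial_climit_.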
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index d91b2662a1..622055c306 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -673,6 +673,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
}
+Handle<String> Factory::NumberToString(Handle<Object> number) {
+ CALL_HEAP_FUNCTION(Heap::NumberToString(*number), String);
+}
+
+
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
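
Factory::NumberToString is a thin handle-returning wrapper: by assumption
here (matching how CALL_HEAP_FUNCTION is used elsewhere in factory.cc), the
macro invokes the raw Heap function, retries the allocation if it fails,
and wraps the raw result in a handle. Usage is then a one-liner:

    // 'number' stands for any numeric Handle<Object> (Smi or HeapNumber);
    // the number-string cache in Heap::NumberToString makes repeated
    // conversions of the same value cheap.
    Handle<String> str = Factory::NumberToString(number);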
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index ddf71dec7a..0596fbf00c 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -286,6 +286,8 @@ class Factory : public AllStatic {
Handle<Object> value,
PropertyAttributes attributes);
+ static Handle<String> NumberToString(Handle<Object> number);
+
enum ApiInstanceType {
JavaScriptObject,
InnerGlobalObject,
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index c05feb4da9..91c5bcada8 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -133,7 +133,6 @@ DEFINE_bool(debug_info, true, "add debug information to compiled functions")
DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"Minimum length for automatic enable preparsing")
-DEFINE_bool(multipass, false, "use the multipass code generator")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -271,7 +270,6 @@ DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
-DEFINE_bool(print_cfg, false, "print control-flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 931e3b9bf0..b43ec53207 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -29,6 +29,7 @@
#include "accessors.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "debug.h"
@@ -46,10 +47,10 @@ v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
int HandleScope::NumberOfHandles() {
- int n = HandleScopeImplementer::instance()->Blocks()->length();
+ int n = HandleScopeImplementer::instance()->blocks()->length();
if (n == 0) return 0;
return ((n - 1) * kHandleBlockSize) +
- (current_.next - HandleScopeImplementer::instance()->Blocks()->last());
+ (current_.next - HandleScopeImplementer::instance()->blocks()->last());
}
@@ -67,8 +68,8 @@ Object** HandleScope::Extend() {
HandleScopeImplementer* impl = HandleScopeImplementer::instance();
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
- if (!impl->Blocks()->is_empty()) {
- Object** limit = &impl->Blocks()->last()[kHandleBlockSize];
+ if (!impl->blocks()->is_empty()) {
+ Object** limit = &impl->blocks()->last()[kHandleBlockSize];
if (current_.limit != limit) {
current_.limit = limit;
}
@@ -81,7 +82,7 @@ Object** HandleScope::Extend() {
result = impl->GetSpareOrNewBlock();
// Add the extension to the global list of blocks, but count the
// extension as part of the current scope.
- impl->Blocks()->Add(result);
+ impl->blocks()->Add(result);
current_.extensions++;
current_.limit = &result[kHandleBlockSize];
}
@@ -479,15 +480,17 @@ int GetScriptLineNumber(Handle<Script> script, int code_pos) {
}
+void CustomArguments::IterateInstance(ObjectVisitor* v) {
+ v->VisitPointers(values_, values_ + 4);
+}
+
+
// Compute the property keys from the interceptor.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- Handle<Object> data(interceptor->data());
- v8::AccessorInfo info(
- v8::Utils::ToLocal(receiver),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(object));
+ CustomArguments args(interceptor->data(), *receiver, *object);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::NamedPropertyEnumerator enum_fun =
@@ -507,11 +510,8 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- Handle<Object> data(interceptor->data());
- v8::AccessorInfo info(
- v8::Utils::ToLocal(receiver),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(object));
+ CustomArguments args(interceptor->data(), *receiver, *object);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::IndexedPropertyEnumerator enum_fun =
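
Both interceptor paths now share one pattern: the values the API callback
needs (data, receiver, holder) are packed into a single CustomArguments
block that the GC can walk (see CustomArguments::IterateInstance above,
which visits the block's four slots), and v8::AccessorInfo is rebuilt from
a pointer to the end of that block instead of from three separate handles:

    // Pack the callback arguments into one GC-visible block, then hand
    // the API an AccessorInfo view over it.
    CustomArguments args(interceptor->data(), *receiver, *object);
    v8::AccessorInfo info(args.end());
    // 'info' is what the named/indexed enumerator callback receives.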
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index c1122c4b79..ecb6919874 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -67,10 +67,9 @@ JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
if (obj->IsJSObject()) {
JSObject* js_obj = JSObject::cast(obj);
String* constructor = JSObject::cast(js_obj)->constructor_name();
- // Differentiate Array, Function, and Object instances.
+ // Differentiate Object and Array instances.
if (fine_grain && (constructor == Heap::Object_symbol() ||
- constructor == Heap::Array_symbol() ||
- constructor == Heap::function_class_symbol())) {
+ constructor == Heap::Array_symbol())) {
return JSObjectsCluster(constructor, obj);
} else {
return JSObjectsCluster(constructor);
@@ -163,31 +162,138 @@ class RetainersPrinter : public RetainerHeapProfile::Printer {
};
-class RetainerTreePrinter BASE_EMBEDDED {
+// Visitor for printing a cluster tree.
+class ClusterTreePrinter BASE_EMBEDDED {
public:
- explicit RetainerTreePrinter(StringStream* stream) : stream_(stream) {}
+ explicit ClusterTreePrinter(StringStream* stream) : stream_(stream) {}
void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size) {
Print(stream_, cluster, number_and_size);
}
static void Print(StringStream* stream,
const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& numNNber_and_size);
+ const NumberAndSizeInfo& number_and_size);
private:
StringStream* stream_;
};
-void RetainerTreePrinter::Print(StringStream* stream,
- const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
+void ClusterTreePrinter::Print(StringStream* stream,
+ const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
stream->Put(',');
cluster.Print(stream);
stream->Add(";%d", number_and_size.number());
}
+// Visitor for printing a retainer tree.
+class SimpleRetainerTreePrinter BASE_EMBEDDED {
+ public:
+ explicit SimpleRetainerTreePrinter(RetainerHeapProfile::Printer* printer)
+ : printer_(printer) {}
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+
+ private:
+ RetainerHeapProfile::Printer* printer_;
+};
+
+
+void SimpleRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ ClusterTreePrinter retainers_printer(&stream);
+ tree->ForEach(&retainers_printer);
+ printer_->PrintRetainers(cluster, stream);
+}
+
+
+// Visitor for aggregating the reference counts of equivalent clusters.
+class RetainersAggregator BASE_EMBEDDED {
+ public:
+ RetainersAggregator(ClustersCoarser* coarser, JSObjectsClusterTree* dest_tree)
+ : coarser_(coarser), dest_tree_(dest_tree) {}
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+
+ private:
+ ClustersCoarser* coarser_;
+ JSObjectsClusterTree* dest_tree_;
+};
+
+
+void RetainersAggregator::Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
+ if (eq.is_null()) eq = cluster;
+ JSObjectsClusterTree::Locator loc;
+ dest_tree_->Insert(eq, &loc);
+ NumberAndSizeInfo aggregated_number = loc.value();
+ aggregated_number.increment_number(number_and_size.number());
+ loc.set_value(aggregated_number);
+}
+
+
+// Visitor for printing a retainer tree. Aggregates equivalent retainer clusters.
+class AggregatingRetainerTreePrinter BASE_EMBEDDED {
+ public:
+ AggregatingRetainerTreePrinter(ClustersCoarser* coarser,
+ RetainerHeapProfile::Printer* printer)
+ : coarser_(coarser), printer_(printer) {}
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+
+ private:
+ ClustersCoarser* coarser_;
+ RetainerHeapProfile::Printer* printer_;
+};
+
+
+void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ if (!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
+ JSObjectsClusterTree dest_tree_;
+ RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
+ tree->ForEach(&retainers_aggregator);
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ ClusterTreePrinter retainers_printer(&stream);
+ dest_tree_.ForEach(&retainers_printer);
+ printer_->PrintRetainers(cluster, stream);
+}
+
+
+// A helper class for building a retainers tree that aggregates
+// all equivalent clusters.
+class RetainerTreeAggregator BASE_EMBEDDED {
+ public:
+ explicit RetainerTreeAggregator(ClustersCoarser* coarser)
+ : coarser_(coarser) {}
+ void Process(JSObjectsRetainerTree* input_tree) {
+ input_tree->ForEach(this);
+ }
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+ JSObjectsRetainerTree& output_tree() { return output_tree_; }
+
+ private:
+ ClustersCoarser* coarser_;
+ JSObjectsRetainerTree output_tree_;
+};
+
+
+void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
+ if (eq.is_null()) return;
+ JSObjectsRetainerTree::Locator loc;
+ if (output_tree_.Insert(eq, &loc)) {
+ loc.set_value(new JSObjectsClusterTree());
+ }
+ RetainersAggregator retainers_aggregator(coarser_, loc.value());
+ tree->ForEach(&retainers_aggregator);
+}
+
} // namespace
@@ -227,6 +333,8 @@ void JSObjectsCluster::Print(StringStream* accumulator) const {
accumulator->Add("(roots)");
} else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
accumulator->Add("(global property)");
+ } else if (constructor_ == FromSpecialCase(SELF)) {
+ accumulator->Add("(self)");
} else {
SmartPointer<char> s_name(
constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
@@ -287,9 +395,11 @@ inline int ClustersCoarser::ClusterBackRefs::Compare(
ClustersCoarser::ClustersCoarser()
- : zscope_(DELETE_ON_EXIT),
- sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
- current_pair_(NULL) {
+ : zscope_(DELETE_ON_EXIT),
+ sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
+ current_pair_(NULL),
+ current_set_(NULL),
+ self_(NULL) {
}
@@ -300,10 +410,12 @@ void ClustersCoarser::Call(const JSObjectsCluster& cluster,
ASSERT(current_pair_ == NULL);
current_pair_ = &pair;
current_set_ = new JSObjectsRetainerTree();
+ self_ = &cluster;
tree->ForEach(this);
sim_list_.Add(pair);
current_pair_ = NULL;
current_set_ = NULL;
+ self_ = NULL;
}
@@ -311,8 +423,13 @@ void ClustersCoarser::Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size) {
ASSERT(current_pair_ != NULL);
ASSERT(current_set_ != NULL);
- JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+ ASSERT(self_ != NULL);
JSObjectsRetainerTree::Locator loc;
+ if (JSObjectsCluster::Compare(*self_, cluster) == 0) {
+ current_pair_->refs.Add(JSObjectsCluster(JSObjectsCluster::SELF));
+ return;
+ }
+ JSObjectsCluster eq = GetCoarseEquivalent(cluster);
if (!eq.is_null()) {
if (current_set_->Find(eq, &loc)) return;
current_pair_->refs.Add(eq);
@@ -337,11 +454,7 @@ void ClustersCoarser::Process(JSObjectsRetainerTree* tree) {
int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
tree->ForEach(this);
- // To sort similarity list properly, references list of a cluster is
- // required to be sorted, thus 'O1 <- A, B' and 'O2 <- B, A' would
- // be considered equivalent. But we don't sort them explicitly
- // because we know that they come from a splay tree traversal, so
- // they are already sorted.
+ sim_list_.Iterate(ClusterBackRefs::SortRefsIterator);
sim_list_.Sort(ClusterBackRefsCmp);
return FillEqualityTree();
}
@@ -357,8 +470,9 @@ JSObjectsCluster ClustersCoarser::GetCoarseEquivalent(
bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
// Return true for coarsible clusters that have a non-identical equivalent.
- return cluster.can_be_coarsed() &&
- JSObjectsCluster::Compare(cluster, GetCoarseEquivalent(cluster)) != 0;
+ if (!cluster.can_be_coarsed()) return false;
+ JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+ return !eq.is_null() && JSObjectsCluster::Compare(cluster, eq) != 0;
}
@@ -396,10 +510,7 @@ const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
RetainerHeapProfile::RetainerHeapProfile()
- : zscope_(DELETE_ON_EXIT),
- coarse_cluster_tree_(NULL),
- current_printer_(NULL),
- current_stream_(NULL) {
+ : zscope_(DELETE_ON_EXIT) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
Heap::IterateRoots(&extractor);
@@ -434,10 +545,15 @@ void RetainerHeapProfile::CollectStats(HeapObject* obj) {
void RetainerHeapProfile::DebugPrintStats(
RetainerHeapProfile::Printer* printer) {
coarser_.Process(&retainers_tree_);
- ASSERT(current_printer_ == NULL);
- current_printer_ = printer;
- retainers_tree_.ForEach(this);
- current_printer_ = NULL;
+ // Print clusters that have no equivalents, aggregating their retainers.
+ AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
+ retainers_tree_.ForEach(&agg_printer);
+ // Now aggregate clusters that have equivalents...
+ RetainerTreeAggregator aggregator(&coarser_);
+ aggregator.Process(&retainers_tree_);
+ // ...and print them.
+ SimpleRetainerTreePrinter s_printer(printer);
+ aggregator.output_tree().ForEach(&s_printer);
}
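
DebugPrintStats now prints in two passes instead of the old reentrant
visitor (removed below): pass one walks the retainer tree and prints every
cluster that has no coarse equivalent, aggregating its retainers on the
fly; pass two folds the clusters that do have equivalents into a fresh
merged tree and prints that. Condensed from the hunk, with roles annotated:

    coarser_.Process(&retainers_tree_);              // Compute equivalences.
    AggregatingRetainerTreePrinter agg(&coarser_, printer);
    retainers_tree_.ForEach(&agg);                   // Pass 1: unique clusters.
    RetainerTreeAggregator aggregator(&coarser_);
    aggregator.Process(&retainers_tree_);            // Pass 2: merge equivalents.
    SimpleRetainerTreePrinter simple(printer);
    aggregator.output_tree().ForEach(&simple);       // Print the merged tree.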
@@ -447,44 +563,6 @@ void RetainerHeapProfile::PrintStats() {
}
-void RetainerHeapProfile::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- // First level of retainer graph.
- if (coarser_.HasAnEquivalent(cluster)) return;
- ASSERT(current_stream_ == NULL);
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- current_stream_ = &stream;
- ASSERT(coarse_cluster_tree_ == NULL);
- coarse_cluster_tree_ = new JSObjectsClusterTree();
- tree->ForEach(this);
- // Print aggregated counts and sizes.
- RetainerTreePrinter printer(current_stream_);
- coarse_cluster_tree_->ForEach(&printer);
- coarse_cluster_tree_ = NULL;
- current_printer_->PrintRetainers(cluster, stream);
- current_stream_ = NULL;
-}
-
-
-void RetainerHeapProfile::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- ASSERT(coarse_cluster_tree_ != NULL);
- ASSERT(current_stream_ != NULL);
- JSObjectsCluster eq = coarser_.GetCoarseEquivalent(cluster);
- if (eq.is_null()) {
- RetainerTreePrinter::Print(current_stream_, cluster, number_and_size);
- } else {
- // Aggregate counts and sizes for equivalent clusters.
- JSObjectsClusterTree::Locator loc;
- coarse_cluster_tree_->Insert(eq, &loc);
- NumberAndSizeInfo eq_number_and_size = loc.value();
- eq_number_and_size.increment_number(number_and_size.number());
- loc.set_value(eq_number_and_size);
- }
-}
-
-
//
// HeapProfiler class implementation.
//
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index adc3da2b86..7fda883f87 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -53,7 +53,8 @@ class JSObjectsCluster BASE_EMBEDDED {
// These special cases are used in retainer profile.
enum SpecialCase {
ROOTS = 1,
- GLOBAL_PROPERTY = 2
+ GLOBAL_PROPERTY = 2,
+ SELF = 3 // This case is used in ClustersCoarser only.
};
JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
@@ -77,6 +78,9 @@ class JSObjectsCluster BASE_EMBEDDED {
(a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
: cons_cmp;
}
+ static int Compare(const JSObjectsCluster* a, const JSObjectsCluster* b) {
+ return Compare(*a, *b);
+ }
bool is_null() const { return constructor_ == NULL; }
bool can_be_coarsed() const { return instance_ != NULL; }
@@ -93,6 +97,7 @@ class JSObjectsCluster BASE_EMBEDDED {
switch (special) {
case ROOTS: return Heap::result_symbol();
case GLOBAL_PROPERTY: return Heap::code_symbol();
+ case SELF: return Heap::catch_var_symbol();
default:
UNREACHABLE();
return NULL;
@@ -183,6 +188,8 @@ class ClustersCoarser BASE_EMBEDDED {
ClusterBackRefs& operator=(const ClusterBackRefs& src);
static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
+ void SortRefs() { refs.Sort(JSObjectsCluster::Compare); }
+ static void SortRefsIterator(ClusterBackRefs* ref) { ref->SortRefs(); }
JSObjectsCluster cluster;
ZoneList<JSObjectsCluster> refs;
@@ -219,6 +226,7 @@ class ClustersCoarser BASE_EMBEDDED {
EqualityTree eq_tree_;
ClusterBackRefs* current_pair_;
JSObjectsRetainerTree* current_set_;
+ const JSObjectsCluster* self_;
};
@@ -242,20 +250,9 @@ class RetainerHeapProfile BASE_EMBEDDED {
void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
private:
- // Limit on the number of retainers to be printed per cluster.
- static const int kMaxRetainersToPrint = 50;
ZoneScope zscope_;
JSObjectsRetainerTree retainers_tree_;
ClustersCoarser coarser_;
- // TODO(mnaganov): Use some helper class to hold these state variables.
- JSObjectsClusterTree* coarse_cluster_tree_;
- Printer* current_printer_;
- StringStream* current_stream_;
- public:
- // Used by JSObjectsRetainerTree::ForEach.
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
};
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 1a80d64784..dcc25a3c11 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -77,14 +77,17 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
int Heap::semispace_size_ = 512*KB;
int Heap::old_generation_size_ = 128*MB;
int Heap::initial_semispace_size_ = 128*KB;
+size_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::semispace_size_ = 16*MB;
int Heap::old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
+size_t Heap::code_range_size_ = 256*MB;
#else
int Heap::semispace_size_ = 8*MB;
int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
+size_t Heap::code_range_size_ = 0;
#endif
GCCallback Heap::global_gc_prologue_callback_ = NULL;
@@ -497,8 +500,8 @@ void Heap::PostGarbageCollectionProcessing() {
DisableAssertNoAllocation allow_allocation;
GlobalHandles::PostGarbageCollectionProcessing();
}
- // Update flat string readers.
- FlatStringReader::PostGarbageCollectionProcessing();
+ // Update relocatables.
+ Relocatable::PostGarbageCollectionProcessing();
}
@@ -1250,6 +1253,10 @@ Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// spaces.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -1261,7 +1268,8 @@ Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Object* Heap::AllocateHeapNumber(double value) {
// Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);
+ if (always_allocate()) return AllocateHeapNumber(value, TENURED);
+
// This version of AllocateHeapNumber is optimized for
// allocation in new space.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
@@ -1582,6 +1590,31 @@ Object* Heap::SmiOrNumberFromDouble(double value,
}
+Object* Heap::NumberToString(Object* number) {
+ Object* cached = GetNumberStringCache(number);
+ if (cached != undefined_value()) {
+ return cached;
+ }
+
+ char arr[100];
+ Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ const char* str;
+ if (number->IsSmi()) {
+ int num = Smi::cast(number)->value();
+ str = IntToCString(num, buffer);
+ } else {
+ double num = HeapNumber::cast(number)->value();
+ str = DoubleToCString(num, buffer);
+ }
+ Object* result = AllocateStringFromAscii(CStrVector(str));
+
+ if (!result->IsFailure()) {
+ SetNumberStringCache(number, String::cast(result));
+ }
+ return result;
+}
+
+
Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
return SmiOrNumberFromDouble(value,
true /* number object must be new */,
@@ -1862,6 +1895,9 @@ Object* Heap::AllocateByteArray(int length) {
AllocationSpace space =
size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = LO_SPACE;
+
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -1889,6 +1925,9 @@ Object* Heap::AllocatePixelArray(int length,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -1923,6 +1962,7 @@ Object* Heap::CreateCode(const CodeDesc& desc,
// Initialize the object
HeapObject::cast(result)->set_map(code_map());
Code* code = Code::cast(result);
+ ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
code->set_instruction_size(desc.instr_size);
code->set_relocation_size(desc.reloc_size);
code->set_sinfo_size(sinfo_size);
@@ -1967,6 +2007,7 @@ Object* Heap::CopyCode(Code* code) {
obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
+ ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
new_code->Relocate(new_addr - old_addr);
return new_code;
}
@@ -2532,13 +2573,17 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
int size = SeqAsciiString::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
+ : lo_space_->AllocateRaw(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -2565,13 +2610,17 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
int size = SeqTwoByteString::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
+ : lo_space_->AllocateRaw(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -2609,7 +2658,7 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* Heap::AllocateRawFixedArray(int length) {
// Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
+ if (always_allocate()) return AllocateFixedArray(length, TENURED);
// Allocate the raw data for a fixed array.
int size = FixedArray::SizeFor(length);
return size <= kMaxObjectSizeInNewSpace
@@ -2662,6 +2711,9 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
ASSERT(empty_fixed_array()->IsFixedArray());
if (length == 0) return empty_fixed_array();
+ // New space can't cope with forced allocation.
+ if (always_allocate()) pretenure = TENURED;
+
int size = FixedArray::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (pretenure != TENURED) {
@@ -3088,6 +3140,8 @@ void Heap::IterateStrongRoots(ObjectVisitor* v) {
SYNCHRONIZE_TAG("bootstrapper");
Top::Iterate(v);
SYNCHRONIZE_TAG("top");
+ Relocatable::Iterate(v);
+ SYNCHRONIZE_TAG("relocatable");
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::Iterate(v);
@@ -3212,6 +3266,14 @@ bool Heap::Setup(bool create_heap_objects) {
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
+  // On 64-bit platforms, we put all code objects in a 2 GB range of
+  // virtual address space, so that they can call each other with near calls.
+ if (code_range_size_ > 0) {
+ if (!CodeRange::Setup(code_range_size_)) {
+ return false;
+ }
+ }
+
code_space_ =
new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
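
The CodeRange reservation exists because of instruction encoding: an x64 direct call carries a signed 32-bit displacement, so two code objects can only reach each other with the short form if they lie within roughly 2 GB of each other. Keeping all code inside one reserved range guarantees that. An illustration of the distance check; the constant is the rel32 limit, not a V8 identifier:

    #include <cassert>
    #include <stdint.h>

    // A rel32 call displacement must fit in a signed 32-bit offset.
    static const int64_t kMaxNearCallDistance = 0x7FFFFFFF;  // +/- (2 GB - 1)

    bool CanUseNearCall(uint64_t caller, uint64_t callee) {
      int64_t distance = static_cast<int64_t>(callee - caller);
      return distance >= -kMaxNearCallDistance &&
             distance <= kMaxNearCallDistance;
    }

    int main() {
      const uint64_t kTwoGB = 1ULL << 31;
      const uint64_t base = 1ULL << 32;
      // Anywhere inside a single 2 GB code range is reachable near.
      assert(CanUseNearCall(base, base + kTwoGB - 1));
      // Code placed farther apart would need an indirect (far) call.
      assert(!CanUseNearCall(base, base + 3 * kTwoGB));
      return 0;
    }
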
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 92602c8506..e878efcf2c 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -882,11 +882,14 @@ class Heap : public AllStatic {
kRootListLength
};
+ static Object* NumberToString(Object* number);
+
private:
static int semispace_size_;
static int initial_semispace_size_;
static int young_generation_size_;
static int old_generation_size_;
+ static size_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 9a5352b418..1de20f4e3f 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -85,19 +85,25 @@ void RelocInfo::set_target_address(Address target) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return *reinterpret_cast<Object**>(pc_);
+ return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(pc_);
}
Object** RelocInfo::target_object_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
+ return &Memory::Object_at(pc_);
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- *reinterpret_cast<Object**>(pc_) = target;
+ Memory::Object_at(pc_) = target;
}
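
The Memory::Object_at rewrite replaces scattered reinterpret_casts with one accessor that returns a reference, so the same helper can appear on either side of an assignment and the address arithmetic lives in one place. A self-contained model of the idiom, with int* standing in for Object*:

    #include <cassert>
    #include <stdint.h>

    typedef uint8_t* Address;

    class Memory {
     public:
      // Returning a reference makes one accessor serve loads and stores.
      static int*& IntPtr_at(Address addr) {
        return *reinterpret_cast<int**>(addr);
      }
    };

    int main() {
      int value = 42;
      int* slot = 0;
      Address addr = reinterpret_cast<Address>(&slot);
      Memory::IntPtr_at(addr) = &value;        // store through the slot
      assert(*Memory::IntPtr_at(addr) == 42);  // load through the slot
      return 0;
    }
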
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index e7712df30c..ad44026caf 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -42,10 +42,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
__ mov(Operand::StaticVariable(passed), edi);
// The actual argument count has already been loaded into register
- // eax, but JumpToBuiltin expects eax to contain the number of
+ // eax, but JumpToRuntime expects eax to contain the number of
// arguments including the receiver.
__ inc(eax);
- __ JumpToBuiltin(ExternalReference(id));
+ __ JumpToRuntime(ExternalReference(id));
}
@@ -129,12 +129,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
- __ AllocateObjectInNewSpace(edi,
- ebx,
- edi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@@ -189,14 +184,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
- __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// ebx: JSObject
@@ -674,18 +669,18 @@ static const int kPreallocatedArrayElements = 4;
// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter holes is larger than zero an elements backing
-// store is allocated with this size and filled with the hole values. Otherwise
-// the elements backing store is set to the empty FixedArray.
+// register. If the parameter initial_capacity is larger than zero an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
static void AllocateEmptyJSArray(MacroAssembler* masm,
Register array_function,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
- int holes,
+ int initial_capacity,
Label* gc_required) {
- ASSERT(holes >= 0);
+ ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ mov(scratch1, FieldOperand(array_function,
@@ -694,15 +689,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
int size = JSArray::kSize;
- if (holes > 0) {
- size += FixedArray::SizeFor(holes);
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
}
- __ AllocateObjectInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
@@ -717,7 +712,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// If no storage is requested for the elements array just set the empty
// fixed array.
- if (holes == 0) {
+ if (initial_capacity == 0) {
__ mov(FieldOperand(result, JSArray::kElementsOffset),
Factory::empty_fixed_array());
return;
@@ -737,17 +732,18 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch2: start of next object
__ mov(FieldOperand(scratch1, JSObject::kMapOffset),
Factory::fixed_array_map());
- __ mov(FieldOperand(scratch1, Array::kLengthOffset), Immediate(holes));
+ __ mov(FieldOperand(scratch1, Array::kLengthOffset),
+ Immediate(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- if (holes <= kLoopUnfoldLimit) {
+ if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
__ mov(scratch3, Factory::the_hole_value());
- for (int i = 0; i < holes; i++) {
+ for (int i = 0; i < initial_capacity; i++) {
__ mov(FieldOperand(scratch1,
FixedArray::kHeaderSize + i * kPointerSize),
scratch3);
@@ -797,26 +793,26 @@ static void AllocateJSArray(MacroAssembler* masm,
// If an empty array is requested allocate a small elements array anyway. This
// keeps the code below free of special casing for the empty array.
int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
- __ AllocateObjectInNewSpace(size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
__ jmp(&allocated);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
__ bind(&not_empty);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ AllocateObjectInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size, // array_size is a smi.
- array_size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
+ __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+ times_half_pointer_size, // array_size is a smi.
+ array_size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
diff --git a/deps/v8/src/ia32/cfg-ia32.cc b/deps/v8/src/ia32/cfg-ia32.cc
deleted file mode 100644
index 58985a5a07..0000000000
--- a/deps/v8/src/ia32/cfg-ia32.cc
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-ia32.h"
-#include "macro-assembler-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Label deferred_enter, deferred_exit;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ push(ebp);
- __ mov(ebp, esp);
- __ push(esi);
- __ push(edi);
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ Set(eax, Immediate(Factory::undefined_value()));
- for (int i = 0; i < count; i++) {
- __ push(eax);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(below, &deferred_enter);
- __ bind(&deferred_exit);
- }
- }
- successor_->Compile(masm);
- if (FLAG_check_stack) {
- Comment cmnt(masm, "[ Deferred Stack Check");
- __ bind(&deferred_enter);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ jmp(&deferred_exit);
- }
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ RecordJSReturn();
- __ mov(esp, ebp);
- __ pop(ebp);
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ ret((count + 1) * kPointerSize);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
- // A test eax instruction after the call indicates to the IC code that it
- // was inlined. Ensure there is not one here.
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ pop(ebx); // Discard key.
- } else {
- key()->Get(masm, ecx);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- __ pop(ebx); // Discard receiver.
- location()->Set(masm, eax);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Push both operands and call the specialized stub.
- if (!left()->is_on_stack()) left()->Push(masm);
- right()->Push(masm);
- GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
- __ CallStub(&stub);
- location()->Set(masm, eax);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value_->Get(masm, eax);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, Immediate(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ push(Immediate(handle_));
-}
-
-
-static Operand ToOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return Operand(ebp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return Operand(ebp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return Operand(eax);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ mov(ToOperand(loc), Immediate(handle_));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, ToOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ mov(ToOperand(this), reg);
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ push(ToOperand(this));
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // We dispatch to the value because in some cases (temp or constant)
- // we can use a single instruction.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- // The accumulator is not live across a MoveInstr.
- __ mov(eax, ToOperand(this));
- __ mov(ToOperand(loc), eax);
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(eax)) __ mov(reg, eax);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(eax)) __ mov(eax, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(eax);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, eax);
- break;
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ mov(ToOperand(loc), eax);
- break;
- case STACK:
- __ pop(ToOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index d9f6672f90..0e314b9fcd 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -2305,7 +2305,6 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
@@ -2544,10 +2543,12 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
DeleteFrame();
+#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
}
@@ -4333,7 +4334,6 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
@@ -4415,8 +4415,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
- CodeForStatementPosition(node);
-
Load(node->exception());
Result result = frame_->CallRuntime(Runtime::kThrow, 1);
frame_->Push(&result);
@@ -4433,12 +4431,10 @@ void CodeGenerator::VisitProperty(Property* node) {
void CodeGenerator::VisitCall(Call* node) {
Comment cmnt(masm_, "[ Call");
+ Expression* function = node->expression();
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
-
// Check if the function is a variable or a property.
- Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
Property* property = function->AsProperty();
@@ -4451,7 +4447,63 @@ void CodeGenerator::VisitCall(Call* node) {
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+
+ // Resolve the call.
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+ __ mov(result.reg(),
+ FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
+ frame_->SetElementAt(arg_count, &result);
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
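
This hunk folds the former VisitCallEval into VisitCall: whether 'eval' names the real global eval can only be decided at runtime, so the generated code calls %ResolvePossiblyDirectEval, receives the actual function and receiver back as a pair, and patches the two stack slots before making the call. A model of that fix-up with a plain vector standing in for the virtual frame; the resolution rule shown is invented for illustration:

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    typedef std::string Value;

    // Stand-in for %ResolvePossiblyDirectEval: yields (function, receiver).
    std::pair<Value, Value> Resolve(const Value& callee) {
      if (callee == "eval") {
        return std::make_pair(Value("real-eval"), Value("global"));
      }
      return std::make_pair(callee, Value("undefined"));
    }

    int main() {
      // Frame before resolution: [function, receiver placeholder, arg0].
      std::vector<Value> frame;
      frame.push_back("eval");       // possibly-eval callee
      frame.push_back("undefined");  // receiver slot, patched below
      frame.push_back("arg0");

      std::pair<Value, Value> resolved = Resolve(frame[0]);
      frame[0] = resolved.first;   // overwrite the function slot
      frame[1] = resolved.second;  // overwrite the receiver slot

      assert(frame[0] == "real-eval");
      assert(frame[1] == "global");
      return 0;
    }
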
@@ -4591,7 +4643,6 @@ void CodeGenerator::VisitCall(Call* node) {
void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -4621,66 +4672,6 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
-
- // Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ mov(result.reg(),
- FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
- frame_->SetElementAt(arg_count, &result);
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-}
-
-
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -6992,12 +6983,12 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Register scratch2,
Register result) {
// Allocate heap number in new space.
- __ AllocateObjectInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
// Set the map.
__ mov(FieldOperand(result, HeapObject::kMapOffset),
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index afdbffe58e..142a5a1048 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -553,7 +553,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index c05a5cab61..f7369a8b7f 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -421,21 +421,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ sar(ebx, kSmiTagSize); // Untag the index.
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
+ __ mov(edx, eax); // Save the value.
__ sar(eax, kSmiTagSize); // Untag the value.
{ // Clamp the value to [0..255].
- Label done, check_255;
- __ cmp(eax, 0);
- __ j(greater_equal, &check_255);
- __ mov(eax, Immediate(0));
- __ jmp(&done);
- __ bind(&check_255);
- __ cmp(eax, 255);
- __ j(less_equal, &done);
+ Label done, is_negative;
+ __ test(eax, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ j(negative, &is_negative);
__ mov(eax, Immediate(255));
+ __ jmp(&done);
+ __ bind(&is_negative);
+ __ xor_(eax, Operand(eax)); // Clear eax.
__ bind(&done);
}
__ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ __ mov(eax, edx); // Return the original value.
__ ret(0);
// Extra capacity case: Check if there is extra capacity to
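
The rewritten clamp shortens the common path: a value already in [0..255] has no bits set above bit 7, so a single TEST against 0xFFFFFF00 detects it, and only out-of-range values take the extra branch that saturates by sign. The same logic in C++ for reference:

    #include <cassert>
    #include <stdint.h>

    int32_t ClampTo255(int32_t value) {
      if ((value & 0xFFFFFF00) == 0) return value;  // fast path: 0..255
      return value < 0 ? 0 : 255;                   // saturate by sign
    }

    int main() {
      assert(ClampTo255(-7) == 0);
      assert(ClampTo255(90) == 90);
      assert(ClampTo255(300) == 255);
      return 0;
    }
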
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index a8d7e445c3..a3b2149727 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -664,12 +664,12 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -692,14 +692,14 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -722,12 +722,12 @@ void MacroAssembler::AllocateObjectInNewSpace(int header_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -903,11 +903,11 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
// should remove this need and make the runtime routine entry code
// smarter.
Set(eax, Immediate(num_arguments));
- JumpToBuiltin(ext);
+ JumpToRuntime(ext);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
CEntryStub ces(1);
@@ -1049,7 +1049,6 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
unresolved_.Add(entry);
@@ -1068,7 +1067,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
unresolved_.Add(entry);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 60ede8a4ef..ed72c96b9a 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -175,30 +175,30 @@ class MacroAssembler: public Assembler {
// and result_end have not yet been tagged as heap objects. If
 // result_contains_top_on_entry is true, the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
- // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+ // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
@@ -254,14 +254,14 @@ class MacroAssembler: public Assembler {
void CallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of arguments.
void TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& ext);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& ext);
// ---------------------------------------------------------------------------
@@ -321,8 +321,16 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in the register edi.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index 3bed2681f8..8fa4287f76 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -28,21 +28,22 @@
#ifndef V8_IA32_SIMULATOR_IA32_H_
#define V8_IA32_SIMULATOR_IA32_H_
+#include "allocation.h"
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ia32 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
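
The deleted macro derived a stack limit from the address of 'this', which its own comment flagged as unsafe; the replacement asks each platform to map the C stack limit to a JS stack limit. On ia32 that mapping is the identity, because generated code runs directly on the C stack. A sketch contrasting it with what a simulated target would plug in (the simulated class here is hypothetical):

    #include <cassert>
    #include <stdint.h>

    // ia32: generated code shares the C stack, so the limits coincide.
    class SimulatorStackIA32 {
     public:
      static uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { return c_limit; }
    };

    // A simulated target (hypothetical) would return the simulator's own
    // stack limit instead of the host C stack limit.
    class SimulatorStackSimulated {
     public:
      static uintptr_t JsLimitFromCLimit(uintptr_t /* c_limit */) {
        return kSimulatorStackLimit;
      }
      static const uintptr_t kSimulatorStackLimit = 0x1000;
    };

    int main() {
      assert(SimulatorStackIA32::JsLimitFromCLimit(0xBEEF0000u) == 0xBEEF0000u);
      assert(SimulatorStackSimulated::JsLimitFromCLimit(0xBEEF0000u) == 0x1000);
      return 0;
    }
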
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 58a3ce5209..ca4e142101 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1783,12 +1783,12 @@ Object* ConstructStubCompiler::CompileConstructStub(
// ebx: initial map
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ shl(ecx, kPointerSizeLog2);
- __ AllocateObjectInNewSpace(ecx,
- edx,
- ecx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(ecx,
+ edx,
+ ecx,
+ no_reg,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// ebx: initial map
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index dd7ea1c9ca..25211d9a7b 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -51,6 +51,13 @@ class List {
INLINE(explicit List(int capacity)) { Initialize(capacity); }
INLINE(~List()) { DeleteData(data_); }
+ // Deallocates memory used by the list and leaves the list in a consistent
+ // empty state.
+ void Free() {
+ DeleteData(data_);
+ Initialize(0);
+ }
+
INLINE(void* operator new(size_t size)) { return P::New(size); }
INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
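
Free() differs from both a length reset and the destructor: the backing store is released immediately, yet the list remains valid and can be refilled. A compilable stand-in for the container (simplified; V8's List is parameterized over an allocation policy P):

    #include <cassert>

    template <typename T>
    class List {
     public:
      List() { Initialize(0); }
      ~List() { DeleteData(data_); }

      // Deallocates memory used by the list and leaves the list in a
      // consistent empty state, ready for further Add() calls.
      void Free() {
        DeleteData(data_);
        Initialize(0);
      }

      void Add(const T& element) {
        if (length_ == capacity_) Grow();
        data_[length_++] = element;
      }
      int length() const { return length_; }

     private:
      void Initialize(int capacity) {
        data_ = (capacity > 0) ? new T[capacity] : 0;
        capacity_ = capacity;
        length_ = 0;
      }
      void DeleteData(T* data) { delete[] data; }
      void Grow() {
        int new_capacity = (capacity_ == 0) ? 4 : capacity_ * 2;
        T* new_data = new T[new_capacity];
        for (int i = 0; i < length_; i++) new_data[i] = data_[i];
        DeleteData(data_);
        data_ = new_data;
        capacity_ = new_capacity;
      }

      T* data_;
      int capacity_;
      int length_;
    };

    int main() {
      List<int> list;
      list.Add(1);
      list.Add(2);
      list.Free();                 // memory released, list still usable
      assert(list.length() == 0);
      list.Add(3);
      assert(list.length() == 1);
      return 0;
    }
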
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index dcb4b499e6..f327a0a04a 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -163,7 +163,7 @@ void Log::OpenMemoryBuffer() {
void Log::Close() {
if (Write == WriteToFile) {
- fclose(output_handle_);
+ if (output_handle_ != NULL) fclose(output_handle_);
output_handle_ = NULL;
} else if (Write == WriteToMemory) {
delete output_buffer_;
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 5631decba9..63a6d6eb78 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -50,7 +50,7 @@ enum HandlerType {
};
-// Flags used for the AllocateObjectInNewSpace functions.
+// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index cbd47a8763..a20245c385 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -282,8 +282,6 @@ class MarkingVisitor : public ObjectVisitor {
rinfo->IsCallInstruction());
HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
MarkCompactCollector::MarkObject(code);
- // When compacting we convert the call to a real object pointer.
- if (IsCompacting()) rinfo->set_call_object(code);
}
private:
@@ -1383,6 +1381,14 @@ class UpdatingVisitor: public ObjectVisitor {
reinterpret_cast<Code*>(target)->instruction_start());
}
+ void VisitDebugTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ VisitPointer(&target);
+ rinfo->set_call_address(
+ reinterpret_cast<Code*>(target)->instruction_start());
+ }
+
private:
void UpdatePointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
diff --git a/deps/v8/src/memory.h b/deps/v8/src/memory.h
index c64699ee3b..503492a4b5 100644
--- a/deps/v8/src/memory.h
+++ b/deps/v8/src/memory.h
@@ -63,6 +63,10 @@ class Memory {
static Object*& Object_at(Address addr) {
return *reinterpret_cast<Object**>(addr);
}
+
+ static Handle<Object>& Object_Handle_at(Address addr) {
+ return *reinterpret_cast<Handle<Object>*>(addr);
+ }
};
} } // namespace v8::internal
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 6513067135..27207928ca 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -32,6 +32,11 @@
var kVowelSounds = 0;
var kCapitalVowelSounds = 0;
+// If this object gets passed to an error constructor the error will
+// get an accessor for .message that constructs a descriptive error
+// message on access.
+var kAddMessageAccessorsMarker = { };
+
function GetInstanceName(cons) {
if (cons.length == 0) {
@@ -565,11 +570,6 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-// If this object gets passed to an error constructor the error will
-// get an accessor for .message that constructs a descriptive error
-// message on access.
-var kAddMessageAccessorsMarker = { };
-
// Defines accessors for a property that is calculated the first time
// the property is read.
function DefineOneShotAccessor(obj, name, fun) {
@@ -781,14 +781,15 @@ function FormatStackTrace(error, frames) {
}
for (var i = 0; i < frames.length; i++) {
var frame = frames[i];
+ var line;
try {
- var line = FormatSourcePosition(frame);
+ line = FormatSourcePosition(frame);
} catch (e) {
try {
- var line = "<error: " + e + ">";
+ line = "<error: " + e + ">";
} catch (ee) {
// Any code that reaches this point is seriously nasty!
- var line = "<error>";
+ line = "<error>";
}
}
lines.push(" at " + line);
diff --git a/deps/v8/src/mirror-delay.js b/deps/v8/src/mirror-delay.js
index ee3dd647a6..c4ab7b8ee8 100644
--- a/deps/v8/src/mirror-delay.js
+++ b/deps/v8/src/mirror-delay.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// jsminify this file, js2c: jsmin
-
// Touch the RegExp and Date functions to make sure that date-delay.js and
// regexp-delay.js has been loaded. This is required as the mirrors use
// functions within these files through the builtins object.
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index ea2c202c9e..834589a019 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
@@ -158,14 +159,12 @@ Object* Object::GetPropertyWithCallback(Object* receiver,
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
HandleScope scope;
- Handle<JSObject> self(JSObject::cast(receiver));
- Handle<JSObject> holder_handle(JSObject::cast(holder));
+ JSObject* self = JSObject::cast(receiver);
+ JSObject* holder_handle = JSObject::cast(holder);
Handle<String> key(name);
- Handle<Object> fun_data(data->data());
- LOG(ApiNamedPropertyAccess("load", *self, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(self),
- v8::Utils::ToLocal(fun_data),
- v8::Utils::ToLocal(holder_handle));
+ LOG(ApiNamedPropertyAccess("load", self, name));
+ CustomArguments args(data->data(), self, holder_handle);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
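
All of the interceptor and callback hunks in objects.cc share one refactor: the (data, self, holder) triple previously held in three separate handles is packed into a single CustomArguments block on the C++ stack, and the v8::AccessorInfo is built from a pointer just past that block. A self-contained model of the layout trick; the slot order here is illustrative, not necessarily V8's:

    #include <cassert>

    typedef void* Object;

    class CustomArguments {
     public:
      CustomArguments(Object data, Object self, Object holder) {
        values_[0] = self;
        values_[1] = holder;
        values_[2] = data;
      }
      // One past the last slot; AccessorInfo indexes backwards from here.
      Object* end() { return values_ + 3; }

     private:
      Object values_[3];  // the triple packed into one stack block
    };

    class AccessorInfo {
     public:
      explicit AccessorInfo(Object* args) : args_(args) {}
      Object This() const { return args_[-3]; }
      Object Holder() const { return args_[-2]; }
      Object Data() const { return args_[-1]; }

     private:
      Object* args_;
    };

    int main() {
      int data, self, holder;
      CustomArguments args(&data, &self, &holder);
      AccessorInfo info(args.end());
      assert(info.This() == &self);
      assert(info.Holder() == &holder);
      assert(info.Data() == &data);
      return 0;
    }
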
@@ -1538,11 +1537,9 @@ Object* JSObject::SetPropertyWithInterceptor(String* name,
Handle<Object> value_handle(value);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- Handle<Object> data_handle(interceptor->data());
LOG(ApiNamedPropertyAccess("interceptor-named-set", this, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::NamedPropertySetter setter =
v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
v8::Handle<v8::Value> result;
@@ -1605,14 +1602,10 @@ Object* JSObject::SetPropertyWithCallback(Object* structure,
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
- Handle<JSObject> self(this);
- Handle<JSObject> holder_handle(JSObject::cast(holder));
Handle<String> key(name);
- Handle<Object> fun_data(data->data());
LOG(ApiNamedPropertyAccess("store", this, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(self),
- v8::Utils::ToLocal(fun_data),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(data->data(), this, JSObject::cast(holder));
+ v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
VMState state(EXTERNAL);
@@ -2036,10 +2029,8 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
- Handle<Object> data_handle(interceptor->data());
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQuery query =
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
@@ -2307,11 +2298,9 @@ Object* JSObject::DeletePropertyWithInterceptor(String* name) {
if (!interceptor->deleter()->IsUndefined()) {
v8::NamedPropertyDeleter deleter =
v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
- Handle<Object> data_handle(interceptor->data());
LOG(ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
@@ -2370,11 +2359,9 @@ Object* JSObject::DeleteElementWithInterceptor(uint32_t index) {
v8::IndexedPropertyDeleter deleter =
v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
Handle<JSObject> this_handle(this);
- Handle<Object> data_handle(interceptor->data());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
@@ -3971,35 +3958,75 @@ const unibrow::byte* String::ReadBlock(String* input,
}
-FlatStringReader* FlatStringReader::top_ = NULL;
+Relocatable* Relocatable::top_ = NULL;
+
+
+void Relocatable::PostGarbageCollectionProcessing() {
+ Relocatable* current = top_;
+ while (current != NULL) {
+ current->PostGarbageCollection();
+ current = current->prev_;
+ }
+}
+
+
+// Reserve space for statics needing saving and restoring.
+int Relocatable::ArchiveSpacePerThread() {
+ return sizeof(top_);
+}
+
+
+// Archive statics that are thread local.
+char* Relocatable::ArchiveState(char* to) {
+ *reinterpret_cast<Relocatable**>(to) = top_;
+ top_ = NULL;
+ return to + ArchiveSpacePerThread();
+}
+
+
+// Restore statics that are thread local.
+char* Relocatable::RestoreState(char* from) {
+ top_ = *reinterpret_cast<Relocatable**>(from);
+ return from + ArchiveSpacePerThread();
+}
+
+
+char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
+ Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
+ Iterate(v, top);
+ return thread_storage + ArchiveSpacePerThread();
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v) {
+ Iterate(v, top_);
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
+ Relocatable* current = top;
+ while (current != NULL) {
+ current->IterateInstance(v);
+ current = current->prev_;
+ }
+}
FlatStringReader::FlatStringReader(Handle<String> str)
: str_(str.location()),
- length_(str->length()),
- prev_(top_) {
- top_ = this;
- RefreshState();
+ length_(str->length()) {
+ PostGarbageCollection();
}
FlatStringReader::FlatStringReader(Vector<const char> input)
- : str_(NULL),
+ : str_(0),
is_ascii_(true),
length_(input.length()),
- start_(input.start()),
- prev_(top_) {
- top_ = this;
-}
+ start_(input.start()) { }
-FlatStringReader::~FlatStringReader() {
- ASSERT_EQ(top_, this);
- top_ = prev_;
-}
-
-
-void FlatStringReader::RefreshState() {
+void FlatStringReader::PostGarbageCollection() {
if (str_ == NULL) return;
Handle<String> str(str_);
ASSERT(str->IsFlat());
@@ -4012,15 +4039,6 @@ void FlatStringReader::RefreshState() {
}
-void FlatStringReader::PostGarbageCollectionProcessing() {
- FlatStringReader* current = top_;
- while (current != NULL) {
- current->RefreshState();
- current = current->prev_;
- }
-}
-
-
void StringInputBuffer::Seek(unsigned pos) {
Reset(pos, input_);
}
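
The Relocatable machinery generalizes what FlatStringReader used to do privately: every live instance links itself into a per-thread chain on construction and unlinks on destruction, and after a collection the GC walks the chain so each object can refresh any raw pointers it cached. A standalone model of the chain and the post-GC walk:

    #include <cassert>

    class Relocatable {
     public:
      Relocatable() : prev_(top_) { top_ = this; }
      virtual ~Relocatable() {
        assert(top_ == this);  // instances must nest like stack frames
        top_ = prev_;
      }
      virtual void PostGarbageCollection() {}

      static void PostGarbageCollectionProcessing() {
        for (Relocatable* cur = top_; cur != 0; cur = cur->prev_)
          cur->PostGarbageCollection();
      }

     private:
      static Relocatable* top_;
      Relocatable* prev_;
    };

    Relocatable* Relocatable::top_ = 0;

    // A reader caching a raw pointer must recompute it after objects move.
    class CachingReader : public Relocatable {
     public:
      CachingReader() : refreshed_(false) {}
      virtual void PostGarbageCollection() { refreshed_ = true; }
      bool refreshed_;
    };

    int main() {
      CachingReader reader;
      Relocatable::PostGarbageCollectionProcessing();  // as the GC would
      assert(reader.refreshed_);
      return 0;
    }
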
@@ -5033,15 +5051,16 @@ void Code::CopyFrom(const CodeDesc& desc) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::kApplyMask;
+ Assembler* origin = desc.origin; // Needed to find target_object on X64.
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+ Handle<Object> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(*p);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
// pointers to the first instruction in the code object
- Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+ Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
it.rinfo()->set_target_address(code->instruction_start());
} else {
@@ -5429,10 +5448,8 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
- Handle<Object> data_handle(interceptor->data());
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQuery query =
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
@@ -5564,11 +5581,9 @@ Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
if (!interceptor->setter()->IsUndefined()) {
v8::IndexedPropertySetter setter =
v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
- Handle<Object> data_handle(interceptor->data());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
@@ -5836,13 +5851,11 @@ Object* JSObject::GetElementWithInterceptor(JSObject* receiver,
Handle<JSObject> holder_handle(this);
if (!interceptor->getter()->IsUndefined()) {
- Handle<Object> data_handle(interceptor->data());
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
@@ -6074,15 +6087,13 @@ Object* JSObject::GetPropertyWithInterceptor(
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
- Handle<Object> data_handle(interceptor->data());
if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
LOG(ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 5de9afabea..e9430f5b34 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -211,7 +211,7 @@ enum PropertyNormalizationMode {
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST(V) \
+#define INSTANCE_TYPE_LIST_ALL(V) \
V(SHORT_SYMBOL_TYPE) \
V(MEDIUM_SYMBOL_TYPE) \
V(LONG_SYMBOL_TYPE) \
@@ -282,8 +282,6 @@ enum PropertyNormalizationMode {
V(OBJECT_TEMPLATE_INFO_TYPE) \
V(SIGNATURE_INFO_TYPE) \
V(TYPE_SWITCH_INFO_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(BREAK_POINT_INFO_TYPE) \
V(SCRIPT_TYPE) \
\
V(JS_VALUE_TYPE) \
@@ -297,6 +295,17 @@ enum PropertyNormalizationMode {
\
V(JS_FUNCTION_TYPE) \
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
+ V(DEBUG_INFO_TYPE) \
+ V(BREAK_POINT_INFO_TYPE)
+#else
+#define INSTANCE_TYPE_LIST_DEBUGGER(V)
+#endif
+
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_ALL(V) \
+ INSTANCE_TYPE_LIST_DEBUGGER(V)
// Since string types are not consecutive, this macro is used to
@@ -673,8 +682,10 @@ enum InstanceType {
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
+#ifdef ENABLE_DEBUGGER_SUPPORT
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
+#endif
SCRIPT_TYPE,
JS_VALUE_TYPE,
@@ -751,14 +762,17 @@ class Object BASE_EMBEDDED {
inline bool IsHeapNumber();
inline bool IsString();
inline bool IsSymbol();
+#ifdef DEBUG
+ // See objects-inl.h for more details
inline bool IsSeqString();
inline bool IsSlicedString();
inline bool IsExternalString();
- inline bool IsConsString();
inline bool IsExternalTwoByteString();
inline bool IsExternalAsciiString();
inline bool IsSeqTwoByteString();
inline bool IsSeqAsciiString();
+#endif // DEBUG
+ inline bool IsConsString();
inline bool IsNumber();
inline bool IsByteArray();
@@ -4205,25 +4219,47 @@ class ExternalTwoByteString: public ExternalString {
};
+// Utility superclass for stack-allocated objects that must be updated
+// on GC. It provides two ways for the GC to update instances: either by
+// iterating over them or by notifying them after GC.
+class Relocatable BASE_EMBEDDED {
+ public:
+ inline Relocatable() : prev_(top_) { top_ = this; }
+ virtual ~Relocatable() {
+ ASSERT_EQ(top_, this);
+ top_ = prev_;
+ }
+ virtual void IterateInstance(ObjectVisitor* v) { }
+ virtual void PostGarbageCollection() { }
+
+ static void PostGarbageCollectionProcessing();
+ static int ArchiveSpacePerThread();
+ static char* ArchiveState(char* to);
+ static char* RestoreState(char* from);
+ static void Iterate(ObjectVisitor* v);
+ static void Iterate(ObjectVisitor* v, Relocatable* top);
+ static char* Iterate(ObjectVisitor* v, char* t);
+ private:
+ static Relocatable* top_;
+ Relocatable* prev_;
+};
+
+
// A flat string reader provides random access to the contents of a
// string independent of the character width of the string. The handle
// must be valid as long as the reader is being used.
-class FlatStringReader BASE_EMBEDDED {
+class FlatStringReader : public Relocatable {
public:
explicit FlatStringReader(Handle<String> str);
explicit FlatStringReader(Vector<const char> input);
- ~FlatStringReader();
- void RefreshState();
+ void PostGarbageCollection();
inline uc32 Get(int index);
int length() { return length_; }
- static void PostGarbageCollectionProcessing();
private:
String** str_;
bool is_ascii_;
int length_;
const void* start_;
- FlatStringReader* prev_;
- static FlatStringReader* top_;
};
@@ -4389,6 +4425,9 @@ class JSArray: public JSObject {
void JSArrayVerify();
#endif
+ // Number of element slots to pre-allocate for an empty array.
+ static const int kPreallocatedArrayElements = 4;
+
// Layout description.
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 0abb9ed774..3b246878e6 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -798,12 +798,6 @@ class ParserFactory BASE_EMBEDDED {
return Call::sentinel();
}
- virtual Expression* NewCallEval(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- return CallEval::sentinel();
- }
-
virtual Statement* EmptyStatement() {
return NULL;
}
@@ -854,12 +848,6 @@ class AstBuildingParserFactory : public ParserFactory {
return new Call(expression, arguments, pos);
}
- virtual Expression* NewCallEval(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- return new CallEval(expression, arguments, pos);
- }
-
virtual Statement* EmptyStatement();
};
@@ -1196,7 +1184,6 @@ Parser::Parser(Handle<Script> script,
bool Parser::PreParseProgram(Handle<String> source,
unibrow::CharacterStream* stream) {
HistogramTimerScope timer(&Counters::pre_parse);
- StackGuard guard;
AssertNoZoneAllocation assert_no_zone_allocation;
AssertNoAllocation assert_no_allocation;
NoHandleAllocation no_handle_allocation;
@@ -1937,31 +1924,20 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Statement* Parser::ParseFunctionDeclaration(bool* ok) {
- // Parse a function literal. We may or may not have a function name.
- // If we have a name we use it as the variable name for the function
- // (a function declaration) and not as the function name of a function
- // expression.
-
+ // FunctionDeclaration ::
+ // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
-
- Handle<String> name;
- if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
- FunctionLiteral* fun = ParseFunctionLiteral(name, function_token_position,
- DECLARATION, CHECK_OK);
-
- if (name.is_null()) {
- // We don't have a name - it is always an anonymous function
- // expression.
- return NEW(ExpressionStatement(fun));
- } else {
- // We have a name so even if we're not at the top-level of the
- // global or a function scope, we treat is as such and introduce
- // the function with it's initial value upon entering the
- // corresponding scope.
- Declare(name, Variable::VAR, fun, true, CHECK_OK);
- return factory()->EmptyStatement();
- }
+ Handle<String> name = ParseIdentifier(CHECK_OK);
+ FunctionLiteral* fun = ParseFunctionLiteral(name,
+ function_token_position,
+ DECLARATION,
+ CHECK_OK);
+ // Even if we're not at the top-level of the global or a function
+ // scope, we treat it as such and introduce the function with its
+ // initial value upon entering the corresponding scope.
+ Declare(name, Variable::VAR, fun, true, CHECK_OK);
+ return factory()->EmptyStatement();
}
@@ -2672,25 +2648,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
} else {
Expression* expression = ParseExpression(false, CHECK_OK);
if (peek() == Token::IN) {
- // Report syntax error if the expression is an invalid
- // left-hand side expression.
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report
+ // the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- if (expression != NULL && expression->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
- expression = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_for_in",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
+ expression = NewThrowReferenceError(type);
}
ForInStatement* loop = NEW(ForInStatement(labels));
Target target(this, loop);
@@ -2767,30 +2731,15 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
return expression;
}
+ // Signal a reference error if the expression is an invalid left-hand
+ // side expression. We could report this as a syntax error here but
+ // for compatibility with JSC we choose to report the error at
+ // runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- if (expression != NULL && expression->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
- expression = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- //
- // NOTE: KJS sometimes delay the error reporting to runtime. If
- // we want to be completely compatible we should do the same.
- // For example: "(x++) = 42" gives a reference error at runtime
- // with KJS whereas we report a syntax error at compile time.
- ReportMessage("invalid_lhs_in_assignment", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
+ expression = NewThrowReferenceError(type);
}
-
Token::Value op = Next(); // Get assignment operator.
int pos = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
@@ -2963,45 +2912,37 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
op = Next();
- Expression* x = ParseUnaryExpression(CHECK_OK);
+ Expression* expression = ParseUnaryExpression(CHECK_OK);
// Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber()) {
- double x_val = x->AsLiteral()->handle()->Number();
+ if (expression != NULL && expression->AsLiteral() &&
+ expression->AsLiteral()->handle()->IsNumber()) {
+ double value = expression->AsLiteral()->handle()->Number();
switch (op) {
case Token::ADD:
- return x;
+ return expression;
case Token::SUB:
- return NewNumberLiteral(-x_val);
+ return NewNumberLiteral(-value);
case Token::BIT_NOT:
- return NewNumberLiteral(~DoubleToInt32(x_val));
+ return NewNumberLiteral(~DoubleToInt32(value));
default: break;
}
}
- return NEW(UnaryOperation(op, x));
+ return NEW(UnaryOperation(op, expression));
} else if (Token::IsCountOp(op)) {
op = Next();
- Expression* x = ParseUnaryExpression(CHECK_OK);
- if (x == NULL || !x->IsValidLeftHandSide()) {
- if (x != NULL && x->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
- x = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_prefix_op", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Expression* expression = ParseUnaryExpression(CHECK_OK);
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report the
+ // error at runtime.
+ if (expression == NULL || !expression->IsValidLeftHandSide()) {
+ Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
+ expression = NewThrowReferenceError(type);
}
- return NEW(CountOperation(true /* prefix */, op, x));
+ return NEW(CountOperation(true /* prefix */, op, expression));
} else {
return ParsePostfixExpression(ok);
@@ -3013,30 +2954,20 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
- Expression* result = ParseLeftHandSideExpression(CHECK_OK);
+ Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
- if (result == NULL || !result->IsValidLeftHandSide()) {
- if (result != NULL && result->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
- result = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_postfix_op",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report the
+ // error at runtime.
+ if (expression == NULL || !expression->IsValidLeftHandSide()) {
+ Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
+ expression = NewThrowReferenceError(type);
}
Token::Value next = Next();
- result = NEW(CountOperation(false /* postfix */, next, result));
+ expression = NEW(CountOperation(false /* postfix */, next, expression));
}
- return result;
+ return expression;
}
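All of the rewritten sites above (for-in targets, assignments, prefix and postfix count operations) now share one pattern: instead of failing the parse, an invalid assignment target is replaced by an AST node that throws a ReferenceError when evaluated, matching JSC's runtime-error behavior. A simplified sketch of the substitution, using stand-in AST types rather than V8's:

#include <stddef.h>

struct Expression {
  virtual ~Expression() {}
  virtual bool IsValidLeftHandSide() const { return true; }
};

// Evaluating this node throws a ReferenceError at run time.
struct ThrowReferenceError : public Expression {
  explicit ThrowReferenceError(const char* message) : message_(message) {}
  const char* message_;
};

Expression* CheckAssignmentTarget(Expression* target) {
  if (target == NULL || !target->IsValidLeftHandSide()) {
    // Keep parsing; the error surfaces only if this code actually runs.
    return new ThrowReferenceError("invalid left-hand side");
  }
  return target;
}

So an expression like (x++) = 42, which previously died with a compile-time syntax error, now parses and throws only when the assignment executes.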
@@ -3074,8 +3005,6 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// declared in the current scope chain. These calls are marked as
// potentially direct eval calls. Whether they are actually direct calls
// to eval is determined at run time.
-
- bool is_potentially_direct_eval = false;
if (!is_pre_parsing_) {
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
@@ -3083,16 +3012,10 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
Variable* var = top_scope_->Lookup(name);
if (var == NULL) {
top_scope_->RecordEvalCall();
- is_potentially_direct_eval = true;
}
}
}
-
- if (is_potentially_direct_eval) {
- result = factory()->NewCallEval(result, args, pos);
- } else {
- result = factory()->NewCall(result, args, pos);
- }
+ result = factory()->NewCall(result, args, pos);
break;
}
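This simplification relies on scope analysis alone: a call is flagged as potentially-direct eval exactly when the callee is a plain variable named eval that no enclosing declaration shadows, and the separate CallEval node type disappears. A rough sketch of the check with stand-in types (the real lookup walks the scope chain):

#include <set>
#include <string>

struct Scope {
  std::set<std::string> declared;
  bool calls_eval;

  Scope() : calls_eval(false) {}
  bool Declares(const std::string& name) const {
    return declared.count(name) > 0;
  }
  void RecordEvalCall() { calls_eval = true; }
};

void NoteCallee(Scope* scope, const std::string& callee_name) {
  // Whether the call is *actually* direct eval is decided at run time;
  // the parser only marks the scope as possibly containing one.
  if (callee_name == "eval" && !scope->Declares(callee_name)) {
    scope->RecordEvalCall();
  }
}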
@@ -4840,8 +4763,6 @@ bool ParseRegExp(FlatStringReader* input,
bool multiline,
RegExpCompileData* result) {
ASSERT(result != NULL);
- // Make sure we have a stack guard.
- StackGuard guard;
RegExpParser parser(input, &result->error, multiline);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 44d283b36d..73d6eeb651 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -554,14 +554,18 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
-#if defined (__arm__) || defined(__thumb__)
- sample.pc = mcontext.mc_r15;
- sample.sp = mcontext.mc_r13;
- sample.fp = mcontext.mc_r11;
-#else
+#if V8_HOST_ARCH_IA32
sample.pc = mcontext.mc_eip;
sample.sp = mcontext.mc_esp;
sample.fp = mcontext.mc_ebp;
+#elif V8_HOST_ARCH_X64
+ sample.pc = mcontext.mc_rip;
+ sample.sp = mcontext.mc_rsp;
+ sample.fp = mcontext.mc_rbp;
+#elif V8_HOST_ARCH_ARM
+ sample.pc = mcontext.mc_r15;
+ sample.sp = mcontext.mc_r13;
+ sample.fp = mcontext.mc_r11;
#endif
active_sampler_->SampleStack(&sample);
}
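For context on where ProfilerSignalHandler's arguments come from: a SIGPROF-driven sampler is conventionally installed with sigaction and SA_SIGINFO, which is what delivers the ucontext_t that the per-architecture register fields are read from. A POSIX sketch of that setup, not V8's actual installation code:

#include <signal.h>
#include <stddef.h>
#include <sys/time.h>

static void SamplerHandler(int signal, siginfo_t* info, void* context) {
  // Cast context to ucontext_t* and read pc/sp/fp as in the handler above.
  (void) signal; (void) info; (void) context;
}

void InstallSampler(long interval_usec) {
  struct sigaction sa;
  sa.sa_sigaction = SamplerHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO | SA_RESTART;  // deliver siginfo + ucontext

  struct itimerval timer;
  timer.it_interval.tv_sec = 0;
  timer.it_interval.tv_usec = interval_usec;
  timer.it_value = timer.it_interval;

  sigaction(SIGPROF, &sa, NULL);
  setitimer(ITIMER_PROF, &timer, NULL);  // fire SIGPROF periodically
}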
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 596b0fb040..0b236a5a93 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -42,6 +42,7 @@
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
+#include <mach/vm_statistics.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
@@ -123,12 +124,22 @@ size_t OS::AllocateAlignment() {
}
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in memory
+// analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase = mmap(NULL, msize, prot,
+ MAP_PRIVATE | MAP_ANON,
+ kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
return NULL;
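The kMmapFd trick above is Darwin-specific: for anonymous mappings the fd argument is not a real file descriptor, so macOS lets it carry a VM tag built with VM_MAKE_TAG, and vmmap(1) then groups those regions under that tag. A minimal sketch of the same call, assuming a Darwin host:

#include <mach/vm_statistics.h>
#include <stddef.h>
#include <sys/mman.h>

void* AllocateTagged(size_t size) {
  // 255 is simply the tag V8 chose; any value in the user-defined range
  // (VM_MEMORY_APPLICATION_SPECIFIC_1 and up) works.
  int tag_fd = VM_MAKE_TAG(255);
  void* result = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, tag_fd, 0);
  return (result == MAP_FAILED) ? NULL : result;
}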
@@ -280,9 +291,6 @@ int OS::StackWalk(Vector<StackFrame> frames) {
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory(size_t size) {
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 7a8af40fd3..bf66c4b5ca 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -358,11 +358,6 @@ void PrettyPrinter::VisitCall(Call* node) {
}
-void PrettyPrinter::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void PrettyPrinter::VisitCallNew(CallNew* node) {
Print("new (");
Visit(node->expression());
@@ -1040,11 +1035,6 @@ void AstPrinter::VisitCall(Call* node) {
}
-void AstPrinter::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void AstPrinter::VisitCallNew(CallNew* node) {
IndentedScope indent("CALL NEW");
Visit(node->expression());
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc
index 83cb6e4ef4..87a674dba3 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp-stack.cc
@@ -69,6 +69,14 @@ void RegExpStack::Reset() {
}
+void RegExpStack::ThreadLocal::Free() {
+ if (thread_local_.memory_size_ > 0) {
+ DeleteArray(thread_local_.memory_);
+ thread_local_ = ThreadLocal();
+ }
+}
+
+
Address RegExpStack::EnsureCapacity(size_t size) {
if (size > kMaximumStackSize) return NULL;
if (size < kMinimumStackSize) size = kMinimumStackSize;
diff --git a/deps/v8/src/regexp-stack.h b/deps/v8/src/regexp-stack.h
index 99cf33cd32..319ab2894c 100644
--- a/deps/v8/src/regexp-stack.h
+++ b/deps/v8/src/regexp-stack.h
@@ -71,6 +71,7 @@ class RegExpStack {
static size_t ArchiveSpacePerThread() { return sizeof(thread_local_); }
static char* ArchiveStack(char* to);
static char* RestoreStack(char* from);
+ static void FreeThreadResources() { thread_local_.Free(); }
private:
// Artificial limit used when no memory has been allocated.
@@ -92,6 +93,7 @@ class RegExpStack {
Address memory_;
size_t memory_size_;
Address limit_;
+ void Free();
};
// Resets the buffer if it has grown beyond the default/minimum size.
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index d6ea68e694..11fc071f0f 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -383,12 +383,6 @@ void AstOptimizer::VisitCall(Call* node) {
}
-void AstOptimizer::VisitCallEval(CallEval* node) {
- Visit(node->expression());
- OptimizeArguments(node->arguments());
-}
-
-
void AstOptimizer::VisitCallNew(CallNew* node) {
Visit(node->expression());
OptimizeArguments(node->arguments());
@@ -759,12 +753,6 @@ void Processor::VisitCall(Call* node) {
}
-void Processor::VisitCallEval(CallEval* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
void Processor::VisitCallNew(CallNew* node) {
USE(node);
UNREACHABLE();
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 06b61e7bcb..4e1940d81a 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -3020,8 +3020,20 @@ static Object* Runtime_LocalKeys(Arguments args) {
// Some fast paths through GetKeysInFixedArrayFor reuse a cached
// property array and since the result is mutable we have to create
// a fresh clone on each invocation.
- Handle<FixedArray> copy = Factory::NewFixedArray(contents->length());
- contents->CopyTo(0, *copy, 0, contents->length());
+ int length = contents->length();
+ Handle<FixedArray> copy = Factory::NewFixedArray(length);
+ for (int i = 0; i < length; i++) {
+ Object* entry = contents->get(i);
+ if (entry->IsString()) {
+ copy->set(i, entry);
+ } else {
+ ASSERT(entry->IsNumber());
+ HandleScope scope;
+ Handle<Object> entry_handle(entry);
+ Handle<Object> entry_str = Factory::NumberToString(entry_handle);
+ copy->set(i, *entry_str);
+ }
+ }
return *Factory::NewJSArrayWithElements(copy);
}
@@ -3587,27 +3599,7 @@ static Object* Runtime_NumberToString(Arguments args) {
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- Object* cached = Heap::GetNumberStringCache(number);
- if (cached != Heap::undefined_value()) {
- return cached;
- }
-
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str;
- if (number->IsSmi()) {
- int num = Smi::cast(number)->value();
- str = IntToCString(num, buffer);
- } else {
- double num = HeapNumber::cast(number)->value();
- str = DoubleToCString(num, buffer);
- }
- Object* result = Heap::AllocateStringFromAscii(CStrVector(str));
-
- if (!result->IsFailure()) {
- Heap::SetNumberStringCache(number, String::cast(result));
- }
- return result;
+ return Heap::NumberToString(number);
}
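Heap::NumberToString keeps the caching behavior the removed lines had; the underlying idea is a small direct-mapped cache so converting the same number twice reuses one string. A stand-alone sketch of that caching idea only, not V8's implementation (V8 keys the cache differently and stores heap strings):

#include <cstdio>
#include <cstring>
#include <stddef.h>
#include <string>

class NumberStringCache {
 public:
  NumberStringCache() { std::memset(valid_, 0, sizeof(valid_)); }

  const std::string& Get(double value) {
    size_t slot = Hash(value) % kSize;
    if (!valid_[slot] || keys_[slot] != value) {  // miss: convert and fill
      char buffer[32];
      std::sprintf(buffer, "%g", value);
      keys_[slot] = value;
      values_[slot] = buffer;
      valid_[slot] = true;
    }
    return values_[slot];
  }

 private:
  static const size_t kSize = 64;
  static size_t Hash(double v) {
    unsigned long long bits;
    std::memcpy(&bits, &v, sizeof(v));
    return static_cast<size_t>(bits);
  }

  double keys_[kSize];
  std::string values_[kSize];
  bool valid_[kSize];
};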
@@ -7148,7 +7140,7 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
// the function being debugged.
// function(arguments,__source__) {return eval(__source__);}
static const char* source_str =
- "function(arguments,__source__){return eval(__source__);}";
+ "(function(arguments,__source__){return eval(__source__);})";
static const int source_str_length = strlen(source_str);
Handle<String> function_source =
Factory::NewStringFromAscii(Vector<const char>(source_str,
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index b6a9d9434a..94cd02aac8 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -922,7 +922,9 @@ class ReferenceUpdater: public ObjectVisitor {
serializer_(serializer),
reference_encoder_(serializer->reference_encoder_),
offsets_(8),
- addresses_(8) {
+ addresses_(8),
+ offsets_32_bit_(0),
+ data_32_bit_(0) {
}
virtual void VisitPointers(Object** start, Object** end) {
@@ -939,8 +941,12 @@ class ReferenceUpdater: public ObjectVisitor {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Address encoded_target = serializer_->GetSavedAddress(target);
- offsets_.Add(rinfo->target_address_address() - obj_address_);
- addresses_.Add(encoded_target);
+ // All calls and jumps are to code objects that encode into 32 bits.
+ offsets_32_bit_.Add(rinfo->target_address_address() - obj_address_);
+ uint32_t small_target =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(encoded_target));
+ ASSERT(reinterpret_cast<uintptr_t>(encoded_target) == small_target);
+ data_32_bit_.Add(small_target);
}
@@ -965,6 +971,10 @@ class ReferenceUpdater: public ObjectVisitor {
for (int i = 0; i < offsets_.length(); i++) {
memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
}
+ for (int i = 0; i < offsets_32_bit_.length(); i++) {
+ memcpy(start_address + offsets_32_bit_[i], &data_32_bit_[i],
+ sizeof(uint32_t));
+ }
}
private:
@@ -973,6 +983,10 @@ class ReferenceUpdater: public ObjectVisitor {
ExternalReferenceEncoder* reference_encoder_;
List<int> offsets_;
List<Address> addresses_;
+ // Some updates are 32-bit even on a 64-bit platform.
+ // We keep a separate list of them on 64-bit platforms.
+ List<int> offsets_32_bit_;
+ List<uint32_t> data_32_bit_;
};
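The new 32-bit lists exist because encoded code-object addresses are known to fit in 32 bits even on x64, so each call/jump slot needs only four bytes. The narrow-with-check pattern used above, extracted into a stand-alone sketch:

#include <assert.h>
#include <stdint.h>

uint32_t NarrowEncodedAddress(const void* encoded) {
  uintptr_t wide = reinterpret_cast<uintptr_t>(encoded);
  uint32_t narrow = static_cast<uint32_t>(wide);
  assert(wide == narrow);  // the encoding must actually fit in 32 bits
  return narrow;
}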
@@ -1062,7 +1076,7 @@ void Serializer::Serialize() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
- CHECK(HandleScopeImplementer::instance()->Blocks()->is_empty());
+ CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
// We need a counter function during serialization to resolve the
// references to counters in the code on the heap.
@@ -1395,7 +1409,7 @@ void Deserializer::Deserialize() {
// No active threads.
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
- ASSERT(HandleScopeImplementer::instance()->Blocks()->is_empty());
+ ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
reference_decoder_ = new ExternalReferenceDecoder();
// By setting linear allocation only, we forbid the use of free list
// allocation which is not predicted by SimulatedAddress.
@@ -1432,7 +1446,9 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Address encoded_address = reinterpret_cast<Address>(rinfo->target_object());
+ // On all platforms, the encoded code object address is only 32 bits.
+ Address encoded_address = reinterpret_cast<Address>(Memory::uint32_at(
+ reinterpret_cast<Address>(rinfo->target_object_address())));
Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
rinfo->set_target_address(target_object->instruction_start());
}
@@ -1632,8 +1648,7 @@ Object* Deserializer::GetObject() {
obj->IterateBody(type, size, this);
if (type == CODE_TYPE) {
- Code* code = Code::cast(obj);
- LOG(CodeMoveEvent(a, code->address()));
+ LOG(CodeMoveEvent(a, obj->address()));
}
objects_++;
return o;
@@ -1664,7 +1679,6 @@ Object* Deserializer::Resolve(Address encoded) {
// Encoded addresses of HeapObjects always have 'HeapObject' tags.
ASSERT(o->IsHeapObject());
-
switch (GetSpace(encoded)) {
// For Map space and Old space, we cache the known Pages in map_pages,
// old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 998debb30b..43abaa4999 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -145,6 +145,128 @@ Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif
// -----------------------------------------------------------------------------
+// CodeRange
+
+List<CodeRange::FreeBlock> CodeRange::free_list_(0);
+List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
+int CodeRange::current_allocation_block_index_ = 0;
+VirtualMemory* CodeRange::code_range_ = NULL;
+
+
+bool CodeRange::Setup(const size_t requested) {
+ ASSERT(code_range_ == NULL);
+
+ code_range_ = new VirtualMemory(requested);
+ CHECK(code_range_ != NULL);
+ if (!code_range_->IsReserved()) {
+ delete code_range_;
+ code_range_ = NULL;
+ return false;
+ }
+
+ // We are sure that we have mapped a block of requested addresses.
+ ASSERT(code_range_->size() == requested);
+ LOG(NewEvent("CodeRange", code_range_->address(), requested));
+ allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
+ current_allocation_block_index_ = 0;
+ return true;
+}
+
+
+int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
+ const FreeBlock* right) {
+ // The entire point of CodeRange is that the difference between two
+ // addresses in the range can be represented as a signed 32-bit int,
+ // so the cast is semantically correct.
+ return static_cast<int>(left->start - right->start);
+}
+
+
+void CodeRange::GetNextAllocationBlock(size_t requested) {
+ for (current_allocation_block_index_++;
+ current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_++) {
+ if (requested <= allocation_list_[current_allocation_block_index_].size) {
+ return; // Found a large enough allocation block.
+ }
+ }
+
+ // Sort and merge the free blocks on the free list and the allocation list.
+ free_list_.AddAll(allocation_list_);
+ allocation_list_.Clear();
+ free_list_.Sort(&CompareFreeBlockAddress);
+ for (int i = 0; i < free_list_.length();) {
+ FreeBlock merged = free_list_[i];
+ i++;
+ // Add adjacent free blocks to the current merged block.
+ while (i < free_list_.length() &&
+ free_list_[i].start == merged.start + merged.size) {
+ merged.size += free_list_[i].size;
+ i++;
+ }
+ if (merged.size > 0) {
+ allocation_list_.Add(merged);
+ }
+ }
+ free_list_.Clear();
+
+ for (current_allocation_block_index_ = 0;
+ current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_++) {
+ if (requested <= allocation_list_[current_allocation_block_index_].size) {
+ return; // Found a large enough allocation block.
+ }
+ }
+
+ // Code range is full or too fragmented.
+ V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+}
+
+
+void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+ ASSERT(current_allocation_block_index_ < allocation_list_.length());
+ if (requested > allocation_list_[current_allocation_block_index_].size) {
+ // Find an allocation block large enough. This call may invoke
+ // V8::FatalProcessOutOfMemory if it cannot find a large enough block.
+ GetNextAllocationBlock(requested);
+ }
+ // Commit the requested memory at the start of the current allocation block.
+ *allocated = RoundUp(requested, Page::kPageSize);
+ FreeBlock current = allocation_list_[current_allocation_block_index_];
+ if (*allocated >= current.size - Page::kPageSize) {
+ // Don't leave a small free block, useless for a large object or chunk.
+ *allocated = current.size;
+ }
+ ASSERT(*allocated <= current.size);
+ if (!code_range_->Commit(current.start, *allocated, true)) {
+ *allocated = 0;
+ return NULL;
+ }
+ allocation_list_[current_allocation_block_index_].start += *allocated;
+ allocation_list_[current_allocation_block_index_].size -= *allocated;
+ if (*allocated == current.size) {
+ GetNextAllocationBlock(0); // This block is used up, get the next one.
+ }
+ return current.start;
+}
+
+
+void CodeRange::FreeRawMemory(void* address, size_t length) {
+ free_list_.Add(FreeBlock(address, length));
+ code_range_->Uncommit(address, length);
+}
+
+
+void CodeRange::TearDown() {
+ delete code_range_; // Frees all memory in the virtual memory range.
+ code_range_ = NULL;
+ free_list_.Free();
+ allocation_list_.Free();
+}
+
+
+// -----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_ = 0;
@@ -226,8 +348,12 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable) {
if (size_ + static_cast<int>(requested) > capacity_) return NULL;
-
- void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
+ void* mem;
+ if (executable == EXECUTABLE && CodeRange::exists()) {
+ mem = CodeRange::AllocateRawMemory(requested, allocated);
+ } else {
+ mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
+ }
int alloced = *allocated;
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
@@ -236,7 +362,11 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
- OS::Free(mem, length);
+ if (CodeRange::contains(static_cast<Address>(mem))) {
+ CodeRange::FreeRawMemory(mem, length);
+ } else {
+ OS::Free(mem, length);
+ }
Counters::memory_allocated.Decrement(length);
size_ -= length;
ASSERT(size_ >= 0);
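The sort-and-merge step in GetNextAllocationBlock is a classic free-list coalescing pass: sort blocks by start address, then fold every block that begins exactly where the previous one ended into a single larger block. A stand-alone sketch with simplified types:

#include <stdint.h>
#include <algorithm>
#include <vector>

struct FreeBlock {
  uintptr_t start;
  size_t size;
};

static bool ByStartAddress(const FreeBlock& a, const FreeBlock& b) {
  return a.start < b.start;
}

std::vector<FreeBlock> Coalesce(std::vector<FreeBlock> blocks) {
  std::sort(blocks.begin(), blocks.end(), ByStartAddress);
  std::vector<FreeBlock> merged;
  for (size_t i = 0; i < blocks.size();) {
    FreeBlock current = blocks[i++];
    // Absorb every block adjacent to the end of the current run.
    while (i < blocks.size() &&
           blocks[i].start == current.start + current.size) {
      current.size += blocks[i++].size;
    }
    merged.push_back(current);
  }
  return merged;
}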
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 7170318c54..76b88ef7f0 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -315,6 +315,72 @@ class Space : public Malloced {
// ----------------------------------------------------------------------------
+// All heap objects containing executable code (code objects) must be allocated
+// from a 2 GB range of memory, so that they can call each other using 32-bit
+// displacements. This happens automatically on 32-bit platforms, where 32-bit
+// displacements cover the entire 4 GB virtual address space. On 64-bit
+// platforms, we support this using the CodeRange object, which reserves and
+// manages a range of virtual memory.
+class CodeRange : public AllStatic {
+ public:
+ // Reserves a range of virtual memory, but does not commit any of it.
+ // Can only be called once, at heap initialization time.
+ // Returns false on failure.
+ static bool Setup(const size_t requested_size);
+
+ // Frees the range of virtual memory, and frees the data structures used to
+ // manage it.
+ static void TearDown();
+
+ static bool exists() { return code_range_ != NULL; }
+ static bool contains(Address address) {
+ if (code_range_ == NULL) return false;
+ Address start = static_cast<Address>(code_range_->address());
+ return start <= address && address < start + code_range_->size();
+ }
+
+ // Allocates a chunk of memory from the large-object portion of
+ // the code range. On platforms with no separate code range, should
+ // not be called.
+ static void* AllocateRawMemory(const size_t requested, size_t* allocated);
+ static void FreeRawMemory(void* buf, size_t length);
+
+ private:
+ // The reserved range of virtual memory that all code objects are put in.
+ static VirtualMemory* code_range_;
+ // Plain old data class, just a struct plus a constructor.
+ class FreeBlock {
+ public:
+ FreeBlock(Address start_arg, size_t size_arg)
+ : start(start_arg), size(size_arg) {}
+ FreeBlock(void* start_arg, size_t size_arg)
+ : start(static_cast<Address>(start_arg)), size(size_arg) {}
+
+ Address start;
+ size_t size;
+ };
+
+ // Freed blocks of memory are added to the free list. When the allocation
+ // list is exhausted, the free list is sorted and merged to make the new
+ // allocation list.
+ static List<FreeBlock> free_list_;
+ // Memory is allocated from the free blocks on the allocation list.
+ // The block at current_allocation_block_index_ is the current block.
+ static List<FreeBlock> allocation_list_;
+ static int current_allocation_block_index_;
+
+ // Finds a block on the allocation list that contains at least the
+ // requested amount of memory. If none is found, sorts and merges
+ // the existing free memory blocks, and searches again.
+ // If none can be found, terminates V8 with FatalProcessOutOfMemory.
+ static void GetNextAllocationBlock(size_t requested);
+ // Compares the start addresses of two free blocks.
+ static int CompareFreeBlockAddress(const FreeBlock* left,
+ const FreeBlock* right);
+};
+
+
+// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
@@ -380,8 +446,9 @@ class MemoryAllocator : public AllStatic {
// function returns an invalid page pointer (NULL). The caller must check
// whether the returned page is valid (by calling Page::is_valid()). It is
// guaranteed that allocated pages have contiguous addresses. The actual
- // number of allocated page is returned in the output parameter
- // allocated_pages.
+ // number of allocated pages is returned in the output parameter
+ // allocated_pages. If the PagedSpace owner is executable and there is
+ // a code range, the pages are allocated from the code range.
static Page* AllocatePages(int requested_pages, int* allocated_pages,
PagedSpace* owner);
@@ -395,6 +462,9 @@ class MemoryAllocator : public AllStatic {
// Allocates and frees raw memory of certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
// but keep track of allocated bytes as part of heap.
+ // If the flag is EXECUTABLE and a code range exists, the requested
+ // memory is allocated from the code range. If a code range exists
+ // and the freed memory is in it, the code range manages the freed memory.
static void* AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable);
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 263fac56c2..fbdc307935 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -62,7 +62,7 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
- var char_code = %_FastCharCodeAt(this, index);
+ var char_code = %_FastCharCodeAt(this, pos);
if (!%_IsSmi(char_code)) {
var subject = ToString(this);
var index = TO_INTEGER(pos);
@@ -184,6 +184,14 @@ function SubString(string, start, end) {
}
+// This has the same size as the lastMatchInfo array, and can be used for
+// functions that expect that structure to be returned. It is used when the
+// needle is a string rather than a regexp. In this case we can't update
+// lastMatchInfo without erroneously affecting the properties on the global
+// RegExp object.
+var reusableMatchInfo = [2, "", "", -1, -1];
+
+
// ECMA-262, section 15.5.4.11
function StringReplace(search, replace) {
var subject = ToString(this);
@@ -224,14 +232,6 @@ function StringReplace(search, replace) {
}
-// This has the same size as the lastMatchInfo array, and can be used for
-// functions that expect that structure to be returned. It is used when the
-// needle is a string rather than a regexp. In this case we can't update
-// lastMatchArray without erroneously affecting the properties on the global
-// RegExp object.
-var reusableMatchInfo = [2, "", "", -1, -1];
-
-
// Helper function for regular expressions in String.prototype.replace.
function StringReplaceRegExp(subject, regexp, replace) {
replace = ToString(replace);
@@ -370,8 +370,8 @@ function addCaptureString(builder, matchInfo, index) {
// 'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
// should be 'abcd' and not 'dddd' (or anything else).
function StringReplaceRegExpWithFunction(subject, regexp, replace) {
- var lastMatchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(lastMatchInfo)) return subject;
+ var matchInfo = DoRegExpExec(regexp, subject, 0);
+ if (IS_NULL(matchInfo)) return subject;
var result = new ReplaceResultBuilder(subject);
// There's at least one match. If the regexp is global, we have to loop
@@ -382,11 +382,11 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
if (regexp.global) {
var previous = 0;
do {
- result.addSpecialSlice(previous, lastMatchInfo[CAPTURE0]);
- var startOfMatch = lastMatchInfo[CAPTURE0];
- previous = lastMatchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
- // Can't use lastMatchInfo any more from here, since the function could
+ result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
+ var startOfMatch = matchInfo[CAPTURE0];
+ previous = matchInfo[CAPTURE1];
+ result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ // Can't use matchInfo any more from here, since the function could
// overwrite it.
// Continue with the next match.
// Increment previous if we matched an empty string, as per ECMA-262
@@ -401,20 +401,20 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
// Per ECMA-262 15.10.6.2, if the previous index is greater than the
// string length, there is no match
- lastMatchInfo = (previous > subject.length)
+ matchInfo = (previous > subject.length)
? null
: DoRegExpExec(regexp, subject, previous);
- } while (!IS_NULL(lastMatchInfo));
+ } while (!IS_NULL(matchInfo));
// Tack on the final right substring after the last match, if necessary.
if (previous < subject.length) {
result.addSpecialSlice(previous, subject.length);
}
} else { // Not a global regexp, no need to loop.
- result.addSpecialSlice(0, lastMatchInfo[CAPTURE0]);
- var endOfMatch = lastMatchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
- // Can't use lastMatchInfo any more from here, since the function could
+ result.addSpecialSlice(0, matchInfo[CAPTURE0]);
+ var endOfMatch = matchInfo[CAPTURE1];
+ result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ // Can't use matchInfo any more from here, since the function could
// overwrite it.
result.addSpecialSlice(endOfMatch, subject.length);
}
@@ -424,20 +424,20 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
// Helper function to apply a string replacement function once.
-function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
+function ApplyReplacementFunction(replace, matchInfo, subject) {
// Compute the parameter list consisting of the match, captures, index,
// and subject for the replace function invocation.
- var index = lastMatchInfo[CAPTURE0];
+ var index = matchInfo[CAPTURE0];
// The number of captures plus one for the match.
- var m = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
+ var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
if (m == 1) {
- var s = CaptureString(subject, lastMatchInfo, 0);
+ var s = CaptureString(subject, matchInfo, 0);
// Don't call directly to avoid exposing the built-in global object.
return replace.call(null, s, index, subject);
}
var parameters = $Array(m + 2);
for (var j = 0; j < m; j++) {
- parameters[j] = CaptureString(subject, lastMatchInfo, j);
+ parameters[j] = CaptureString(subject, matchInfo, j);
}
parameters[j] = index;
parameters[j + 1] = subject;
@@ -539,14 +539,14 @@ function StringSplit(separator, limit) {
return result;
}
- var lastMatchInfo = splitMatch(separator, subject, currentIndex, startIndex);
+ var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
- if (IS_NULL(lastMatchInfo)) {
+ if (IS_NULL(matchInfo)) {
result[result.length] = subject.slice(currentIndex, length);
return result;
}
- var endIndex = lastMatchInfo[CAPTURE1];
+ var endIndex = matchInfo[CAPTURE1];
// We ignore a zero-length match at the currentIndex.
if (startIndex === endIndex && endIndex === currentIndex) {
@@ -554,12 +554,12 @@ function StringSplit(separator, limit) {
continue;
}
- result[result.length] = SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
+ result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
if (result.length === limit) return result;
- for (var i = 2; i < NUMBER_OF_CAPTURES(lastMatchInfo); i += 2) {
- var start = lastMatchInfo[CAPTURE(i)];
- var end = lastMatchInfo[CAPTURE(i + 1)];
+ for (var i = 2; i < NUMBER_OF_CAPTURES(matchInfo); i += 2) {
+ var start = matchInfo[CAPTURE(i)];
+ var end = matchInfo[CAPTURE(i + 1)];
if (start != -1 && end != -1) {
result[result.length] = SubString(subject, start, end);
} else {
@@ -574,16 +574,16 @@ function StringSplit(separator, limit) {
// ECMA-262 section 15.5.4.14
-// Helper function used by split. This version returns the lastMatchInfo
+// Helper function used by split. This version returns the matchInfo
// instead of allocating a new array with basically the same information.
function splitMatch(separator, subject, current_index, start_index) {
if (IS_REGEXP(separator)) {
- var lastMatchInfo = DoRegExpExec(separator, subject, start_index);
- if (lastMatchInfo == null) return null;
+ var matchInfo = DoRegExpExec(separator, subject, start_index);
+ if (matchInfo == null) return null;
// Section 15.5.4.14 paragraph two says that we do not allow zero length
// matches at the end of the string.
- if (lastMatchInfo[CAPTURE0] === subject.length) return null;
- return lastMatchInfo;
+ if (matchInfo[CAPTURE0] === subject.length) return null;
+ return matchInfo;
}
var separatorIndex = subject.indexOf(separator, start_index);
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 2906c22927..e10dc61b2c 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -735,28 +735,17 @@ Handle<Code> ComputeCallMiss(int argc) {
Object* LoadCallbackProperty(Arguments args) {
- Handle<JSObject> recv = args.at<JSObject>(0);
- Handle<JSObject> holder = args.at<JSObject>(1);
AccessorInfo* callback = AccessorInfo::cast(args[2]);
- Handle<Object> data = args.at<Object>(3);
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
- Handle<String> name = args.at<String>(4);
- // NOTE: If we can align the structure of an AccessorInfo with the
- // locations of the arguments to this function maybe we don't have
- // to explicitly create the structure but can just pass a pointer
- // into the stack.
- LOG(ApiNamedPropertyAccess("load", *recv, *name));
- v8::AccessorInfo info(v8::Utils::ToLocal(recv),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(holder));
+ v8::AccessorInfo info(args.arguments());
HandleScope scope;
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
- result = fun(v8::Utils::ToLocal(name), info);
+ result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
if (result.IsEmpty()) return Heap::undefined_value();
@@ -765,7 +754,7 @@ Object* LoadCallbackProperty(Arguments args) {
Object* StoreCallbackProperty(Arguments args) {
- Handle<JSObject> recv = args.at<JSObject>(0);
+ JSObject* recv = JSObject::cast(args[0]);
AccessorInfo* callback = AccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
@@ -773,11 +762,9 @@ Object* StoreCallbackProperty(Arguments args) {
Handle<String> name = args.at<String>(2);
Handle<Object> value = args.at<Object>(3);
HandleScope scope;
- Handle<Object> data(callback->data());
- LOG(ApiNamedPropertyAccess("store", *recv, *name));
- v8::AccessorInfo info(v8::Utils::ToLocal(recv),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(recv));
+ LOG(ApiNamedPropertyAccess("store", recv, *name));
+ CustomArguments custom_args(callback->data(), recv, recv);
+ v8::AccessorInfo info(custom_args.end());
{
// Leaving JavaScript.
VMState state(EXTERNAL);
@@ -795,11 +782,11 @@ Object* StoreCallbackProperty(Arguments args) {
* provide any value for the given name.
*/
Object* LoadPropertyWithInterceptorOnly(Arguments args) {
- Handle<JSObject> receiver_handle = args.at<JSObject>(0);
- Handle<JSObject> holder_handle = args.at<JSObject>(1);
+ JSObject* receiver_handle = JSObject::cast(args[0]);
+ JSObject* holder_handle = JSObject::cast(args[1]);
Handle<String> name_handle = args.at<String>(2);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
- Handle<Object> data_handle = args.at<Object>(4);
+ Object* data_handle = args[4];
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
@@ -808,9 +795,8 @@ Object* LoadPropertyWithInterceptorOnly(Arguments args) {
{
// Use the interceptor getter.
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(data_handle, receiver_handle, holder_handle);
+ v8::AccessorInfo info(args.end());
HandleScope scope;
v8::Handle<v8::Value> r;
{
@@ -861,9 +847,8 @@ static Object* LoadWithInterceptor(Arguments* args,
{
// Use the interceptor getter.
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(*data_handle, *receiver_handle, *holder_handle);
+ v8::AccessorInfo info(args.end());
HandleScope scope;
v8::Handle<v8::Value> r;
{
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index 039c29268f..aa7788e3b2 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -98,7 +98,8 @@ void Top::InitializeThreadLocal() {
thread_local_.stack_is_cooked_ = false;
thread_local_.try_catch_handler_ = NULL;
thread_local_.context_ = NULL;
- thread_local_.thread_id_ = ThreadManager::kInvalidId;
+ int id = ThreadManager::CurrentId();
+ thread_local_.thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
thread_local_.external_caught_exception_ = false;
thread_local_.failed_access_check_callback_ = NULL;
clear_pending_exception();
diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h
index 5b3d6a075c..ae94f08e3c 100644
--- a/deps/v8/src/top.h
+++ b/deps/v8/src/top.h
@@ -78,6 +78,12 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
+
+ void Free() {
+ ASSERT(!has_pending_message_);
+ ASSERT(!external_caught_exception_);
+ ASSERT(try_catch_handler_ == NULL);
+ }
};
#define TOP_ADDRESS_LIST(C) \
@@ -316,6 +322,7 @@ class Top {
static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
static char* ArchiveThread(char* to);
static char* RestoreThread(char* from);
+ static void FreeThreadResources() { thread_local_.Free(); }
static const char* kStackOverflowMessage;
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index 0dfe7652ad..5af71b6826 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -30,6 +30,11 @@
// Expect $String = global.String;
+// Lazily initialized.
+var hexCharArray = 0;
+var hexCharCodeArray = 0;
+
+
function URIAddEncodedOctetToBuffer(octet, result, index) {
result[index++] = 37; // Char code of '%'.
result[index++] = hexCharCodeArray[octet >> 4];
@@ -320,11 +325,6 @@ function URIEncodeComponent(component) {
}
-// Lazily initialized.
-var hexCharArray = 0;
-var hexCharCodeArray = 0;
-
-
function HexValueOf(c) {
var code = c.charCodeAt(0);
diff --git a/deps/v8/src/usage-analyzer.cc b/deps/v8/src/usage-analyzer.cc
index 5514f405c6..23a4d9fcbe 100644
--- a/deps/v8/src/usage-analyzer.cc
+++ b/deps/v8/src/usage-analyzer.cc
@@ -44,45 +44,12 @@ class UsageComputer: public AstVisitor {
public:
static bool Traverse(AstNode* node);
- void VisitBlock(Block* node);
- void VisitDeclaration(Declaration* node);
- void VisitExpressionStatement(ExpressionStatement* node);
- void VisitEmptyStatement(EmptyStatement* node);
- void VisitIfStatement(IfStatement* node);
- void VisitContinueStatement(ContinueStatement* node);
- void VisitBreakStatement(BreakStatement* node);
- void VisitReturnStatement(ReturnStatement* node);
- void VisitWithEnterStatement(WithEnterStatement* node);
- void VisitWithExitStatement(WithExitStatement* node);
- void VisitSwitchStatement(SwitchStatement* node);
- void VisitLoopStatement(LoopStatement* node);
- void VisitForInStatement(ForInStatement* node);
- void VisitTryCatch(TryCatch* node);
- void VisitTryFinally(TryFinally* node);
- void VisitDebuggerStatement(DebuggerStatement* node);
- void VisitFunctionLiteral(FunctionLiteral* node);
- void VisitFunctionBoilerplateLiteral(FunctionBoilerplateLiteral* node);
- void VisitConditional(Conditional* node);
- void VisitSlot(Slot* node);
- void VisitVariable(Variable* node);
- void VisitVariableProxy(VariableProxy* node);
- void VisitLiteral(Literal* node);
- void VisitRegExpLiteral(RegExpLiteral* node);
- void VisitObjectLiteral(ObjectLiteral* node);
- void VisitArrayLiteral(ArrayLiteral* node);
- void VisitCatchExtensionObject(CatchExtensionObject* node);
- void VisitAssignment(Assignment* node);
- void VisitThrow(Throw* node);
- void VisitProperty(Property* node);
- void VisitCall(Call* node);
- void VisitCallEval(CallEval* node);
- void VisitCallNew(CallNew* node);
- void VisitCallRuntime(CallRuntime* node);
- void VisitUnaryOperation(UnaryOperation* node);
- void VisitCountOperation(CountOperation* node);
- void VisitBinaryOperation(BinaryOperation* node);
- void VisitCompareOperation(CompareOperation* node);
- void VisitThisFunction(ThisFunction* node);
+ // AST node visit functions.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ void VisitVariable(Variable* var);
private:
int weight_;
@@ -329,13 +296,9 @@ void UsageComputer::VisitCall(Call* node) {
}
-void UsageComputer::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void UsageComputer::VisitCallNew(CallNew* node) {
- VisitCall(node);
+ Read(node->expression());
+ ReadList(node->arguments());
}
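The DECLARE_VISIT rewrite above is the "X macro" idiom: AST_NODE_LIST names every node type once, and each client expands it with its own per-type macro instead of hand-maintaining dozens of declarations. A self-contained miniature with a made-up node list:

#define NODE_LIST(V) \
  V(Call)            \
  V(CallNew)         \
  V(Literal)

struct Call {};
struct CallNew {};
struct Literal {};

class Visitor {
 public:
#define DECLARE_VISIT(type) void Visit##type(type* node);
  NODE_LIST(DECLARE_VISIT)  // expands to VisitCall, VisitCallNew, VisitLiteral
#undef DECLARE_VISIT
};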
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index d56d279809..3c684b8199 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -239,7 +239,7 @@ int WriteChars(const char* filename,
FILE* f = OS::FOpen(filename, "wb");
if (f == NULL) {
if (verbose) {
- OS::PrintError("Cannot open file %s for reading.\n", filename);
+ OS::PrintError("Cannot open file %s for writing.\n", filename);
}
return 0;
}
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index a20415875e..3f8e6cdeae 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -71,6 +71,14 @@ bool V8::Initialize(Deserializer *des) {
::assembler::arm::Simulator::Initialize();
#endif
+ { // NOLINT
+ // Ensure that the thread has a valid stack guard. The v8::Locker object
+ // will ensure this too, but we don't have to use lockers if we are only
+ // using one thread.
+ ExecutionAccess lock;
+ StackGuard::InitThread(lock);
+ }
+
// Setup the object heap
ASSERT(!Heap::HasBeenSetup());
if (!Heap::Setup(create_heap_objects)) {
@@ -161,10 +169,10 @@ uint32_t V8::Random() {
}
-bool V8::IdleNotification(bool is_high_priority) {
- if (!FLAG_use_idle_notification) return false;
- // Ignore high priority instances of V8.
- if (is_high_priority) return false;
+bool V8::IdleNotification() {
+ // Returning true tells the caller that there is no need to call
+ // IdleNotification again.
+ if (!FLAG_use_idle_notification) return true;
// Tell the heap that it may want to adjust.
return Heap::IdleNotification();
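With the priority flag gone, the return value now carries the contract: true means there is no need to call again. Assuming the public v8::V8::IdleNotification() wrapper mirrors this internal signature (the api.cc change in this patch suggests it does), an embedder's idle hook becomes a simple loop:

#include <v8.h>

void OnEmbedderIdle() {
  // Keep notifying while V8 still has idle work (e.g. heap adjustment);
  // stop as soon as V8 reports another call would be pointless.
  while (!v8::V8::IdleNotification()) {
  }
}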
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 7786d66f8d..106ae612c1 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -95,7 +95,7 @@ class V8 : public AllStatic {
static Smi* RandomPositiveSmi();
// Idle notification directly from the API.
- static bool IdleNotification(bool is_high_priority);
+ static bool IdleNotification();
private:
// True if engine is currently running
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 3022a7e9a6..80a7cd94fb 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -56,10 +56,20 @@ Locker::Locker() : has_lock_(false), top_level_(true) {
if (!internal::ThreadManager::IsLockedByCurrentThread()) {
internal::ThreadManager::Lock();
has_lock_ = true;
+ // Make sure that V8 is initialized. Archiving of threads interferes
+ // with deserialization by adding additional root pointers, so we must
+ // initialize here, before anyone can call ~Locker() or Unlocker().
+ if (!internal::V8::IsRunning()) {
+ V8::Initialize();
+ }
// This may be a locker within an unlocker in which case we have to
// get the saved state for this thread and restore it.
if (internal::ThreadManager::RestoreThread()) {
top_level_ = false;
+ } else {
+ internal::ExecutionAccess access;
+ internal::StackGuard::ClearThread(access);
+ internal::StackGuard::InitThread(access);
}
}
ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
@@ -77,7 +87,9 @@ bool Locker::IsLocked() {
Locker::~Locker() {
ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
if (has_lock_) {
- if (!top_level_) {
+ if (top_level_) {
+ internal::ThreadManager::FreeThreadResources();
+ } else {
internal::ThreadManager::ArchiveThread();
}
internal::ThreadManager::Unlock();
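Taken together, the Locker changes mean a thread can use V8 without an explicit initialization dance: the first Locker initializes the VM and the thread's stack guard, and a top-level Locker frees per-thread state on destruction instead of archiving it. A sketch of the usage pattern this enables, assuming the public API of this V8 generation:

#include <v8.h>

void RunScriptOnThisThread() {
  v8::Locker locker;      // takes the VM lock; initializes V8 if needed
  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);
  // ... compile and run scripts here ...
  context.Dispose();
}  // ~Locker: top level, so per-thread resources are freed, not archived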
@@ -139,11 +151,14 @@ bool ThreadManager::RestoreThread() {
ThreadState* state =
reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
if (state == NULL) {
+ // This is a new thread.
+ StackGuard::InitThread(access);
return false;
}
char* from = state->data();
from = HandleScopeImplementer::RestoreThread(from);
from = Top::RestoreThread(from);
+ from = Relocatable::RestoreState(from);
#ifdef ENABLE_DEBUGGER_SUPPORT
from = Debug::RestoreDebug(from);
#endif
@@ -183,7 +198,8 @@ static int ArchiveSpacePerThread() {
#endif
StackGuard::ArchiveSpacePerThread() +
RegExpStack::ArchiveSpacePerThread() +
- Bootstrapper::ArchiveSpacePerThread();
+ Bootstrapper::ArchiveSpacePerThread() +
+ Relocatable::ArchiveSpacePerThread();
}
@@ -273,6 +289,7 @@ void ThreadManager::EagerlyArchiveThread() {
// in ThreadManager::Iterate(ObjectVisitor*).
to = HandleScopeImplementer::ArchiveThread(to);
to = Top::ArchiveThread(to);
+ to = Relocatable::ArchiveState(to);
#ifdef ENABLE_DEBUGGER_SUPPORT
to = Debug::ArchiveDebug(to);
#endif
@@ -284,6 +301,18 @@ void ThreadManager::EagerlyArchiveThread() {
}
+void ThreadManager::FreeThreadResources() {
+ HandleScopeImplementer::FreeThreadResources();
+ Top::FreeThreadResources();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug::FreeThreadResources();
+#endif
+ StackGuard::FreeThreadResources();
+ RegExpStack::FreeThreadResources();
+ Bootstrapper::FreeThreadResources();
+}
+
+
bool ThreadManager::IsArchived() {
return Thread::HasThreadLocal(thread_state_key);
}
@@ -297,6 +326,7 @@ void ThreadManager::Iterate(ObjectVisitor* v) {
char* data = state->data();
data = HandleScopeImplementer::Iterate(v, data);
data = Top::Iterate(v, data);
+ data = Relocatable::Iterate(v, data);
}
}
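Relocatable slots into an existing convention here: each archiving subsystem consumes a fixed-size slice of one contiguous per-thread buffer and returns the pointer just past its slice, so archive, restore, and iterate are each a chain of such calls. The convention in miniature, with a hypothetical stand-in subsystem:

#include <string.h>

struct SubsystemState {
  int value;
};

// Writes this subsystem's state and hands back the next free position.
char* ArchiveSubsystem(char* to, const SubsystemState& state) {
  memcpy(to, &state, sizeof(state));
  return to + sizeof(state);
}

char* RestoreSubsystem(char* from, SubsystemState* state) {
  memcpy(state, from, sizeof(*state));
  return from + sizeof(*state);
}

ArchiveSpacePerThread() is the other half of the contract: the buffer is sized as the sum of every subsystem's fixed slice, which is why Relocatable::ArchiveSpacePerThread() is added to that sum above.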
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index f808e54fed..0684053281 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -86,6 +86,7 @@ class ThreadManager : public AllStatic {
static void ArchiveThread();
static bool RestoreThread();
+ static void FreeThreadResources();
static bool IsArchived();
static void Iterate(ObjectVisitor* v);
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index c2adb230ca..ca78b5fc1a 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -171,7 +171,7 @@ class Variable: public ZoneObject {
UseCount* var_uses() { return &var_uses_; }
UseCount* obj_uses() { return &obj_uses_; }
- bool IsVariable(Handle<String> n) {
+ bool IsVariable(Handle<String> n) const {
return !is_this() && name().is_identical_to(n);
}
@@ -185,6 +185,12 @@ class Variable: public ZoneObject {
bool is_this() const { return kind_ == THIS; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
+ // True if the variable is named eval and not known to be shadowed.
+ bool is_possibly_eval() const {
+ return IsVariable(Factory::eval_symbol()) &&
+ (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+ }
+
Variable* local_if_not_shadowed() const {
ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
return local_if_not_shadowed_;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 8b612de5a9..28815cc07c 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 13
+#define BUILD_NUMBER 14
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index f51a3ea887..899a17cd45 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -70,6 +70,20 @@ void Assembler::emitw(uint16_t x) {
}
+void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ RecordRelocInfo(rmode);
+ int current = code_targets_.length();
+ if (current > 0 && code_targets_.last().is_identical_to(target)) {
+ // Optimization if we keep jumping to the same code target.
+ emitl(current - 1);
+ } else {
+ code_targets_.Add(target);
+ emitl(current);
+ }
+}
+
+
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}
@@ -162,15 +176,18 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(pc);
+ return Memory::int32_at(pc) + pc + 4;
}
void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(pc) = target;
- CPU::FlushICache(pc, sizeof(intptr_t));
+ Memory::int32_at(pc) = target - pc - 4;
+ CPU::FlushICache(pc, sizeof(int32_t));
}
+Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
+ return code_targets_[Memory::int32_at(pc)];
+}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -179,15 +196,24 @@ void Assembler::set_target_address_at(Address pc, Address target) {
void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
- intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
- *p += delta; // relocate entry
+ Memory::Address_at(pc_) += delta;
+ } else if (IsCodeTarget(rmode_)) {
+ Memory::int32_at(pc_) -= delta;
+ } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+ // Special handling of js_return when a break point is set (call
+ // instruction has been inserted).
+ Memory::int32_at(pc_ + 1) -= delta; // relocate entry
}
}
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
+ if (IsCodeTarget(rmode_)) {
+ return Assembler::target_address_at(pc_);
+ } else {
+ return Memory::Address_at(pc_);
+ }
}
@@ -199,13 +225,27 @@ Address RelocInfo::target_address_address() {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
+ if (IsCodeTarget(rmode_)) {
+ Assembler::set_target_address_at(pc_, target);
+ } else {
+ Memory::Address_at(pc_) = target;
+ }
}
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return *reinterpret_cast<Object**>(pc_);
+ return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ if (rmode_ == EMBEDDED_OBJECT) {
+ return Memory::Object_Handle_at(pc_);
+ } else {
+ return origin->code_target_object_handle_at(pc_);
+ }
}
@@ -240,16 +280,15 @@ bool RelocInfo::IsCallInstruction() {
Address RelocInfo::call_address() {
ASSERT(IsCallInstruction());
- return Assembler::target_address_at(
- pc_ + Assembler::kPatchReturnSequenceAddressOffset);
+ return Memory::Address_at(
+ pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsCallInstruction());
- Assembler::set_target_address_at(
- pc_ + Assembler::kPatchReturnSequenceAddressOffset,
- target);
+ Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
+ target;
}
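
The two rewritten accessors above are the standard x64 near call/jump displacement arithmetic; a minimal standalone sketch with hypothetical helper names:

#include <cstdint>
#include <cstring>

// `pc` points at the 4-byte displacement field of an E8/E9 instruction.
// The displacement is relative to the next instruction, i.e. pc + 4.
uint8_t* TargetAddressAt(uint8_t* pc) {
  int32_t disp;
  std::memcpy(&disp, pc, sizeof(disp));
  return pc + 4 + disp;
}

void SetTargetAddressAt(uint8_t* pc, uint8_t* target) {
  int32_t disp = static_cast<int32_t>(target - (pc + 4));
  std::memcpy(pc, &disp, sizeof(disp));
  // The real code also flushes the instruction cache for these 4 bytes.
}

This is also why RelocInfo::apply above subtracts delta from the stored displacement for code targets: when the instruction (and hence pc) moves by delta while the target stays put, target - pc - 4 shrinks by delta.
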
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index b4204a9377..cf79a435bb 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -264,7 +264,8 @@ static void InitCoverageLog();
byte* Assembler::spare_buffer_ = NULL;
-Assembler::Assembler(void* buffer, int buffer_size) {
+Assembler::Assembler(void* buffer, int buffer_size)
+ : code_targets_(100) {
if (buffer == NULL) {
// do our own buffer management
if (buffer_size <= kMinimalBufferSize) {
@@ -762,6 +763,15 @@ void Assembler::call(Label* L) {
}
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1000 #32-bit disp
+ emit(0xE8);
+ emit_code_target(target, rmode);
+}
+
+
void Assembler::call(Register adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1062,6 +1072,19 @@ void Assembler::j(Condition cc, Label* L) {
}
+void Assembler::j(Condition cc,
+ Handle<Code> target,
+ RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint4(cc));
+ // 0000 1111 1000 tttn #32-bit disp
+ emit(0x0F);
+ emit(0x80 | cc);
+ emit_code_target(target, rmode);
+}
+
+
void Assembler::jmp(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1093,6 +1116,15 @@ void Assembler::jmp(Label* L) {
}
+void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1001 #32-bit disp
+ emit(0xE9);
+ emit_code_target(target, rmode);
+}
+
+
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2387,7 +2419,8 @@ void Assembler::WriteRecordedPositions() {
}
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
-
+const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
+ 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::JS_RETURN;
} } // namespace v8::internal
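
emit_code_target (in assembler-x64-inl.h above) now emits a 32-bit index into the assembler's code_targets_ table instead of an address, reusing the previous entry when consecutive emits name the same target; code_target_object_handle_at maps the index back to the handle. A rough sketch of that bookkeeping, with std::vector standing in for V8's List and a plain pointer for Handle<Code>:

#include <vector>

using CodeRef = const void*;  // stand-in for Handle<Code>

// Returns the 32-bit index to emit for `target`, reusing the last table
// entry when we keep jumping to the same code target.
int AddCodeTarget(std::vector<CodeRef>* code_targets, CodeRef target) {
  int current = static_cast<int>(code_targets->size());
  if (current > 0 && code_targets->back() == target) {
    return current - 1;
  }
  code_targets->push_back(target);
  return current;
}
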
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 697dd54643..e17a55d828 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -440,18 +440,26 @@ class Assembler : public Malloced {
// Assembler functions are invoked in between GetCode() calls.
void GetCode(CodeDesc* desc);
- // Read/Modify the code target in the branch/call instruction at pc.
- // On the x64 architecture, the address is absolute, not relative.
+ // Read/Modify the code target in the relative branch/call instruction at pc.
+ // On the x64 architecture, we use relative jumps with a 32-bit displacement
+ // to jump to other Code objects in the Code space in the heap.
+ // Jumps to C functions are done indirectly through a 64-bit register holding
+ // the absolute address of the target.
+ // These functions convert between absolute Addresses of Code objects and
+ // the relative displacements stored in the code.
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
-
+ inline Handle<Object> code_target_object_handle_at(Address pc);
// Distance between the address of the code target in the call instruction
- // and the return address. Checked in the debug build.
- static const int kCallTargetAddressOffset = 3 + kPointerSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to (movq = REX.W 0xB8+r.).
- static const int kPatchReturnSequenceAddressOffset = 2;
-
+ // and the return address pushed on the stack.
+ static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
+ // Distance between the start of the JS return sequence and where the
+ // 32-bit displacement of a near call would be, relative to the pushed
+ // return address. TODO: Use return sequence length instead.
+ // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
+ static const int kPatchReturnSequenceAddressOffset = 13 - 4;
+ // TODO(X64): Rename this, removing the "Real", after changing the above.
+ static const int kRealPatchReturnSequenceAddressOffset = 2;
// ---------------------------------------------------------------------------
// Code generation
//
@@ -687,6 +695,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x4, dst, src);
}
+ void andl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+
void decq(Register dst);
void decq(const Operand& dst);
void decl(Register dst);
@@ -919,6 +931,7 @@ class Assembler : public Malloced {
// Calls
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
+ void call(Handle<Code> target, RelocInfo::Mode rmode);
// Call near absolute indirect, address in register
void call(Register adr);
@@ -928,7 +941,9 @@ class Assembler : public Malloced {
// Jumps
// Jump short or near relative.
+ // Use a 32-bit signed displacement.
void jmp(Label* L); // unconditional jump to L
+ void jmp(Handle<Code> target, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
void jmp(Register adr);
@@ -938,6 +953,7 @@ class Assembler : public Malloced {
// Conditional jumps
void j(Condition cc, Label* L);
+ void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
// Floating-point operations
void fld(int i);
@@ -1043,14 +1059,6 @@ class Assembler : public Malloced {
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
- // Writes a doubleword of data in the code stream.
- // Used for inline tables, e.g., jump-tables.
- // void dd(uint32_t data);
-
- // Writes a quadword of data in the code stream.
- // Used for inline tables, e.g., jump-tables.
- // void dd(uint64_t data, RelocInfo::Mode reloc_info);
-
int pc_offset() const { return pc_ - buffer_; }
int current_statement_position() const { return current_statement_position_; }
int current_position() const { return current_position_; }
@@ -1092,9 +1100,9 @@ class Assembler : public Malloced {
void emit(byte x) { *pc_++ = x; }
inline void emitl(uint32_t x);
- inline void emit(Handle<Object> handle);
inline void emitq(uint64_t x, RelocInfo::Mode rmode);
inline void emitw(uint16_t x);
+ inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
void emit(Immediate x) { emitl(x.value_); }
// Emits a REX prefix that encodes a 64-bit operand size and
@@ -1272,6 +1280,7 @@ class Assembler : public Malloced {
byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
+ List< Handle<Code> > code_targets_;
// push-pop elimination
byte* last_pc_;
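
A consequence of the rel32 encoding documented above: the code-target field of a call is the 4 bytes immediately preceding the pushed return address, which is where the new kCallTargetAddressOffset = 4 comes from. A hedged sketch of locating the patchable field from a return address:

#include <cstdint>

const int kCallTargetAddressOffset = 4;  // 32-bit displacement of call rel32.

// Given the return address pushed by `call rel32`, find the start of the
// displacement field that set_target_address_at() rewrites.
uint8_t* CallTargetFieldFromReturnAddress(uint8_t* return_address) {
  return return_address - kCallTargetAddressOffset;
}

kRealPatchReturnSequenceAddressOffset = 2, by contrast, still refers to the movq-based patched return sequence, whose REX.W 0xB8+r opcode occupies the 2 bytes before the 64-bit immediate.
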
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index d399a88005..35eddc45e3 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -41,10 +41,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
__ movq(Operand(kScratchRegister, 0), rdi);
// The actual argument count has already been loaded into register
- // rax, but JumpToBuiltin expects rax to contain the number of
+ // rax, but JumpToRuntime expects rax to contain the number of
// arguments including the receiver.
__ incq(rax);
- __ JumpToBuiltin(ExternalReference(id), 1);
+ __ JumpToRuntime(ExternalReference(id), 1);
}
@@ -452,8 +452,391 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+ __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ movq(result,
+ Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of elements to preallocate in the backing store of an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero, an
+// elements backing store of that size is allocated and filled with the hole
+// values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity >= 0);
+
+ // Load the initial map from the array function.
+ __ movq(scratch1, FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize;
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
+ }
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
+ __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
+ Factory::empty_fixed_array());
+ // Field JSArray::kElementsOffset is initialized later.
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+ // If no storage is requested for the elements array just set the empty
+ // fixed array.
+ if (initial_capacity == 0) {
+ __ Move(FieldOperand(result, JSArray::kElementsOffset),
+ Factory::empty_fixed_array());
+ return;
+ }
+
+  // Calculate the location of the elements array and set the elements array
+  // member of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, Operand(result, JSArray::kSize));
+ __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array
+ // scratch2: start of next object
+ __ Move(FieldOperand(scratch1, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ movq(FieldOperand(scratch1, Array::kLengthOffset),
+ Immediate(initial_capacity));
+
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+ static const int kLoopUnfoldLimit = 4;
+ ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ __ Move(scratch3, Factory::the_hole_value());
+ if (initial_capacity <= kLoopUnfoldLimit) {
+ // Use a scratch register here to have only one reloc info when unfolding
+ // the loop.
+ for (int i = 0; i < initial_capacity; i++) {
+ __ movq(FieldOperand(scratch1,
+ FixedArray::kHeaderSize + i * kPointerSize),
+ scratch3);
+ }
+ } else {
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(scratch1, 0), scratch3);
+ __ addq(scratch1, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(scratch1, scratch2);
+ __ j(below, &loop);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true, the allocated elements backing store is
+// filled with the hole values; otherwise it is left uninitialized. When the
+// backing store is filled, the register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array,
+ Register elements_array_end,
+ Register scratch,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ movq(elements_array,
+ FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty (zero-length) array is requested.
+ __ testq(array_size, array_size);
+ __ j(not_zero, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+ times_half_pointer_size, // array_size is a smi.
+ array_size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array: initial map
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
+ __ Move(elements_array, Factory::empty_fixed_array());
+ __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+ // Field JSArray::kElementsOffset is initialized later.
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+  // Calculate the location of the elements array and set the elements array
+  // member of the JSArray.
+ // result: JSObject
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ lea(elements_array, Operand(result, JSArray::kSize));
+ __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+ // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ SmiToInteger64(array_size, array_size);
+ __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ Label not_empty_2, fill_array;
+ __ testq(array_size, array_size);
+ __ j(not_zero, &not_empty_2);
+ // Length of the FixedArray is the number of pre-allocated elements even
+ // though the actual JSArray has length 0.
+ __ movq(FieldOperand(elements_array, Array::kLengthOffset),
+ Immediate(kPreallocatedArrayElements));
+ __ jmp(&fill_array);
+ __ bind(&not_empty_2);
+ // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+ // same.
+ __ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ __ bind(&fill_array);
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ Move(scratch, Factory::the_hole_value());
+ __ lea(elements_array, Operand(elements_array,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(elements_array, 0), scratch);
+ __ addq(elements_array, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(elements_array, elements_array_end);
+ __ j(below, &loop);
+ }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code, the runtime is called. This
+// function assumes the following state:
+// rdi: constructor (built-in Array function)
+// rax: argc
+// rsp[0]: return address
+// rsp[8]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in rdi needs to be preserved for
+// entering the generic code. In both cases argc in rax needs to be preserved.
+// Both registers are preserved by this code, so there is no need to
+// differentiate between a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label* call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments.
+ __ testq(rax, rax);
+ __ j(not_zero, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ rdi,
+ rbx,
+ rcx,
+ rdx,
+ r8,
+ kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ movq(rax, rbx);
+ __ ret(kPointerSize);
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+ __ bind(&argc_one_or_more);
+ __ cmpq(rax, Immediate(1));
+ __ j(not_equal, &argc_two_or_more);
+ __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
+ Condition not_positive_smi = __ CheckNotPositiveSmi(rdx);
+ __ j(not_positive_smi, call_generic_code);
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+ __ JumpIfSmiGreaterEqualsConstant(rdx,
+ JSObject::kInitialMaxFastElementArray,
+ call_generic_code);
+
+ // rax: argc
+ // rdx: array_size (smi)
+ // rdi: constructor
+  // rsp[0]: return address
+  // rsp[8]: argument
+ AllocateJSArray(masm,
+ rdi,
+ rdx,
+ rbx,
+ rcx,
+ r8,
+ r9,
+ true,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ movq(rax, rbx);
+ __ ret(2 * kPointerSize);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ movq(rdx, rax);
+  __ Integer32ToSmi(rdx, rdx);  // Convert argc to a smi.
+ // rax: argc
+ // rdx: array_size (smi)
+ // rdi: constructor
+  // rsp[0] : return address
+  // rsp[8] : last argument
+ AllocateJSArray(masm,
+ rdi,
+ rdx,
+ rbx,
+ rcx,
+ r8,
+ r9,
+ false,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+
+ // rax: argc
+ // rbx: JSArray
+ // rcx: elements_array
+ // r8: elements_array_end (untagged)
+  // rsp[0]: return address
+  // rsp[8]: last argument
+
+ // Location of the last argument
+ __ lea(r9, Operand(rsp, kPointerSize));
+
+  // Location of the first array element (the parameter fill_with_hole to
+  // AllocateJSArray is false, so the FixedArray is returned in rcx).
+ __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // rax: argc
+ // rbx: JSArray
+ // rdx: location of the first array element
+ // r9: location of the last argument
+  // rsp[0]: return address
+  // rsp[8]: last argument
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+ __ movq(Operand(rdx, 0), kScratchRegister);
+ __ addq(rdx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
+
+ // Remove caller arguments from the stack and return.
+ // rax: argc
+ // rbx: JSArray
+  // rsp[0]: return address
+  // rsp[8]: last argument
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ push(rcx);
+ __ movq(rax, rbx);
+ __ ret(0);
+}
+
+
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // Just jump to the generic array code.
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, rdi);
+
+ if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The following smi check detects both a NULL and a Smi.
+ ASSERT(kSmiTag == 0);
+ Condition not_smi = __ CheckNotSmi(rbx);
+ __ Assert(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
__ Jump(array_code, RelocInfo::CODE_TARGET);
@@ -461,7 +844,36 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // Just jump to the generic construct code.
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function,
+    // which always has a map.
+ GenerateLoadArrayFunction(masm, rbx);
+ __ cmpq(rdi, rbx);
+ __ Assert(equal, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The following smi check detects both a NULL and a Smi.
+ ASSERT(kSmiTag == 0);
+ Condition not_smi = __ CheckNotSmi(rbx);
+ __ Assert(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -529,6 +941,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rdi: constructor
__ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
+ ASSERT(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
// rdi: constructor
// rax: initial map (if proven valid below)
@@ -547,12 +960,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
// rdi: size of new object
- __ AllocateObjectInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rdi,
+ rbx,
+ rdi,
+ no_reg,
+ &rt_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
@@ -607,14 +1020,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rbx: JSObject
// rdi: start of next object (will be start of FixedArray)
// rdx: number of elements in properties array
- __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// rbx: JSObject
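
One detail in the new builtins worth a sanity check (illustrative program, not part of the patch): AllocateJSArray above scales array_size, which is still a tagged smi, by times_half_pointer_size. With kSmiTag == 0 and kSmiTagSize == 1 (asserted just before the call), a smi is the value shifted left by one, so scaling the smi by 4 yields exactly value * kPointerSize:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t kPointerSize = 8;
  const int kSmiTagSize = 1;  // smi == value << 1 on this port.
  for (int value = 0; value < 1000; value++) {
    int64_t smi = static_cast<int64_t>(value) << kSmiTagSize;
    // times_half_pointer_size scales by 4, giving value * kPointerSize:
    assert(smi * (kPointerSize / 2) == value * kPointerSize);
  }
  return 0;
}
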
diff --git a/deps/v8/src/x64/cfg-x64.cc b/deps/v8/src/x64/cfg-x64.cc
deleted file mode 100644
index b755f49ebe..0000000000
--- a/deps/v8/src/x64/cfg-x64.cc
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-x64.h"
-#include "debug.h"
-#include "macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Label deferred_enter, deferred_exit;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- __ push(rdi);
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(kScratchRegister);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ movq(kScratchRegister, stack_limit);
- __ cmpq(rsp, Operand(kScratchRegister, 0));
- __ j(below, &deferred_enter);
- __ bind(&deferred_exit);
- }
- }
- successor_->Compile(masm);
- if (FLAG_check_stack) {
- Comment cmnt(masm, "[ Deferred Stack Check");
- __ bind(&deferred_enter);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ jmp(&deferred_exit);
- }
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ RecordJSReturn();
- __ movq(rsp, rbp);
- __ pop(rbp);
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ ret((count + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint.
- // "movq rsp, rbp; pop rbp" has length 4. "ret k" has length 3.
- const int kPadding = Debug::kX64JSReturnSequenceLength - 4 - 3;
- for (int i = 0; i < kPadding; ++i) {
- __ int3();
- }
-#endif
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
- // A test rax instruction after the call indicates to the IC code that it
- // was inlined. Ensure there is not one after the call below.
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ pop(rbx); // Discard key.
- } else {
- key()->Get(masm, rcx);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- }
- __ pop(rbx); // Discard receiver.
- location()->Set(masm, rax);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Push both operands and call the specialized stub.
- if (!left()->is_on_stack()) left()->Push(masm);
- right()->Push(masm);
- GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
- __ CallStub(&stub);
- location()->Set(masm, rax);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value()->Get(masm, rax);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ Move(reg, handle_);
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ Push(handle_);
-}
-
-
-static Operand ToOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return Operand(rbp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return Operand(rbp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return Operand(rax, 0);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ Move(ToOperand(loc), handle_);
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ movq(reg, ToOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ movq(ToOperand(this), reg);
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ push(ToOperand(this));
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // We dispatch to the value because in some cases (temp or constant) we
- // can use special instruction sequences.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ movq(kScratchRegister, ToOperand(this));
- __ movq(ToOperand(loc), kScratchRegister);
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(rax)) __ movq(reg, rax);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(rax)) __ movq(rax, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(rax);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, rax);
- break;
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ movq(ToOperand(loc), rax);
- break;
- case STACK:
- __ pop(ToOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index e4dbd62408..8e6dbef2d1 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -913,7 +913,6 @@ void CodeGenerator::VisitBlock(Block* node) {
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
@@ -2592,7 +2591,6 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
@@ -2674,8 +2672,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
- CodeForStatementPosition(node);
-
Load(node->exception());
Result result = frame_->CallRuntime(Runtime::kThrow, 1);
frame_->Push(&result);
@@ -2694,8 +2690,6 @@ void CodeGenerator::VisitCall(Call* node) {
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
-
// Check if the function is a variable or a property.
Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
@@ -2710,7 +2704,64 @@ void CodeGenerator::VisitCall(Call* node) {
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+
+ // Resolve the call.
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ movq(scratch.reg(),
+ FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+ __ movq(result.reg(),
+ FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
+ frame_->SetElementAt(arg_count, &result);
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
@@ -2737,6 +2788,7 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->RestoreContextRegister();
// Replace the function on the stack with the result.
frame_->SetElementAt(0, &result);
+
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// ----------------------------------
@@ -2763,6 +2815,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Call the function.
CallWithArguments(args, node->position());
+
} else if (property != NULL) {
// Check if the key is a literal string.
Literal* literal = property->key()->AsLiteral();
@@ -2828,6 +2881,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Call the function.
CallWithArguments(args, node->position());
}
+
} else {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is not global
@@ -2845,70 +2899,8 @@ void CodeGenerator::VisitCall(Call* node) {
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
-
- // Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ movq(scratch.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ movq(result.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
- frame_->SetElementAt(arg_count, &result);
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-}
-
-
void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -7204,12 +7196,12 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Register scratch,
Register result) {
// Allocate heap number in new space.
- __ AllocateObjectInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch,
+ no_reg,
+ need_gc,
+ TAG_OBJECT);
// Set the map and tag the result.
__ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
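
Reading aid (reconstructed from the pushes above, not from the patch itself): the virtual-frame layout the new eval branch of VisitCall builds, with N == arg_count and depths measured from the top of the frame.

// Frame for 'eval(a0, ..., aN-1)' just before the two extra pushes:
//
//   depth N+1 : function           (the value bound to 'eval')
//   depth N   : undefined          (placeholder receiver)
//   depth N-1 : a0
//     ...
//   depth 0   : aN-1
//
// Two copies are then pushed, the function and a0 (or undefined when
// N == 0), as the arguments to Runtime::kResolvePossiblyDirectEval.
// The runtime returns a {function, receiver} pair in a FixedArray;
// element 0 overwrites the slot at depth N+1 and element 1 the slot at
// depth N, after which CallFunctionStub runs with arg_count + 1 values.
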
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 2ae8145e8f..87db3a9bef 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -553,7 +553,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index b2f52b2954..820909122a 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -257,7 +257,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, fast, check_string, index_int, index_string;
+ Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
@@ -287,11 +287,36 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&index_int);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
- __ j(not_equal, &slow);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(below, &fast); // Unsigned comparison rejects negative indices.
+ __ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
+ // Fast case: Do the load.
+ __ movq(rax, Operand(rcx, rax, times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, &slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ ret(0);
+
+  // Check whether the elements object is a pixel array.
+ // rax: untagged index
+ // rcx: elements array
+ __ bind(&check_pixel_array);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kPixelArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+ __ movb(rax, Operand(rcx, rax, times_1, 0));
+ __ Integer32ToSmi(rax, rax);
+ __ ret(0);
+
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
@@ -332,16 +357,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
__ shrl(rax, Immediate(String::kLongLengthShift));
__ jmp(&index_int);
- // Fast case: Do the load.
- __ bind(&fast);
- __ movq(rax, Operand(rcx, rax, times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, &slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
- __ ret(0);
}
@@ -402,7 +417,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : key
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, fast, array, extra, check_pixel_array;
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize)); // 2 ~ return address, key
@@ -435,8 +450,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rbx: index (as a smi), zero-extended.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
- __ j(not_equal, &slow);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
__ SmiToInteger32(rdx, rbx);
__ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
@@ -445,7 +461,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rbx: index (as a smi)
__ j(below, &fast);
-
// Slow case: Push extra copies of the arguments (3).
__ bind(&slow);
__ pop(rcx);
@@ -456,6 +471,37 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  // Check whether the elements object is a pixel array.
+ // rax: value
+ // rcx: elements array
+ // rbx: index (as a smi), zero-extended.
+ __ bind(&check_pixel_array);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kPixelArrayMapRootIndex);
+ __ j(not_equal, &slow);
+  // Check that the value is a smi. If a conversion is needed, call into the
+ // runtime to convert and clamp.
+ __ JumpIfNotSmi(rax, &slow);
+ __ SmiToInteger32(rbx, rbx);
+ __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ movq(rdx, rax); // Save the value.
+ __ SmiToInteger32(rax, rax);
+ { // Clamp the value to [0..255].
+ Label done, is_negative;
+ __ testl(rax, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ j(negative, &is_negative);
+ __ movl(rax, Immediate(255));
+ __ jmp(&done);
+ __ bind(&is_negative);
+ __ xorl(rax, rax); // Clear rax.
+ __ bind(&done);
+ }
+ __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+ __ movb(Operand(rcx, rbx, times_1, 0), rax);
+ __ movq(rax, rdx); // Return the original value.
+ __ ret(0);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -476,7 +522,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ SmiSubConstant(rbx, rbx, 1, NULL);
__ jmp(&fast);
-
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode; if it is the
// length is always a smi.
@@ -493,7 +538,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
__ j(above_equal, &extra);
-
// Fast case: Do the store.
__ bind(&fast);
// rax: value
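
The pixel-array store above clamps the value to [0..255] with three branches; a C equivalent of the emitted sequence (hypothetical function name, for illustration):

#include <cstdint>

// testl with 0xFFFFFF00 checks whether any bit outside the low byte is set;
// if none is, the value is already in [0..255]. Otherwise negative values
// clamp to 0 and everything else to 255.
int32_t ClampToPixel(int32_t value) {
  if ((value & ~0xFF) == 0) return value;
  return value < 0 ? 0 : 255;
}
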
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 637428db38..2689e388b6 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -63,6 +63,13 @@ void MacroAssembler::CompareRoot(Register with,
}
+void MacroAssembler::CompareRoot(Operand with,
+ Heap::RootListIndex index) {
+ LoadRoot(kScratchRegister, index);
+ cmpq(with, kScratchRegister);
+}
+
+
static void RecordWriteHelper(MacroAssembler* masm,
Register object,
Register addr,
@@ -332,17 +339,16 @@ void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
// should remove this need and make the runtime routine entry code
// smarter.
movq(rax, Immediate(num_arguments));
- JumpToBuiltin(ext, result_size);
+ JumpToRuntime(ext, result_size);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& ext,
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
movq(rbx, ext);
CEntryStub ces(result_size);
- movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
- jmp(kScratchRegister);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -357,7 +363,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
unresolved_.Add(entry);
@@ -519,6 +524,18 @@ void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
}
+void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
+ int constant,
+ Label* on_greater_equals) {
+ if (Smi::IsValid(constant)) {
+ Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
+ j(are_greater_equal, on_greater_equals);
+ } else if (constant < Smi::kMinValue) {
+ jmp(on_greater_equals);
+ }
+}
+
+
void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
Condition is_valid = CheckInteger32ValidSmiValue(src);
j(ReverseCondition(is_valid), on_invalid);
@@ -602,6 +619,22 @@ Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
}
+Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
+ int constant) {
+ if (constant == 0) {
+ testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
+ return positive;
+ }
+ if (Smi::IsValid(constant)) {
+ cmpl(src, Immediate(Smi::FromInt(constant)));
+ return greater_equal;
+ }
+  // The constant is not a valid smi, so this point is never reached.
+ UNREACHABLE();
+ return no_condition;
+}
+
+
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
// A 32-bit integer value can be converted to a smi if it is in the
// range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
@@ -1235,17 +1268,8 @@ void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
- Label target;
- bind(&target);
-#endif
- jmp(kScratchRegister);
-#ifdef DEBUG
- ASSERT_EQ(kCallTargetAddressOffset,
- SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+ // TODO(X64): Inline this
+ jmp(code_object, rmode);
}
@@ -1264,17 +1288,7 @@ void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
WriteRecordedPositions();
- movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
- // Patch target is kPointer size bytes *before* target label.
- Label target;
- bind(&target);
-#endif
- call(kScratchRegister);
-#ifdef DEBUG
- ASSERT_EQ(kCallTargetAddressOffset,
- SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+ call(code_object, rmode);
}
@@ -1541,7 +1555,6 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry =
{ pc_offset() - kCallTargetAddressOffset, flags, name };
@@ -2024,12 +2037,12 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -2053,14 +2066,14 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -2084,12 +2097,12 @@ void MacroAssembler::AllocateObjectInNewSpace(int header_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
// Load address of new object into result.
LoadAllocationTopHelper(result, result_end, scratch, flags);
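
The new CheckSmiGreaterEqualsConstant above works because smi tagging on this port is order-preserving: with kSmiTag == 0 and kSmiTagSize == 1, a tagged smi is the value shifted left by one, so a 32-bit compare of tagged values orders them exactly like the untagged values, and constant == 0 reduces to a sign-bit test. A sketch under those assumptions:

#include <cstdint>

inline int32_t SmiFromInt(int32_t value) { return value << 1; }

// Tagged comparison gives the same answer as untagged comparison because
// x << 1 is strictly increasing over the valid smi range.
bool SmiGreaterEqualsConstant(int32_t tagged_smi, int32_t constant) {
  if (constant == 0) return tagged_smi >= 0;  // sign-bit test
  return tagged_smi >= SmiFromInt(constant);
}
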
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index de2070ab8d..adc136a7f8 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -56,6 +56,7 @@ class MacroAssembler: public Assembler {
void LoadRoot(Register destination, Heap::RootListIndex index);
void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(Operand with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
// ---------------------------------------------------------------------------
@@ -193,6 +194,9 @@ class MacroAssembler: public Assembler {
// Check whether a tagged smi is equal to a constant.
Condition CheckSmiEqualsConstant(Register src, int constant);
+ // Check whether a tagged smi is greater than or equal to a constant.
+ Condition CheckSmiGreaterEqualsConstant(Register src, int constant);
+
// Checks whether an 32-bit integer value is a valid for conversion
// to a smi.
Condition CheckInteger32ValidSmiValue(Register src);
@@ -216,6 +220,12 @@ class MacroAssembler: public Assembler {
// to the constant.
void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
+ // Jump to label if the value is a tagged smi with value greater than or equal
+ // to the constant.
+ void JumpIfSmiGreaterEqualsConstant(Register src,
+ int constant,
+                                      Label* on_greater_equals);
+
// Jump if either or both register are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
@@ -449,30 +459,30 @@ class MacroAssembler: public Assembler {
// and result_end have not yet been tagged as heap objects. If
// result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
- // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+ // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
@@ -527,14 +537,14 @@ class MacroAssembler: public Assembler {
void CallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of arguments.
void TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& ext, int result_size);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& ext, int result_size);
// ---------------------------------------------------------------------------
@@ -594,8 +604,16 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+ // at runtime, and puts it in the register rdi.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
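
The AllocateInNewSpace family above boils down to a bump-pointer sequence: load the current allocation top, advance it by the object size, compare against the space limit, and branch to gc_required on overflow. A minimal Python model of that sequence (the names here are illustrative, not V8 API):

    class NewSpace(object):
      def __init__(self, size):
        self.top = 0        # current allocation top
        self.limit = size   # end of the space

      def allocate_in_new_space(self, object_size):
        # Mirrors the macro: result gets the old top, result_end the new one.
        result = self.top
        result_end = result + object_size
        if result_end > self.limit:
          return None       # the macro would jump to gc_required here
        self.top = result_end
        return (result, result_end)

Per the comment above, when result_contains_top_on_entry is true the top is already in result, so scratch goes unused and must be no_reg.
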
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 184c166bc0..998c9095e7 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -28,6 +28,7 @@
#ifndef V8_X64_SIMULATOR_X64_H_
#define V8_X64_SIMULATOR_X64_H_
+#include "allocation.h"
// Since there is no simulator for the x64 architecture the only thing we can
// do is to call the entry directly.
@@ -35,15 +36,15 @@
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on x64 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
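
For comparison, the removed GENERATED_CODE_STACK_LIMIT macro derived a limit from the address of a C++ object on the stack, with a guard against unsigned wraparound, while the new SimulatorStack simply passes the C stack limit through, since generated x64 code runs on the C stack. A rough Python model of the two (addresses as plain integers, illustrative only):

    def old_generated_code_stack_limit(this_addr, headroom):
      # (this >= headroom ? this - headroom : 0) from the removed macro.
      return this_addr - headroom if this_addr >= headroom else 0

    def js_limit_from_c_limit(c_limit):
      # The replacement: generated code and C share one stack, one limit.
      return c_limit
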
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 741d4c3695..0994230073 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -1751,6 +1751,7 @@ Object* ConstructStubCompiler::CompileConstructStub(
// Load the initial map and verify that it is in fact a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
  // The Smi check below covers both a NULL pointer and a Smi.
+ ASSERT(kSmiTag == 0);
__ JumpIfSmi(rbx, &generic_stub_call);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
__ j(not_equal, &generic_stub_call);
@@ -1768,12 +1769,12 @@ Object* ConstructStubCompiler::CompileConstructStub(
// rbx: initial map
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateObjectInNewSpace(rcx,
- rdx,
- rcx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rcx,
+ rdx,
+ rcx,
+ no_reg,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
  // Allocated the JSObject; now initialize the fields and add the heap tag.
// rbx: initial map
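
The new ASSERT(kSmiTag == 0) records the invariant the preceding JumpIfSmi relies on: with a zero tag, both a NULL initial map (all zero bits) and any Smi pass the same low-bit test, so one branch filters out both. A tiny Python model, assuming a one-bit tag:

    K_SMI_TAG, K_SMI_TAG_MASK = 0, 1          # assumed one-bit tagging
    def is_smi(word):
      return (word & K_SMI_TAG_MASK) == K_SMI_TAG

    assert is_smi(0)               # NULL passes the Smi test too
    assert is_smi(42 << 1)         # a tagged Smi
    assert not is_smi(0xbee5 | 1)  # heap pointers carry a nonzero tag bit
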
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index 9996eeb0af..1235b13b2d 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -43,7 +43,14 @@ static Object* AllocateAfterFailures() {
NewSpace* new_space = Heap::new_space();
static const int kNewSpaceFillerSize = ByteArray::SizeFor(0);
while (new_space->Available() > kNewSpaceFillerSize) {
+ int available_before = new_space->Available();
CHECK(!Heap::AllocateByteArray(0)->IsFailure());
+ if (available_before == new_space->Available()) {
+ // It seems that we are avoiding new space allocations when
+ // allocation is forced, so no need to fill up new space
+ // in order to make the test harder.
+ break;
+ }
}
CHECK(!Heap::AllocateByteArray(100)->IsFailure());
CHECK(!Heap::AllocateFixedArray(100, NOT_TENURED)->IsFailure());
@@ -144,3 +151,65 @@ TEST(StressJS) {
CHECK_EQ(42, result->Int32Value());
env->Exit();
}
+
+
+// CodeRange test.
+// Tests memory management in a CodeRange by allocating and freeing blocks,
+// using a pseudorandom generator to choose block sizes geometrically
+// distributed between Page::kPageSize and 2^5 * Page::kPageSize, plus a
+// small random offset.
+// Ensure that the freed chunks are collected and reused by allocating (in
+// total) more than the size of the CodeRange.
+
+// This pseudorandom generator does not need to be particularly good.
+// Use the lower half of the V8::Random() generator.
+unsigned int Pseudorandom() {
+ static uint32_t lo = 2345;
+ lo = 18273 * (lo & 0xFFFF) + (lo >> 16); // Provably not 0.
+ return lo & 0xFFFF;
+}
+
+
+// Plain old data class. Represents a block of allocated memory.
+class Block {
+ public:
+ Block(void* base_arg, int size_arg)
+ : base(base_arg), size(size_arg) {}
+
+ void *base;
+ int size;
+};
+
+
+TEST(CodeRange) {
+ const int code_range_size = 16*MB;
+ CodeRange::Setup(code_range_size);
+ int current_allocated = 0;
+ int total_allocated = 0;
+ List<Block> blocks(1000);
+
+ while (total_allocated < 5 * code_range_size) {
+ if (current_allocated < code_range_size / 10) {
+ // Allocate a block.
+ // Geometrically distributed sizes, greater than Page::kPageSize.
+ size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
+ Pseudorandom() % 5000 + 1;
+ size_t allocated = 0;
+ void* base = CodeRange::AllocateRawMemory(requested, &allocated);
+ blocks.Add(Block(base, allocated));
+ current_allocated += allocated;
+ total_allocated += allocated;
+ } else {
+ // Free a block.
+ int index = Pseudorandom() % blocks.length();
+ CodeRange::FreeRawMemory(blocks[index].base, blocks[index].size);
+ current_allocated -= blocks[index].size;
+ if (index < blocks.length() - 1) {
+ blocks[index] = blocks.RemoveLast();
+ } else {
+ blocks.RemoveLast();
+ }
+ }
+ }
+
+ CodeRange::TearDown();
+}
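
The generator above is a 16-bit multiply-with-carry step, and the test draws block sizes from Page::kPageSize << (0..5) plus a small offset. The same logic in Python (Page::kPageSize assumed to be 8 KB here, purely for illustration):

    _STATE = [2345]
    def pseudorandom():
      # Same step as Pseudorandom() above; values stay within 32 bits.
      lo = 18273 * (_STATE[0] & 0xFFFF) + (_STATE[0] >> 16)
      _STATE[0] = lo
      return lo & 0xFFFF

    PAGE_SIZE = 8 * 1024  # assumed Page::kPageSize
    def random_block_size():
      # Geometric over PAGE_SIZE << 0 .. PAGE_SIZE << 5, plus 1..5000 bytes.
      return (PAGE_SIZE << (pseudorandom() % 6)) + pseudorandom() % 5000 + 1
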
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 80f91d39bf..2282c2d9b9 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,6 +31,7 @@
#include "api.h"
#include "compilation-cache.h"
+#include "execution.h"
#include "snapshot.h"
#include "platform.h"
#include "top.h"
@@ -7729,6 +7730,42 @@ THREADED_TEST(PixelArray) {
CHECK_EQ(1503, result->Int32Value());
result = CompileRun("pixels[1]");
CHECK_EQ(1, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = -i;"
+ "}"
+ "sum;");
+ CHECK_EQ(-28, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 0;"
+ "}"
+ "sum;");
+ CHECK_EQ(0, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 255;"
+ "}"
+ "sum;");
+ CHECK_EQ(8 * 255, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 256 + i;"
+ "}"
+ "sum;");
+ CHECK_EQ(2076, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = i;"
+ "}"
+ "sum;");
+ CHECK_EQ(28, result->Int32Value());
+
result = CompileRun("var sum = 0;"
"for (var i = 0; i < 8; i++) {"
" sum += pixels[i];"
@@ -7839,6 +7876,9 @@ THREADED_TEST(PixelArray) {
CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());
+ result = CompileRun("pixels[1] = 23;");
+ CHECK_EQ(23, result->Int32Value());
+
free(pixel_data);
}
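
The new pixel-array loops above pin down two behaviors at once: stores into a pixel array clamp to the byte range 0..255, while the value of the assignment expression itself is the unclamped right-hand side. That is why the -i loop sums to -28 even though every stored pixel ends up 0, and the 256 + i loop sums to 2076. In Python terms, for the integer values used here:

    def clamp_to_byte(v):
      return min(255, max(0, v))

    stored = [clamp_to_byte(-i) for i in range(8)]  # what the array keeps
    summed = sum(-i for i in range(8))              # what the JS sum sees
    assert stored == [0] * 8 and summed == -28
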
@@ -7873,9 +7913,87 @@ THREADED_TEST(StackTrace) {
}
-// Test that idle notification can be handled when V8 has not yet been
-// set up.
+// Test that idle notification can be handled and eventually returns true.
THREADED_TEST(IdleNotification) {
- for (int i = 0; i < 100; i++) v8::V8::IdleNotification(true);
- for (int i = 0; i < 100; i++) v8::V8::IdleNotification(false);
+ bool rv = false;
+ for (int i = 0; i < 100; i++) {
+ rv = v8::V8::IdleNotification();
+ if (rv)
+ break;
+ }
+ CHECK(rv == true);
+}
+
+
+static uint32_t* stack_limit;
+
+static v8::Handle<Value> GetStackLimitCallback(const v8::Arguments& args) {
+ stack_limit = reinterpret_cast<uint32_t*>(i::StackGuard::climit());
+ return v8::Undefined();
+}
+
+
+// Uses the address of a local variable to determine the stack top now.
+// Given a size, returns an address that is that far from the current
+// top of stack.
+static uint32_t* ComputeStackLimit(uint32_t size) {
+ uint32_t* answer = &size - (size / sizeof(size));
+ // If the size is very large and the stack is very near the bottom of
+ // memory then the calculation above may wrap around and give an address
+ // that is above the (downwards-growing) stack. In that case we return
+ // a very low address.
+ if (answer > &size) return reinterpret_cast<uint32_t*>(sizeof(size));
+ return answer;
+}
+
+
+TEST(SetResourceConstraints) {
+ static const int K = 1024;
+ uint32_t* set_limit = ComputeStackLimit(128 * K);
+
+ // Set stack limit.
+ v8::ResourceConstraints constraints;
+ constraints.set_stack_limit(set_limit);
+ CHECK(v8::SetResourceConstraints(&constraints));
+
+ // Execute a script.
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(GetStackLimitCallback);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("get_stack_limit"), fun);
+ CompileRun("get_stack_limit();");
+
+ CHECK(stack_limit == set_limit);
+}
+
+
+TEST(SetResourceConstraintsInThread) {
+ uint32_t* set_limit;
+ {
+ v8::Locker locker;
+ static const int K = 1024;
+ set_limit = ComputeStackLimit(128 * K);
+
+ // Set stack limit.
+ v8::ResourceConstraints constraints;
+ constraints.set_stack_limit(set_limit);
+ CHECK(v8::SetResourceConstraints(&constraints));
+
+ // Execute a script.
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(GetStackLimitCallback);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("get_stack_limit"), fun);
+ CompileRun("get_stack_limit();");
+
+ CHECK(stack_limit == set_limit);
+ }
+ {
+ v8::Locker locker;
+ CHECK(stack_limit == set_limit);
+ }
}
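
ComputeStackLimit above is careful about unsigned pointer arithmetic: subtracting the requested size from a stack address near the bottom of memory can wrap around and produce an address above the downward-growing stack, so the helper falls back to a very low address instead. A Python model with 32-bit wraparound made explicit (illustrative only):

    def compute_stack_limit(stack_top, size, ptr_bits=32):
      answer = (stack_top - size) % (2 ** ptr_bits)  # may wrap around
      if answer > stack_top:  # wrapped: limit would sit above the stack
        return 4              # sizeof(size) in the C++ original
      return answer
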
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 436084af37..1da363c2ae 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -4096,11 +4096,11 @@ v8::Handle<v8::Function> debugger_call_with_data;
// passed it throws an exception.
static const char* debugger_call_with_closure_source =
"var x = 3;"
- "function (exec_state) {"
+ "(function (exec_state) {"
" if (exec_state.y) return x - 1;"
" exec_state.y = x;"
" return exec_state.y"
- "}";
+ "})";
v8::Handle<v8::Function> debugger_call_with_closure;
// Function to retrieve the number of JavaScript frames by calling a JavaScript
@@ -4522,6 +4522,7 @@ TEST(DebuggerAgent) {
// with the client connected.
ok = i::Debugger::StartAgent("test", kPort2);
CHECK(ok);
+ i::Debugger::WaitForAgent();
i::Socket* client = i::OS::CreateSocket();
ok = client->Connect("localhost", port2_str);
CHECK(ok);
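
The closure-source fix above and several mjsunit changes below (debug-compile-event.js, mirror-script.js, regress-220.js, switch.js) all follow one pattern: a source string that begins with the function keyword parses as a function declaration, which requires a name, so the anonymous snippets are wrapped in parentheses to turn them into function expressions.
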
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index f8e5a6b755..b199507d70 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -74,13 +74,12 @@ TEST(ConstructorProfile) {
}
-static JSObjectsCluster AddHeapObjectToTree(
- JSObjectsRetainerTree* tree,
- i::String* constructor,
- int instance,
- JSObjectsCluster* ref1 = NULL,
- JSObjectsCluster* ref2 = NULL,
- JSObjectsCluster* ref3 = NULL) {
+static JSObjectsCluster AddHeapObjectToTree(JSObjectsRetainerTree* tree,
+ i::String* constructor,
+ int instance,
+ JSObjectsCluster* ref1 = NULL,
+ JSObjectsCluster* ref2 = NULL,
+ JSObjectsCluster* ref3 = NULL) {
JSObjectsCluster o(constructor, reinterpret_cast<i::Object*>(instance));
JSObjectsClusterTree* o_tree = new JSObjectsClusterTree();
JSObjectsClusterTree::Locator o_loc;
@@ -94,6 +93,16 @@ static JSObjectsCluster AddHeapObjectToTree(
}
+static void AddSelfReferenceToTree(JSObjectsRetainerTree* tree,
+ JSObjectsCluster* self_ref) {
+ JSObjectsRetainerTree::Locator loc;
+ CHECK(tree->Find(*self_ref, &loc));
+ JSObjectsClusterTree::Locator o_loc;
+ CHECK_NE(NULL, loc.value());
+ loc.value()->Insert(*self_ref, &o_loc);
+}
+
+
static inline void CheckEqualsHelper(const char* file, int line,
const char* expected_source,
const JSObjectsCluster& expected,
@@ -121,7 +130,7 @@ static inline void CheckNonEqualsHelper(const char* file, int line,
if (JSObjectsCluster::Compare(expected, value) == 0) {
i::HeapStringAllocator allocator;
i::StringStream stream(&allocator);
- stream.Add("# Expected: ");
+ stream.Add("# !Expected: ");
expected.DebugPrint(&stream);
stream.Add("\n# Found: ");
value.DebugPrint(&stream);
@@ -243,9 +252,11 @@ TEST(ClustersCoarserPathsTraversal) {
coarser.Process(&tree);
CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(o11));
CHECK_EQ(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(o12));
CHECK_EQ(coarser.GetCoarseEquivalent(o21), coarser.GetCoarseEquivalent(o22));
CHECK_NE(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(o21));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(p));
CHECK_EQ(coarser.GetCoarseEquivalent(p), coarser.GetCoarseEquivalent(q));
CHECK_EQ(coarser.GetCoarseEquivalent(q), coarser.GetCoarseEquivalent(r));
CHECK_NE(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(p));
@@ -253,6 +264,54 @@ TEST(ClustersCoarserPathsTraversal) {
}
+TEST(ClustersCoarserSelf) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
+
+ JSObjectsRetainerTree tree;
+
+ // On the following graph:
+ //
+ // p (self-referencing)
+ // <- o1 <-
+ // q (self-referencing) o
+ // <- o2 <-
+ // r (self-referencing)
+ //
+ // we expect that coarser will deduce equivalences: p ~ q ~ r, o1 ~ o2;
+
+ JSObjectsCluster o =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100);
+ JSObjectsCluster o1 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x110, &o);
+ JSObjectsCluster o2 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x120, &o);
+ JSObjectsCluster p =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x300, &o1);
+ AddSelfReferenceToTree(&tree, &p);
+ JSObjectsCluster q =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x310, &o1, &o2);
+ AddSelfReferenceToTree(&tree, &q);
+ JSObjectsCluster r =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x320, &o2);
+ AddSelfReferenceToTree(&tree, &r);
+
+ ClustersCoarser coarser;
+ coarser.Process(&tree);
+
+ CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(o1));
+ CHECK_EQ(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o2));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(p));
+ CHECK_EQ(coarser.GetCoarseEquivalent(p), coarser.GetCoarseEquivalent(q));
+ CHECK_EQ(coarser.GetCoarseEquivalent(q), coarser.GetCoarseEquivalent(r));
+ CHECK_NE(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(p));
+}
+
+
namespace {
class RetainerProfilePrinter : public RetainerHeapProfile::Printer {
@@ -322,7 +381,14 @@ TEST(RetainerProfile) {
}
RetainerProfilePrinter printer;
ret_profile.DebugPrintStats(&printer);
- CHECK_EQ("(global property);1,B;2,C;2", printer.GetRetainers("A"));
+ const char* retainers_of_a = printer.GetRetainers("A");
+ // The order of retainers is unspecified, so we check string length, and
+ // verify each retainer separately.
+ CHECK_EQ(static_cast<int>(strlen("(global property);1,B;2,C;2")),
+ static_cast<int>(strlen(retainers_of_a)));
+ CHECK(strstr(retainers_of_a, "(global property);1") != NULL);
+ CHECK(strstr(retainers_of_a, "B;2") != NULL);
+ CHECK(strstr(retainers_of_a, "C;2") != NULL);
CHECK_EQ("(global property);2", printer.GetRetainers("B"));
CHECK_EQ("(global property);1", printer.GetRetainers("C"));
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index dafd3aaad5..65ab50a04f 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -401,13 +401,6 @@ class TestSampler : public v8::internal::Sampler {
} // namespace
TEST(ProfMultipleThreads) {
- // V8 needs to be initialized before the first Locker
- // instantiation. Otherwise, Top::Initialize will reset
- // thread_id_ in ThreadTopLocal.
- v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
-
LoopingJsThread jsThread;
jsThread.Start();
LoopingNonJsThread nonJsThread;
diff --git a/deps/v8/test/cctest/test-sockets.cc b/deps/v8/test/cctest/test-sockets.cc
index a4b2285e88..822a23fa95 100644
--- a/deps/v8/test/cctest/test-sockets.cc
+++ b/deps/v8/test/cctest/test-sockets.cc
@@ -42,6 +42,7 @@ void SocketListenerThread::Run() {
// Create the server socket and bind it to the requested port.
server_ = OS::CreateSocket();
+ server_->SetReuseAddress(true);
CHECK(server_ != NULL);
ok = server_->Bind(port_);
CHECK(ok);
diff --git a/deps/v8/test/mjsunit/class-of-builtins.js b/deps/v8/test/mjsunit/class-of-builtins.js
index 40c958c624..59fefffa75 100644
--- a/deps/v8/test/mjsunit/class-of-builtins.js
+++ b/deps/v8/test/mjsunit/class-of-builtins.js
@@ -35,7 +35,7 @@ var funs = {
Boolean: [ Boolean ],
Number: [ Number ],
Date: [ Date ],
- RegExp: [ RegExp ],
+ RegExp: [ RegExp ],
Error: [ Error, TypeError, RangeError, SyntaxError, ReferenceError, EvalError, URIError ]
}
for (f in funs) {
diff --git a/deps/v8/test/mjsunit/debug-compile-event.js b/deps/v8/test/mjsunit/debug-compile-event.js
index c346f76e81..4804ac772d 100644
--- a/deps/v8/test/mjsunit/debug-compile-event.js
+++ b/deps/v8/test/mjsunit/debug-compile-event.js
@@ -102,10 +102,10 @@ Debug.setListener(listener);
// Compile different sources.
compileSource('a=1');
-compileSource('function(){}');
+compileSource('(function(){})');
compileSource('eval("a=2")');
source_count++; // Using eval causes additional compilation event.
-compileSource('eval("eval(\'function(){return a;}\')")');
+compileSource('eval("eval(\'(function(){return a;})\')")');
source_count += 2; // Using eval causes additional compilation event.
compileSource('JSON.parse("{a:1,b:2}")');
source_count++; // Using JSON.parse causes additional compilation event.
diff --git a/deps/v8/test/mjsunit/invalid-lhs.js b/deps/v8/test/mjsunit/invalid-lhs.js
index bbd19f20b2..ef63add775 100644
--- a/deps/v8/test/mjsunit/invalid-lhs.js
+++ b/deps/v8/test/mjsunit/invalid-lhs.js
@@ -25,9 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test that we get exceptions for invalid left-hand sides. Also
-// tests that if the invalid left-hand side is a function call, the
-// exception is delayed until runtime.
+// Test that we get exceptions for invalid left-hand sides. The
+// exceptions are delayed until runtime.
// Normal assignments:
assertThrows("12 = 12");
@@ -57,12 +56,10 @@ assertDoesNotThrow("if (false) for (eval('var x') = 1;;) print(12);");
// Assignments to 'this'.
assertThrows("this = 42");
-assertThrows("function f() { this = 12; }");
-assertThrows("for (this in Array) ;");
+assertDoesNotThrow("function f() { this = 12; }");
+assertThrows("for (this in {x:3, y:4, z:5}) ;");
assertThrows("for (this = 0;;) ;");
assertThrows("this++");
assertThrows("++this");
assertThrows("this--");
assertThrows("--this");
-
-
diff --git a/deps/v8/test/mjsunit/mirror-script.js b/deps/v8/test/mjsunit/mirror-script.js
index 9b67b9ba09..3208f16c38 100644
--- a/deps/v8/test/mjsunit/mirror-script.js
+++ b/deps/v8/test/mjsunit/mirror-script.js
@@ -85,16 +85,16 @@ function testScriptMirror(f, file_name, file_lines, type, compilation_type,
// Test the script mirror for different functions.
testScriptMirror(function(){}, 'mirror-script.js', 100, 2, 0);
testScriptMirror(Math.sin, 'native math.js', -1, 0, 0);
-testScriptMirror(eval('function(){}'), null, 1, 2, 1, 'function(){}', 87);
-testScriptMirror(eval('function(){\n }'), null, 2, 2, 1, 'function(){\n }', 88);
+testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
+testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
testScriptMirror(%CompileString("({a:1,b:2})", true), null, 1, 2, 2, '({a:1,b:2})');
testScriptMirror(%CompileString("({a:1,\n b:2})", true), null, 2, 2, 2, '({a:1,\n b:2})');
// Test taking slices of source.
-var mirror = debug.MakeMirror(eval('function(){\n 1;\n}')).script();
-assertEquals('function(){\n', mirror.sourceSlice(0, 1).sourceText());
+var mirror = debug.MakeMirror(eval('(function(){\n 1;\n})')).script();
+assertEquals('(function(){\n', mirror.sourceSlice(0, 1).sourceText());
assertEquals(' 1;\n', mirror.sourceSlice(1, 2).sourceText());
-assertEquals('}', mirror.sourceSlice(2, 3).sourceText());
-assertEquals('function(){\n 1;\n', mirror.sourceSlice(0, 2).sourceText());
-assertEquals(' 1;\n}', mirror.sourceSlice(1, 3).sourceText());
-assertEquals('function(){\n 1;\n}', mirror.sourceSlice(0, 3).sourceText());
+assertEquals('})', mirror.sourceSlice(2, 3).sourceText());
+assertEquals('(function(){\n 1;\n', mirror.sourceSlice(0, 2).sourceText());
+assertEquals(' 1;\n})', mirror.sourceSlice(1, 3).sourceText());
+assertEquals('(function(){\n 1;\n})', mirror.sourceSlice(0, 3).sourceText());
diff --git a/deps/v8/test/mjsunit/regress/regress-220.js b/deps/v8/test/mjsunit/regress/regress-220.js
index 416aa4166e..32c6471c48 100644
--- a/deps/v8/test/mjsunit/regress/regress-220.js
+++ b/deps/v8/test/mjsunit/regress/regress-220.js
@@ -28,4 +28,4 @@
function foo(f) { eval(f); }
// Ensure that compiling a declaration of a function does not crash.
-foo("function (x) { with ({x: []}) function x(){} }");
+foo("(function (x) { with ({x: []}) function x(){} })");
diff --git a/deps/v8/test/mjsunit/switch.js b/deps/v8/test/mjsunit/switch.js
index 4044490a1f..180f994a8e 100644
--- a/deps/v8/test/mjsunit/switch.js
+++ b/deps/v8/test/mjsunit/switch.js
@@ -269,7 +269,7 @@ assertEquals("A", f7((170/16)-(170%16/16)), "0-1-switch.heapnum");
function makeVeryLong(length) {
- var res = "function() {\n" +
+ var res = "(function () {\n" +
" var res = 0;\n" +
" for (var i = 0; i <= " + length + "; i++) {\n" +
" switch(i) {\n";
@@ -280,7 +280,7 @@ function makeVeryLong(length) {
" }\n" +
" }\n" +
" return res;\n" +
- "}";
+ "})";
return eval(res);
}
var verylong_size = 1000;
diff --git a/deps/v8/test/mjsunit/third_party/object-keys.js b/deps/v8/test/mjsunit/third_party/object-keys.js
index 7486b8fb8d..999ce704de 100644
--- a/deps/v8/test/mjsunit/third_party/object-keys.js
+++ b/deps/v8/test/mjsunit/third_party/object-keys.js
@@ -51,6 +51,8 @@ x.__proto__ = [1, 2, 3];
assertEquals(Object.keys(x), []);
assertEquals(Object.keys(function () {}), []);
+assertEquals('string', typeof(Object.keys([1])[0]));
+
function argsTest(a, b, c) {
assertEquals([0, 1, 2], Object.keys(arguments));
}
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 8af8f22bef..46a00f4c6f 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -87,7 +87,7 @@
'-O3',
],
'conditions': [
- [ 'gcc_version=="44"', {
+ [ 'gcc_version==44', {
'cflags': [
# Avoid gcc 4.4 strict aliasing issues in dtoa.c
'-fno-strict-aliasing',
@@ -219,8 +219,6 @@
'../../src/builtins.cc',
'../../src/builtins.h',
'../../src/bytecodes-irregexp.h',
- '../../src/cfg.cc',
- '../../src/cfg.h',
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
'../../src/checks.cc',
@@ -390,7 +388,6 @@
'../../src/arm/assembler-arm.cc',
'../../src/arm/assembler-arm.h',
'../../src/arm/builtins-arm.cc',
- '../../src/arm/cfg-arm.cc',
'../../src/arm/codegen-arm.cc',
'../../src/arm/codegen-arm.h',
'../../src/arm/constants-arm.h',
@@ -421,7 +418,6 @@
'../../src/ia32/assembler-ia32.cc',
'../../src/ia32/assembler-ia32.h',
'../../src/ia32/builtins-ia32.cc',
- '../../src/ia32/cfg-ia32.cc',
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
@@ -450,7 +446,6 @@
'../../src/x64/assembler-x64.cc',
'../../src/x64/assembler-x64.h',
'../../src/x64/builtins-x64.cc',
- '../../src/x64/cfg-x64.cc',
'../../src/x64/codegen-x64.cc',
'../../src/x64/codegen-x64.h',
'../../src/x64/cpu-x64.cc',
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index cae39e897c..2b7dbdfbae 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -52,20 +52,6 @@ def RemoveCommentsAndTrailingWhitespace(lines):
return lines
-def CompressScript(lines, do_jsmin):
- # If we're not expecting this code to be user visible, we can run it through
- # a more aggressive minifier.
- if do_jsmin:
- return jsmin.jsmin(lines)
-
- # Remove stuff from the source that we don't want to appear when
- # people print the source code using Function.prototype.toString().
- # Note that we could easily compress the scripts mode but don't
- # since we want it to remain readable.
- lines = RemoveCommentsAndTrailingWhitespace(lines)
- return lines
-
-
def ReadFile(filename):
file = open(filename, "rt")
try:
@@ -295,16 +281,18 @@ def JS2C(source, target, env):
# Build source code lines
source_lines = [ ]
+
+ minifier = jsmin.JavaScriptMinifier()
+
source_lines_empty = []
for module in modules:
filename = str(module)
delay = filename.endswith('-delay.js')
lines = ReadFile(filename)
- do_jsmin = lines.find('// jsminify this file, js2c: jsmin') != -1
lines = ExpandConstants(lines, consts)
lines = ExpandMacros(lines, macros)
Validate(lines, filename)
- lines = CompressScript(lines, do_jsmin)
+ lines = minifier.JSMinify(lines)
data = ToCArray(lines)
id = (os.path.split(filename)[1])[:-3]
if delay: id = id[:-6]
diff --git a/deps/v8/tools/jsmin.py b/deps/v8/tools/jsmin.py
index ae7581413a..fd1abe48fd 100644
--- a/deps/v8/tools/jsmin.py
+++ b/deps/v8/tools/jsmin.py
@@ -1,218 +1,278 @@
-#!/usr/bin/python
-
-# This code is original from jsmin by Douglas Crockford, it was translated to
-# Python by Baruch Even. The original code had the following copyright and
-# license.
-#
-# /* jsmin.c
-# 2007-05-22
-#
-# Copyright (c) 2002 Douglas Crockford (www.crockford.com)
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy of
-# this software and associated documentation files (the "Software"), to deal in
-# the Software without restriction, including without limitation the rights to
-# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-# of the Software, and to permit persons to whom the Software is furnished to do
-# so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# The Software shall be used for Good, not Evil.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-# */
-
-from StringIO import StringIO
-
-def jsmin(js):
- ins = StringIO(js)
- outs = StringIO()
- JavascriptMinify().minify(ins, outs)
- str = outs.getvalue()
- if len(str) > 0 and str[0] == '\n':
- str = str[1:]
- return str
-
-def isAlphanum(c):
- """return true if the character is a letter, digit, underscore,
- dollar sign, or non-ASCII character.
- """
- return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or
- (c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\' or (c is not None and ord(c) > 126));
-
-class UnterminatedComment(Exception):
- pass
-
-class UnterminatedStringLiteral(Exception):
- pass
-
-class UnterminatedRegularExpression(Exception):
- pass
-
-class JavascriptMinify(object):
-
- def _outA(self):
- self.outstream.write(self.theA)
- def _outB(self):
- self.outstream.write(self.theB)
-
- def _get(self):
- """return the next character from stdin. Watch out for lookahead. If
- the character is a control character, translate it to a space or
- linefeed.
- """
- c = self.theLookahead
- self.theLookahead = None
- if c == None:
- c = self.instream.read(1)
- if c >= ' ' or c == '\n':
- return c
- if c == '': # EOF
- return '\000'
- if c == '\r':
- return '\n'
- return ' '
-
- def _peek(self):
- self.theLookahead = self._get()
- return self.theLookahead
-
- def _next(self):
- """get the next character, excluding comments. peek() is used to see
- if an unescaped '/' is followed by a '/' or '*'.
- """
- c = self._get()
- if c == '/' and self.theA != '\\':
- p = self._peek()
- if p == '/':
- c = self._get()
- while c > '\n':
- c = self._get()
- return c
- if p == '*':
- c = self._get()
- while 1:
- c = self._get()
- if c == '*':
- if self._peek() == '/':
- self._get()
- return ' '
- if c == '\000':
- raise UnterminatedComment()
-
- return c
-
- def _action(self, action):
- """do something! What you do is determined by the argument:
- 1 Output A. Copy B to A. Get the next B.
- 2 Copy B to A. Get the next B. (Delete A).
- 3 Get the next B. (Delete B).
- action treats a string as a single character. Wow!
- action recognizes a regular expression if it is preceded by ( or , or =.
- """
- if action <= 1:
- self._outA()
-
- if action <= 2:
- self.theA = self.theB
- if self.theA == "'" or self.theA == '"':
- while 1:
- self._outA()
- self.theA = self._get()
- if self.theA == self.theB:
- break
- if self.theA <= '\n':
- raise UnterminatedStringLiteral()
- if self.theA == '\\':
- self._outA()
- self.theA = self._get()
-
-
- if action <= 3:
- self.theB = self._next()
- if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
- self.theA == '=' or self.theA == ':' or
- self.theA == '[' or self.theA == '?' or
- self.theA == '!' or self.theA == '&' or
- self.theA == '|' or self.theA == ';' or
- self.theA == '{' or self.theA == '}' or
- self.theA == '\n'):
- self._outA()
- self._outB()
- while 1:
- self.theA = self._get()
- if self.theA == '/':
- break
- elif self.theA == '\\':
- self._outA()
- self.theA = self._get()
- elif self.theA <= '\n':
- raise UnterminatedRegularExpression()
- self._outA()
- self.theB = self._next()
-
-
- def _jsmin(self):
- """Copy the input to the output, deleting the characters which are
- insignificant to JavaScript. Comments will be removed. Tabs will be
- replaced with spaces. Carriage returns will be replaced with linefeeds.
- Most spaces and linefeeds will be removed.
- """
- self.theA = '\n'
- self._action(3)
-
- while self.theA != '\000':
- if self.theA == ' ':
- if isAlphanum(self.theB):
- self._action(1)
- else:
- self._action(2)
- elif self.theA == '\n':
- if self.theB in ['{', '[', '(', '+', '-']:
- self._action(1)
- elif self.theB == ' ':
- self._action(3)
- else:
- if isAlphanum(self.theB):
- self._action(1)
- else:
- self._action(2)
- else:
- if self.theB == ' ':
- if isAlphanum(self.theA):
- self._action(1)
- else:
- self._action(3)
- elif self.theB == '\n':
- if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
- self._action(1)
- else:
- if isAlphanum(self.theA):
- self._action(1)
- else:
- self._action(3)
- else:
- self._action(1)
-
- def minify(self, instream, outstream):
- self.instream = instream
- self.outstream = outstream
- self.theA = '\n'
- self.theB = None
- self.theLookahead = None
-
- self._jsmin()
- self.instream.close()
-
-if __name__ == '__main__':
- import sys
- jsm = JavascriptMinify()
- jsm.minify(sys.stdin, sys.stdout)
+#!/usr/bin/python2.4
+
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A JavaScript minifier.
+
+It is far from being a complete JS parser, so there are many valid
+JavaScript programs that will be ruined by it. Another strangeness is that
+it accepts $ and % as parts of identifiers. It doesn't merge lines or strip
+out blank lines in order to ease debugging. Variables at the top scope are
+properties of the global object so we can't rename them. It is assumed that
+you introduce variables with var as if JavaScript followed C++ scope rules
+around curly braces, so the declaration must be above the first use.
+
+Use as:
+import jsmin
+minifier = jsmin.JavaScriptMinifier()
+program1 = minifier.JSMinify(program1)
+program2 = minifier.JSMinify(program2)
+"""
+
+import re
+
+
+class JavaScriptMinifier(object):
+ """An object that you can feed code snippets to to get them minified."""
+
+ def __init__(self):
+ # We prepopulate the list of identifiers that shouldn't be used. These
+ # short language keywords could otherwise be used by the script as variable
+ # names.
+ self.seen_identifiers = {"do": True, "in": True}
+ self.identifier_counter = 0
+ self.in_comment = False
+ self.map = {}
+ self.nesting = 0
+
+ def LookAtIdentifier(self, m):
+ """Records identifiers or keywords that we see in use.
+
+ (So we can avoid renaming variables to these strings.)
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ Nothing.
+ """
+ identifier = m.group(1)
+ self.seen_identifiers[identifier] = True
+
+ def Push(self):
+ """Called when we encounter a '{'."""
+ self.nesting += 1
+
+ def Pop(self):
+ """Called when we encounter a '}'."""
+ self.nesting -= 1
+ # We treat each top-level opening brace as a single scope that can span
+ # several sets of nested braces.
+ if self.nesting == 0:
+ self.map = {}
+ self.identifier_counter = 0
+
+ def Declaration(self, m):
+ """Rewrites bits of the program selected by a regexp.
+
+ These can be curly braces, literal strings, function declarations and var
+ declarations. (These last two must be on one line including the opening
+ curly brace of the function for their variables to be renamed).
+
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ The string that should replace the match in the rewritten program.
+ """
+ matched_text = m.group(0)
+ if matched_text == "{":
+ self.Push()
+ return matched_text
+ if matched_text == "}":
+ self.Pop()
+ return matched_text
+ if re.match("[\"'/]", matched_text):
+ return matched_text
+ m = re.match(r"var ", matched_text)
+ if m:
+ var_names = matched_text[m.end():]
+ var_names = re.split(r",", var_names)
+ return "var " + ",".join(map(self.FindNewName, var_names))
+ m = re.match(r"(function\b[^(]*)\((.*)\)\{$", matched_text)
+ if m:
+ up_to_args = m.group(1)
+ args = m.group(2)
+ args = re.split(r",", args)
+ self.Push()
+ return up_to_args + "(" + ",".join(map(self.FindNewName, args)) + "){"
+
+ if matched_text in self.map:
+ return self.map[matched_text]
+
+ return matched_text
+
+ def CharFromNumber(self, number):
+ """A single-digit base-52 encoding using a-zA-Z."""
+ if number < 26:
+ return chr(number + 97)
+ number -= 26
+ return chr(number + 65)
+
+ def FindNewName(self, var_name):
+ """Finds a new 1-character or 2-character name for a variable.
+
+ Enters it into the mapping table for this scope.
+
+ Args:
+ var_name: The name of the variable before renaming.
+
+ Returns:
+ The new name of the variable.
+ """
+ new_identifier = ""
+ # Variable names that end in _ are member variables of the global object,
+ # so they can be visible from code in a different scope. We leave them
+ # alone.
+ if var_name in self.map:
+ return self.map[var_name]
+ if self.nesting == 0:
+ return var_name
+ while True:
+ identifier_first_char = self.identifier_counter % 52
+ identifier_second_char = self.identifier_counter / 52
+ new_identifier = self.CharFromNumber(identifier_first_char)
+ if identifier_second_char != 0:
+ new_identifier = (
+ self.CharFromNumber(identifier_second_char - 1) + new_identifier)
+ self.identifier_counter += 1
+ if not new_identifier in self.seen_identifiers:
+ break
+
+ self.map[var_name] = new_identifier
+ return new_identifier
+
+ def RemoveSpaces(self, m):
+ """Returns literal strings unchanged, replaces other inputs with group 2.
+
+ Other inputs are replaced with the contents of capture 1. This is either
+ a single space or an empty string.
+
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ The string that should be inserted instead of the matched text.
+ """
+ entire_match = m.group(0)
+ replacement = m.group(1)
+ if re.match(r"'.*'$", entire_match):
+ return entire_match
+ if re.match(r'".*"$', entire_match):
+ return entire_match
+ if re.match(r"/.+/$", entire_match):
+ return entire_match
+ return replacement
+
+ def JSMinify(self, text):
+ """The main entry point. Takes a text and returns a compressed version.
+
+ The compressed version hopefully does the same thing. Line breaks are
+ preserved.
+
+ Args:
+ text: The text of the code snippet as a multiline string.
+
+ Returns:
+ The compressed text of the code snippet as a multiline string.
+ """
+ new_lines = []
+ for line in re.split(r"\n", text):
+ line = line.replace("\t", " ")
+ if self.in_comment:
+ m = re.search(r"\*/", line)
+ if m:
+ line = line[m.end():]
+ self.in_comment = False
+ else:
+ new_lines.append("")
+ continue
+
+ if not self.in_comment:
+ line = re.sub(r"/\*.*?\*/", " ", line)
+ line = re.sub(r"//.*", "", line)
+ m = re.search(r"/\*", line)
+ if m:
+ line = line[:m.start()]
+ self.in_comment = True
+
+ # Strip leading and trailing spaces.
+ line = re.sub(r"^ +", "", line)
+ line = re.sub(r" +$", "", line)
+ # A regexp that matches a literal string surrounded by "double quotes".
+ # This regexp can handle embedded backslash-escaped characters including
+ # embedded backslash-escaped double quotes.
+ double_quoted_string = r'"(?:[^"\\]|\\.)*"'
+ # A regexp that matches a literal string surrounded by 'single quotes'.
+ single_quoted_string = r"'(?:[^'\\]|\\.)*'"
+ # A regexp that matches a regexp literal surrounded by /slashes/.
+ slash_quoted_regexp = r"/(?:[^/\\]|\\.)+/"
+ # Replace multiple spaces with a single space.
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ "( )+"]),
+ self.RemoveSpaces,
+ line)
+ # Strip single spaces unless they have an identifier character both before
+ # and after the space. % and $ are counted as identifier characters.
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ r"(?<![a-zA-Z_0-9$%]) | (?![a-zA-Z_0-9$%])()"]),
+ self.RemoveSpaces,
+ line)
+ # Collect keywords and identifiers that are already in use.
+ if self.nesting == 0:
+ re.sub(r"([a-zA-Z0-9_$%]+)", self.LookAtIdentifier, line)
+ function_declaration_regexp = (
+ r"\bfunction" # Function definition keyword...
+ r"( [\w$%]+)?" # ...optional function name...
+ r"\([\w$%,]+\)\{") # ...argument declarations.
+ # Unfortunately the keyword-value syntax { key:value } makes the key look
+ # like a variable where in fact it is a literal string. We use the
+ # presence or absence of a question mark to try to distinguish between
+ # this case and the ternary operator: "condition ? iftrue : iffalse".
+ if re.search(r"\?", line):
+ block_trailing_colon = r""
+ else:
+ block_trailing_colon = r"(?![:\w$%])"
+ # Variable use. Cannot follow a period or precede a colon.
+ variable_use_regexp = r"(?<![.\w$%])[\w$%]+" + block_trailing_colon
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ r"\{", # Curly braces.
+ r"\}",
+ r"\bvar [\w$%,]+", # var declarations.
+ function_declaration_regexp,
+ variable_use_regexp]),
+ self.Declaration,
+ line)
+ new_lines.append(line)
+
+ return "\n".join(new_lines) + "\n"
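
The docstring shows the intended entry point; below is a short, runnable illustration of the renaming scheme: FindNewName hands out base-52 names (a..z, then A..Z, then two-character names) inside a braced scope, skipping identifiers already seen, and JSMinify ties it together. The exact minified output is not guaranteed byte-for-byte; this is a sketch of the expected behavior:

    import jsmin

    m = jsmin.JavaScriptMinifier()
    print m.JSMinify(
        "function add(first, second) { var total = first + second;"
        " return total; }\n")
    # Roughly: function add(a,b){var c=a+b;return c;}

    seq = jsmin.JavaScriptMinifier()
    seq.Push()  # enter a scope; top-level names are never renamed
    names = [seq.FindNewName("v%d" % i) for i in range(53)]
    assert names[0] == "a" and names[26] == "A" and names[52] == "aa"
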
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj
index f402e8b6e5..7a013c0c13 100644
--- a/deps/v8/tools/visual_studio/v8_base.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base.vcproj
@@ -237,18 +237,6 @@
>
</File>
<File
- RelativePath="..\..\src\ia32\cfg-ia32.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
index f0ba07a674..abdb418183 100644
--- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -237,18 +237,6 @@
>
</File>
<File
- RelativePath="..\..\src\arm\cfg-arm.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
index d403da0ecd..7b8b4d35cd 100644
--- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -237,18 +237,6 @@
>
</File>
<File
- RelativePath="..\..\src\x64\cfg-x64.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>