-rw-r--r-- deps/v8/ChangeLog | 45
-rw-r--r-- deps/v8/include/v8.h | 7
-rw-r--r-- deps/v8/src/api.cc | 52
-rw-r--r-- deps/v8/src/apinatives.js | 12
-rw-r--r-- deps/v8/src/arm/full-codegen-arm.cc | 2
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.cc | 4
-rw-r--r-- deps/v8/src/arm/stub-cache-arm.cc | 15
-rw-r--r-- deps/v8/src/code-stubs.cc | 74
-rw-r--r-- deps/v8/src/code-stubs.h | 60
-rw-r--r-- deps/v8/src/d8.cc | 40
-rw-r--r-- deps/v8/src/debug.cc | 3
-rw-r--r-- deps/v8/src/extensions/experimental/number-format.cc | 18
-rw-r--r-- deps/v8/src/factory.cc | 8
-rw-r--r-- deps/v8/src/factory.h | 2
-rw-r--r-- deps/v8/src/flag-definitions.h | 3
-rw-r--r-- deps/v8/src/full-codegen.cc | 19
-rw-r--r-- deps/v8/src/full-codegen.h | 53
-rw-r--r-- deps/v8/src/handles.cc | 56
-rw-r--r-- deps/v8/src/handles.h | 14
-rw-r--r-- deps/v8/src/heap-inl.h | 14
-rw-r--r-- deps/v8/src/heap.cc | 32
-rw-r--r-- deps/v8/src/heap.h | 10
-rw-r--r-- deps/v8/src/hydrogen-instructions.cc | 58
-rw-r--r-- deps/v8/src/hydrogen-instructions.h | 52
-rw-r--r-- deps/v8/src/hydrogen.cc | 122
-rw-r--r-- deps/v8/src/hydrogen.h | 10
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 180
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc | 1
-rw-r--r-- deps/v8/src/ia32/cpu-ia32.cc | 5
-rw-r--r-- deps/v8/src/ia32/full-codegen-ia32.cc | 131
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.cc | 157
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.cc | 11
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.h | 5
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 7
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h | 3
-rw-r--r-- deps/v8/src/ia32/stub-cache-ia32.cc | 8
-rw-r--r-- deps/v8/src/ic.cc | 37
-rw-r--r-- deps/v8/src/ic.h | 12
-rw-r--r-- deps/v8/src/json-parser.h | 3
-rw-r--r-- deps/v8/src/log-utils.cc | 2
-rw-r--r-- deps/v8/src/log.cc | 1
-rw-r--r-- deps/v8/src/macros.py | 3
-rw-r--r-- deps/v8/src/messages.js | 1
-rw-r--r-- deps/v8/src/mips/full-codegen-mips.cc | 2
-rw-r--r-- deps/v8/src/mips/stub-cache-mips.cc | 15
-rw-r--r-- deps/v8/src/objects-inl.h | 70
-rw-r--r-- deps/v8/src/objects-printer.cc | 33
-rw-r--r-- deps/v8/src/objects.cc | 482
-rw-r--r-- deps/v8/src/objects.h | 116
-rw-r--r-- deps/v8/src/parser.cc | 58
-rw-r--r-- deps/v8/src/platform-cygwin.cc | 6
-rw-r--r-- deps/v8/src/platform-linux.cc | 38
-rw-r--r-- deps/v8/src/platform-posix.cc | 6
-rw-r--r-- deps/v8/src/platform-win32.cc | 6
-rw-r--r-- deps/v8/src/platform.h | 3
-rw-r--r-- deps/v8/src/proxy.js | 4
-rw-r--r-- deps/v8/src/runtime.cc | 88
-rw-r--r-- deps/v8/src/runtime.h | 1
-rw-r--r-- deps/v8/src/scopes.h | 16
-rw-r--r-- deps/v8/src/spaces.cc | 1
-rw-r--r-- deps/v8/src/third_party/valgrind/valgrind.h | 2768
-rw-r--r-- deps/v8/src/type-info.cc | 7
-rw-r--r-- deps/v8/src/type-info.h | 5
-rw-r--r-- deps/v8/src/utils.h | 25
-rw-r--r-- deps/v8/src/v8natives.js | 103
-rw-r--r-- deps/v8/src/version.cc | 4
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 177
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc | 1
-rw-r--r-- deps/v8/src/x64/cpu-x64.cc | 3
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.cc | 150
-rw-r--r-- deps/v8/src/x64/lithium-x64.cc | 6
-rw-r--r-- deps/v8/src/x64/stub-cache-x64.cc | 7
-rw-r--r-- deps/v8/test/benchmarks/testcfg.py | 2
-rw-r--r-- deps/v8/test/cctest/SConscript | 1
-rw-r--r-- deps/v8/test/cctest/cctest.gyp | 1
-rw-r--r-- deps/v8/test/cctest/test-api.cc | 114
-rw-r--r-- deps/v8/test/cctest/test-ast.cc | 11
-rw-r--r-- deps/v8/test/cctest/test-compiler.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-dictionary.cc | 85
-rw-r--r-- deps/v8/test/cctest/test-list.cc | 13
-rw-r--r-- deps/v8/test/cctest/test-serialize.cc | 8
-rw-r--r-- deps/v8/test/es5conform/testcfg.py | 2
-rw-r--r-- deps/v8/test/message/testcfg.py | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-lbranch-double.js | 40
-rw-r--r-- deps/v8/test/mjsunit/function-names.js | 2
-rw-r--r-- deps/v8/test/mjsunit/harmony/proxies.js | 217
-rw-r--r-- deps/v8/test/mjsunit/math-floor.js | 27
-rw-r--r-- deps/v8/test/mjsunit/math-round.js | 17
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1563.js (renamed from deps/v8/test/mjsunit/regress/regress-1341167.js) | 21
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1582.js | 47
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-91008.js | 43
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-91010.js (renamed from deps/v8/test/mjsunit/execScript-case-insensitive.js) | 18
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-91013.js | 51
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-91120.js | 48
-rw-r--r-- deps/v8/test/mjsunit/scope-calls-eval.js | 65
-rw-r--r-- deps/v8/test/mjsunit/testcfg.py | 2
-rw-r--r-- deps/v8/test/mjsunit/unbox-double-arrays.js | 63
-rw-r--r-- deps/v8/test/mozilla/testcfg.py | 2
-rw-r--r-- deps/v8/test/sputnik/testcfg.py | 2
-rw-r--r-- deps/v8/test/test262/testcfg.py | 2
-rw-r--r-- deps/v8/tools/oom_dump/README | 4
-rwxr-xr-x deps/v8/tools/test.py | 2
102 files changed, 4377 insertions, 2090 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 6901729b01..643dd368b7 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,8 +1,51 @@
+2011-08-03: Version 3.5.3
+
+ MIPS: Port of fix to ClassOf check from ARM.
+ Patch from Paul Lind <plind44@gmail.com>.
+
+ Stopped using mprotect on Cygwin.
+ Avoided uninitialized member warning on gcc 4.3.4
+ Both patches by Bert Belder.
+
+ Bug fixes and performance improvements on all platforms.
+
+
+2011-08-01: Version 3.5.2
+
+ Performance improvements on all platforms.
+
+
+2011-07-28: Version 3.5.1
+
+ Fixed setting the readonly flag on the prototype property using the
+ API call FunctionTemplate::SetPrototypeAttributes (issue 1539).
+
+ Changed the tools/test.py script to use d8 instead of shell for
+ testing.
+
+ Fixed crash in ToBooleanStub when GC happens during invocation.
+
+ Enabled automatic unboxing of double arrays.
+
+ Performance improvements on all platforms.
+
+
+2011-07-25: Version 3.5.0
+
+ Implemented Object.prototype.{hasOwnProperty, propertyIsEnumerable} for
+ proxies.
+
+ Removed logging to memory support.
+
+ Bugfixes and performance work.
+
+
2011-07-20: Version 3.4.14
Fix the debugger for strict-mode functions. (Chromium issue 89236)
- Add GetPropertyAttribute method for Object in the API. (Patch by Peter Varga)
+ Add GetPropertyAttribute method for Object in the API. (Patch by
+ Peter Varga)
Fix -Wunused-but-set-variable for gcc-4.6 on x64. (Issue 1291)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index f4f81e4c72..fa597129e1 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -2231,11 +2231,10 @@ class V8EXPORT FunctionTemplate : public Template {
void SetHiddenPrototype(bool value);
/**
- * Sets the property attributes of the 'prototype' property of functions
- * created from this FunctionTemplate. Can be any combination of ReadOnly,
- * DontEnum and DontDelete.
+ * Sets the ReadOnly flag in the attributes of the 'prototype' property
+ * of functions created from this FunctionTemplate to true.
*/
- void SetPrototypeAttributes(int attributes);
+ void ReadOnlyPrototype();
/**
* Returns true if the given object is an instance of this function
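Note on the header change above: the general SetPrototypeAttributes(int) is replaced by the narrower ReadOnlyPrototype(). A minimal embedder sketch of the new call, assuming a v8 3.5-era API with an entered Context (the variable names are illustrative, not from this commit):

    // Sketch only: mark 'prototype' read-only on functions created from a
    // template; this replaces SetPrototypeAttributes(v8::ReadOnly).
    v8::HandleScope scope;
    v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New();
    tmpl->ReadOnlyPrototype();
    // Requires a current Context:
    v8::Local<v8::Function> fn = tmpl->GetFunction();
    // Assigning to fn's 'prototype' property is now ignored
    // (or throws in strict mode).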
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index b0e977564b..fa2c88c03b 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -877,7 +877,6 @@ static void InitializeFunctionTemplate(
i::Handle<i::FunctionTemplateInfo> info) {
info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
info->set_flag(0);
- info->set_prototype_attributes(i::Smi::FromInt(v8::None));
}
@@ -1100,14 +1099,13 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
}
-void FunctionTemplate::SetPrototypeAttributes(int attributes) {
+void FunctionTemplate::ReadOnlyPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
return;
}
ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_prototype_attributes(
- i::Smi::FromInt(attributes));
+ Utils::OpenHandle(this)->set_read_only_prototype(true);
}
@@ -3194,39 +3192,7 @@ int v8::Object::GetIdentityHash() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props_obj(i::GetHiddenProperties(self, true));
- if (!hidden_props_obj->IsJSObject()) {
- // We failed to create hidden properties. That's a detached
- // global proxy.
- ASSERT(hidden_props_obj->IsUndefined());
- return 0;
- }
- i::Handle<i::JSObject> hidden_props =
- i::Handle<i::JSObject>::cast(hidden_props_obj);
- i::Handle<i::String> hash_symbol = isolate->factory()->identity_hash_symbol();
- if (hidden_props->HasLocalProperty(*hash_symbol)) {
- i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
- CHECK(!hash.is_null());
- CHECK(hash->IsSmi());
- return i::Smi::cast(*hash)->value();
- }
-
- int hash_value;
- int attempts = 0;
- do {
- // Generate a random 32-bit hash value but limit range to fit
- // within a smi.
- hash_value = i::V8::Random(self->GetIsolate()) & i::Smi::kMaxValue;
- attempts++;
- } while (hash_value == 0 && attempts < 30);
- hash_value = hash_value != 0 ? hash_value : 1; // never return 0
- CHECK(!i::SetLocalPropertyIgnoreAttributes(
- hidden_props,
- hash_symbol,
- i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
- static_cast<PropertyAttributes>(None)).is_null());
-
- return hash_value;
+ return i::GetIdentityHash(self);
}
@@ -3237,7 +3203,9 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
+ i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+ self,
+ i::JSObject::ALLOW_CREATION));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
@@ -3259,7 +3227,9 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
+ i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+ self,
+ i::JSObject::OMIT_CREATION));
if (hidden_props->IsUndefined()) {
return v8::Local<v8::Value>();
}
@@ -3281,7 +3251,9 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
+ i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+ self,
+ i::JSObject::OMIT_CREATION));
if (hidden_props->IsUndefined()) {
return true;
}
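The deleted block above inlined identity-hash generation in the API layer; it now delegates to i::GetIdentityHash. A sketch of the scheme the deleted lines implemented, with std::rand() standing in for i::V8::Random() and 0x3fffffff standing in for Smi::kMaxValue (both stand-ins are assumptions for illustration):

    #include <cstdlib>

    // Random value masked to smi range; 0 is remapped so a zero hash is
    // never handed out (0 marks "no hash yet").
    int GenerateIdentityHash() {
      int hash_value;
      int attempts = 0;
      do {
        hash_value = std::rand() & 0x3fffffff;
        attempts++;
      } while (hash_value == 0 && attempts < 30);
      return hash_value != 0 ? hash_value : 1;  // never return 0
    }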
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
index 193863f5c2..c00195d831 100644
--- a/deps/v8/src/apinatives.js
+++ b/deps/v8/src/apinatives.js
@@ -73,14 +73,10 @@ function InstantiateFunction(data, name) {
if (name) %FunctionSetName(fun, name);
cache[serialNumber] = fun;
var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
- var attributes = %GetTemplateField(data, kApiPrototypeAttributesOffset);
- if (attributes != NONE) {
- %IgnoreAttributesAndSetProperty(
- fun, "prototype",
- prototype ? Instantiate(prototype) : {},
- attributes);
- } else {
- fun.prototype = prototype ? Instantiate(prototype) : {};
+ var flags = %GetTemplateField(data, kApiFlagOffset);
+ fun.prototype = prototype ? Instantiate(prototype) : {};
+ if (flags & (1 << kReadOnlyPrototypeBit)) {
+ %FunctionSetReadOnlyPrototype(fun);
}
%SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
var parent = %GetTemplateField(data, kApiParentTemplateOffset);
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index c3440eb3ea..cdfc0a9acf 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -2753,7 +2753,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
+ __ LoadRoot(r0, Heap::kObject_symbolRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index e4a14af7e4..ee2bb84346 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -1560,7 +1560,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Test the double value. Zero and NaN are false.
__ VFPCompareAndLoadFlags(reg, 0.0, scratch);
__ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
- EmitBranch(true_block, false_block, ne);
+ EmitBranch(true_block, false_block, eq);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
@@ -4070,7 +4070,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
DeoptimizeIf(ne, instr->environment());
- __ movt(input_reg, 0);
+ __ mov(result_reg, Operand(0));
__ jmp(&done);
// Heap number
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index c2665f8853..2c60b28a5d 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -4399,11 +4399,18 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
- __ SmiUntag(value_reg, value_reg);
+
+ Register untagged_value = receiver_reg;
+ __ SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(
- masm, value_reg, destination,
- d0, mantissa_reg, exponent_reg, // These are: double_dst, dst1, dst2.
- scratch4, s2); // These are: scratch2, single_scratch.
+ masm,
+ untagged_value,
+ destination,
+ d0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
__ vstr(d0, scratch, 0);
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 1d1128f2d1..0cba275c3e 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -329,4 +329,78 @@ void CallFunctionStub::PrintName(StringStream* stream) {
stream->Add("CallFunctionStub_Args%d%s%s", argc_, in_loop_name, flags_name);
}
+
+void ToBooleanStub::PrintName(StringStream* stream) {
+ stream->Add("ToBooleanStub_");
+ types_.Print(stream);
+}
+
+
+void ToBooleanStub::Types::Print(StringStream* stream) const {
+ if (IsEmpty()) stream->Add("None");
+ if (Contains(UNDEFINED)) stream->Add("Undefined");
+ if (Contains(BOOLEAN)) stream->Add("Bool");
+ if (Contains(SMI)) stream->Add("Smi");
+ if (Contains(NULL_TYPE)) stream->Add("Null");
+ if (Contains(SPEC_OBJECT)) stream->Add("SpecObject");
+ if (Contains(STRING)) stream->Add("String");
+ if (Contains(HEAP_NUMBER)) stream->Add("HeapNumber");
+ if (Contains(INTERNAL_OBJECT)) stream->Add("InternalObject");
+}
+
+
+void ToBooleanStub::Types::TraceTransition(Types to) const {
+ if (!FLAG_trace_ic) return;
+ char buffer[100];
+ NoAllocationStringAllocator allocator(buffer,
+ static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ stream.Add("[ToBooleanIC (");
+ Print(&stream);
+ stream.Add("->");
+ to.Print(&stream);
+ stream.Add(")]\n");
+ stream.OutputToStdOut();
+}
+
+
+bool ToBooleanStub::Types::Record(Handle<Object> object) {
+ if (object->IsUndefined()) {
+ Add(UNDEFINED);
+ return false;
+ } else if (object->IsBoolean()) {
+ Add(BOOLEAN);
+ return object->IsTrue();
+ } else if (object->IsNull()) {
+ Add(NULL_TYPE);
+ return false;
+ } else if (object->IsSmi()) {
+ Add(SMI);
+ return Smi::cast(*object)->value() != 0;
+ } else if (object->IsSpecObject()) {
+ Add(SPEC_OBJECT);
+ return !object->IsUndetectableObject();
+ } else if (object->IsString()) {
+ Add(STRING);
+ return !object->IsUndetectableObject() &&
+ String::cast(*object)->length() != 0;
+ } else if (object->IsHeapNumber()) {
+ Add(HEAP_NUMBER);
+ double value = HeapNumber::cast(*object)->value();
+ return !object->IsUndetectableObject() && value != 0 && !isnan(value);
+ } else {
+ Add(INTERNAL_OBJECT);
+ return !object->IsUndetectableObject();
+ }
+}
+
+
+bool ToBooleanStub::Types::NeedsMap() const {
+ return Contains(ToBooleanStub::SPEC_OBJECT)
+ || Contains(ToBooleanStub::STRING)
+ || Contains(ToBooleanStub::HEAP_NUMBER)
+ || Contains(ToBooleanStub::INTERNAL_OBJECT);
+}
+
+
} } // namespace v8::internal
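Types::Record above both classifies the observed value and returns its boolean value, so the stub can be patched to handle exactly the types seen so far. A minimal stand-in (not V8 source) for the EnumSet<Type, byte> that backs ToBooleanStub::Types, showing why the whole set fits the one byte stored on the Code object:

    #include <cstdint>

    enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT,
                STRING, HEAP_NUMBER, INTERNAL_OBJECT, NUMBER_OF_TYPES };

    struct Types {
      uint8_t set_ = 0;  // one bit per observed type
      void Add(Type t) { set_ |= static_cast<uint8_t>(1u << t); }
      bool Contains(Type t) const { return (set_ >> t) & 1u; }
      bool IsEmpty() const { return set_ == 0; }
      uint8_t ToByte() const { return set_; }
    };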
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 17c245c80e..43b958b439 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -900,14 +900,68 @@ class KeyedStoreElementStub : public CodeStub {
class ToBooleanStub: public CodeStub {
public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
+ enum Type {
+ UNDEFINED,
+ BOOLEAN,
+ NULL_TYPE,
+ SMI,
+ SPEC_OBJECT,
+ STRING,
+ HEAP_NUMBER,
+ INTERNAL_OBJECT,
+ NUMBER_OF_TYPES
+ };
+
+ // At most 8 different types can be distinguished, because the Code object
+ // only has room for a single byte to hold a set of these types. :-P
+ STATIC_ASSERT(NUMBER_OF_TYPES <= 8);
+
+ class Types {
+ public:
+ Types() {}
+ explicit Types(byte bits) : set_(bits) {}
+
+ bool IsEmpty() const { return set_.IsEmpty(); }
+ bool IsAll() const { return ToByte() == ((1 << NUMBER_OF_TYPES) - 1); }
+ bool Contains(Type type) const { return set_.Contains(type); }
+ void Add(Type type) { set_.Add(type); }
+ byte ToByte() const { return set_.ToIntegral(); }
+ void Print(StringStream* stream) const;
+ void TraceTransition(Types to) const;
+ bool Record(Handle<Object> object);
+ bool NeedsMap() const;
+
+ private:
+ EnumSet<Type, byte> set_;
+ };
+
+ static Types no_types() { return Types(); }
+ static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
+
+ explicit ToBooleanStub(Register tos, Types types = Types())
+ : tos_(tos), types_(types) { }
void Generate(MacroAssembler* masm);
+ virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
+ virtual void PrintName(StringStream* stream);
private:
- Register tos_;
Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
+ int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
+
+ virtual void FinishCode(Code* code) {
+ code->set_to_boolean_state(types_.ToByte());
+ }
+
+ void CheckOddball(MacroAssembler* masm,
+ Type type,
+ Heap::RootListIndex value,
+ bool result,
+ Label* patch);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ Register tos_;
+ Types types_;
};
} } // namespace v8::internal
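The new MinorKey above packs the recorded type set into the low NUMBER_OF_TYPES bits and the tos register code into the bits above them, so distinct (register, type-set) pairs map to distinct stub keys. A compile-time check of that packing (illustrative, not V8 source):

    #include <cstdint>

    constexpr int kNumberOfTypes = 8;
    constexpr uint32_t MinorKey(uint32_t tos_code, uint8_t type_bits) {
      return (tos_code << kNumberOfTypes) | type_bits;
    }
    // Register code 3 with type bits 0x05 must not collide in the low byte.
    static_assert(MinorKey(3, 0x05) == 0x305, "fields must not overlap");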
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 4917f7d64a..f4ace87eb3 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -199,7 +199,7 @@ Handle<Value> Shell::Write(const Arguments& args) {
printf(" ");
}
v8::String::Utf8Value str(args[i]);
- int n = fwrite(*str, sizeof(**str), str.length(), stdout);
+ int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
if (n != str.length()) {
printf("Error in fwrite\n");
exit(1);
@@ -226,17 +226,24 @@ Handle<Value> Shell::ReadLine(const Arguments& args) {
static const int kBufferSize = 256;
char buffer[kBufferSize];
Handle<String> accumulator = String::New("");
- bool linebreak;
int length;
- do { // Repeat if the line ends with an escape '\'.
- // fgets got an error. Just give up.
+ while (true) {
+ // Continue reading if the line ends with an escape '\\' or the line has
+ // not been fully read into the buffer yet (does not end with '\n').
+ // If fgets gets an error, just give up.
if (fgets(buffer, kBufferSize, stdin) == NULL) return Null();
- length = strlen(buffer);
- linebreak = (length > 1 && buffer[length-2] == '\\');
- if (linebreak) buffer[length-2] = '\n';
- accumulator = String::Concat(accumulator, String::New(buffer, length-1));
- } while (linebreak);
- return accumulator;
+ length = static_cast<int>(strlen(buffer));
+ if (length == 0) {
+ return accumulator;
+ } else if (buffer[length-1] != '\n') {
+ accumulator = String::Concat(accumulator, String::New(buffer, length));
+ } else if (length > 1 && buffer[length-2] == '\\') {
+ buffer[length-2] = '\n';
+ accumulator = String::Concat(accumulator, String::New(buffer, length-1));
+ } else {
+ return String::Concat(accumulator, String::New(buffer, length-1));
+ }
+ }
}
@@ -299,9 +306,12 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
Persistent<Object> persistent_array = Persistent<Object>::New(array);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
- array->SetIndexedPropertiesToExternalArrayData(data, type, length);
- array->Set(String::New("length"), Int32::New(length), ReadOnly);
- array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
+ array->SetIndexedPropertiesToExternalArrayData(data, type,
+ static_cast<int>(length));
+ array->Set(String::New("length"),
+ Int32::New(static_cast<int32_t>(length)), ReadOnly);
+ array->Set(String::New("BYTES_PER_ELEMENT"),
+ Int32::New(static_cast<int32_t>(element_size)));
return array;
}
@@ -790,7 +800,7 @@ static char* ReadChars(const char* name, int* size_out) {
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
- int read = fread(&chars[i], 1, size - i, file);
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);
@@ -981,7 +991,7 @@ Handle<String> SourceGroup::ReadFile(const char* name) {
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
- int read = fread(&chars[i], 1, size - i, file);
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);
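The rewritten ReadLine loop above distinguishes three cases: a partial read (the buffer filled before any '\n' arrived), a backslash-escaped line break, and a complete line. A standalone C++ sketch of the same accumulation logic (names and buffer size are illustrative):

    #include <cstdio>
    #include <cstring>
    #include <string>

    // Returns false on EOF or read error, otherwise fills one logical line.
    static bool ReadLogicalLine(std::string* out) {
      char buffer[256];
      out->clear();
      while (true) {
        if (fgets(buffer, sizeof(buffer), stdin) == NULL) return false;
        size_t length = strlen(buffer);
        if (length == 0) return true;
        if (buffer[length - 1] != '\n') {
          out->append(buffer, length);              // buffer filled mid-line
        } else if (length > 1 && buffer[length - 2] == '\\') {
          buffer[length - 2] = '\n';                // keep escaped line break
          out->append(buffer, length - 1);
        } else {
          out->append(buffer, length - 1);          // strip the final '\n'
          return true;
        }
      }
    }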
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index aecbb463b5..5024bce63f 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -169,7 +169,8 @@ void BreakLocationIterator::Next() {
if ((code->is_inline_cache_stub() &&
!code->is_binary_op_stub() &&
!code->is_unary_op_stub() &&
- !code->is_compare_ic_stub()) ||
+ !code->is_compare_ic_stub() &&
+ !code->is_to_boolean_ic_stub()) ||
RelocInfo::IsConstructCall(rmode())) {
break_point_++;
return;
diff --git a/deps/v8/src/extensions/experimental/number-format.cc b/deps/v8/src/extensions/experimental/number-format.cc
index 51e0b959ce..2932c52854 100644
--- a/deps/v8/src/extensions/experimental/number-format.cc
+++ b/deps/v8/src/extensions/experimental/number-format.cc
@@ -36,6 +36,8 @@
#include "unicode/numfmt.h"
#include "unicode/uchar.h"
#include "unicode/ucurr.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
namespace v8 {
namespace internal {
@@ -231,6 +233,8 @@ static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String> locale,
}
// Generates ICU number format pattern from given skeleton.
+// TODO(cira): Remove once ICU includes equivalent method
+// (see http://bugs.icu-project.org/trac/ticket/8610).
static icu::DecimalFormat* CreateFormatterFromSkeleton(
const icu::Locale& icu_locale,
const icu::UnicodeString& skeleton,
@@ -251,6 +255,7 @@ static icu::DecimalFormat* CreateFormatterFromSkeleton(
// Case of non-consecutive U+00A4 is taken care of in i18n.js.
int32_t end_index = skeleton.lastIndexOf(currency_symbol, index);
+#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
icu::NumberFormat::EStyles style;
switch (end_index - index) {
case 0:
@@ -262,6 +267,19 @@ static icu::DecimalFormat* CreateFormatterFromSkeleton(
default:
style = icu::NumberFormat::kPluralCurrencyStyle;
}
+#else // ICU version is 4.8 or above (we ignore versions below 4.0).
+ UNumberFormatStyle style;
+ switch (end_index - index) {
+ case 0:
+ style = UNUM_CURRENCY;
+ break;
+ case 1:
+ style = UNUM_CURRENCY_ISO;
+ break;
+ default:
+ style = UNUM_CURRENCY_PLURAL;
+ }
+#endif
base_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createInstance(icu_locale, style, *status));
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index ac96668d99..05dd5c9661 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -84,6 +84,14 @@ Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
}
+Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
+ ASSERT(0 <= at_least_space_for);
+ CALL_HEAP_FUNCTION(isolate(),
+ ObjectHashTable::Allocate(at_least_space_for),
+ ObjectHashTable);
+}
+
+
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 19f3827931..3217ca906b 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -58,6 +58,8 @@ class Factory {
Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
+ Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
+
Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
Handle<DeoptimizationInputData> NewDeoptimizationInputData(
int deopt_entry_count,
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 2db44c3067..6900a9ed17 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -100,7 +100,7 @@ private:
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
// Flags for experimental implementation features.
-DEFINE_bool(unbox_double_arrays, false, "automatically unbox arrays of doubles")
+DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
// Flags for Crankshaft.
#ifdef V8_TARGET_ARCH_MIPS
@@ -400,6 +400,7 @@ DEFINE_bool(print_json_ast, false, "print source AST as JSON")
DEFINE_bool(print_builtin_json_ast, false,
"print source AST for builtins as JSON")
DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
+DEFINE_bool(verify_stack_height, false, "verify stack height tracing on ia32")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 8c2f0d178f..e5375fc3ae 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -437,6 +437,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
__ push(reg);
+ codegen()->increment_stack_height();
}
@@ -450,11 +451,13 @@ void FullCodeGenerator::TestContext::Plug(Register reg) const {
void FullCodeGenerator::EffectContext::PlugTOS() const {
__ Drop(1);
+ codegen()->decrement_stack_height();
}
void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
__ pop(result_register());
+ codegen()->decrement_stack_height();
}
@@ -465,6 +468,7 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
__ pop(result_register());
+ codegen()->decrement_stack_height();
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -960,6 +964,7 @@ void FullCodeGenerator::VisitEnterWithContextStatement(
VisitForStackValue(stmt->expression());
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushWithContext, 2);
+ decrement_stack_height();
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
}
@@ -1128,8 +1133,10 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
{
TryCatch try_block(this, &catch_entry);
__ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+ increment_stack_height(StackHandlerConstants::kSize / kPointerSize);
Visit(stmt->try_block());
__ PopTryHandler();
+ decrement_stack_height(StackHandlerConstants::kSize / kPointerSize);
}
__ bind(&done);
}
@@ -1161,6 +1168,10 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// cooked before GC.
Label finally_entry;
Label try_handler_setup;
+ const int original_stack_height = stack_height();
+ const int finally_block_stack_height = original_stack_height + 2;
+ const int try_block_stack_height = original_stack_height + 4;
+ STATIC_ASSERT(StackHandlerConstants::kSize / kPointerSize == 4);
// Setup the try-handler chain. Use a call to
// Jump to try-handler setup and try-block code. Use call to put try-handler
@@ -1182,6 +1193,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Finally block implementation.
Finally finally_block(this);
EnterFinallyBlock();
+ set_stack_height(finally_block_stack_height);
Visit(stmt->finally_block());
ExitFinallyBlock(); // Return to the calling code.
}
@@ -1191,8 +1203,10 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Setup try handler (stack pointer registers).
TryFinally try_block(this, &finally_entry);
__ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+ set_stack_height(try_block_stack_height);
Visit(stmt->try_block());
__ PopTryHandler();
+ set_stack_height(original_stack_height);
}
// Execute the finally block on the way out. Clobber the unpredictable
// value in the accumulator with one that's safe for GC. The finally
@@ -1222,6 +1236,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
__ bind(&true_case);
SetExpressionPosition(expr->then_expression(),
expr->then_expression_position());
+ int start_stack_height = stack_height();
if (context()->IsTest()) {
const TestContext* for_test = TestContext::cast(context());
VisitForControl(expr->then_expression(),
@@ -1235,6 +1250,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
+ set_stack_height(start_stack_height);
if (context()->IsTest()) ForwardBailoutToChild(expr);
SetExpressionPosition(expr->else_expression(),
expr->else_expression_position());
@@ -1275,8 +1291,11 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
+ // Throw has no effect on the stack height or the current expression context.
+ // Usually the expression context is null, because throw is a statement.
VisitForStackValue(expr->exception());
__ CallRuntime(Runtime::kThrow, 1);
+ decrement_stack_height();
// Never returns here.
}
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 6b174f7427..9bd6e5e4dc 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -83,6 +83,7 @@ class FullCodeGenerator: public AstVisitor {
scope_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
+ stack_height_(0),
context_(NULL),
bailout_entries_(0),
stack_checks_(2), // There's always at least one.
@@ -519,6 +520,35 @@ class FullCodeGenerator: public AstVisitor {
loop_depth_--;
}
+#if defined(V8_TARGET_ARCH_IA32)
+ int stack_height() { return stack_height_; }
+ void set_stack_height(int depth) { stack_height_ = depth; }
+ void increment_stack_height() { stack_height_++; }
+ void increment_stack_height(int delta) { stack_height_ += delta; }
+ void decrement_stack_height() {
+ if (FLAG_verify_stack_height) {
+ ASSERT(stack_height_ > 0);
+ }
+ stack_height_--;
+ }
+ void decrement_stack_height(int delta) {
+ stack_height_-= delta;
+ if (FLAG_verify_stack_height) {
+ ASSERT(stack_height_ >= 0);
+ }
+ }
+ // Call this function only if FLAG_verify_stack_height is true.
+ void verify_stack_height(); // Generates a runtime check of esp - ebp.
+#else
+ int stack_height() { return 0; }
+ void set_stack_height(int depth) {}
+ void increment_stack_height() {}
+ void increment_stack_height(int delta) {}
+ void decrement_stack_height() {}
+ void decrement_stack_height(int delta) {}
+ void verify_stack_height() {}
+#endif // V8_TARGET_ARCH_IA32
+
MacroAssembler* masm() { return masm_; }
class ExpressionContext;
@@ -578,6 +608,10 @@ class FullCodeGenerator: public AstVisitor {
virtual ~ExpressionContext() {
codegen_->set_new_context(old_);
+ if (FLAG_verify_stack_height) {
+ ASSERT_EQ(expected_stack_height_, codegen()->stack_height());
+ codegen()->verify_stack_height();
+ }
}
Isolate* isolate() const { return codegen_->isolate(); }
@@ -631,6 +665,7 @@ class FullCodeGenerator: public AstVisitor {
FullCodeGenerator* codegen() const { return codegen_; }
MacroAssembler* masm() const { return masm_; }
MacroAssembler* masm_;
+ int expected_stack_height_; // The expected stack height esp - ebp on exit.
private:
const ExpressionContext* old_;
@@ -640,7 +675,9 @@ class FullCodeGenerator: public AstVisitor {
class AccumulatorValueContext : public ExpressionContext {
public:
explicit AccumulatorValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
+ : ExpressionContext(codegen) {
+ expected_stack_height_ = codegen->stack_height();
+ }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -661,7 +698,9 @@ class FullCodeGenerator: public AstVisitor {
class StackValueContext : public ExpressionContext {
public:
explicit StackValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
+ : ExpressionContext(codegen) {
+ expected_stack_height_ = codegen->stack_height() + 1;
+ }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -690,7 +729,9 @@ class FullCodeGenerator: public AstVisitor {
condition_(condition),
true_label_(true_label),
false_label_(false_label),
- fall_through_(fall_through) { }
+ fall_through_(fall_through) {
+ expected_stack_height_ = codegen->stack_height();
+ }
static const TestContext* cast(const ExpressionContext* context) {
ASSERT(context->IsTest());
@@ -727,7 +768,10 @@ class FullCodeGenerator: public AstVisitor {
class EffectContext : public ExpressionContext {
public:
explicit EffectContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) { }
+ : ExpressionContext(codegen) {
+ expected_stack_height_ = codegen->stack_height();
+ }
+
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -751,6 +795,7 @@ class FullCodeGenerator: public AstVisitor {
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
+ int stack_height_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BailoutEntry> stack_checks_;
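Each expression context above records the stack height it expects on exit (current height, plus one for a StackValueContext) and asserts it in its destructor. A reduced sketch of that destructor-checked pattern (simplified, not V8 source):

    #include <cassert>

    struct Codegen { int stack_height = 0; };

    class ExpressionContext {
     public:
      // 'delta' is +1 for a stack-value context, 0 for the others.
      ExpressionContext(Codegen* cg, int delta)
          : cg_(cg), expected_(cg->stack_height + delta) {}
      // Any imbalance surfaces at the point the context is popped.
      ~ExpressionContext() { assert(cg_->stack_height == expected_); }
     private:
      Codegen* cg_;
      int expected_;
    };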
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index d73aaf0fca..c9984aa92f 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -422,43 +422,18 @@ Handle<Object> PreventExtensions(Handle<JSObject> object) {
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
- bool create_if_needed) {
- Isolate* isolate = obj->GetIsolate();
- Object* holder = obj->BypassGlobalProxy();
- if (holder->IsUndefined()) return isolate->factory()->undefined_value();
- obj = Handle<JSObject>(JSObject::cast(holder), isolate);
-
- if (obj->HasFastProperties()) {
- // If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden symbol. Since the
- // hidden symbols hash code is zero (and no other string has hash
- // code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = obj->map()->instance_descriptors();
- if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == isolate->heap()->hidden_symbol()) &&
- descriptors->IsProperty(0)) {
- ASSERT(descriptors->GetType(0) == FIELD);
- return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)),
- isolate);
- }
- }
+ JSObject::HiddenPropertiesFlag flag) {
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->GetHiddenProperties(flag),
+ Object);
+}
- // Only attempt to find the hidden properties in the local object and not
- // in the prototype chain. Note that HasLocalProperty() can cause a GC in
- // the general case in the presence of interceptors.
- if (!obj->HasHiddenPropertiesObject()) {
- // Hidden properties object not found. Allocate a new hidden properties
- // object if requested. Otherwise return the undefined value.
- if (create_if_needed) {
- Handle<Object> hidden_obj =
- isolate->factory()->NewJSObject(isolate->object_function());
- CALL_HEAP_FUNCTION(isolate,
- obj->SetHiddenPropertiesObject(*hidden_obj), Object);
- } else {
- return isolate->factory()->undefined_value();
- }
- }
- return Handle<Object>(obj->GetHiddenPropertiesObject(), isolate);
+
+int GetIdentityHash(Handle<JSObject> obj) {
+ CALL_AND_RETRY(obj->GetIsolate(),
+ obj->GetIdentityHash(JSObject::ALLOW_CREATION),
+ return Smi::cast(__object__)->value(),
+ return 0);
}
@@ -908,6 +883,15 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
}
+Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
+ Handle<JSObject> key,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(table->GetIsolate(),
+ table->Put(*key, *value),
+ ObjectHashTable);
+}
+
+
bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) {
return shared->is_compiled() || CompileLazyShared(shared, flag);
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 13c6dd67f7..9bb3b1f1d5 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -264,9 +264,13 @@ Handle<Object> GetPrototype(Handle<Object> obj);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
// Return the object's hidden properties object. If the object has no hidden
-// properties and create_if_needed is true, then a new hidden property object
-// will be allocated. Otherwise the Heap::undefined_value is returned.
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj, bool create_if_needed);
+// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
+// hidden property object will be allocated. Otherwise Heap::undefined_value
+// is returned.
+Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
+ JSObject::HiddenPropertiesFlag flag);
+
+int GetIdentityHash(Handle<JSObject> obj);
Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@@ -343,6 +347,10 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> PreventExtensions(Handle<JSObject> object);
+Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
+ Handle<JSObject> key,
+ Handle<Object> value);
+
// Does lazy compilation of the given function. Returns true on success and
// false if the compilation resulted in a stack overflow.
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 3f5554e2c2..b08655c7ed 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -142,6 +142,11 @@ MaybeObject* Heap::CopyFixedArray(FixedArray* src) {
}
+MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
+ return CopyFixedDoubleArrayWithMap(src, src->map());
+}
+
+
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
@@ -368,11 +373,7 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
int size_in_words = byte_size / kPointerSize;
- if ((dst < src) || (dst >= (src + size_in_words))) {
- ASSERT((dst >= (src + size_in_words)) ||
- ((OffsetFrom(reinterpret_cast<Address>(src)) -
- OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
-
+ if ((dst < src) || (dst >= (src + byte_size))) {
Object** src_slot = reinterpret_cast<Object**>(src);
Object** dst_slot = reinterpret_cast<Object**>(dst);
Object** end_slot = src_slot + size_in_words;
@@ -390,8 +391,7 @@ void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
- ASSERT((dst >= (src + byte_size)) ||
- ((OffsetFrom(src) - OffsetFrom(dst)) >= kPointerSize));
+ ASSERT((dst < src) || (dst >= (src + byte_size)));
CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
}
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 8dbda270fe..efdb5499d2 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -3388,17 +3388,22 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
object_size);
}
- FixedArray* elements = FixedArray::cast(source->elements());
+ FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
if (elements->length() > 0) {
Object* elem;
- { MaybeObject* maybe_elem =
- (elements->map() == fixed_cow_array_map()) ?
- elements : CopyFixedArray(elements);
+ { MaybeObject* maybe_elem;
+ if (elements->map() == fixed_cow_array_map()) {
+ maybe_elem = FixedArray::cast(elements);
+ } else if (source->HasFastDoubleElements()) {
+ maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
+ } else {
+ maybe_elem = CopyFixedArray(FixedArray::cast(elements));
+ }
if (!maybe_elem->ToObject(&elem)) return maybe_elem;
}
- JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
+ JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
}
// Update properties if necessary.
if (properties->length() > 0) {
@@ -3757,6 +3762,23 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
}
+MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
+ Map* map) {
+ int len = src->length();
+ Object* obj;
+ { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ HeapObject* dst = HeapObject::cast(obj);
+ dst->set_map(map);
+ CopyBlock(
+ dst->address() + FixedDoubleArray::kLengthOffset,
+ src->address() + FixedDoubleArray::kLengthOffset,
+ FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
+ return obj;
+}
+
+
MaybeObject* Heap::AllocateFixedArray(int length) {
ASSERT(length >= 0);
if (length == 0) return empty_fixed_array();
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 6cd4f840b9..a7a24b0c9e 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -617,6 +617,16 @@ class Heap {
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
+ // Make a copy of src and return it. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT inline MaybeObject* CopyFixedDoubleArray(
+ FixedDoubleArray* src);
+
+ // Make a copy of src, set the map, and return the copy. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap(
+ FixedDoubleArray* src, Map* map);
+
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index d282f37810..5bea55a77d 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -862,19 +862,10 @@ void HInstanceOf::PrintDataTo(StringStream* stream) {
Range* HValue::InferRange() {
- if (representation().IsTagged()) {
- // Tagged values are always in int32 range when converted to integer,
- // but they can contain -0.
- Range* result = new Range();
- result->set_can_be_minus_zero(true);
- return result;
- } else if (representation().IsNone()) {
- return NULL;
- } else {
- // Untagged integer32 cannot be -0 and we don't compute ranges for
- // untagged doubles.
- return new Range();
- }
+ // Untagged integer32 cannot be -0, all other representations can.
+ Range* result = new Range();
+ result->set_can_be_minus_zero(!representation().IsInteger32());
+ return result;
}
@@ -1230,6 +1221,30 @@ Range* HSar::InferRange() {
}
+Range* HShr::InferRange() {
+ if (right()->IsConstant()) {
+ HConstant* c = HConstant::cast(right());
+ if (c->HasInteger32Value()) {
+ int shift_count = c->Integer32Value() & 0x1f;
+ if (left()->range()->CanBeNegative()) {
+ // Only compute bounds if the result always fits into an int32.
+ return (shift_count >= 1)
+ ? new Range(0, static_cast<uint32_t>(0xffffffff) >> shift_count)
+ : new Range();
+ } else {
+ // For positive inputs we can use the >> operator.
+ Range* result = (left()->range() != NULL)
+ ? left()->range()->Copy()
+ : new Range();
+ result->Sar(c->Integer32Value());
+ return result;
+ }
+ }
+ }
+ return HValue::InferRange();
+}
+
+
Range* HShl::InferRange() {
if (right()->IsConstant()) {
HConstant* c = HConstant::cast(right());
@@ -1798,11 +1813,6 @@ void HSimulate::Verify() {
}
-void HBoundsCheck::Verify() {
- HInstruction::Verify();
-}
-
-
void HCheckSmi::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
@@ -1815,18 +1825,6 @@ void HCheckNonSmi::Verify() {
}
-void HCheckInstanceType::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
-void HCheckMap::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
-
void HCheckFunction::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
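HShr::InferRange above relies on the fact that for shift counts of at least 1, a logical right shift of any 32-bit input yields a value that fits in an int32, so bounds are only computed in that case (or via the arithmetic-shift path for known-nonnegative inputs). A quick self-check of that bound (illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int count = 1; count < 32; ++count) {
        uint32_t upper = 0xffffffffu >> count;  // largest possible result
        assert(upper <= static_cast<uint32_t>(INT32_MAX));
      }
      return 0;
    }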
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 15186ff264..23c0ae664c 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -184,6 +184,7 @@ class LChunkBuilder;
V(InobjectFields) \
V(BackingStoreFields) \
V(ArrayElements) \
+ V(DoubleArrayElements) \
V(SpecializedArrayElements) \
V(GlobalVars) \
V(Maps) \
@@ -933,8 +934,12 @@ class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
class HBranch: public HUnaryControlInstruction {
public:
- HBranch(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
- : HUnaryControlInstruction(value, true_target, false_target) {
+ HBranch(HValue* value,
+ HBasicBlock* true_target,
+ HBasicBlock* false_target,
+ ToBooleanStub::Types expected_input_types = ToBooleanStub::no_types())
+ : HUnaryControlInstruction(value, true_target, false_target),
+ expected_input_types_(expected_input_types) {
ASSERT(true_target != NULL && false_target != NULL);
}
explicit HBranch(HValue* value)
@@ -945,7 +950,14 @@ class HBranch: public HUnaryControlInstruction {
return Representation::None();
}
+ ToBooleanStub::Types expected_input_types() const {
+ return expected_input_types_;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Branch)
+
+ private:
+ ToBooleanStub::Types expected_input_types_;
};
@@ -1663,12 +1675,14 @@ class HCallRuntime: public HCall<1> {
};
-class HJSArrayLength: public HUnaryOperation {
+class HJSArrayLength: public HTemplateInstruction<2> {
public:
- explicit HJSArrayLength(HValue* value) : HUnaryOperation(value) {
+ HJSArrayLength(HValue* value, HValue* typecheck) {
// The length of an array is stored as a tagged value in the array
// object. It is guaranteed to be 32 bit integer, but it can be
// represented as either a smi or heap number.
+ SetOperandAt(0, value);
+ SetOperandAt(1, typecheck);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnArrayLengths);
@@ -1679,6 +1693,8 @@ class HJSArrayLength: public HUnaryOperation {
return Representation::Tagged();
}
+ HValue* value() { return OperandAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
protected:
@@ -1894,10 +1910,14 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
};
-class HCheckMap: public HUnaryOperation {
+class HCheckMap: public HTemplateInstruction<2> {
public:
- HCheckMap(HValue* value, Handle<Map> map)
- : HUnaryOperation(value), map_(map) {
+ HCheckMap(HValue* value, Handle<Map> map, HValue* typecheck = NULL)
+ : map_(map) {
+ SetOperandAt(0, value);
+ // If callers don't depend on a typecheck, they can pass in NULL. In that
+ // case we use a copy of the |value| argument as a dummy value.
+ SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
@@ -1909,10 +1929,7 @@ class HCheckMap: public HUnaryOperation {
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
+ HValue* value() { return OperandAt(0); }
Handle<Map> map() const { return map_; }
DECLARE_CONCRETE_INSTRUCTION(CheckMap)
@@ -1980,10 +1997,6 @@ class HCheckInstanceType: public HUnaryOperation {
return Representation::Tagged();
}
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
virtual HValue* Canonicalize();
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
@@ -2458,10 +2471,6 @@ class HBoundsCheck: public HTemplateInstruction<2> {
return Representation::Integer32();
}
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
HValue* index() { return OperandAt(0); }
HValue* length() { return OperandAt(1); }
@@ -3063,6 +3072,7 @@ class HShr: public HBitwiseBinaryOperation {
HShr(HValue* context, HValue* left, HValue* right)
: HBitwiseBinaryOperation(context, left, right) { }
+ virtual Range* InferRange();
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Shr)
@@ -3527,7 +3537,7 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
SetOperandAt(0, elements);
SetOperandAt(1, key);
set_representation(Representation::Double());
- SetFlag(kDependsOnArrayElements);
+ SetFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
}
@@ -3745,7 +3755,7 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
SetOperandAt(0, elements);
SetOperandAt(1, key);
SetOperandAt(2, val);
- SetFlag(kChangesArrayElements);
+ SetFlag(kChangesDoubleArrayElements);
}
virtual Representation RequiredInputRepresentation(int index) const {
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 7e62ec4c42..be56c673b9 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -736,6 +736,8 @@ void HGraph::AssignDominators() {
HPhase phase("Assign dominators", this);
for (int i = 0; i < blocks_.length(); ++i) {
if (blocks_[i]->IsLoopHeader()) {
+ // Only the first predecessor of a loop header is from outside the loop.
+ // All others are back edges, and thus cannot dominate the loop header.
blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
} else {
for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
@@ -743,13 +745,15 @@ void HGraph::AssignDominators() {
}
}
}
+}
- // Propagate flag marking blocks containing unconditional deoptimize.
+// Mark all blocks that are dominated by an unconditional soft deoptimize to
+// prevent code motion across those blocks.
+void HGraph::PropagateDeoptimizingMark() {
+ HPhase phase("Propagate deoptimizing mark", this);
MarkAsDeoptimizingRecursively(entry_block());
}
-
-// Mark all blocks that are dominated by an unconditional deoptimize.
void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
HBasicBlock* dominated = block->dominated_blocks()->at(i);
@@ -836,6 +840,19 @@ void HGraph::EliminateUnreachablePhis() {
}
+bool HGraph::CheckPhis() {
+ int block_count = blocks_.length();
+ for (int i = 0; i < block_count; ++i) {
+ for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+ HPhi* phi = blocks_[i]->phis()->at(j);
+ // We don't support phi uses of arguments for now.
+ if (phi->CheckFlag(HValue::kIsArguments)) return false;
+ }
+ }
+ return true;
+}
+
+
bool HGraph::CollectPhis() {
int block_count = blocks_.length();
phi_list_ = new ZoneList<HPhi*>(block_count);
@@ -843,8 +860,6 @@ bool HGraph::CollectPhis() {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi);
- // We don't support phi uses of arguments for now.
- if (phi->CheckFlag(HValue::kIsArguments)) return false;
// Check for the hole value (from an uninitialized const).
for (int k = 0; k < phi->OperandCount(); k++) {
if (phi->OperandAt(k) == GetConstantHole()) return false;
@@ -2158,7 +2173,9 @@ void TestContext::BuildBranch(HValue* value) {
}
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- HBranch* test = new(zone()) HBranch(value, empty_true, empty_false);
+ unsigned test_id = condition()->test_id();
+ ToBooleanStub::Types expected(builder->oracle()->ToBooleanTypes(test_id));
+ HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
builder->current_block()->Finish(test);
empty_true->Goto(if_true());
@@ -2293,10 +2310,15 @@ HGraph* HGraphBuilder::CreateGraph() {
graph()->OrderBlocks();
graph()->AssignDominators();
+ graph()->PropagateDeoptimizingMark();
graph()->EliminateRedundantPhis();
+ if (!graph()->CheckPhis()) {
+ Bailout("Unsupported phi use of arguments object");
+ return NULL;
+ }
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
if (!graph()->CollectPhis()) {
- Bailout("Unsupported phi-use");
+ Bailout("Unsupported phi use of uninitialized constant");
return NULL;
}
@@ -3274,8 +3296,8 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// Load the elements array before the first store.
if (elements == NULL) {
- elements = new(zone()) HLoadElements(literal);
- AddInstruction(elements);
+ elements = new(zone()) HLoadElements(literal);
+ AddInstruction(elements);
}
HValue* key = AddInstruction(
@@ -3908,12 +3930,16 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
: BuildLoadKeyedGeneric(object, key);
}
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(new(zone()) HCheckMap(object, map));
- HInstruction* elements = new(zone()) HLoadElements(object);
+ HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
+ HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
+ bool fast_double_elements = map->has_fast_double_elements();
+ if (is_store && map->has_fast_elements()) {
+ AddInstruction(new(zone()) HCheckMap(
+ elements, isolate()->factory()->fixed_array_map()));
+ }
HInstruction* length = NULL;
HInstruction* checked_key = NULL;
if (map->has_external_array_elements()) {
- AddInstruction(elements);
length = AddInstruction(new(zone()) HExternalArrayLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
HLoadExternalArrayPointer* external_elements =
@@ -3922,25 +3948,13 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
return BuildExternalArrayElementAccess(external_elements, checked_key,
val, map->elements_kind(), is_store);
}
- bool fast_double_elements = map->has_fast_double_elements();
ASSERT(map->has_fast_elements() || fast_double_elements);
if (map->instance_type() == JS_ARRAY_TYPE) {
- length = AddInstruction(new(zone()) HJSArrayLength(object));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- AddInstruction(elements);
- if (is_store && !fast_double_elements) {
- AddInstruction(new(zone()) HCheckMap(
- elements, isolate()->factory()->fixed_array_map()));
- }
+ length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
} else {
- AddInstruction(elements);
- if (is_store && !fast_double_elements) {
- AddInstruction(new(zone()) HCheckMap(
- elements, isolate()->factory()->fixed_array_map()));
- }
length = AddInstruction(new(zone()) HFixedArrayLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
}
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
if (is_store) {
if (fast_double_elements) {
return new(zone()) HStoreKeyedFastDoubleElement(elements,
@@ -3992,7 +4006,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HInstruction* elements_kind_instr =
AddInstruction(new(zone()) HElementsKind(object));
- HInstruction* elements = NULL;
+ HCompareConstantEqAndBranch* elements_kind_branch = NULL;
+ HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
@@ -4008,14 +4023,6 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
JSObject::LAST_ELEMENTS_KIND);
if (elements_kind == JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
&& todo_external_array) {
- elements = AddInstruction(new(zone()) HLoadElements(object));
- // We need to forcibly prevent some ElementsKind-dependent instructions
- // from being hoisted out of any loops they might occur in, because
- // the current loop-invariant-code-motion algorithm isn't clever enough
- // to deal with them properly.
- // There's some performance to be gained by developing a smarter
- // solution for this.
- elements->ClearFlag(HValue::kUseGVN);
HInstruction* length =
AddInstruction(new(zone()) HExternalArrayLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
@@ -4025,18 +4032,23 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
if (type_todo[elements_kind]) {
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareConstantEqAndBranch* compare =
- new(zone()) HCompareConstantEqAndBranch(elements_kind_instr,
- elements_kind,
- Token::EQ_STRICT);
- compare->SetSuccessorAt(0, if_true);
- compare->SetSuccessorAt(1, if_false);
- current_block()->Finish(compare);
+ elements_kind_branch = new(zone()) HCompareConstantEqAndBranch(
+ elements_kind_instr, elements_kind, Token::EQ_STRICT);
+ elements_kind_branch->SetSuccessorAt(0, if_true);
+ elements_kind_branch->SetSuccessorAt(1, if_false);
+ current_block()->Finish(elements_kind_branch);
set_current_block(if_true);
HInstruction* access;
if (elements_kind == JSObject::FAST_ELEMENTS ||
elements_kind == JSObject::FAST_DOUBLE_ELEMENTS) {
+ bool fast_double_elements =
+ elements_kind == JSObject::FAST_DOUBLE_ELEMENTS;
+ if (is_store && elements_kind == JSObject::FAST_ELEMENTS) {
+ AddInstruction(new(zone()) HCheckMap(
+ elements, isolate()->factory()->fixed_array_map(),
+ elements_kind_branch));
+ }
HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
HHasInstanceTypeAndBranch* typecheck =
@@ -4046,14 +4058,9 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
current_block()->Finish(typecheck);
set_current_block(if_jsarray);
- HInstruction* length = new(zone()) HJSArrayLength(object);
+ HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
AddInstruction(length);
- length->ClearFlag(HValue::kUseGVN);
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- elements = AddInstruction(new(zone()) HLoadElements(object));
- elements->ClearFlag(HValue::kUseGVN);
- bool fast_double_elements =
- elements_kind == JSObject::FAST_DOUBLE_ELEMENTS;
if (is_store) {
if (fast_double_elements) {
access = AddInstruction(
@@ -4061,8 +4068,6 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
checked_key,
val));
} else {
- AddInstruction(new(zone()) HCheckMap(
- elements, isolate()->factory()->fixed_array_map()));
access = AddInstruction(
new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
}
@@ -4083,12 +4088,6 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
if_jsarray->Goto(join);
set_current_block(if_fastobject);
- elements = AddInstruction(new(zone()) HLoadElements(object));
- elements->ClearFlag(HValue::kUseGVN);
- if (is_store && !fast_double_elements) {
- AddInstruction(new(zone()) HCheckMap(
- elements, isolate()->factory()->fixed_array_map()));
- }
length = AddInstruction(new(zone()) HFixedArrayLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
if (is_store) {
@@ -4233,8 +4232,9 @@ void HGraphBuilder::VisitProperty(Property* expr) {
if (expr->IsArrayLength()) {
HValue* array = Pop();
AddInstruction(new(zone()) HCheckNonSmi(array));
- AddInstruction(HCheckInstanceType::NewIsJSArray(array));
- instr = new(zone()) HJSArrayLength(array);
+ HInstruction* mapcheck =
+ AddInstruction(HCheckInstanceType::NewIsJSArray(array));
+ instr = new(zone()) HJSArrayLength(array, mapcheck);
} else if (expr->IsStringLength()) {
HValue* string = Pop();
@@ -5526,9 +5526,11 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
+ unsigned test_id = expr->left()->test_id();
+ ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
HBranch* test = is_logical_and
- ? new(zone()) HBranch(Top(), eval_right, empty_block)
- : new(zone()) HBranch(Top(), empty_block, eval_right);
+ ? new(zone()) HBranch(Top(), eval_right, empty_block, expected)
+ : new(zone()) HBranch(Top(), empty_block, eval_right, expected);
current_block()->Finish(test);
set_current_block(eval_right);
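
The HJSArrayLength(object, mapcheck) changes above replace the old ClearFlag(kUseGVN) workaround with an explicit dependency operand: GVN numbers an instruction by its opcode and operands, so a length load keyed on its guarding check can still be value-numbered, but two loads behind different checks never look identical and cannot be merged or hoisted past their guard. A minimal sketch of that keying, using toy types rather than the Hydrogen classes:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <tuple>

    // (opcode, input, guard): the guard operand plays the role that the
    // mapcheck/typecheck argument plays for HJSArrayLength above.
    typedef std::tuple<std::string, int, int> Key;

    int main() {
      std::map<Key, int> gvn;
      int next_value = 0;
      int object = 1, check_a = 2, check_b = 3;
      // Return the existing value number for a key, or assign a fresh one.
      auto number = [&](const Key& k) {
        std::map<Key, int>::iterator it = gvn.find(k);
        if (it == gvn.end())
          it = gvn.insert(std::make_pair(k, next_value++)).first;
        return it->second;
      };
      int a = number(Key("JSArrayLength", object, check_a));
      int b = number(Key("JSArrayLength", object, check_a));  // same guard: reused
      int c = number(Key("JSArrayLength", object, check_b));  // new guard: fresh
      std::printf("%d %d %d\n", a, b, c);  // prints: 0 0 1
    }
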
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index c0d6323fc4..05c42dd7f0 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -238,10 +238,14 @@ class HGraph: public ZoneObject {
void OrderBlocks();
void AssignDominators();
void ReplaceCheckedValues();
- void MarkAsDeoptimizingRecursively(HBasicBlock* block);
+ void PropagateDeoptimizingMark();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
+ bool CheckPhis();
+
+ // Returns false if there are phi-uses of hole values coming
+ // from uninitialized consts.
bool CollectPhis();
Handle<Code> Compile(CompilationInfo* info);
@@ -293,6 +297,7 @@ class HGraph: public ZoneObject {
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
Object* value);
+ void MarkAsDeoptimizingRecursively(HBasicBlock* block);
void InsertTypeConversions(HInstruction* instr);
void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
@@ -719,6 +724,8 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* second,
int join_id);
+ TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
+
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -747,7 +754,6 @@ class HGraphBuilder: public AstVisitor {
CompilationInfo* info() const {
return function_state()->compilation_info();
}
- TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
AstContext* call_context() const {
return function_state()->call_context();
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 71aacf9a3a..ed7e56c110 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -236,69 +236,153 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument on the stack and returns its result in tos_:
+// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
+ Label patch;
Factory* factory = masm->isolate()->factory();
+ const Register argument = eax;
const Register map = edx;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ if (!types_.IsEmpty()) {
+ __ mov(argument, Operand(esp, 1 * kPointerSize));
+ }
// undefined -> false
- __ cmp(eax, factory->undefined_value());
- __ j(equal, &false_result);
+ CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false, &patch);
// Boolean -> its value
- __ cmp(eax, factory->false_value());
- __ j(equal, &false_result);
- __ cmp(eax, factory->true_value());
- __ j(equal, &true_result);
-
- // Smis: 0 -> false, all other -> true
- __ test(eax, Operand(eax));
- __ j(zero, &false_result);
- __ JumpIfSmi(eax, &true_result);
+ CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false, &patch);
+ CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true, &patch);
// 'null' -> false.
- __ cmp(eax, factory->null_value());
- __ j(equal, &false_result, Label::kNear);
+ CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false, &patch);
- // Get the map of the heap object.
- __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
+ if (types_.Contains(SMI)) {
+ // Smis: 0 -> false, all others -> true
+ Label not_smi;
+ __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
+ // argument contains the correct return value already
+ if (!tos_.is(argument)) {
+ __ mov(tos_, argument);
+ }
+ __ ret(1 * kPointerSize);
+ __ bind(&not_smi);
+ } else if (types_.NeedsMap()) {
+ // If we need a map later and have a Smi -> patch.
+ __ JumpIfSmi(argument, &patch, Label::kNear);
+ }
- // Undetectable -> false.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, &false_result, Label::kNear);
+ if (types_.NeedsMap()) {
+ __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
- // JavaScript object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, &true_result, Label::kNear);
+ // Everything with a map could be undetectable, so check this now.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ // Undetectable -> false.
+ Label not_undetectable;
+ __ j(zero, &not_undetectable, Label::kNear);
+ __ Set(tos_, Immediate(0));
+ __ ret(1 * kPointerSize);
+ __ bind(&not_undetectable);
+ }
- // String value -> false iff empty.
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
- __ j(zero, &false_result, Label::kNear);
- __ jmp(&true_result, Label::kNear);
+ if (types_.Contains(SPEC_OBJECT)) {
+ // spec object -> true.
+ Label not_js_object;
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, &not_js_object, Label::kNear);
+ __ Set(tos_, Immediate(1));
+ __ ret(1 * kPointerSize);
+ __ bind(&not_js_object);
+ } else if (types_.Contains(INTERNAL_OBJECT)) {
+ // We've seen a spec object for the first time -> patch.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &patch, Label::kNear);
+ }
- __ bind(&not_string);
- // HeapNumber -> false iff +0, -0, or NaN.
- __ cmp(map, factory->heap_number_map());
- __ j(not_equal, &true_result, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result, Label::kNear);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in tos_.
- __ bind(&true_result);
- __ mov(tos_, 1);
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ mov(tos_, 0);
- __ ret(1 * kPointerSize);
+ if (types_.Contains(STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string, Label::kNear);
+ __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
+ __ ret(1 * kPointerSize); // The string length is OK as the return value.
+ __ bind(&not_string);
+ } else if (types_.Contains(INTERNAL_OBJECT)) {
+ // We've seen a string for the first time -> patch
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(below, &patch, Label::kNear);
+ }
+
+ if (types_.Contains(HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number, false_result;
+ __ cmp(map, factory->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ fldz();
+ __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
+ __ FCmp();
+ __ j(zero, &false_result, Label::kNear);
+ __ Set(tos_, Immediate(1));
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ Set(tos_, Immediate(0));
+ __ ret(1 * kPointerSize);
+ __ bind(&not_heap_number);
+ } else if (types_.Contains(INTERNAL_OBJECT)) {
+ // We've seen a heap number for the first time -> patch
+ __ cmp(map, factory->heap_number_map());
+ __ j(equal, &patch, Label::kNear);
+ }
+
+ if (types_.Contains(INTERNAL_OBJECT)) {
+ // internal objects -> true
+ __ Set(tos_, Immediate(1));
+ __ ret(1 * kPointerSize);
+ }
+
+ if (!types_.IsAll()) {
+ __ bind(&patch);
+ GenerateTypeTransition(masm);
+ }
+}
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+ Type type,
+ Heap::RootListIndex value,
+ bool result,
+ Label* patch) {
+ const Register argument = eax;
+ if (types_.Contains(type)) {
+ // If we see an expected oddball, return its ToBoolean value in tos_.
+ Label different_value;
+ __ CompareRoot(argument, value);
+ __ j(not_equal, &different_value, Label::kNear);
+ __ Set(tos_, Immediate(result ? 1 : 0));
+ __ ret(1 * kPointerSize);
+ __ bind(&different_value);
+ } else if (types_.Contains(INTERNAL_OBJECT)) {
+ // If we see an unexpected oddball and handle internal objects, we must
+ // patch because the code for internal objects doesn't handle it explicitly.
+ __ CompareRoot(argument, value);
+ __ j(equal, patch);
+ }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(ecx); // Get return address, operand is now on top of stack.
+ __ push(Immediate(Smi::FromInt(tos_.code())));
+ __ push(Immediate(Smi::FromInt(types_.ToByte())));
+ __ push(ecx); // Push return address.
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+ 3,
+ 1);
}
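
The rewritten stub only emits checks for the value kinds it has already seen; anything else falls through to the patch label and GenerateTypeTransition. The whole set of observed kinds fits in a byte (types_.ToByte() above), and NeedsMap() gates the map load. A rough stand-in for that bookkeeping, with illustrative names rather than the real ToBooleanStub::Types:

    #include <cstdint>
    #include <cstdio>

    enum Kind { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING,
                HEAP_NUMBER, INTERNAL_OBJECT };

    class TypeHints {
     public:
      bool Contains(Kind k) const { return (bits_ & (1 << k)) != 0; }
      void Add(Kind k) { bits_ |= static_cast<uint8_t>(1 << k); }
      bool IsEmpty() const { return bits_ == 0; }
      uint8_t ToByte() const { return bits_; }  // persisted in the stub key
      // Only heap-allocated kinds require loading the object's map.
      bool NeedsMap() const {
        return Contains(SPEC_OBJECT) || Contains(STRING) ||
               Contains(HEAP_NUMBER) || Contains(INTERNAL_OBJECT);
      }
     private:
      uint8_t bits_ = 0;
    };

    int main() {
      TypeHints hints;
      hints.Add(SMI);
      std::printf("needs map: %d\n", hints.NeedsMap());  // 0: smis have no map
      hints.Add(STRING);
      std::printf("needs map: %d\n", hints.NeedsMap());  // 1: strings do
    }
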
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 572c36c881..3a657bd541 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -255,6 +255,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
ASSERT(desc.reloc_size == 0);
CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}
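
The added OS::ProtectCode call follows the usual W^X discipline for generated code: write the instructions into a writable buffer, flush the instruction cache where the architecture needs it, then drop write permission before the code is ever executed. A hedged POSIX-only sketch of that sequence (the real implementation lives in platform-*.cc and differs per OS; the stub bytes assume an x86/x86-64 host):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstdio>
    #include <cstring>

    typedef int (*StubFn)();

    int main() {
      // x86 encoding of: mov eax, 42; ret
      static const unsigned char kCode[] = {0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3};
      const size_t kPage = 4096;
      void* buf = mmap(NULL, kPage, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(buf != MAP_FAILED);
      memcpy(buf, kCode, sizeof(kCode));
      // CPU::FlushICache would go here on ARM/MIPS; x86 keeps caches coherent.
      // The analogue of OS::ProtectCode: executable, no longer writable.
      int rc = mprotect(buf, kPage, PROT_READ | PROT_EXEC);
      assert(rc == 0);
      std::printf("%d\n", reinterpret_cast<StubFn>(buf)());  // prints: 42
    }
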
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 615dbfed2b..57e66df9e3 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -67,7 +67,8 @@ void CPU::FlushICache(void* start, size_t size) {
// solution is to run valgrind with --smc-check=all, but this comes at a big
// performance cost. We can notify valgrind to invalidate its cache.
#ifdef VALGRIND_DISCARD_TRANSLATIONS
- VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ USE(res);
#endif
}
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index f9f63a70ed..994c9ff61e 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -166,6 +166,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
}
+ set_stack_height(2 + scope()->num_stack_slots());
+ if (FLAG_verify_stack_height) {
+ verify_stack_height();
+ }
+
bool function_in_register = true;
// Possibly allocate a local context.
@@ -358,6 +363,15 @@ void FullCodeGenerator::EmitReturnSequence() {
}
+void FullCodeGenerator::verify_stack_height() {
+ ASSERT(FLAG_verify_stack_height);
+ __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
+ __ cmp(ebp, Operand(esp));
+ __ Assert(equal, "Full codegen stack height not as expected.");
+ __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
+}
+
+
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}
@@ -372,6 +386,7 @@ void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
// Memory operands can be pushed directly.
__ push(slot_operand);
+ codegen()->increment_stack_height();
}
@@ -425,6 +440,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
} else {
__ push(Immediate(lit));
}
+ codegen()->increment_stack_height();
}
@@ -462,6 +478,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
__ Drop(count);
+ codegen()->decrement_stack_height(count);
}
@@ -471,6 +488,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
ASSERT(count > 0);
__ Drop(count);
__ Move(result_register(), reg);
+ codegen()->decrement_stack_height(count);
}
@@ -479,6 +497,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
ASSERT(count > 0);
if (count > 1) __ Drop(count - 1);
__ mov(Operand(esp, 0), reg);
+ codegen()->decrement_stack_height(count - 1);
}
@@ -490,6 +509,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(this);
+ codegen()->decrement_stack_height(count);
}
@@ -523,6 +543,7 @@ void FullCodeGenerator::StackValueContext::Plug(
__ bind(materialize_false);
__ push(Immediate(isolate()->factory()->false_value()));
__ bind(&done);
+ codegen()->increment_stack_height();
}
@@ -550,6 +571,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
? isolate()->factory()->true_value()
: isolate()->factory()->false_value();
__ push(Immediate(value));
+ codegen()->increment_stack_height();
}
@@ -572,7 +594,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
ToBooleanStub stub(result_register());
__ push(result_register());
- __ CallStub(&stub);
+ __ CallStub(&stub, condition->test_id());
__ test(result_register(), Operand(result_register()));
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -722,14 +744,18 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
+ increment_stack_height(3);
if (mode == Variable::CONST) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
+ increment_stack_height();
} else if (function != NULL) {
VisitForStackValue(function);
} else {
__ push(Immediate(Smi::FromInt(0))); // No initial value!
+ increment_stack_height();
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ decrement_stack_height(4);
break;
}
}
@@ -748,8 +774,10 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
}
__ push(eax);
+ increment_stack_height();
VisitForAccumulatorValue(function);
__ pop(edx);
+ decrement_stack_height();
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
@@ -785,6 +813,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
+ int switch_clause_stack_height = stack_height();
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -849,6 +878,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ jmp(default_clause->body_target());
}
+ set_stack_height(switch_clause_stack_height);
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
@@ -890,6 +920,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
__ push(eax);
+ increment_stack_height();
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -973,6 +1004,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
+ increment_stack_height(4);
// Generate code for doing the condition check.
__ bind(&loop);
__ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
@@ -1028,6 +1060,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.break_target());
__ add(Operand(esp), Immediate(5 * kPointerSize));
+ decrement_stack_height(5);
// Exit and decrement the loop depth.
__ bind(&exit);
decrement_loop_depth();
@@ -1363,6 +1396,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) {
__ push(eax); // Save result on the stack
result_saved = true;
+ increment_stack_height();
}
switch (property->kind()) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1387,6 +1421,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Fall through.
case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(esp, 0)); // Duplicate receiver.
+ increment_stack_height();
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1395,16 +1430,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
__ Drop(3);
}
+ decrement_stack_height(3);
break;
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
__ push(Operand(esp, 0)); // Duplicate receiver.
+ increment_stack_height();
VisitForStackValue(key);
__ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
+ increment_stack_height();
VisitForStackValue(value);
__ CallRuntime(Runtime::kDefineAccessor, 4);
+ decrement_stack_height(4);
break;
default: UNREACHABLE();
}
@@ -1467,6 +1506,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(eax);
result_saved = true;
+ increment_stack_height();
}
VisitForAccumulatorValue(subexpr);
@@ -1495,7 +1535,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// on the left-hand side.
if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
+ ASSERT(expr->target()->AsThrow() != NULL);
+ VisitInCurrentContext(expr->target()); // Throw does not plug the context
+ context()->Plug(eax);
return;
}
@@ -1520,6 +1562,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
__ push(result_register());
+ increment_stack_height();
} else {
VisitForStackValue(property->obj());
}
@@ -1530,6 +1573,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
+ increment_stack_height();
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1541,7 +1585,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// For compound assignments we need another deoptimization point after the
// variable/property load.
if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
+ AccumulatorValueContext result_context(this);
+ { AccumulatorValueContext left_operand_context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
@@ -1560,13 +1605,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Token::Value op = expr->binary_op();
__ push(eax); // Left operand goes on the stack.
+ increment_stack_height();
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
: NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
@@ -1630,6 +1675,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack. Right operand is in eax.
Label smi_case, done, stub_call;
__ pop(edx);
+ decrement_stack_height();
__ mov(ecx, eax);
__ or_(eax, Operand(edx));
JumpPatchSite patch_site(masm_);
@@ -1721,6 +1767,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
+ decrement_stack_height();
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
@@ -1733,7 +1780,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
+ ASSERT(expr->AsThrow() != NULL);
+ VisitInCurrentContext(expr); // Throw does not plug the context
+ context()->Plug(eax);
return;
}
@@ -1757,9 +1806,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case NAMED_PROPERTY: {
__ push(eax); // Preserve value.
+ increment_stack_height();
VisitForAccumulatorValue(prop->obj());
__ mov(edx, eax);
__ pop(eax); // Restore value.
+ decrement_stack_height();
__ mov(ecx, prop->key()->AsLiteral()->handle());
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
@@ -1769,6 +1820,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case KEYED_PROPERTY: {
__ push(eax); // Preserve value.
+ increment_stack_height();
if (prop->is_synthetic()) {
ASSERT(prop->obj()->AsVariableProxy() != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
@@ -1782,8 +1834,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
+ decrement_stack_height();
}
__ pop(eax); // Restore value.
+ decrement_stack_height();
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
@@ -1900,6 +1954,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(edx, Operand(esp, 0));
} else {
__ pop(edx);
+ decrement_stack_height();
}
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
@@ -1913,6 +1968,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
__ Drop(1);
+ decrement_stack_height();
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -1934,10 +1990,12 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
__ pop(ecx);
+ decrement_stack_height();
if (expr->ends_initialization_block()) {
__ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
} else {
__ pop(edx);
+ decrement_stack_height();
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
@@ -1953,6 +2011,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ push(edx);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
+ decrement_stack_height();
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -1972,6 +2031,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ pop(edx);
+ decrement_stack_height();
EmitKeyedPropertyLoad(expr);
context()->Plug(eax);
}
@@ -1999,6 +2059,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ decrement_stack_height(arg_count + 1);
context()->Plug(eax);
}
@@ -2013,6 +2074,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
__ pop(ecx);
__ push(eax);
__ push(ecx);
+ increment_stack_height();
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2032,6 +2094,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ decrement_stack_height(arg_count + 1);
context()->DropAndPlug(1, eax); // Drop the key still on the stack.
}
@@ -2053,6 +2116,8 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ decrement_stack_height(arg_count + 1);
context()->DropAndPlug(1, eax);
}
@@ -2100,7 +2165,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(fun);
// Reserved receiver slot.
__ push(Immediate(isolate()->factory()->undefined_value()));
-
+ increment_stack_height();
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
@@ -2144,10 +2209,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ decrement_stack_height(arg_count + 1); // Function is left on the stack.
context()->DropAndPlug(1, eax);
} else if (var != NULL && !var->is_this() && var->is_global()) {
// Push global object as receiver for the call IC.
__ push(GlobalObjectOperand());
+ increment_stack_height();
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->AsSlot() != NULL &&
var->AsSlot()->type() == Slot::LOOKUP) {
@@ -2170,7 +2237,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ push(eax); // Function.
+ increment_stack_height();
__ push(edx); // Receiver.
+ increment_stack_height();
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
@@ -2179,7 +2248,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Label call;
__ jmp(&call);
__ bind(&done);
- // Push function.
+ // Push function. Stack height already incremented in slow case above.
__ push(eax);
// The receiver is implicitly the global receiver. Indicate this
// by passing the hole to the call function stub.
@@ -2225,9 +2294,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(eax);
+ increment_stack_height();
// Push Global receiver.
__ mov(ecx, GlobalObjectOperand());
__ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+ increment_stack_height();
EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
} else {
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2243,6 +2314,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Load global receiver object.
__ mov(ebx, GlobalObjectOperand());
__ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ increment_stack_height();
// Emit function call.
EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
}
@@ -2283,6 +2355,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Handle<Code> construct_builtin =
isolate()->builtins()->JSConstructCall();
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ decrement_stack_height(arg_count + 1);
context()->Plug(eax);
}
@@ -2595,6 +2669,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
&if_true, &if_false, &fall_through);
__ pop(ebx);
+ decrement_stack_height();
__ cmp(eax, Operand(ebx));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2709,6 +2784,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
+ decrement_stack_height(2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, isolate()->factory()->undefined_value());
@@ -2774,6 +2850,7 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
+ decrement_stack_height(3);
context()->Plug(eax);
}
@@ -2787,6 +2864,7 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(2));
VisitForStackValue(args->at(3));
__ CallStub(&stub);
+ decrement_stack_height(4);
context()->Plug(eax);
}
@@ -2821,6 +2899,7 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
+ decrement_stack_height(2);
context()->Plug(eax);
}
@@ -2831,6 +2910,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(ebx); // eax = value. ebx = object.
+ decrement_stack_height();
Label done;
// If the object is a smi, return the value.
@@ -2860,6 +2940,7 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
NumberToStringStub stub;
__ CallStub(&stub);
+ decrement_stack_height();
context()->Plug(eax);
}
@@ -2894,6 +2975,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Register result = edx;
__ pop(object);
+ decrement_stack_height();
Label need_conversion;
Label index_out_of_range;
@@ -2942,6 +3024,7 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Register result = eax;
__ pop(object);
+ decrement_stack_height();
Label need_conversion;
Label index_out_of_range;
@@ -2986,6 +3069,7 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
StringAddStub stub(NO_STRING_ADD_FLAGS);
__ CallStub(&stub);
+ decrement_stack_height(2);
context()->Plug(eax);
}
@@ -2998,6 +3082,7 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
StringCompareStub stub;
__ CallStub(&stub);
+ decrement_stack_height(2);
context()->Plug(eax);
}
@@ -3009,6 +3094,7 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
+ decrement_stack_height();
context()->Plug(eax);
}
@@ -3020,6 +3106,7 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
+ decrement_stack_height();
context()->Plug(eax);
}
@@ -3031,6 +3118,7 @@ void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
+ decrement_stack_height();
context()->Plug(eax);
}
@@ -3040,6 +3128,7 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
+ decrement_stack_height();
context()->Plug(eax);
}
@@ -3059,6 +3148,7 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
__ InvokeFunction(edi, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ decrement_stack_height(arg_count + 1);
context()->Plug(eax);
}
@@ -3071,6 +3161,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
+ decrement_stack_height(3);
context()->Plug(eax);
}
@@ -3144,6 +3235,7 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ CallRuntime(Runtime::kSwapElements, 3);
__ bind(&done);
+ decrement_stack_height(3);
context()->Plug(eax);
}
@@ -3229,6 +3321,7 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ mov(eax, Immediate(isolate()->factory()->true_value()));
__ bind(&done);
+ decrement_stack_height();
context()->Plug(eax);
}
@@ -3532,6 +3625,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ decrement_stack_height();
context()->Plug(eax);
}
@@ -3584,6 +3678,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Prepare for calling JS runtime function.
__ mov(eax, GlobalObjectOperand());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+ increment_stack_height();
}
// Push the arguments ("left-to-right").
@@ -3606,6 +3701,11 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
}
+ decrement_stack_height(arg_count);
+ if (expr->is_jsruntime()) {
+ decrement_stack_height();
+ }
+
context()->Plug(eax);
}
@@ -3627,6 +3727,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(prop->key());
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ decrement_stack_height(2);
context()->Plug(eax);
}
} else if (var != NULL) {
@@ -3696,6 +3797,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ CallRuntime(Runtime::kTypeof, 1);
+ decrement_stack_height();
context()->Plug(eax);
break;
}
@@ -3750,7 +3852,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// as the left-hand side.
if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
+ ASSERT(expr->expression()->AsThrow() != NULL);
+ VisitInCurrentContext(expr->expression());
+ // Visiting Throw does not plug the context.
+ context()->Plug(eax);
return;
}
@@ -3775,17 +3880,20 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
__ push(Immediate(Smi::FromInt(0)));
+ increment_stack_height();
}
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the accumulator.
VisitForAccumulatorValue(prop->obj());
__ push(eax);
+ increment_stack_height();
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
+ increment_stack_height();
EmitKeyedPropertyLoad(prop);
}
}
@@ -3816,6 +3924,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
switch (assign_type) {
case VARIABLE:
__ push(eax);
+ increment_stack_height();
break;
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
@@ -3889,6 +3998,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
+ decrement_stack_height();
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
@@ -3906,6 +4016,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
+ decrement_stack_height();
+ decrement_stack_height();
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
@@ -4063,6 +4175,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ decrement_stack_height(2);
PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -4072,6 +4185,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
+ decrement_stack_height(2);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
// The stub returns 0 for true.
@@ -4116,6 +4230,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default:
UNREACHABLE();
}
+ decrement_stack_height();
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
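
The bulk of the full-codegen churn above is mechanical: every push and pop now updates a compile-time counter so that, under the new --verify-stack-height flag, verify_stack_height() can assert esp == ebp - kPointerSize * stack_height() at chosen points. A small host-side analogue of the bookkeeping, under the assumption that the tracked and actual heights must always agree:

    #include <cassert>
    #include <vector>

    class Codegen {
     public:
      void Push(int v) { stack_.push_back(v); increment_stack_height(); }
      void Pop() { stack_.pop_back(); decrement_stack_height(1); }
      void increment_stack_height(int n = 1) { height_ += n; }
      void decrement_stack_height(int n) { height_ -= n; }
      // Analogue of FullCodeGenerator::verify_stack_height(): the statically
      // tracked height must match what is actually on the stack.
      void verify_stack_height() const {
        assert(height_ == static_cast<int>(stack_.size()));
      }
     private:
      std::vector<int> stack_;
      int height_ = 0;
    };

    int main() {
      Codegen cg;
      cg.Push(1); cg.Push(2); cg.verify_stack_height();
      cg.Pop();   cg.verify_stack_height();
    }
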
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 7211ba49a7..c0f4e71caa 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -1393,44 +1393,135 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->value()->type().IsBoolean()) {
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
__ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
+ } else if (type.IsSmi()) {
+ __ test(reg, Operand(reg));
+ EmitBranch(true_block, false_block, not_equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, false_label);
- __ cmp(reg, factory()->true_value());
- __ j(equal, true_label);
- __ cmp(reg, factory()->false_value());
- __ j(equal, false_label);
- __ test(reg, Operand(reg));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
-
- // Test for double values. Zero is false.
- Label call_stub;
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(not_equal, &call_stub, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, false_label);
- __ jmp(true_label);
-
- // The conversion stub doesn't cause garbage collections so it's
- // safe to not record a safepoint after the call.
- __ bind(&call_stub);
- ToBooleanStub stub(eax);
- __ pushad();
- __ push(reg);
- __ CallStub(&stub);
- __ test(eax, Operand(eax));
- __ popad();
- EmitBranch(true_block, false_block, not_zero);
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ cmp(reg, factory()->undefined_value());
+ __ j(equal, false_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen undefined for the first time -> deopt.
+ __ cmp(reg, factory()->undefined_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // true -> true.
+ __ cmp(reg, factory()->true_value());
+ __ j(equal, true_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a boolean for the first time -> deopt.
+ __ cmp(reg, factory()->true_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // false -> false.
+ __ cmp(reg, factory()->false_value());
+ __ j(equal, false_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a boolean for the first time -> deopt.
+ __ cmp(reg, factory()->false_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ cmp(reg, factory()->null_value());
+ __ j(equal, false_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen null for the first time -> deopt.
+ __ cmp(reg, factory()->null_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all others -> true.
+ __ test(reg, Operand(reg));
+ __ j(equal, false_label);
+ __ JumpIfSmi(reg, true_label);
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ test(reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ Register map = no_reg;
+ if (expected.NeedsMap()) {
+ map = ToRegister(instr->TempAt(0));
+ ASSERT(!map.is(reg));
+ __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
+ // Everything with a map could be undetectable, so check this now.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ // Undetectable -> false.
+ __ j(not_zero, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, true_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a spec object for the first time -> deopt.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ DeoptimizeIf(above_equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string, Label::kNear);
+ __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ j(not_zero, true_label);
+ __ jmp(false_label);
+ __ bind(&not_string);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a string for the first time -> deopt
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ DeoptimizeIf(below, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ fldz();
+ __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ FCmp();
+ __ j(zero, false_label);
+ __ jmp(true_label);
+ __ bind(&not_heap_number);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a heap number for the first time -> deopt.
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // internal objects -> true
+ __ jmp(true_label);
+ } else {
+ // We've seen something for the first time -> deopt.
+ DeoptimizeIf(no_condition, instr->environment());
+ }
}
}
}
@@ -2232,7 +2323,6 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
- Register elements = ToRegister(instr->elements());
XMMRegister result = ToDoubleRegister(instr->result());
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3097,7 +3187,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
void LCodeGen::DoStoreKeyedFastDoubleElement(
LStoreKeyedFastDoubleElement* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Label have_value;
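
DoBranch above is the optimized-code half of the same scheme: inline checks are emitted only for kinds in expected_input_types(), and any kind outside that set hits DeoptimizeIf, so execution falls back to unoptimized code and the recorded feedback widens. A toy model of speculate-then-deopt, with made-up kinds:

    #include <cstdint>
    #include <cstdio>

    enum Kind : uint8_t { kSmi = 1, kString = 2 };

    // Generic truth test, standing in for the deopt target (full codegen).
    static bool Generic(uint8_t kind, int payload) { return payload != 0; }

    // Specialized branch: only handles kinds in 'expected'; anything else
    // "deopts" to the generic path, mirroring the DeoptimizeIf calls above.
    static bool Branch(uint8_t expected, uint8_t kind, int payload) {
      if ((expected & kind) == 0) {
        std::puts("deopt");  // unseen kind -> bail out of optimized code
        return Generic(kind, payload);
      }
      return payload != 0;   // fast inlined check
    }

    int main() {
      uint8_t expected = kSmi;  // feedback so far: only smis seen
      std::printf("%d\n", Branch(expected, kSmi, 7));     // fast path
      std::printf("%d\n", Branch(expected, kString, 3));  // deopt path
    }
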
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 9951d2540b..07867c70f2 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -1041,7 +1041,16 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
}
- return new LBranch(UseRegisterAtStart(v));
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ // We need a temporary register when we have to access the map *or* we have
+ // no type info yet, in which case we handle all cases (including the ones
+ // involving maps).
+ bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
+ LOperand* temp = needs_temp ? TempRegister() : NULL;
+ LInstruction* branch = new LBranch(UseRegister(v), temp);
+ // When we handle all cases, we never deopt, so we don't need to assign the
+ // environment then.
+ return expected.IsAll() ? branch : AssignEnvironment(branch);
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 6b60a6e40e..efa048dd24 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -876,10 +876,11 @@ class LConstantT: public LTemplateInstruction<1, 0, 0> {
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch: public LControlInstruction<1, 1> {
public:
- explicit LBranch(LOperand* value) {
+ explicit LBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 3e037d79ac..acb670b700 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -263,6 +263,13 @@ void MacroAssembler::SafePush(const Immediate& x) {
}
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+ // See the ROOT_ACCESSOR macro in factory.h.
+ Handle<Object> value(&isolate()->heap()->roots_address()[index]);
+ cmp(with, value);
+}
+
+
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index dac22731a9..d79df5ea55 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -209,6 +209,9 @@ class MacroAssembler: public Assembler {
void SafeSet(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
+ // Compare a register against a known root, e.g. undefined, null, true, ...
+ void CompareRoot(Register with, Heap::RootListIndex index);
+
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
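
CompareRoot simply emits a cmp against the canonical heap object stored at roots_address()[index]. A trivial host-side analogue, with an illustrative two-entry root table standing in for the heap's root list:

    #include <cassert>

    enum RootIndex { kUndefinedValueRootIndex, kTrueValueRootIndex, kRootCount };

    int main() {
      int undefined_obj = 100, true_obj = 200;            // stand-in "objects"
      int roots[kRootCount] = {undefined_obj, true_obj};  // roots_address()[i]
      int reg = true_obj;
      // The macro-assembler emits cmp(with, roots[index]); we just test it.
      assert(reg == roots[kTrueValueRootIndex]);
      assert(reg != roots[kUndefinedValueRootIndex]);
    }
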
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 9a690d76bb..73f42a3d18 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -3981,10 +3981,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&smi_value);
// Value is a smi. Convert to a double and store.
- __ SmiUntag(eax);
- __ push(eax);
+ // Preserve original value.
+ __ mov(edx, eax);
+ __ SmiUntag(edx);
+ __ push(edx);
__ fild_s(Operand(esp, 0));
- __ pop(eax);
+ __ pop(edx);
__ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
__ ret(0);
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index f70f75a7f6..0d0b93570b 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -88,7 +88,8 @@ void IC::TraceIC(const char* type,
// function and the original code.
JSFunction* function = JSFunction::cast(frame->function());
function->PrintName();
- int code_offset = address() - js_code->instruction_start();
+ int code_offset =
+ static_cast<int>(address() - js_code->instruction_start());
PrintF("+%d", code_offset);
} else {
PrintF("<unknown>");
@@ -309,6 +310,7 @@ void IC::Clear(Address address) {
case Code::UNARY_OP_IC:
case Code::BINARY_OP_IC:
case Code::COMPARE_IC:
+ case Code::TO_BOOLEAN_IC:
// Clearing these is tricky and does not
// make any performance difference.
return;
@@ -842,14 +844,6 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
}
-#ifdef DEBUG
-#define TRACE_IC_NAMED(msg, name) \
- if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString())
-#else
-#define TRACE_IC_NAMED(msg, name)
-#endif
-
-
MaybeObject* LoadIC::Load(State state,
Handle<Object> object,
Handle<String> name) {
@@ -2506,6 +2500,31 @@ RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
}
+RUNTIME_FUNCTION(MaybeObject*, ToBoolean_Patch) {
+ ASSERT(args.length() == 3);
+
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ Register tos = Register::from_code(args.smi_at(1));
+ ToBooleanStub::Types old_types(args.smi_at(2));
+
+ ToBooleanStub::Types new_types(old_types);
+ bool to_boolean_value = new_types.Record(object);
+ old_types.TraceTransition(new_types);
+
+ ToBooleanStub stub(tos, new_types);
+ Handle<Code> code = stub.GetCode();
+ ToBooleanIC ic(isolate);
+ ic.patch(*code);
+ return Smi::FromInt(to_boolean_value ? 1 : 0);
+}
+
+
+void ToBooleanIC::patch(Code* code) {
+ set_target(code);
+}
+
+
static const Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name),
IC_UTIL_LIST(ADDR)
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 11c2e3af45..2236ba37b6 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -59,7 +59,8 @@ namespace internal {
ICU(StoreInterceptorProperty) \
ICU(UnaryOp_Patch) \
ICU(BinaryOp_Patch) \
- ICU(CompareIC_Miss)
+ ICU(CompareIC_Miss) \
+ ICU(ToBoolean_Patch)
//
// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
// and KeyedStoreIC.
@@ -720,6 +721,15 @@ class CompareIC: public IC {
Token::Value op_;
};
+
+class ToBooleanIC: public IC {
+ public:
+ explicit ToBooleanIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+
+ void patch(Code* code);
+};
+
+
// Helper for BinaryOpIC and CompareIC.
void PatchInlinedSmiCode(Address address);
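
Taken together, ToBoolean_Patch and ToBooleanIC::patch implement the usual IC lifecycle: the generic stub calls into the runtime, the runtime records the operand's kind, compiles a stub specialized for every kind seen so far, and rewrites the call site so subsequent calls stay in generated code. A hedged sketch of that control flow, with plain function pointers standing in for code objects and illustrative names throughout:

    #include <cstdint>
    #include <cstdio>

    struct Site;                          // one inline cache site
    typedef bool (*Stub)(Site*, double);  // a compiled ToBoolean stub

    struct Site {
      Stub target;    // patched in place, like IC::set_target()
      uint8_t seen;   // bitset of observed kinds, like Types::ToByte()
    };

    const uint8_t kNumber = 1;

    static bool NumberStub(Site*, double v) {
      return v != 0.0;  // specialized: only handles the kinds seen so far
    }

    static bool MissStub(Site* site, double v) {
      site->seen |= kNumber;      // Types::Record(object)
      site->target = NumberStub;  // ToBooleanIC::patch(*code)
      return v != 0.0;            // and still answer the current call
    }

    int main() {
      Site site = {MissStub, 0};
      std::printf("%d\n", site.target(&site, 0.0));  // miss path patches site
      std::printf("%d\n", site.target(&site, 3.0));  // fast path from now on
    }
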
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 3ef5afbb89..f93b04d38f 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -166,7 +166,8 @@ class JsonParser BASE_EMBEDDED {
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
isolate_ = source->map()->isolate();
- source_ = Handle<String>(source->TryFlattenGetString());
+ FlattenString(source);
+ source_ = source;
source_length_ = source_->length();
// Optimized fast case where we only have ASCII characters.
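
Flattening up front is what makes the scanner's direct character indexing safe: a cons string is a tree of fragments, and FlattenString copies it into one contiguous sequential string. A toy illustration of the trade (one O(n) copy buys O(1) random access), with made-up types:

    #include <cstdio>
    #include <string>

    // Stand-in for a cons string: two fragments that logically concatenate.
    struct ConsString {
      std::string first, second;
      std::string Flatten() const { return first + second; }  // one O(n) copy
    };

    int main() {
      ConsString source = {"{\"a\":", "1}"};
      std::string flat = source.Flatten();  // like FlattenString(source)
      // After flattening, the parser can index characters directly.
      std::printf("%s -> source_[0] == '%c'\n", flat.c_str(), flat[0]);
    }
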
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 2d1ce23dc0..6112e34fcb 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -86,8 +86,6 @@ void Log::Initialize() {
if (open_log_file) {
if (strcmp(FLAG_logfile, "-") == 0) {
OpenStdout();
- } else if (strcmp(FLAG_logfile, "*") == 0) {
- // Does nothing for now. Will be removed.
} else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) {
OpenTemporaryFile();
} else {
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 04fd22ef5c..dedf7e90c5 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1400,6 +1400,7 @@ void Logger::LogCodeObject(Object* object) {
case Code::UNARY_OP_IC: // fall through
case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
+ case Code::TO_BOOLEAN_IC: // fall through
case Code::STUB:
description =
CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index fc08cb129f..5ba7ac3afa 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -44,7 +44,7 @@ const kApiSerialNumberOffset = 2;
const kApiConstructorOffset = 2;
const kApiPrototypeTemplateOffset = 5;
const kApiParentTemplateOffset = 6;
-const kApiPrototypeAttributesOffset = 15;
+const kApiFlagOffset = 14;
const NO_HINT = 0;
const NUMBER_HINT = 1;
@@ -65,6 +65,7 @@ const msPerMonth = 2592000000;
# For apinatives.js
const kUninitialized = -1;
+const kReadOnlyPrototypeBit = 3; # For FunctionTemplateInfo, matches objects.h
# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
const kInvalidDate = 'Invalid Date';
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index aaa98ed4ab..c1618e5cc3 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -195,6 +195,7 @@ function FormatMessage(message) {
non_extensible_proto: ["%0", " is not extensible"],
handler_non_object: ["Proxy.", "%0", " called with non-object as handler"],
handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
+ handler_trap_must_be_callable: ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
handler_returned_false: ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
handler_returned_undefined: ["Proxy handler ", "%0", " returned undefined for '", "%1", "' trap"],
proxy_prop_not_configurable: ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 3f5ea7b914..4943a03a1a 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -2761,7 +2761,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+ __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index f1ffe9b634..84ff991ce2 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -4459,11 +4459,18 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
- __ SmiUntag(value_reg, value_reg);
+
+ Register untagged_value = receiver_reg;
+ __ SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(
- masm, value_reg, destination,
- f0, mantissa_reg, exponent_reg, // These are: double_dst, dst1, dst2.
- scratch4, f2); // These are: scratch2, single_scratch.
+ masm,
+ untagged_value,
+ destination,
+ f0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ f2);
if (destination == FloatingPointHelper::kFPURegisters) {
CpuFeatures::Scope scope(FPU);
__ sdc1(f0, MemOperand(scratch, 0));
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 5726b37393..6870ce06dd 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -158,6 +158,12 @@ bool Object::IsString() {
}
+bool Object::IsSpecObject() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
+}
+
+
bool Object::IsSymbol() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
@@ -1333,6 +1339,8 @@ void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
ASSERT(map()->has_fast_elements() ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
+ ASSERT(map()->has_fast_double_elements() ==
+ value->IsFixedDoubleArray());
ASSERT(value->HasValidElements());
WRITE_FIELD(this, kElementsOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
@@ -1961,6 +1969,17 @@ void DescriptorArray::Swap(int first, int second) {
template<typename Shape, typename Key>
+int HashTable<Shape, Key>::ComputeCapacity(int at_least_space_for) {
+ const int kMinCapacity = 32;
+ int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
+ if (capacity < kMinCapacity) {
+ capacity = kMinCapacity; // Guarantee min capacity.
+ }
+ return capacity;
+}
+
+
+template<typename Shape, typename Key>
int HashTable<Shape, Key>::FindEntry(Key key) {
return FindEntry(GetIsolate(), key);
}
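
The capacity computation is factored out here so that ShouldConvertToSlowElements (further down in this patch) can price a hypothetical dictionary without allocating one: double the requested count for load-factor headroom, round up to a power of two, floor at 32. As an illustration of the arithmetic only, a self-contained sketch (RoundUpToPowerOf2 reproduced here for completeness; V8's version lives in utils.h):

    #include <cassert>
    #include <cstdint>

    // Round up to the next power of two (32-bit), as in V8's utils.h.
    static uint32_t RoundUpToPowerOf2(uint32_t x) {
      assert(x <= 0x80000000u);
      x = x - 1;
      x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16;
      return x + 1;
    }

    // Mirrors HashTable::ComputeCapacity: 2x headroom, power-of-two
    // size, minimum of 32 entries.
    static int ComputeCapacity(int at_least_space_for) {
      const int kMinCapacity = 32;
      int capacity = static_cast<int>(
          RoundUpToPowerOf2(static_cast<uint32_t>(at_least_space_for * 2)));
      return capacity < kMinCapacity ? kMinCapacity : capacity;
    }
    // ComputeCapacity(20) == 64; ComputeCapacity(5) == 32.
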
@@ -2757,7 +2776,8 @@ int Code::major_key() {
ASSERT(kind() == STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC);
+ kind() == COMPARE_IC ||
+ kind() == TO_BOOLEAN_IC);
return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
}
@@ -2766,7 +2786,8 @@ void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC);
+ kind() == COMPARE_IC ||
+ kind() == TO_BOOLEAN_IC);
ASSERT(0 <= major && major < 256);
WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
}
@@ -2908,6 +2929,17 @@ void Code::set_compare_state(byte value) {
}
+byte Code::to_boolean_state() {
+ ASSERT(is_to_boolean_ic_stub());
+ return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
+}
+
+
+void Code::set_to_boolean_state(byte value) {
+ ASSERT(is_to_boolean_ic_stub());
+ WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
+}
+
bool Code::is_inline_cache_stub() {
Kind kind = this->kind();
return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
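
The new to_boolean_state accessors reuse the same byte that the other IC kinds use for their extra state: all the kind-specific offsets sit at kStubMajorKeyOffset + 1 (see the objects.h hunk below). That aliasing is safe because a Code object has exactly one kind, and each accessor asserts it. A toy sketch of the pattern, with invented names:

    #include <cassert>

    // One byte serves several mutually exclusive IC kinds, guarded by
    // kind checks, as in Code's kind-specific flag accessors.
    struct CodeHeader {
      enum Kind { STUB, COMPARE_IC, TO_BOOLEAN_IC } kind;
      unsigned char kind_specific_state;  // Shared slot at a fixed offset.

      unsigned char to_boolean_state() const {
        assert(kind == TO_BOOLEAN_IC);  // Only valid for this kind.
        return kind_specific_state;
      }
      unsigned char compare_state() const {
        assert(kind == COMPARE_IC);
        return kind_specific_state;
      }
    };
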
@@ -3249,8 +3281,6 @@ ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
kAccessCheckInfoOffset)
ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
-ACCESSORS(FunctionTemplateInfo, prototype_attributes, Smi,
- kPrototypeAttributesOffset)
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
@@ -3305,6 +3335,8 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
kNeedsAccessCheckBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, read_only_prototype,
+ kReadOnlyPrototypeBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
@@ -4222,6 +4254,11 @@ MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
}
+bool JSObject::HasHiddenProperties() {
+ return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined();
+}
+
+
bool JSObject::HasElement(uint32_t index) {
return HasElementWithReceiver(this, index);
}
@@ -4337,6 +4374,31 @@ MaybeObject* StringDictionaryShape::AsObject(String* key) {
}
+bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) {
+ return key == JSObject::cast(other);
+}
+
+
+uint32_t ObjectHashTableShape::Hash(JSObject* key) {
+ MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
+ ASSERT(!maybe_hash->IsFailure());
+ return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+}
+
+
+uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) {
+ MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash(
+ JSObject::OMIT_CREATION);
+ ASSERT(!maybe_hash->IsFailure());
+ return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+}
+
+
+MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) {
+ return key;
+}
+
+
void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 158789e0da..5cb5269d7a 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -282,6 +282,19 @@ void JSObject::PrintElements(FILE* out) {
}
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ // Print in array notation for non-sparse arrays.
+ FixedDoubleArray* p = FixedDoubleArray::cast(elements());
+ for (int i = 0; i < p->length(); i++) {
+ if (p->is_the_hole(i)) {
+ PrintF(out, " %d: <the hole>", i);
+ } else {
+ PrintF(out, " %d: %g", i, p->get(i));
+ }
+ PrintF(out, "\n");
+ }
+ break;
+ }
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* p = ExternalPixelArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
@@ -360,9 +373,6 @@ void JSObject::PrintElements(FILE* out) {
}
break;
}
- default:
- UNREACHABLE();
- break;
}
}
@@ -550,6 +560,21 @@ void String::StringPrint(FILE* out) {
}
+// This method is only meant to be called from gdb for debugging purposes.
+// Since the string can also be in two-byte encoding, non-ASCII characters
+// will be truncated in the output.
+char* String::ToAsciiArray() {
+ // Static so that subsequent calls free previously allocated space.
+ // This also means that previous results will be overwritten.
+ static char* buffer = NULL;
+ if (buffer != NULL) free(buffer);
+ buffer = static_cast<char*>(malloc(length() + 1));
+ WriteToFlat(this, buffer, 0, length());
+ buffer[length()] = 0;
+ return buffer;
+}
+
+
void JSProxy::JSProxyPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSProxy");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 341f929750..e1318bcc3f 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -2331,7 +2331,7 @@ void JSProxy::Fix() {
Handle<JSProxy> self(this);
isolate->factory()->BecomeJSObject(self);
- ASSERT(IsJSObject());
+ ASSERT(self->IsJSObject());
// TODO(rossberg): recognize function proxies.
}
@@ -2471,6 +2471,9 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
// callback setter removed. The two lines looking up the LookupResult
// result are also added. If one of the functions is changed, the other
// should be.
+// Note that this method cannot be used to set the prototype of a function
+// because ConvertDescriptorToField(), which is called in "case CALLBACKS:",
+// doesn't handle function prototypes correctly.
MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
String* name,
Object* value,
@@ -2896,9 +2899,12 @@ MaybeObject* JSObject::NormalizeElements() {
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
: array->length();
+ int old_capacity = 0;
+ int used_elements = 0;
+ GetElementsCapacityAndUsage(&old_capacity, &used_elements);
NumberDictionary* dictionary = NULL;
{ Object* object;
- MaybeObject* maybe = NumberDictionary::Allocate(length);
+ MaybeObject* maybe = NumberDictionary::Allocate(used_elements);
if (!maybe->ToObject(&object)) return maybe;
dictionary = NumberDictionary::cast(object);
}
@@ -2961,6 +2967,91 @@ MaybeObject* JSObject::NormalizeElements() {
}
+MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) {
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
+ Object* holder = BypassGlobalProxy();
+ if (holder->IsUndefined()) return heap->undefined_value();
+ JSObject* obj = JSObject::cast(holder);
+ if (obj->HasFastProperties()) {
+ // If the object has fast properties, check whether the first slot
+ // in the descriptor array matches the hidden symbol. Since the
+ // hidden symbol's hash code is zero (and no other string has hash
+ // code zero) it will always occupy the first entry if present.
+ DescriptorArray* descriptors = obj->map()->instance_descriptors();
+ if ((descriptors->number_of_descriptors() > 0) &&
+ (descriptors->GetKey(0) == heap->hidden_symbol()) &&
+ descriptors->IsProperty(0)) {
+ ASSERT(descriptors->GetType(0) == FIELD);
+ return obj->FastPropertyAt(descriptors->GetFieldIndex(0));
+ }
+ }
+
+ // Only attempt to find the hidden properties in the local object and not
+ // in the prototype chain.
+ if (!obj->HasHiddenPropertiesObject()) {
+ // Hidden properties object not found. Allocate a new hidden properties
+ // object if requested. Otherwise return the undefined value.
+ if (flag == ALLOW_CREATION) {
+ Object* hidden_obj;
+ { MaybeObject* maybe_obj = heap->AllocateJSObject(
+ isolate->context()->global_context()->object_function());
+ if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj;
+ }
+ return obj->SetHiddenPropertiesObject(hidden_obj);
+ } else {
+ return heap->undefined_value();
+ }
+ }
+ return obj->GetHiddenPropertiesObject();
+}
+
+
+MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) {
+ Isolate* isolate = GetIsolate();
+ Object* hidden_props_obj;
+ { MaybeObject* maybe_obj = GetHiddenProperties(flag);
+ if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj;
+ }
+ if (!hidden_props_obj->IsJSObject()) {
+ // We failed to create hidden properties, which means the receiver
+ // is a detached global proxy.
+ ASSERT(hidden_props_obj->IsUndefined());
+ return Smi::FromInt(0);
+ }
+ JSObject* hidden_props = JSObject::cast(hidden_props_obj);
+ String* hash_symbol = isolate->heap()->identity_hash_symbol();
+ {
+ // Note that HasLocalProperty() can cause a GC in the general case in the
+ // presence of interceptors.
+ AssertNoAllocation no_alloc;
+ if (hidden_props->HasLocalProperty(hash_symbol)) {
+ MaybeObject* hash = hidden_props->GetProperty(hash_symbol);
+ return Smi::cast(hash->ToObjectChecked());
+ }
+ }
+
+ int hash_value;
+ int attempts = 0;
+ do {
+ // Generate a random 32-bit hash value but limit range to fit
+ // within a smi.
+ hash_value = V8::Random(isolate) & Smi::kMaxValue;
+ attempts++;
+ } while (hash_value == 0 && attempts < 30);
+ hash_value = hash_value != 0 ? hash_value : 1; // never return 0
+
+ Smi* hash = Smi::FromInt(hash_value);
+ { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes(
+ hash_symbol,
+ hash,
+ static_cast<PropertyAttributes>(None));
+ if (result->IsFailure()) return result;
+ }
+ return hash;
+}
+
+
MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
DeleteMode mode) {
// Check local property, ignore interceptor.
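
The retry loop above guarantees the hash is a nonzero Smi, since 0 is reserved to mean "no hash assigned yet". The generation step in isolation, as a sketch with std::mt19937 standing in for V8::Random (the Smi limit is an assumption for 32-bit targets):

    #include <cstdint>
    #include <random>

    // Smi::kMaxValue on 32-bit targets (assumption for this sketch).
    static const int32_t kSmiMaxValue = (1 << 30) - 1;

    // Mirrors the retry loop in JSObject::GetIdentityHash.
    int32_t GenerateIdentityHash(std::mt19937& rng) {
      int32_t hash_value;
      int attempts = 0;
      do {
        // Random 32-bit value masked into the positive Smi range.
        hash_value = static_cast<int32_t>(rng()) & kSmiMaxValue;
        attempts++;
      } while (hash_value == 0 && attempts < 30);
      return hash_value != 0 ? hash_value : 1;  // 0 means "no hash yet".
    }
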
@@ -3656,6 +3747,7 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
if (is_element) {
switch (GetElementsKind()) {
case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3666,7 +3758,6 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
return heap->undefined_value();
@@ -3905,6 +3996,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// Accessors overwrite previous callbacks (cf. with getters/setters).
switch (GetElementsKind()) {
case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3915,7 +4007,6 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
return isolate->heap()->undefined_value();
@@ -4688,6 +4779,9 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
switch (array->GetElementsKind()) {
case JSObject::FAST_ELEMENTS:
return UnionOfKeys(FixedArray::cast(array->elements()));
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ return UnionOfDoubleKeys(FixedDoubleArray::cast(array->elements()));
+ break;
case JSObject::DICTIONARY_ELEMENTS: {
NumberDictionary* dict = array->element_dictionary();
int size = dict->NumberOfElements();
@@ -4722,7 +4816,6 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
case JSObject::EXTERNAL_PIXEL_ELEMENTS:
- case JSObject::FAST_DOUBLE_ELEMENTS:
break;
}
UNREACHABLE();
@@ -4784,6 +4877,69 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
}
+MaybeObject* FixedArray::UnionOfDoubleKeys(FixedDoubleArray* other) {
+ int len0 = length();
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ for (int i = 0; i < len0; i++) {
+ ASSERT(get(i)->IsString() || get(i)->IsNumber());
+ }
+ }
+#endif
+ int len1 = other->length();
+ // Optimize if 'other' is empty.
+ // We cannot optimize if 'this' is empty, as 'other' may have holes
+ // or non-keys.
+ if (len1 == 0) return this;
+
+ // Compute how many elements are not in this.
+ int extra = 0;
+ Heap* heap = GetHeap();
+ Object* obj;
+ for (int y = 0; y < len1; y++) {
+ if (!other->is_the_hole(y)) {
+ MaybeObject* maybe_obj = heap->NumberFromDouble(other->get(y));
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ if (!HasKey(this, obj)) extra++;
+ }
+ }
+
+ if (extra == 0) return this;
+
+ // Allocate the result
+ { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(len0 + extra);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ // Fill in the content
+ FixedArray* result = FixedArray::cast(obj);
+ {
+ // Limit the scope of the AssertNoAllocation
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < len0; i++) {
+ Object* e = get(i);
+ ASSERT(e->IsString() || e->IsNumber());
+ result->set(i, e, mode);
+ }
+ }
+
+ // Fill in the extra keys.
+ int index = 0;
+ for (int y = 0; y < len1; y++) {
+ if (!other->is_the_hole(y)) {
+ MaybeObject* maybe_obj = heap->NumberFromDouble(other->get(y));
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ if (!HasKey(this, obj)) {
+ result->set(len0 + index, obj);
+ index++;
+ }
+ }
+ }
+ ASSERT(extra == index);
+ return result;
+}
+
+
MaybeObject* FixedArray::CopySize(int new_length) {
Heap* heap = GetHeap();
if (new_length == 0) return heap->empty_fixed_array();
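
UnionOfDoubleKeys follows the established UnionOfKeys shape: one pass counting how many of 'other''s non-hole values are missing from the receiver (boxing each double first, which may allocate), then a single allocation and a fill pass. The same two-pass structure in a self-contained sketch over std::vector, with NaN standing in for the hole marker (an illustration only, not V8's hole encoding):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Two passes: count the keys missing from 'self', then allocate
    // once and fill, as in FixedArray::UnionOfDoubleKeys.
    std::vector<double> UnionOfDoubleKeys(const std::vector<double>& self,
                                          const std::vector<double>& other) {
      auto has_key = [&](double key) {
        return std::find(self.begin(), self.end(), key) != self.end();
      };

      int extra = 0;  // Pass 1: how many of other's values are new?
      for (double d : other) {
        if (!std::isnan(d) && !has_key(d)) extra++;
      }
      if (extra == 0) return self;

      std::vector<double> result(self);  // Pass 2: copy, then append.
      result.reserve(self.size() + extra);
      for (double d : other) {
        if (!std::isnan(d) && !has_key(d)) result.push_back(d);
      }
      return result;
    }
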
@@ -7207,6 +7363,7 @@ const char* Code::Kind2String(Kind kind) {
case UNARY_OP_IC: return "UNARY_OP_IC";
case BINARY_OP_IC: return "BINARY_OP_IC";
case COMPARE_IC: return "COMPARE_IC";
+ case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
}
UNREACHABLE();
return NULL;
@@ -7539,9 +7696,10 @@ MaybeObject* JSObject::SetSlowElements(Object* len) {
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS:
// Make sure we never try to shrink dense arrays into sparse arrays.
- ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
- new_length);
+ ASSERT(static_cast<uint32_t>(
+ FixedArrayBase::cast(elements())->length()) <= new_length);
MaybeObject* result = NormalizeElements();
if (result->IsFailure()) return result;
@@ -7570,7 +7728,6 @@ MaybeObject* JSObject::SetSlowElements(Object* len) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
UNREACHABLE();
break;
}
@@ -7685,8 +7842,7 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
}
int min = NewElementsCapacity(old_capacity);
int new_capacity = value > min ? value : min;
- if (new_capacity <= kMaxFastElementsLength ||
- !ShouldConvertToSlowElements(new_capacity)) {
+ if (!ShouldConvertToSlowElements(new_capacity)) {
MaybeObject* result;
if (GetElementsKind() == FAST_ELEMENTS) {
result = SetFastElementsCapacityAndLength(new_capacity, value);
@@ -7912,6 +8068,17 @@ bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
}
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+ if ((index < length) &&
+ !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
+ return true;
+ }
+ break;
+ }
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
@@ -7926,8 +8093,7 @@ bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case EXTERNAL_DOUBLE_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
return true;
@@ -8038,6 +8204,17 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+ if ((index < length) &&
+ !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
+ return FAST_ELEMENT;
+ }
+ break;
+ }
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
@@ -8055,9 +8232,6 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
break;
}
- case FAST_DOUBLE_ELEMENTS:
- UNREACHABLE();
- break;
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index) !=
NumberDictionary::kNotFound) {
@@ -8424,8 +8598,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if ((index - length) < kMaxGap) {
// Try allocating extra space.
int new_capacity = NewElementsCapacity(index + 1);
- if (new_capacity <= kMaxFastElementsLength ||
- !ShouldConvertToSlowElements(new_capacity)) {
+ if (!ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index);
Object* new_elements;
MaybeObject* maybe =
@@ -8533,7 +8706,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
} else {
new_length = dictionary->max_number_key() + 1;
}
- MaybeObject* result = ShouldConvertToFastDoubleElements()
+ MaybeObject* result = CanConvertToFastDoubleElements()
? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
: SetFastElementsCapacityAndLength(new_length, new_length);
if (result->IsFailure()) return result;
@@ -8607,8 +8780,7 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
if ((index - elms_length) < kMaxGap) {
// Try allocating extra space.
int new_capacity = NewElementsCapacity(index+1);
- if (new_capacity <= kMaxFastElementsLength ||
- !ShouldConvertToSlowElements(new_capacity)) {
+ if (!ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index);
Object* obj;
{ MaybeObject* maybe_obj =
@@ -9076,7 +9248,15 @@ MaybeObject* JSObject::GetExternalElement(uint32_t index) {
bool JSObject::HasDenseElements() {
int capacity = 0;
- int number_of_elements = 0;
+ int used = 0;
+ GetElementsCapacityAndUsage(&capacity, &used);
+ return (capacity == 0) || (used > (capacity / 2));
+}
+
+
+void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
+ *capacity = 0;
+ *used = 0;
FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
FixedArray* backing_store = NULL;
@@ -9087,34 +9267,33 @@ bool JSObject::HasDenseElements() {
backing_store = FixedArray::cast(backing_store_base);
if (backing_store->IsDictionary()) {
NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
- capacity = dictionary->Capacity();
- number_of_elements = dictionary->NumberOfElements();
+ *capacity = dictionary->Capacity();
+ *used = dictionary->NumberOfElements();
break;
}
// Fall through.
case FAST_ELEMENTS:
backing_store = FixedArray::cast(backing_store_base);
- capacity = backing_store->length();
- for (int i = 0; i < capacity; ++i) {
- if (!backing_store->get(i)->IsTheHole()) ++number_of_elements;
+ *capacity = backing_store->length();
+ for (int i = 0; i < *capacity; ++i) {
+ if (!backing_store->get(i)->IsTheHole()) ++(*used);
}
break;
case DICTIONARY_ELEMENTS: {
NumberDictionary* dictionary =
NumberDictionary::cast(FixedArray::cast(elements()));
- capacity = dictionary->Capacity();
- number_of_elements = dictionary->NumberOfElements();
+ *capacity = dictionary->Capacity();
+ *used = dictionary->NumberOfElements();
break;
}
case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
- capacity = elms->length();
- for (int i = 0; i < capacity; i++) {
- if (!elms->is_the_hole(i)) number_of_elements++;
+ *capacity = elms->length();
+ for (int i = 0; i < *capacity; i++) {
+ if (!elms->is_the_hole(i)) ++(*used);
}
break;
}
- case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -9122,30 +9301,34 @@ bool JSObject::HasDenseElements() {
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- return true;
- }
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ // External arrays are considered 100% used.
+ ExternalArray* external_array = ExternalArray::cast(elements());
+ *capacity = external_array->length();
+ *used = external_array->length();
+ break;
}
- return (capacity == 0) || (number_of_elements > (capacity / 2));
}
bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
- // Keep the array in fast case if the current backing storage is
- // almost filled and if the new capacity is no more than twice the
- // old capacity.
- int elements_length = 0;
- if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
- FixedArray* backing_store = FixedArray::cast(elements());
- elements_length = FixedArray::cast(backing_store->get(1))->length();
- } else if (HasFastElements()) {
- elements_length = FixedArray::cast(elements())->length();
- } else if (HasFastDoubleElements()) {
- elements_length = FixedDoubleArray::cast(elements())->length();
- } else {
- UNREACHABLE();
+ STATIC_ASSERT(kMaxUncheckedOldFastElementsLength <=
+ kMaxUncheckedFastElementsLength);
+ if (new_capacity <= kMaxUncheckedOldFastElementsLength ||
+ (new_capacity <= kMaxUncheckedFastElementsLength &&
+ GetHeap()->InNewSpace(this))) {
+ return false;
}
- return !HasDenseElements() || ((new_capacity / 2) > elements_length);
+ // If the fast-case backing storage takes up roughly three times as
+ // much space (in machine words) as a dictionary backing storage
+ // would, the object should have slow elements.
+ int old_capacity = 0;
+ int used_elements = 0;
+ GetElementsCapacityAndUsage(&old_capacity, &used_elements);
+ int dictionary_size = NumberDictionary::ComputeCapacity(used_elements) *
+ NumberDictionary::kEntrySize;
+ return 3 * dictionary_size <= new_capacity;
}
@@ -9168,20 +9351,21 @@ bool JSObject::ShouldConvertToFastElements() {
// dictionary, we cannot go back to fast case.
if (dictionary->requires_slow_elements()) return false;
// If the dictionary backing storage takes up roughly half as much
- // space as a fast-case backing storage would the array should have
- // fast elements.
- uint32_t length = 0;
+ // space (in machine words) as a fast-case backing storage would,
+ // the object should have fast elements.
+ uint32_t array_size = 0;
if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_size));
} else {
- length = dictionary->max_number_key();
+ array_size = dictionary->max_number_key();
}
- return static_cast<uint32_t>(dictionary->Capacity()) >=
- (length / (2 * NumberDictionary::kEntrySize));
+ uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
+ NumberDictionary::kEntrySize;
+ return 2 * dictionary_size >= array_size;
}
-bool JSObject::ShouldConvertToFastDoubleElements() {
+bool JSObject::CanConvertToFastDoubleElements() {
if (FLAG_unbox_double_arrays) {
ASSERT(HasDictionaryElements());
NumberDictionary* dictionary = NumberDictionary::cast(elements());
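
The rewritten heuristics compare actual space costs instead of the old fixed length cap: an object goes slow when fast backing storage would cost at least three times what a dictionary would, and comes back fast when the dictionary costs at least half the fast storage. Small arrays (at most 500 entries in old space, 5000 in new space) skip the check entirely. A sketch of the slow-side predicate with the patch's constants (parameters stand in for object state; ComputeCapacity as sketched after the objects-inl.h hunk above):

    // Mirrors the shape of JSObject::ShouldConvertToSlowElements.
    int ComputeCapacity(int at_least_space_for);  // As sketched earlier.

    const int kMaxUncheckedOldFastElementsLength = 500;
    const int kMaxUncheckedFastElementsLength = 5000;
    const int kEntrySize = 3;  // NumberDictionary entry: key, value, details.

    bool ShouldConvertToSlowElements(int new_capacity,
                                     int used_elements,
                                     bool in_new_space) {
      // Small backing stores always stay fast, without measuring density.
      if (new_capacity <= kMaxUncheckedOldFastElementsLength ||
          (new_capacity <= kMaxUncheckedFastElementsLength && in_new_space)) {
        return false;
      }
      int dictionary_size = ComputeCapacity(used_elements) * kEntrySize;
      // Go slow once fast storage would cost at least 3x the dictionary.
      return 3 * dictionary_size <= new_capacity;
    }

For example, with 100 live elements ComputeCapacity yields 256, so dictionary_size is 768 words and the object only goes to dictionary mode once its fast backing store would reach 2304 slots.
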
@@ -9371,6 +9555,15 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
}
+ case FAST_DOUBLE_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+ return (index < length) &&
+ !FixedDoubleArray::cast(elements())->is_the_hole(index);
+ break;
+ }
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
return index < static_cast<uint32_t>(pixels->length());
@@ -9386,9 +9579,6 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
ExternalArray* array = ExternalArray::cast(elements());
return index < static_cast<uint32_t>(array->length());
}
- case FAST_DOUBLE_ELEMENTS:
- UNREACHABLE();
- break;
case DICTIONARY_ELEMENTS: {
return element_dictionary()->FindEntry(index)
!= NumberDictionary::kNotFound;
@@ -10106,11 +10296,8 @@ void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
PretenureFlag pretenure) {
- const int kMinCapacity = 32;
- int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
- if (capacity < kMinCapacity) {
- capacity = kMinCapacity; // Guarantee min capacity.
- } else if (capacity > HashTable::kMaxCapacity) {
+ int capacity = ComputeCapacity(at_least_space_for);
+ if (capacity > HashTable::kMaxCapacity) {
return Failure::OutOfMemoryException();
}
@@ -10278,6 +10465,8 @@ template class HashTable<CompilationCacheShape, HashTableKey*>;
template class HashTable<MapCacheShape, HashTableKey*>;
+template class HashTable<ObjectHashTableShape, JSObject*>;
+
template class Dictionary<StringDictionaryShape, String*>;
template class Dictionary<NumberDictionaryShape, uint32_t>;
@@ -10490,19 +10679,19 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
set_map(new_map);
set_elements(fast_elements);
- } else {
+ } else if (!HasFastDoubleElements()) {
Object* obj;
{ MaybeObject* maybe_obj = EnsureWritableFastElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
}
- ASSERT(HasFastElements());
+ ASSERT(HasFastElements() || HasFastDoubleElements());
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
- FixedArray* elements = FixedArray::cast(this->elements());
- uint32_t elements_length = static_cast<uint32_t>(elements->length());
+ FixedArrayBase* elements_base = FixedArrayBase::cast(this->elements());
+ uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
if (limit > elements_length) {
limit = elements_length ;
}
@@ -10521,47 +10710,78 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
result_double = HeapNumber::cast(new_double);
}
- AssertNoAllocation no_alloc;
-
- // Split elements into defined, undefined and the_hole, in that order.
- // Only count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
- unsigned int undefs = limit;
- unsigned int holes = limit;
- // Assume most arrays contain no holes and undefined values, so minimize the
- // number of stores of non-undefined, non-the-hole values.
- for (unsigned int i = 0; i < undefs; i++) {
- Object* current = elements->get(i);
- if (current->IsTheHole()) {
- holes--;
- undefs--;
- } else if (current->IsUndefined()) {
- undefs--;
- } else {
- continue;
+ uint32_t result = 0;
+ if (elements_base->map() == heap->fixed_double_array_map()) {
+ FixedDoubleArray* elements = FixedDoubleArray::cast(elements_base);
+ // Split elements into defined and the_hole, in that order.
+ unsigned int holes = limit;
+ // Assume most arrays contain no holes, so minimize the number of
+ // stores of non-the-hole values; a double array cannot contain
+ // undefined values.
+ for (unsigned int i = 0; i < holes; i++) {
+ if (elements->is_the_hole(i)) {
+ holes--;
+ } else {
+ continue;
+ }
+ // Position i needs to be filled.
+ while (holes > i) {
+ if (elements->is_the_hole(holes)) {
+ holes--;
+ } else {
+ elements->set(i, elements->get(holes));
+ break;
+ }
+ }
}
- // Position i needs to be filled.
- while (undefs > i) {
- current = elements->get(undefs);
+ result = holes;
+ while (holes < limit) {
+ elements->set_the_hole(holes);
+ holes++;
+ }
+ } else {
+ FixedArray* elements = FixedArray::cast(elements_base);
+ AssertNoAllocation no_alloc;
+
+ // Split elements into defined, undefined and the_hole, in that order. Only
+ // count locations for undefined and the hole, and fill them afterwards.
+ WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
+ unsigned int undefs = limit;
+ unsigned int holes = limit;
+ // Assume most arrays contain no holes and undefined values, so minimize the
+ // number of stores of non-undefined, non-the-hole values.
+ for (unsigned int i = 0; i < undefs; i++) {
+ Object* current = elements->get(i);
if (current->IsTheHole()) {
holes--;
undefs--;
} else if (current->IsUndefined()) {
undefs--;
} else {
- elements->set(i, current, write_barrier);
- break;
+ continue;
+ }
+ // Position i needs to be filled.
+ while (undefs > i) {
+ current = elements->get(undefs);
+ if (current->IsTheHole()) {
+ holes--;
+ undefs--;
+ } else if (current->IsUndefined()) {
+ undefs--;
+ } else {
+ elements->set(i, current, write_barrier);
+ break;
+ }
}
}
- }
- uint32_t result = undefs;
- while (undefs < holes) {
- elements->set_undefined(undefs);
- undefs++;
- }
- while (holes < limit) {
- elements->set_the_hole(holes);
- holes++;
+ result = undefs;
+ while (undefs < holes) {
+ elements->set_undefined(undefs);
+ undefs++;
+ }
+ while (holes < limit) {
+ elements->set_the_hole(holes);
+ holes++;
+ }
}
if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
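
In the double branch only holes need to be partitioned to the end (a FixedDoubleArray cannot hold undefined), using the same front/back scan as the object path. The core move as a standalone sketch, again with NaN playing the hole (illustration only):

    #include <cmath>

    // Compact non-hole doubles to the front, holes (NaN here) to the
    // back. Returns the number of non-hole values, like the double
    // branch of PrepareElementsForSort.
    unsigned CompactDoubles(double* elements, unsigned limit) {
      unsigned holes = limit;
      for (unsigned i = 0; i < holes; i++) {
        if (!std::isnan(elements[i])) continue;  // Already in place.
        holes--;  // Position i must be filled from the back.
        while (holes > i) {
          if (std::isnan(elements[holes])) {
            holes--;
          } else {
            elements[i] = elements[holes];
            break;
          }
        }
      }
      unsigned result = holes;
      while (holes < limit) elements[holes++] = NAN;  // Re-hole the tail.
      return result;
    }
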
@@ -11589,6 +11809,64 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
+Object* ObjectHashTable::Lookup(JSObject* key) {
+ // If the object does not have an identity hash, it was never used as a key.
+ MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
+ if (maybe_hash->IsFailure()) return GetHeap()->undefined_value();
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return GetHeap()->undefined_value();
+ return get(EntryToIndex(entry) + 1);
+}
+
+
+MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
+ // Make sure the key object has an identity hash code.
+ int hash;
+ { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION);
+ if (maybe_hash->IsFailure()) return maybe_hash;
+ hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+ }
+ int entry = FindEntry(key);
+
+ // Check whether to perform removal operation.
+ if (value->IsUndefined()) {
+ if (entry == kNotFound) return this;
+ RemoveEntry(entry);
+ return Shrink(key);
+ }
+
+ // Key is already in table, just overwrite value.
+ if (entry != kNotFound) {
+ set(EntryToIndex(entry) + 1, value);
+ return this;
+ }
+
+ // Check whether the hash table should be extended.
+ Object* obj;
+ { MaybeObject* maybe_obj = EnsureCapacity(1, key);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ ObjectHashTable* table = ObjectHashTable::cast(obj);
+ table->AddEntry(table->FindInsertionEntry(hash), key, value);
+ return table;
+}
+
+
+void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) {
+ set(EntryToIndex(entry), key);
+ set(EntryToIndex(entry) + 1, value);
+ ElementAdded();
+}
+
+
+void ObjectHashTable::RemoveEntry(int entry) {
+ Object* null_value = GetHeap()->null_value();
+ set(EntryToIndex(entry), null_value);
+ set(EntryToIndex(entry) + 1, null_value);
+ ElementRemoved();
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check if there is a break point at this code position.
bool DebugInfo::HasBreakPoint(int code_position) {
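
Semantically, the new ObjectHashTable maps object identity to values (hashing via GetIdentityHash rather than by value), and storing undefined removes the entry. Its observable contract, mocked with pointer keys in a sketch (std::unordered_map stands in for V8's flat open-addressed layout; all names invented):

    #include <unordered_map>

    struct Object {};          // Stand-in for v8::internal::Object.
    static Object kUndefined;  // Sentinel playing "undefined".

    // Identity-keyed table with Put(key, undefined) == removal, as in
    // ObjectHashTable::Put / Lookup.
    class IdentityTable {
     public:
      Object* Lookup(Object* key) const {
        auto it = map_.find(key);
        return it == map_.end() ? &kUndefined : it->second;
      }
      void Put(Object* key, Object* value) {
        if (value == &kUndefined) {
          map_.erase(key);    // Removal semantics.
        } else {
          map_[key] = value;  // Insert or overwrite.
        }
      }
     private:
      std::unordered_map<Object*, Object*> map_;  // Hashes pointer identity.
    };

V8's version additionally shrinks the table on removal (Shrink) and grows it via EnsureCapacity before inserting.
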
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 9b55ea7475..ba690ecab5 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -790,6 +790,8 @@ class Object : public MaybeObject {
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
#undef DECLARE_STRUCT_PREDICATE
+ INLINE(bool IsSpecObject());
+
// Oddball testing.
INLINE(bool IsUndefined());
INLINE(bool IsNull());
@@ -1636,6 +1638,23 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
Object* hidden_obj);
+ // Indicates whether the hidden properties object should be created.
+ enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION };
+
+ // Retrieves the hidden properties object.
+ //
+ // The undefined value might be returned in case no hidden properties object
+ // is present and creation was omitted.
+ inline bool HasHiddenProperties();
+ MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag);
+
+ // Retrieves a permanent object identity hash code.
+ //
+ // The identity hash is stored as a hidden property. The undefined value might
+ // be returned in case no hidden properties object is present and creation was
+ // omitted.
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag);
+
MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
@@ -1652,7 +1671,7 @@ class JSObject: public JSReceiver {
bool ShouldConvertToFastElements();
// Returns true if the elements of JSObject contains only values that can be
// represented in a FixedDoubleArray.
- bool ShouldConvertToFastDoubleElements();
+ bool CanConvertToFastDoubleElements();
// Tells whether the index'th element is present.
inline bool HasElement(uint32_t index);
@@ -1946,8 +1965,21 @@ class JSObject: public JSReceiver {
// Also maximal value of JSArray's length property.
static const uint32_t kMaxElementCount = 0xffffffffu;
+ // Constants for heuristics controlling conversion of fast elements
+ // to slow elements.
+
+ // Maximal gap that can be introduced by adding an element beyond
+ // the current elements length.
static const uint32_t kMaxGap = 1024;
- static const int kMaxFastElementsLength = 5000;
+
+ // Maximal length of fast elements array that won't be checked for
+ // being dense enough on expansion.
+ static const int kMaxUncheckedFastElementsLength = 5000;
+
+ // Same as above but for old arrays. This limit is stricter: we
+ // don't want to be wasteful with long-lived objects.
+ static const int kMaxUncheckedOldFastElementsLength = 500;
+
static const int kInitialMaxFastElementArray = 100000;
static const int kMaxFastProperties = 12;
static const int kMaxInstanceSize = 255 * kPointerSize;
@@ -2013,6 +2045,9 @@ class JSObject: public JSReceiver {
// Returns true if most of the elements backing storage is used.
bool HasDenseElements();
+ // Gets the current elements capacity and the number of used elements.
+ void GetElementsCapacityAndUsage(int* capacity, int* used);
+
bool CanSetCallback(String* name);
MUST_USE_RESULT MaybeObject* SetElementCallback(
uint32_t index,
@@ -2048,6 +2083,7 @@ class FixedArrayBase: public HeapObject {
static const int kHeaderSize = kLengthOffset + kPointerSize;
};
+class FixedDoubleArray;
// FixedArray describes fixed-sized arrays with element type Object*.
class FixedArray: public FixedArrayBase {
@@ -2090,6 +2126,10 @@ class FixedArray: public FixedArrayBase {
// Compute the union of this and other.
MUST_USE_RESULT MaybeObject* UnionOfKeys(FixedArray* other);
+ // Compute the union of this and other.
+ MUST_USE_RESULT MaybeObject* UnionOfDoubleKeys(
+ FixedDoubleArray* other);
+
// Copy a sub array from the receiver to dest.
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
@@ -2484,6 +2524,10 @@ class HashTable: public FixedArray {
int at_least_space_for,
PretenureFlag pretenure = NOT_TENURED);
+ // Computes the required capacity for a table holding the given
+ // number of elements. May be more than HashTable::kMaxCapacity.
+ static int ComputeCapacity(int at_least_space_for);
+
// Returns the key at entry.
Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
@@ -2906,6 +2950,40 @@ class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
};
+class ObjectHashTableShape {
+ public:
+ static inline bool IsMatch(JSObject* key, Object* other);
+ static inline uint32_t Hash(JSObject* key);
+ static inline uint32_t HashForObject(JSObject* key, Object* object);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key);
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 2;
+};
+
+
+// ObjectHashTable maps keys that are JavaScript objects to object values by
+// using the identity hash of the key for hashing purposes.
+class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
+ public:
+ static inline ObjectHashTable* cast(Object* obj) {
+ ASSERT(obj->IsHashTable());
+ return reinterpret_cast<ObjectHashTable*>(obj);
+ }
+
+ // Looks up the value associated with the given key. The undefined value is
+ // returned in case the key is not present.
+ Object* Lookup(JSObject* key);
+
+ // Adds (or overwrites) the value associated with the given key. Mapping a
+ // key to the undefined value causes removal of the whole entry.
+ MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value);
+
+ private:
+ void AddEntry(int entry, JSObject* key, Object* value);
+ void RemoveEntry(int entry);
+};
+
+
// JSFunctionResultCache caches results of some JSFunction invocation.
// It is a fixed array with fixed structure:
// [0]: factory function
@@ -3480,13 +3558,14 @@ class Code: public HeapObject {
UNARY_OP_IC,
BINARY_OP_IC,
COMPARE_IC,
+ TO_BOOLEAN_IC,
// No more than 16 kinds. The value is currently encoded in four bits in
// Flags.
// Pseudo-kinds.
REGEXP = BUILTIN,
FIRST_IC_KIND = LOAD_IC,
- LAST_IC_KIND = COMPARE_IC
+ LAST_IC_KIND = TO_BOOLEAN_IC
};
enum {
@@ -3552,13 +3631,10 @@ class Code: public HeapObject {
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
- inline bool is_unary_op_stub() {
- return kind() == UNARY_OP_IC;
- }
- inline bool is_binary_op_stub() {
- return kind() == BINARY_OP_IC;
- }
+ inline bool is_unary_op_stub() { return kind() == UNARY_OP_IC; }
+ inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
+ inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
inline int major_key();
@@ -3600,21 +3676,24 @@ class Code: public HeapObject {
inline CheckType check_type();
inline void set_check_type(CheckType value);
- // [type-recording unary op type]: For all UNARY_OP_IC.
+ // [type-recording unary op type]: For kind UNARY_OP_IC.
inline byte unary_op_type();
inline void set_unary_op_type(byte value);
- // [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
+ // [type-recording binary op type]: For kind BINARY_OP_IC.
inline byte binary_op_type();
inline void set_binary_op_type(byte value);
inline byte binary_op_result_type();
inline void set_binary_op_result_type(byte value);
- // [compare state]: For kind compare IC stubs, tells what state the
- // stub is in.
+ // [compare state]: For kind COMPARE_IC, tells what state the stub is in.
inline byte compare_state();
inline void set_compare_state(byte value);
+ // [to_boolean_state]: For kind TO_BOOLEAN_IC, tells what state the stub is in.
+ inline byte to_boolean_state();
+ inline void set_to_boolean_state(byte value);
+
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -3756,9 +3835,10 @@ class Code: public HeapObject {
static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
- static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
+ static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
+ static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
static const int kHasDeoptimizationSupportOffset = kOptimizableOffset + 1;
static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
@@ -5847,6 +5927,8 @@ class String: public HeapObject {
StringPrint(stdout);
}
void StringPrint(FILE* out);
+
+ char* ToAsciiArray();
#endif
#ifdef DEBUG
void StringVerify();
@@ -6837,7 +6919,6 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_ACCESSORS(instance_call_handler, Object)
DECL_ACCESSORS(access_check_info, Object)
DECL_ACCESSORS(flag, Smi)
- DECL_ACCESSORS(prototype_attributes, Smi)
// Following properties use flag bits.
DECL_BOOLEAN_ACCESSORS(hidden_prototype)
@@ -6845,6 +6926,7 @@ class FunctionTemplateInfo: public TemplateInfo {
// If the bit is set, object instances created by this function
// require an access check.
DECL_BOOLEAN_ACCESSORS(needs_access_check)
+ DECL_BOOLEAN_ACCESSORS(read_only_prototype)
static inline FunctionTemplateInfo* cast(Object* obj);
@@ -6877,14 +6959,14 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kAccessCheckInfoOffset =
kInstanceCallHandlerOffset + kPointerSize;
static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
- static const int kPrototypeAttributesOffset = kFlagOffset + kPointerSize;
- static const int kSize = kPrototypeAttributesOffset + kPointerSize;
+ static const int kSize = kFlagOffset + kPointerSize;
private:
// Bit position in the flag, from least significant bit position.
static const int kHiddenPrototypeBit = 0;
static const int kUndetectableBit = 1;
static const int kNeedsAccessCheckBit = 2;
+ static const int kReadOnlyPrototypeBit = 3;
DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
};
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 5704cb805d..ed6ff49313 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -2755,7 +2755,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
Handle<String> name = callee->name();
Variable* var = top_scope_->Lookup(name);
if (var == NULL) {
- top_scope_->RecordEvalCall();
+ top_scope_->DeclarationScope()->RecordEvalCall();
}
}
result = NewCall(result, args, pos);
@@ -3641,7 +3641,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
}
int num_parameters = 0;
- Scope* scope = NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
+ // Function declarations are hoisted.
+ Scope* scope = (type == DECLARATION)
+ ? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false)
+ : NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
int materialized_literal_count;
int expected_property_count;
@@ -3715,36 +3718,43 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
RelocInfo::kNoPosition)));
}
- // Determine if the function will be lazily compiled. The mode can
- // only be PARSE_LAZILY if the --lazy flag is true.
+ // Determine if the function will be lazily compiled. The mode can only
+ // be PARSE_LAZILY if the --lazy flag is true. We will not lazily
+ // compile if we do not have preparser data for the function.
bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
top_scope_->outer_scope()->is_global_scope() &&
top_scope_->HasTrivialOuterContext() &&
- !parenthesized_function_);
+ !parenthesized_function_ &&
+ pre_data() != NULL);
parenthesized_function_ = false; // The bit was set for this function only.
- int function_block_pos = scanner().location().beg_pos;
- if (is_lazily_compiled && pre_data() != NULL) {
+ if (is_lazily_compiled) {
+ int function_block_pos = scanner().location().beg_pos;
FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
if (!entry.is_valid()) {
- ReportInvalidPreparseData(name, CHECK_OK);
- }
- end_pos = entry.end_pos();
- if (end_pos <= function_block_pos) {
- // End position greater than end of stream is safe, and hard to check.
- ReportInvalidPreparseData(name, CHECK_OK);
+ // There is no preparser data for the function, so we will not
+ // lazily compile after all.
+ is_lazily_compiled = false;
+ } else {
+ end_pos = entry.end_pos();
+ if (end_pos <= function_block_pos) {
+ // End position greater than end of stream is safe, and hard to check.
+ ReportInvalidPreparseData(name, CHECK_OK);
+ }
+ isolate()->counters()->total_preparse_skipped()->Increment(
+ end_pos - function_block_pos);
+ // Seek to position just before terminal '}'.
+ scanner().SeekForward(end_pos - 1);
+ materialized_literal_count = entry.literal_count();
+ expected_property_count = entry.property_count();
+ if (entry.strict_mode()) top_scope_->EnableStrictMode();
+ only_simple_this_property_assignments = false;
+ this_property_assignments = isolate()->factory()->empty_fixed_array();
+ Expect(Token::RBRACE, CHECK_OK);
}
- isolate()->counters()->total_preparse_skipped()->Increment(
- end_pos - function_block_pos);
- // Seek to position just before terminal '}'.
- scanner().SeekForward(end_pos - 1);
- materialized_literal_count = entry.literal_count();
- expected_property_count = entry.property_count();
- if (entry.strict_mode()) top_scope_->EnableStrictMode();
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
- Expect(Token::RBRACE, CHECK_OK);
- } else {
+ }
+
+ if (!is_lazily_compiled) {
ParseSourceElements(body, Token::RBRACE, CHECK_OK);
materialized_literal_count = lexical_scope.materialized_literal_count();
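
The net effect of the restructuring above: lazy compilation now requires preparser data up front, and a missing per-function entry quietly falls back to eager parsing instead of reporting invalid data. The decision, condensed into a hypothetical predicate (parameter names invented, standing in for parser state):

    // Condenses the lazy-compilation decision from ParseFunctionLiteral.
    bool ShouldCompileLazily(bool parse_lazily_mode,
                             bool outer_is_global,
                             bool trivial_outer_context,
                             bool parenthesized,
                             bool has_preparse_data,
                             bool entry_is_valid) {
      bool lazy = parse_lazily_mode &&
                  outer_is_global &&
                  trivial_outer_context &&
                  !parenthesized &&
                  has_preparse_data;  // New: no data, no laziness.
      // New: a missing entry demotes to eager parsing instead of erroring.
      return lazy && entry_is_valid;
    }
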
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 7cb4fd9709..85a5e4f610 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -166,6 +166,12 @@ void OS::Free(void* address, const size_t size) {
}
+void OS::ProtectCode(void* address, const size_t size) {
+ DWORD old_protect;
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+}
+
+
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index bc280ea0a0..37330be82b 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -78,13 +78,33 @@ double ceiling(double x) {
static Mutex* limit_mutex = NULL;
+static void* GetRandomMmapAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up when this is called via
+ // CpuFeatures::Probe. We don't care about randomization in this case
+ // because the code page is immediately freed.
+ if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+ uint64_t rnd1 = V8::RandomPrivate(isolate);
+ uint64_t rnd2 = V8::RandomPrivate(isolate);
+ uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+ uint32_t raw_addr = V8::RandomPrivate(isolate);
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc.).
+ raw_addr &= 0x3ffff000;
+ raw_addr += 0x20000000;
+#endif
+ return reinterpret_cast<void*>(raw_addr);
+ }
+ return NULL;
+}
+
+
void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ // Seed the random number generator. We preserve microsecond resolution.
+ uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
@@ -367,10 +387,10 @@ size_t OS::AllocateAlignment() {
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
- // TODO(805): Port randomization of allocated executable memory to Linux.
const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ void* addr = GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
StringEvent("OS::Allocate", "mmap failed"));
@@ -586,7 +606,7 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
+ address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
size_ = size;
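
GetRandomMmapAddr only produces a hint: the mmap calls above don't pass MAP_FIXED, so a collision simply falls back to kernel placement. The 32-bit hint computation and its use, sketched with rand() in place of V8::RandomPrivate (constants are the patch's; error handling trimmed):

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstdlib>

    // Compute a page-aligned hint in the 0x20000000-0x60000000 window,
    // mirroring the 32-bit branch of GetRandomMmapAddr.
    static void* RandomMmapHint() {
      uint32_t raw_addr = static_cast<uint32_t>(rand());
      raw_addr &= 0x3ffff000;  // Page-align and cap the range.
      raw_addr += 0x20000000;  // Shift into the sparsely used window.
      return reinterpret_cast<void*>(raw_addr);
    }

    void* AllocatePages(size_t size) {
      // Without MAP_FIXED the kernel treats the address purely as a hint.
      void* mbase = mmap(RandomMmapHint(), size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return mbase == MAP_FAILED ? nullptr : mbase;
    }
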
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 9727efeee0..52cf02963a 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -71,6 +71,12 @@ intptr_t OS::MaxVirtualMemory() {
#ifndef __CYGWIN__
+// Remove write permission from code allocations.
+void OS::ProtectCode(void* address, const size_t size) {
+ mprotect(address, size, PROT_READ | PROT_EXEC);
+}
+
+
// Create guard pages.
void OS::Guard(void* address, const size_t size) {
mprotect(address, size, PROT_NONE);
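
ProtectCode drops the write bit after code has been emitted, so a page is never writable and executable at the same time (W^X). A minimal sketch of the usage pattern it enables (page size hardcoded for brevity; real code should query sysconf):

    #include <sys/mman.h>
    #include <cstring>

    // Map a page writable, emit code into it, then flip it to
    // read+execute, following the pattern OS::ProtectCode enables.
    // Assumes size <= kPageSize.
    void* EmitCode(const unsigned char* code, size_t size) {
      const size_t kPageSize = 4096;  // Assumption; query sysconf instead.
      void* page = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED) return nullptr;
      memcpy(page, code, size);  // Write while the page is writable...
      mprotect(page, kPageSize, PROT_READ | PROT_EXEC);  // ...then seal.
      return page;
    }
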
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index b23e25ec97..e5df5ff3bf 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -957,6 +957,12 @@ void OS::Free(void* address, const size_t size) {
}
+void OS::ProtectCode(void* address, const size_t size) {
+ DWORD old_protect;
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+}
+
+
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 4d7f9cf775..6b2348c890 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -207,6 +207,9 @@ class OS {
bool is_executable);
static void Free(void* address, const size_t size);
+ // Mark code segments non-writable.
+ static void ProtectCode(void* address, const size_t size);
+
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 27524bd918..2839159563 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -136,6 +136,10 @@ function DerivedHasTrap(name) {
return !!this.getPropertyDescriptor(name)
}
+function DerivedHasOwnTrap(name) {
+ return !!this.getOwnPropertyDescriptor(name)
+}
+
function DerivedKeysTrap() {
var names = this.getOwnPropertyNames()
var enumerableNames = []
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index b4259c4a5c..8c43d64536 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -219,8 +219,20 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
}
break;
}
- default:
- UNREACHABLE();
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ // No contained objects, nothing to do.
break;
}
return copy;
@@ -619,7 +631,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSProxy, proxy, args[0]);
proxy->Fix();
- return proxy;
+ return isolate->heap()->undefined_value();
}
@@ -1666,7 +1678,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
ASSERT(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(elements_count, 0);
- if (elements_count > JSArray::kMaxFastElementsLength) {
+ if (elements_count < 0 ||
+ elements_count > FixedArray::kMaxLength ||
+ !Smi::IsValid(elements_count)) {
return isolate->ThrowIllegalOperation();
}
Object* new_object;
@@ -1968,6 +1982,61 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
+ NoHandleAllocation ha;
+ RUNTIME_ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSFunction, function, args[0]);
+
+ MaybeObject* maybe_name =
+ isolate->heap()->AllocateStringFromAscii(CStrVector("prototype"));
+ String* name;
+ if (!maybe_name->To(&name)) return maybe_name;
+
+ if (function->HasFastProperties()) {
+ // Construct a new field descriptor with updated attributes.
+ DescriptorArray* instance_desc = function->map()->instance_descriptors();
+ int index = instance_desc->Search(name);
+ ASSERT(index != DescriptorArray::kNotFound);
+ PropertyDetails details(instance_desc->GetDetails(index));
+ CallbacksDescriptor new_desc(name,
+ instance_desc->GetValue(index),
+ static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
+ details.index());
+ // Construct a new field descriptors array containing the new descriptor.
+ Object* descriptors_unchecked;
+ { MaybeObject* maybe_descriptors_unchecked =
+ instance_desc->CopyInsert(&new_desc, REMOVE_TRANSITIONS);
+ if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
+ return maybe_descriptors_unchecked;
+ }
+ }
+ DescriptorArray* new_descriptors =
+ DescriptorArray::cast(descriptors_unchecked);
+ // Create a new map featuring the new field descriptors array.
+ Object* map_unchecked;
+ { MaybeObject* maybe_map_unchecked = function->map()->CopyDropDescriptors();
+ if (!maybe_map_unchecked->ToObject(&map_unchecked)) {
+ return maybe_map_unchecked;
+ }
+ }
+ Map* new_map = Map::cast(map_unchecked);
+ new_map->set_instance_descriptors(new_descriptors);
+ function->set_map(new_map);
+ } else { // Dictionary properties.
+ // Directly manipulate the property details.
+ int entry = function->property_dictionary()->FindEntry(name);
+ ASSERT(entry != StringDictionary::kNotFound);
+ PropertyDetails details = function->property_dictionary()->DetailsAt(entry);
+ PropertyDetails new_details(
+ static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
+ details.type(),
+ details.index());
+ function->property_dictionary()->DetailsAtPut(entry, new_details);
+ }
+ return function;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -4463,7 +4532,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
for (int i = 0; i < length; i++) {
jsproto->GetLocalPropertyNames(*names,
i == 0 ? 0 : local_property_count[i - 1]);
- if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
+ if (jsproto->HasHiddenProperties()) {
proto_with_hidden_properties++;
}
if (i < length - 1) {
@@ -9521,6 +9590,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
if (new_elements->map() == isolate->heap()->fixed_array_map() ||
new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
maybe_new_map = to->map()->GetFastElementsMap();
+ } else if (new_elements->map() ==
+ isolate->heap()->fixed_double_array_map()) {
+ maybe_new_map = to->map()->GetFastDoubleElementsMap();
} else {
maybe_new_map = to->map()->GetSlowElementsMap();
}
@@ -9608,12 +9680,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
}
return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
- ASSERT(array->HasFastElements());
+ ASSERT(array->HasFastElements() || array->HasFastDoubleElements());
Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
+ FixedArrayBase* elements = FixedArrayBase::cast(array->elements());
uint32_t actual_length =
- static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
+ static_cast<uint32_t>(elements->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
isolate->factory()->NewNumber(static_cast<double>(min_length));
@@ -11192,7 +11265,6 @@ static Handle<Object> GetArgumentsObject(Isolate* isolate,
if (sinfo->number_of_stack_slots() > 0) {
index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
if (index != -1) {
- CHECK(false);
return Handle<Object>(frame->GetExpression(index), isolate);
}
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index ac912d8129..a42b3bc4b4 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -210,6 +210,7 @@ namespace internal {
F(FunctionSetInstanceClassName, 2, 1) \
F(FunctionSetLength, 2, 1) \
F(FunctionSetPrototype, 2, 1) \
+ F(FunctionSetReadOnlyPrototype, 1, 1) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
F(FunctionSetBound, 1, 1) \
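Each F(Name, number_of_args, result_size) entry feeds an X-macro table: the list is expanded several times with different definitions of F to declare, register, and enumerate the runtime functions, so the one-line addition above is all the bookkeeping the new entry needs. A compact illustration of the pattern (the enum shown is a sketch, not V8's exact expansion):

    #define MY_RUNTIME_LIST(F) \
      F(FunctionSetReadOnlyPrototype, 1, 1)

    #define F(name, nargs, ressize) k##name,
    enum FunctionId { MY_RUNTIME_LIST(F) kNumFunctions };
    #undef F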
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index e76fb50598..d4eb17cd56 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -357,11 +357,17 @@ class Scope: public ZoneObject {
// Illegal redeclaration.
Expression* illegal_redecl_;
- // Scope-specific information.
- bool scope_inside_with_; // this scope is inside a 'with' of some outer scope
- bool scope_contains_with_; // this scope contains a 'with' statement
- bool scope_calls_eval_; // this scope contains an 'eval' call
- bool strict_mode_; // this scope is a strict mode scope
+ // Scope-specific information computed during parsing.
+ //
+ // This scope is inside a 'with' of some outer scope.
+ bool scope_inside_with_;
+ // This scope contains a 'with' statement.
+ bool scope_contains_with_;
+  // This scope or a nested catch scope or with scope contains an 'eval' call. At
+  // the 'eval' call site this scope is the declaration scope.
+ bool scope_calls_eval_;
+ // This scope is a strict mode scope.
+ bool strict_mode_;
// Computed via PropagateScopeInfo.
bool outer_scope_calls_eval_;
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 0f80496b0a..54361da88a 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -1542,6 +1542,7 @@ static void ReportCodeKindStatistics() {
CASE(UNARY_OP_IC);
CASE(BINARY_OP_IC);
CASE(COMPARE_IC);
+ CASE(TO_BOOLEAN_IC);
}
}
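The CASE() lines sit inside a switch over Code::Kind in ReportCodeKindStatistics; the new line keeps that switch exhaustive now that a TO_BOOLEAN_IC code kind exists. The macro's shape, reconstructed for illustration (the real body may differ):

    #define CASE(name)                            \
      case Code::name: table[Code::name] = #name; \
      break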
diff --git a/deps/v8/src/third_party/valgrind/valgrind.h b/deps/v8/src/third_party/valgrind/valgrind.h
index a94dc58bd6..7a3ee2f1fb 100644
--- a/deps/v8/src/third_party/valgrind/valgrind.h
+++ b/deps/v8/src/third_party/valgrind/valgrind.h
@@ -12,7 +12,7 @@
This file is part of Valgrind, a dynamic binary instrumentation
framework.
- Copyright (C) 2000-2007 Julian Seward. All rights reserved.
+ Copyright (C) 2000-2010 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -73,6 +73,25 @@
#ifndef __VALGRIND_H
#define __VALGRIND_H
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+ conditionally compile based on our version number. Note that these
+ were introduced at version 3.6 and so do not exist in version 3.5
+ or earlier. The recommended way to use them to check for "version
+ X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
+ && (__VALGRIND_MAJOR__ > 3 \
+ || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__ 3
+#define __VALGRIND_MINOR__ 6
+
+
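Since VALGRIND_DO_CLIENT_REQUEST_EXPR first appears in this 3.6 version of the header, code that must also build against older copies can use exactly the guard recommended above; sketch:

    #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
        && (__VALGRIND_MAJOR__ > 3 \
            || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
    /* VALGRIND_DO_CLIENT_REQUEST_EXPR is available here */
    #endif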
#include <stdarg.h>
#include <stdint.h>
@@ -85,34 +104,44 @@
identifying architectures, which are different to the ones we use
within the rest of Valgrind. Note, __powerpc__ is active for both
32 and 64-bit PPC, whereas __powerpc64__ is only active for the
- latter (on Linux, that is). */
+ latter (on Linux, that is).
+
+ Misc note: how to find out what's predefined in gcc by default:
+ gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
-
-#if !defined(_AIX) && defined(__i386__)
+#undef PLAT_arm_linux
+#undef PLAT_s390x_linux
+
+
+#if defined(__APPLE__) && defined(__i386__)
+# define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+# define PLAT_amd64_darwin 1
+#elif defined(__MINGW32__) || defined(__CYGWIN32__) \
+ || (defined(_WIN32) && defined(_M_IX86))
+# define PLAT_x86_win32 1
+#elif defined(__linux__) && defined(__i386__)
# define PLAT_x86_linux 1
-#elif !defined(_AIX) && defined(__x86_64__)
+#elif defined(__linux__) && defined(__x86_64__)
# define PLAT_amd64_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && !defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
# define PLAT_ppc32_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
# define PLAT_ppc64_linux 1
-#elif defined(_AIX) && defined(__64BIT__)
-# define PLAT_ppc64_aix5 1
-#elif defined(_AIX) && !defined(__64BIT__)
-# define PLAT_ppc32_aix5 1
-#endif
-
-
+#elif defined(__linux__) && defined(__arm__)
+# define PLAT_arm_linux 1
+#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
+# define PLAT_s390x_linux 1
+#else
/* If we're not compiling for our target platform, don't generate
any inline asms. */
-#if !defined(PLAT_x86_linux) && !defined(PLAT_amd64_linux) \
- && !defined(PLAT_ppc32_linux) && !defined(PLAT_ppc64_linux) \
- && !defined(PLAT_ppc32_aix5) && !defined(PLAT_ppc64_aix5)
# if !defined(NVALGRIND)
# define NVALGRIND 1
# endif
@@ -124,17 +153,31 @@
/* in here of use to end-users -- skip to the next section. */
/* ------------------------------------------------------------------ */
+/*
+ * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
+ * request. Accepts both pointers and integers as arguments.
+ *
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result. Accepts
+ * both pointers and integers as arguments.
+ */
+
+#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
+ _zzq_request, _zzq_arg1, _zzq_arg2, \
+ _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
+ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
+ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); }
+
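A hedged usage sketch of the two forms follows; VG_USERREQ__RUNNING_ON_VALGRIND is a request id defined later in this header, the include path is illustrative, and under NVALGRIND both forms collapse to the default value:

    #include "valgrind.h"

    static unsigned under_valgrind_stmt(void) {
      unsigned res;
      /* Statement form: the result lands in an lvalue you supply. */
      VALGRIND_DO_CLIENT_REQUEST(res, 0 /* default outside Valgrind */,
                                 VG_USERREQ__RUNNING_ON_VALGRIND,
                                 0, 0, 0, 0, 0);
      return res;
    }

    static unsigned under_valgrind_expr(void) {
      /* Expression form: composes directly into larger expressions. */
      return VALGRIND_DO_CLIENT_REQUEST_EXPR(
                 0, VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0);
    }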
#if defined(NVALGRIND)
/* Define NVALGRIND to completely remove the Valgrind magic sequence
from the compiled code (analogous to NDEBUG's effects on
assert()) */
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { \
- (_zzq_rlval) = (_zzq_default); \
- }
+ (_zzq_default)
#else /* ! NVALGRIND */
@@ -173,9 +216,10 @@
inline asm stuff to be useful.
*/
-/* ------------------------- x86-linux ------------------------- */
+/* ------------------------- x86-{linux,darwin} ---------------- */
-#if defined(PLAT_x86_linux)
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
+ || (defined(PLAT_x86_win32) && defined(__GNUC__))
typedef
struct {
@@ -187,10 +231,11 @@ typedef
"roll $3, %%edi ; roll $13, %%edi\n\t" \
"roll $29, %%edi ; roll $19, %%edi\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile unsigned int _zzq_args[6]; \
+ __extension__ \
+ ({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
@@ -205,8 +250,8 @@ typedef
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
- _zzq_rlval = _zzq_result; \
- }
+ _zzq_result; \
+ })
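The rewrite from a braced block to __extension__ ({ ... }) is what gives the macro a value: a GCC/Clang statement expression evaluates to its last expression, and __extension__ keeps -pedantic quiet about the use of the extension. A Valgrind-free illustration:

    // GCC/Clang only: _r + 1 is the value of the whole block.
    #define SQUARE_PLUS_ONE(x) __extension__ ({ \
      int _r = (x) * (x);                       \
      _r + 1;                                   \
    })
    // int y = SQUARE_PLUS_ONE(3);  /* y == 10 */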
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -225,11 +270,77 @@ typedef
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%EAX */ \
"xchgl %%edx,%%edx\n\t"
-#endif /* PLAT_x86_linux */
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
+
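A note on the magic preamble: the rotate amounts sum to a full register width (3 + 13 + 29 + 19 = 64, which is 0 mod 32 here; 3 + 13 + 61 + 51 = 128, which is 0 mod 64 in the amd64 variant below), so the sequence is a hardware no-op that Valgrind's JIT can still pattern-match. A quick self-check in plain C++:

    #include <cassert>
    #include <cstdint>

    static uint32_t rol32(uint32_t x, unsigned n) {  // n in 1..31 here
      return (x << n) | (x >> (32u - n));
    }

    int main() {
      uint32_t x = 0xDEADBEEFu;
      // 3 + 13 + 29 + 19 == 64 == 0 (mod 32): the chain restores x.
      assert(rol32(rol32(rol32(rol32(x, 3), 13), 29), 19) == x);
    }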
+/* ------------------------- x86-Win32 ------------------------- */
+
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ __asm rol edi, 3 __asm rol edi, 13 \
+ __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
+ (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
+ (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
+ (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
+
+static __inline uintptr_t
+valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
+ uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
+ uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
+ uintptr_t _zzq_arg5)
+{
+ volatile uintptr_t _zzq_args[6];
+ volatile unsigned int _zzq_result;
+ _zzq_args[0] = (uintptr_t)(_zzq_request);
+ _zzq_args[1] = (uintptr_t)(_zzq_arg1);
+ _zzq_args[2] = (uintptr_t)(_zzq_arg2);
+ _zzq_args[3] = (uintptr_t)(_zzq_arg3);
+ _zzq_args[4] = (uintptr_t)(_zzq_arg4);
+ _zzq_args[5] = (uintptr_t)(_zzq_arg5);
+ __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
+ __SPECIAL_INSTRUCTION_PREAMBLE
+ /* %EDX = client_request ( %EAX ) */
+ __asm xchg ebx,ebx
+ __asm mov _zzq_result, edx
+ }
+ return _zzq_result;
+}
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ __asm xchg ecx,ecx \
+ __asm mov __addr, eax \
+ } \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
-/* ------------------------ amd64-linux ------------------------ */
+/* ------------------------ amd64-{linux,darwin} --------------- */
-#if defined(PLAT_amd64_linux)
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
typedef
struct {
@@ -241,10 +352,11 @@ typedef
"rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
"rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile uint64_t _zzq_args[6]; \
+ __extension__ \
+ ({ volatile uint64_t _zzq_args[6]; \
volatile uint64_t _zzq_result; \
_zzq_args[0] = (uint64_t)(_zzq_request); \
_zzq_args[1] = (uint64_t)(_zzq_arg1); \
@@ -259,8 +371,8 @@ typedef
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
- _zzq_rlval = _zzq_result; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -279,7 +391,7 @@ typedef
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%RAX */ \
"xchgq %%rdx,%%rdx\n\t"
-#endif /* PLAT_amd64_linux */
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
/* ------------------------ ppc32-linux ------------------------ */
@@ -295,11 +407,12 @@ typedef
"rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
"rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned int _zzq_args[6]; \
+ __extension__ \
+ ({ unsigned int _zzq_args[6]; \
unsigned int _zzq_result; \
unsigned int* _zzq_ptr; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
@@ -318,8 +431,8 @@ typedef
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
- _zzq_rlval = _zzq_result; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -356,11 +469,12 @@ typedef
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { uint64_t _zzq_args[6]; \
+ __extension__ \
+ ({ uint64_t _zzq_args[6]; \
register uint64_t _zzq_result __asm__("r3"); \
register uint64_t* _zzq_ptr __asm__("r4"); \
_zzq_args[0] = (uint64_t)(_zzq_request); \
@@ -376,8 +490,8 @@ typedef
: "=r" (_zzq_result) \
: "0" (_zzq_default), "r" (_zzq_ptr) \
: "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -407,149 +521,135 @@ typedef
#endif /* PLAT_ppc64_linux */
-/* ------------------------ ppc32-aix5 ------------------------- */
+/* ------------------------- arm-linux ------------------------- */
-#if defined(PLAT_ppc32_aix5)
+#if defined(PLAT_arm_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
- unsigned int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
+ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned int _zzq_args[7]; \
- register unsigned int _zzq_result; \
- register unsigned int* _zzq_ptr; \
+ __extension__ \
+ ({volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
- _zzq_args[6] = (unsigned int)(_zzq_default); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 4,%1\n\t" \
- "lwz 3, 24(4)\n\t" \
+ __asm__ volatile("mov r3, %1\n\t" /*default*/ \
+ "mov r4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" \
- : "=b" (_zzq_result) \
- : "b" (_zzq_ptr) \
- : "r3", "r4", "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- }
+ /* R3 = client_request ( R4 ) */ \
+ "orr r10, r10, r10\n\t" \
+ "mov %0, r3" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "cc","memory", "r3", "r4"); \
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned int __addr; \
+ unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
+ /* R3 = guest_NRADDR */ \
+ "orr r11, r11, r11\n\t" \
+ "mov %0, r3" \
+ : "=r" (__addr) \
: \
- : "r3", "cc", "memory" \
+ : "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->r2 = __addr; \
}
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
__SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
+ /* branch-and-link-to-noredir *%R4 */ \
+ "orr r12, r12, r12\n\t"
-#endif /* PLAT_ppc32_aix5 */
+#endif /* PLAT_arm_linux */
-/* ------------------------ ppc64-aix5 ------------------------- */
+/* ------------------------ s390x-linux ------------------------ */
-#if defined(PLAT_ppc64_aix5)
+#if defined(PLAT_s390x_linux)
typedef
- struct {
- uint64_t nraddr; /* where's the code? */
- uint64_t r2; /* what tocptr do we need? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- { uint64_t _zzq_args[7]; \
- register uint64_t _zzq_result; \
- register uint64_t* _zzq_ptr; \
- _zzq_args[0] = (unsigned int long long)(_zzq_request); \
- _zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int long long)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int long long)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int long long)(_zzq_arg5); \
- _zzq_args[6] = (unsigned int long long)(_zzq_default); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 4,%1\n\t" \
- "ld 3, 48(4)\n\t" \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" \
- : "=b" (_zzq_result) \
- : "b" (_zzq_ptr) \
- : "r3", "r4", "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
+ struct {
+ uint64_t nraddr; /* where's the code? */
}
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register uint64_t __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-
-#endif /* PLAT_ppc64_aix5 */
+ OrigFn;
+
+/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
+ * code. This detection is implemented in platform specific toIR.c
+ * (e.g. VEX/priv/guest_s390_decoder.c).
+ */
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "lr 15,15\n\t" \
+ "lr 1,1\n\t" \
+ "lr 2,2\n\t" \
+ "lr 3,3\n\t"
+
+#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
+#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
+#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({volatile uint64_t _zzq_args[6]; \
+ volatile uint64_t _zzq_result; \
+ _zzq_args[0] = (uint64_t)(_zzq_request); \
+ _zzq_args[1] = (uint64_t)(_zzq_arg1); \
+ _zzq_args[2] = (uint64_t)(_zzq_arg2); \
+ _zzq_args[3] = (uint64_t)(_zzq_arg3); \
+ _zzq_args[4] = (uint64_t)(_zzq_arg4); \
+ _zzq_args[5] = (uint64_t)(_zzq_arg5); \
+ __asm__ volatile(/* r2 = args */ \
+ "lgr 2,%1\n\t" \
+ /* r3 = default */ \
+ "lgr 3,%2\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ __CLIENT_REQUEST_CODE \
+ /* results = r3 */ \
+ "lgr %0, 3\n\t" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "2", "3", "memory" \
+ ); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile uint64_t __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ __GET_NR_CONTEXT_CODE \
+ "lgr %0, 3\n\t" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "3", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_R1 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ __CALL_NO_REDIR_CODE
+
+#endif /* PLAT_s390x_linux */
/* Insert assembly code for other platforms here... */
@@ -582,11 +682,15 @@ typedef
/* Use these to write the name of your wrapper. NOTE: duplicates
VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+ args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
+
#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
- _vgwZU_##soname##_##fnname
+ VG_CONCAT4(_vgwZU_,soname,_,fnname)
#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
- _vgwZZ_##soname##_##fnname
+ VG_CONCAT4(_vgwZZ_,soname,_,fnname)
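The VG_CONCAT4 indirection exists because operands of ## are pasted without prior macro expansion; routing the paste through a second macro forces soname and fnname to expand first. A standalone illustration with a hypothetical Z-encoded soname:

    #define CAT_DIRECT(a, b) a##b
    #define CAT(a, b)        CAT_DIRECT(a, b)
    #define SONAME           libcZdsoZa  // hypothetical

    // CAT_DIRECT(_vgwZU_, SONAME)  pastes to  _vgwZU_SONAME  (unexpanded)
    // CAT(_vgwZU_, SONAME)         pastes to  _vgwZU_libcZdsoZa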
/* Use this macro from within a wrapper function to collect the
context (address and possibly other info) of the original function.
@@ -613,9 +717,25 @@ typedef
do { volatile unsigned long _junk; \
CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
-/* ------------------------- x86-linux ------------------------- */
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
+
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
+
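For context, the naming macros, VALGRIND_GET_ORIG_FN, and the CALL_FN_ family combine like this in a typical wrapper; foo is a hypothetical function in the main executable (soname NONE, per the wrapping convention):

    int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x) {
      OrigFn fn;
      int result;
      VALGRIND_GET_ORIG_FN(fn);    // capture the real foo's address
      CALL_FN_W_W(result, fn, x);  // call it without re-entering the wrapper
      return result;
    }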
+/* ------------------------- x86-{linux,darwin} ---------------- */
-#if defined(PLAT_x86_linux)
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
/* These regs are trashed by the hidden call. No need to mention eax
as gcc can already see that, plus causes gcc to bomb. */
@@ -648,10 +768,11 @@ typedef
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
+ "subl $12, %%esp\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $4, %%esp\n" \
+ "addl $16, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -668,11 +789,12 @@ typedef
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
+ "subl $8, %%esp\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $8, %%esp\n" \
+ "addl $16, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -690,12 +812,13 @@ typedef
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
+ "subl $4, %%esp\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $12, %%esp\n" \
+ "addl $16, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -740,6 +863,7 @@ typedef
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
+ "subl $12, %%esp\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
@@ -747,7 +871,7 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $20, %%esp\n" \
+ "addl $32, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -768,6 +892,7 @@ typedef
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
+ "subl $8, %%esp\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
@@ -776,7 +901,7 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $24, %%esp\n" \
+ "addl $32, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -799,6 +924,7 @@ typedef
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
+ "subl $4, %%esp\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
@@ -808,7 +934,7 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $28, %%esp\n" \
+ "addl $32, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -867,6 +993,7 @@ typedef
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
+ "subl $12, %%esp\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
@@ -878,7 +1005,7 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $36, %%esp\n" \
+ "addl $48, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -904,6 +1031,7 @@ typedef
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
+ "subl $8, %%esp\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
@@ -916,7 +1044,7 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $40, %%esp\n" \
+ "addl $48, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -944,6 +1072,7 @@ typedef
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
+ "subl $4, %%esp\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
@@ -957,7 +1086,7 @@ typedef
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $44, %%esp\n" \
+ "addl $48, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -1008,11 +1137,11 @@ typedef
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_x86_linux */
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
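The subl/addl pairs added throughout the x86 CALL_FN_ macros above pad the pushed arguments so %esp is 16-byte aligned at the call, which the Darwin ABI requires and SSE code in the callee may assume: each macro rounds 4*nargs up to the next multiple of 16. The 4- and 8-argument macros need no hunk because their argument bytes are already a multiple of 16. Checking the constants used in these hunks:

    #include <cassert>

    int main() {
      // "subl" amounts from the hunks above, indexed by argument count.
      const int pad[] = {-1, 12, 8, 4, 0, 12, 8, 4, 0, 12, 8, 4};
      for (int nargs = 1; nargs <= 11; ++nargs) {
        int args = 4 * nargs;                         // bytes of pushed args
        assert(pad[nargs] == (16 - args % 16) % 16);  // matches the subl
        assert((args + pad[nargs]) % 16 == 0);        // one addl restores both
      }
    }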
-/* ------------------------ amd64-linux ------------------------ */
+/* ------------------------ amd64-{linux,darwin} --------------- */
-#if defined(PLAT_amd64_linux)
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
@@ -1020,6 +1149,78 @@ typedef
#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
"rdi", "r8", "r9", "r10", "r11"
+/* This is all pretty complex. It's so as to make stack unwinding
+ work reliably. See bug 243270. The basic problem is the sub and
+ add of 128 of %rsp in all of the following macros. If gcc believes
+ the CFA is in %rsp, then unwinding may fail, because what's at the
+ CFA is not what gcc "expected" when it constructs the CFIs for the
+ places where the macros are instantiated.
+
+ But we can't just add a CFI annotation to increase the CFA offset
+ by 128, to match the sub of 128 from %rsp, because we don't know
+ whether gcc has chosen %rsp as the CFA at that point, or whether it
+ has chosen some other register (eg, %rbp). In the latter case,
+ adding a CFI annotation to change the CFA offset is simply wrong.
+
+ So the solution is to get hold of the CFA using
+ __builtin_dwarf_cfa(), put it in a known register, and add a
+ CFI annotation to say what the register is. We choose %rbp for
+ this (perhaps perversely), because:
+
+ (1) %rbp is already subject to unwinding. If a new register was
+ chosen then the unwinder would have to unwind it in all stack
+ traces, which is expensive, and
+
+ (2) %rbp is already subject to precise exception updates in the
+ JIT. If a new register was chosen, we'd have to have precise
+ exceptions for it too, which reduces performance of the
+ generated code.
+
+ However .. one extra complication. We can't just whack the result
+ of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+ list of trashed registers at the end of the inline assembly
+ fragments; gcc won't allow %rbp to appear in that list. Hence
+ instead we need to stash %rbp in %r15 for the duration of the asm,
+ and say that %r15 is trashed instead. gcc seems happy to go with
+ that.
+
+ Oh .. and this all needs to be conditionalised so that it is
+ unchanged from before this commit, when compiled with older gccs
+ that don't support __builtin_dwarf_cfa. Furthermore, since
+ this header file is freestanding, it has to be independent of
+ config.h, and so the following conditionalisation cannot depend on
+ configure time checks.
+
+ Although it's not clear from
+ 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+ this expression excludes Darwin.
+ .cfi directives in Darwin assembly appear to be completely
+ different and I haven't investigated how they work.
+
+ For even more entertainment value, note we have to use the
+ completely undocumented __builtin_dwarf_cfa(), which appears to
+ really compute the CFA, whereas __builtin_frame_address(0) claims
+ to but actually doesn't. See
+ https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+# define __FRAME_POINTER \
+ ,"r"(__builtin_dwarf_cfa())
+# define VALGRIND_CFI_PROLOGUE \
+ "movq %%rbp, %%r15\n\t" \
+ "movq %2, %%rbp\n\t" \
+ ".cfi_remember_state\n\t" \
+ ".cfi_def_cfa rbp, 0\n\t"
+# define VALGRIND_CFI_EPILOGUE \
+ "movq %%r15, %%rbp\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE
+# define VALGRIND_CFI_EPILOGUE
+#endif
+
+
/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
long) == 8. */
@@ -1039,7 +1240,7 @@ typedef
redzone, for the duration of the hidden call, to make it safe.
Probably the same problem afflicts the other redzone-style ABIs too
- (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+ (ppc64-linux); but for those, the stack is
self describing (none of this CFI nonsense) so at least messing
with the stack pointer doesn't give a danger of non-unwindable
stack. */
@@ -1051,13 +1252,15 @@ typedef
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1070,14 +1273,16 @@ typedef
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1091,15 +1296,17 @@ typedef
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1114,6 +1321,7 @@ typedef
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
@@ -1121,9 +1329,10 @@ typedef
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1139,6 +1348,7 @@ typedef
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
@@ -1147,9 +1357,10 @@ typedef
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1166,6 +1377,7 @@ typedef
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
@@ -1175,9 +1387,10 @@ typedef
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1195,6 +1408,7 @@ typedef
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
@@ -1203,11 +1417,12 @@ typedef
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
- "addq $128,%%rsp\n\t" \
VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1227,7 +1442,8 @@ typedef
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
- "subq $128,%%rsp\n\t" \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
@@ -1238,10 +1454,11 @@ typedef
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $8, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1262,6 +1479,7 @@ typedef
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
@@ -1275,9 +1493,10 @@ typedef
VALGRIND_CALL_NOREDIR_RAX \
"addq $16, %%rsp\n" \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1299,7 +1518,8 @@ typedef
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
- "subq $128,%%rsp\n\t" \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
@@ -1312,10 +1532,11 @@ typedef
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $24, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1338,6 +1559,7 @@ typedef
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
@@ -1353,9 +1575,10 @@ typedef
VALGRIND_CALL_NOREDIR_RAX \
"addq $32, %%rsp\n" \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1379,7 +1602,8 @@ typedef
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
- "subq $128,%%rsp\n\t" \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
"pushq 88(%%rax)\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
@@ -1394,10 +1618,11 @@ typedef
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $40, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1422,6 +1647,7 @@ typedef
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"pushq 96(%%rax)\n\t" \
"pushq 88(%%rax)\n\t" \
@@ -1439,14 +1665,15 @@ typedef
VALGRIND_CALL_NOREDIR_RAX \
"addq $48, %%rsp\n" \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_amd64_linux */
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
/* ------------------------ ppc32-linux ------------------------ */
@@ -2439,54 +2666,28 @@ typedef
#endif /* PLAT_ppc64_linux */
-/* ------------------------ ppc32-aix5 ------------------------- */
-
-#if defined(PLAT_ppc32_aix5)
+/* ------------------------- arm-linux ------------------------- */
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+#if defined(PLAT_arm_linux)
/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
-/* Expand the stack frame, copying enough info that unwinding
- still works. Trashes r3. */
-
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
- "addi 1,1,-" #_n_fr "\n\t" \
- "lwz 3," #_n_fr "(1)\n\t" \
- "stw 3,0(1)\n\t"
-
-#define VG_CONTRACT_FRAME_BY(_n_fr) \
- "addi 1,1," #_n_fr "\n\t"
-
-/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
long) == 4. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2495,27 +2696,18 @@ typedef
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -2523,28 +2715,19 @@ typedef
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2553,30 +2736,21 @@ typedef
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2585,32 +2759,23 @@ typedef
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2619,34 +2784,27 @@ typedef
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #4 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2655,36 +2813,29 @@ typedef
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #8 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2694,38 +2845,31 @@ typedef
arg7) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #12 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2735,40 +2879,33 @@ typedef
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "push {r0, r1, r2, r3} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #16 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2778,47 +2915,35 @@ typedef
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(64) \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(64) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #20 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2828,738 +2953,612 @@ typedef
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(64) \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(64) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(72) \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,64(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(72) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(72) \
- /* arg12 */ \
- "lwz 3,48(11)\n\t" \
- "stw 3,68(1)\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,64(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(72) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc32_aix5 */
-
-/* ------------------------ ppc64-aix5 ------------------------- */
-
-#if defined(PLAT_ppc64_aix5)
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* Expand the stack frame, copying enough info that unwinding
- still works. Trashes r3. */
-
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
- "addi 1,1,-" #_n_fr "\n\t" \
- "ld 3," #_n_fr "(1)\n\t" \
- "std 3,0(1)\n\t"
-
-#define VG_CONTRACT_FRAME_BY(_n_fr) \
- "addi 1,1," #_n_fr "\n\t"
-
-/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
- long) == 8. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #40] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #24 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #28 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "ldr r2, [%1, #48] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #32 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
+#endif /* PLAT_arm_linux */
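These arm-linux blocks implement the argument marshalling that the
function-wrapping machinery relies on: arguments 1-4 travel in r0-r3,
any further arguments are pushed on the stack, and the target address
rides in r4. A minimal sketch of how the CALL_FN_ macros are normally
reached, via a wrapper built with the I_WRAP_SONAME_FNNAME_ZU naming
scheme defined earlier in this header; the library and function
("libexample.so", foo) are hypothetical:

   #include "valgrind.h"

   /* Hypothetical wrapper for: int foo(int) in libexample.so.
      "libexampleZdso" is the Z-encoded soname ("Zd" encodes '.'). */
   int I_WRAP_SONAME_FNNAME_ZU(libexampleZdso, foo)(int x)
   {
      int    result;
      OrigFn fn;
      VALGRIND_GET_ORIG_FN(fn);      /* capture the real foo            */
      CALL_FN_W_W(result, fn, x);    /* one word arg -> the W_W variant */
      return result;
   }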
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
+/* ------------------------- s390x-linux ------------------------- */
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
+#if defined(PLAT_s390x_linux)
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
+/* Similar workaround to the one for amd64 (see above), but here r11
+   serves as the frame pointer and the old r11 is saved in r7. r11
+   might be used for argvec, so argvec is copied into r1, which is
+   clobbered by the call anyway. */
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+# define __FRAME_POINTER \
+ ,"d"(__builtin_dwarf_cfa())
+# define VALGRIND_CFI_PROLOGUE \
+ ".cfi_remember_state\n\t" \
+ "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
+ "lgr 7,11\n\t" \
+ "lgr 11,%2\n\t" \
+ ".cfi_def_cfa r11, 0\n\t"
+# define VALGRIND_CFI_EPILOGUE \
+ "lgr 11, 7\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE \
+ "lgr 1,%1\n\t"
+# define VALGRIND_CFI_EPILOGUE
+#endif
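Reading the prologue together with the operand lists in the CALL_FN_
macros below: %1 is the argvec pointer and, when CFI is available, %2
is the caller's CFA obtained from __builtin_dwarf_cfa(). Schematically:

   /* "lgr 1,%1"  -- r1  := &_argvec[0]; every "lg N,k(1)" argument
                     load below is relative to this base
      "lgr 7,11"  -- r7  := caller's r11 (hence "7" in every clobber
                     list), restored by the epilogue
      "lgr 11,%2" -- r11 := __builtin_dwarf_cfa(); with
                     ".cfi_def_cfa r11, 0" this keeps stack unwinding
                     intact across the hidden call */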
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(128) \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(128) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(128) \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(128) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(144) \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(144) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(144) \
- /* arg12 */ \
- "ld 3,96(11)\n\t" \
- "std 3,136(1)\n\t" \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(144) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-#endif /* PLAT_ppc64_aix5 */
+/* These regs are trashed by the hidden call. Note that we overwrite
+ r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
+ function a proper return address. All the others are ABI-defined
+ call clobbers. */
+#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
+ "f0","f1","f2","f3","f4","f5","f6","f7"
+
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 1, 0(1)\n\t" /* target->r1 */ \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+/* The calling ABI passes the first arguments in r2-r6; the rest go on the stack */
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-168\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,168\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-176\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,176\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-184\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,184\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-192\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,192\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-200\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,200\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10, arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-208\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "mvc 200(8,15), 88(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,208\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-216\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "mvc 200(8,15), 88(1)\n\t" \
+ "mvc 208(8,15), 96(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,216\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+
+#endif /* PLAT_s390x_linux */
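The aghi constants above follow one pattern, assuming the zSeries ABI
reading that r2-r6 carry the first five arguments and the remainder are
spilled past the 160-byte register save area:

   /* frame = 160 + 8 * max(nargs - 5, 0)
      nargs : 0..5   6     7     8     9     10    11    12
      frame : 160    168   176   184   192   200   208   216 */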
/* ------------------------------------------------------------------ */
@@ -3605,9 +3604,14 @@ typedef
errors. */
VG_USERREQ__COUNT_ERRORS = 0x1201,
+ /* Allows a string (a gdb monitor command) to be passed to the tool.
+ Used for interaction with vgdb/gdb */
+ VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
+
/* These are useful and can be interpreted by any tool that
tracks malloc() et al, by using vg_replace_malloc.c. */
VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+ VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
VG_USERREQ__FREELIKE_BLOCK = 0x1302,
/* Memory pool support. */
VG_USERREQ__CREATE_MEMPOOL = 0x1303,
@@ -3620,30 +3624,43 @@ typedef
VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
/* Allow printfs to valgrind log. */
+ /* The first two pass the va_list argument by value, which
+ assumes it is the same size as or smaller than a UWord,
+ which generally isn't the case. Hence they are deprecated.
+ The second two pass the vargs by reference and so are
+ immune to this problem. */
+ /* both :: char* fmt, va_list vargs (DEPRECATED) */
VG_USERREQ__PRINTF = 0x1401,
VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+ /* both :: char* fmt, va_list* vargs */
+ VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
/* Stack support. */
VG_USERREQ__STACK_REGISTER = 0x1501,
VG_USERREQ__STACK_DEREGISTER = 0x1502,
- VG_USERREQ__STACK_CHANGE = 0x1503
+ VG_USERREQ__STACK_CHANGE = 0x1503,
+
+ /* Wine support */
+ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+ /* Querying of debug info. */
+ VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
} Vg_ClientRequest;
#if !defined(__GNUC__)
# define __extension__ /* */
#endif
+
/* Returns the number of Valgrinds this code is running under. That
is, 0 if running natively, 1 if running under Valgrind, 2 if
running under Valgrind which is running under another Valgrind,
etc. */
-#define RUNNING_ON_VALGRIND __extension__ \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
- VG_USERREQ__RUNNING_ON_VALGRIND, \
- 0, 0, 0, 0, 0); \
- _qzz_res; \
- })
+#define RUNNING_ON_VALGRIND \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
+ 0, 0, 0, 0, 0)
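Because the request expands to 0 when running natively, this macro can
be used unconditionally; a sketch of the usual pattern:

   #include "valgrind.h"

   static unsigned pick_iterations(void)
   {
      /* Shrink the workload under the simulator; returns the full
         count when run natively. */
      return RUNNING_ON_VALGRIND ? 10000 : 1000000;
   }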
/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
@@ -3651,56 +3668,93 @@ typedef
since it provides a way to make sure valgrind will retranslate the
invalidated area. Returns no value. */
#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__DISCARD_TRANSLATIONS, \
- _qzz_addr, _qzz_len, 0, 0, 0); \
- }
+ _qzz_addr, _qzz_len, 0, 0, 0)
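The canonical client is a JIT that patches code in place. A sketch,
with hypothetical buffer names; __builtin___clear_cache assumes a
GCC-compatible compiler:

   #include <string.h>
   #include "valgrind.h"

   static void patch_code(char* code_buf, const char* new_insns,
                          size_t new_len)
   {
      memcpy(code_buf, new_insns, new_len);
      __builtin___clear_cache(code_buf, code_buf + new_len);
      /* Drop stale translations so Valgrind re-reads the new code. */
      VALGRIND_DISCARD_TRANSLATIONS(code_buf, new_len);
   }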
/* These requests are for getting Valgrind itself to print something.
- Possibly with a backtrace. This is a really ugly hack. */
-
-#if defined(NVALGRIND)
-
-# define VALGRIND_PRINTF(...)
-# define VALGRIND_PRINTF_BACKTRACE(...)
-
-#else /* NVALGRIND */
+ Possibly with a backtrace. This is a really ugly hack. The return value
+ is the number of characters printed, excluding the "**<pid>** " part at the
+ start and the backtrace (if present). */
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/* Modern GCC will optimize the static routine out if unused,
and unused attribute will shut down warnings about it. */
static int VALGRIND_PRINTF(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
+#endif
static int
+#if defined(_MSC_VER)
+__inline
+#endif
VALGRIND_PRINTF(const char *format, ...)
{
+#if defined(NVALGRIND)
+ return 0;
+#else /* NVALGRIND */
+#if defined(_MSC_VER)
+ uintptr_t _qzz_res;
+#else
unsigned long _qzz_res;
+#endif
va_list vargs;
va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
- (unsigned long)format, (unsigned long)vargs,
+#if defined(_MSC_VER)
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
+ 0, 0, 0);
+#else
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
0, 0, 0);
+#endif
va_end(vargs);
return (int)_qzz_res;
+#endif /* NVALGRIND */
}
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
+#endif
static int
+#if defined(_MSC_VER)
+__inline
+#endif
VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
{
+#if defined(NVALGRIND)
+ return 0;
+#else /* NVALGRIND */
+#if defined(_MSC_VER)
+ uintptr_t _qzz_res;
+#else
unsigned long _qzz_res;
+#endif
va_list vargs;
va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
- (unsigned long)format, (unsigned long)vargs,
+#if defined(_MSC_VER)
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
+ 0, 0, 0);
+#else
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
0, 0, 0);
+#endif
va_end(vargs);
return (int)_qzz_res;
-}
-
#endif /* NVALGRIND */
+}
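Usage is printf-like; the output lands in Valgrind's log rather than
the program's stdio, and both calls compile away to no-ops natively. A
sketch (the checkpoint/state values are hypothetical):

   void note_checkpoint(int checkpoint, int state)
   {
      int n = VALGRIND_PRINTF("reached checkpoint %d\n", checkpoint);
      (void)n;  /* characters printed, minus the "**<pid>** " prefix */
      if (state < 0)
         VALGRIND_PRINTF_BACKTRACE("unexpected state %d\n", state);
   }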
/* These requests allow control to move from the simulated CPU to the
@@ -3727,199 +3781,253 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
with a lot in the past.
*/
#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL0, \
- _qyy_fn, \
- 0, 0, 0, 0); \
- _qyy_res; \
- })
-
-#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL1, \
- _qyy_fn, \
- _qyy_arg1, 0, 0, 0); \
- _qyy_res; \
- })
-
-#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL2, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, 0, 0); \
- _qyy_res; \
- })
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL0, \
+ _qyy_fn, \
+ 0, 0, 0, 0)
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL1, \
+ _qyy_fn, \
+ _qyy_arg1, 0, 0, 0)
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL2, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, 0, 0)
#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL3, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, \
- _qyy_arg3, 0); \
- _qyy_res; \
- })
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL3, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, \
+ _qyy_arg3, 0)
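Upstream valgrind.h documents that the current ThreadId is inserted as
a hidden first argument, so the callee takes one extra word. A sketch:

   /* Callee signature gains a leading ThreadId word. */
   static long triple(long tid, long x) { (void)tid; return 3 * x; }

   /* Runs triple() on the real CPU under Valgrind; the request
      yields the default value (0) when run natively. */
   long r = VALGRIND_NON_SIMD_CALL1(triple, 14);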
/* Counts the number of errors that have been recorded by a tool. Nb:
the tool must record the errors with VG_(maybe_record_error)() or
VG_(unique_error)() for them to be counted. */
#define VALGRIND_COUNT_ERRORS \
- __extension__ \
- ({unsigned int _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ 0 /* default return */, \
VG_USERREQ__COUNT_ERRORS, \
- 0, 0, 0, 0, 0); \
- _qyy_res; \
- })
-
-/* Mark a block of memory as having been allocated by a malloc()-like
- function. `addr' is the start of the usable block (ie. after any
- redzone) `rzB' is redzone size if the allocator can apply redzones;
- use '0' if not. Adding redzones makes it more likely Valgrind will spot
- block overruns. `is_zeroed' indicates if the memory is zeroed, as it is
- for calloc(). Put it immediately after the point where a block is
- allocated.
+ 0, 0, 0, 0, 0)
+
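A sketch of the usual regression-test pattern around this request (the
test body is hypothetical):

   unsigned before = VALGRIND_COUNT_ERRORS;
   exercise_subsystem();                    /* hypothetical test body */
   unsigned delta  = VALGRIND_COUNT_ERRORS - before;
   if (delta != 0)
      VALGRIND_PRINTF("%u new error(s) in subsystem\n", delta);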
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+ when heap blocks are allocated in order to give accurate results. This
+ happens automatically for the standard allocator functions such as
+ malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+ delete[], etc.
+
+ But if your program uses a custom allocator, this doesn't automatically
+ happen, and Valgrind will not do as well. For example, if you allocate
+ superblocks with mmap() and then allocate chunks of the superblocks, all
+ Valgrind's observations will be at the mmap() level and it won't know that
+ the chunks should be considered separate entities. In Memcheck's case,
+ that means you probably won't get heap block overrun detection (because
+ there won't be redzones marked as unaddressable) and you definitely won't
+ get any leak detection.
+
+ The following client requests allow a custom allocator to be annotated so
+ that it can be handled accurately by Valgrind.
+
+ VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+ by a malloc()-like function. For Memcheck (an illustrative case), this
+ does two things:
+
+ - It records that the block has been allocated. This means any addresses
+ within the block mentioned in error messages will be
+ identified as belonging to the block. It also means that if the block
+ isn't freed it will be detected by the leak checker.
+
+ - It marks the block as being addressable and undefined (if 'is_zeroed' is
+ not set), or addressable and defined (if 'is_zeroed' is set). This
+ controls how accesses to the block by the program are handled.
- If you're using Memcheck: If you're allocating memory via superblocks,
- and then handing out small chunks of each superblock, if you don't have
- redzones on your small blocks, it's worth marking the superblock with
- VALGRIND_MAKE_MEM_NOACCESS when it's created, so that block overruns are
- detected. But if you can put redzones on, it's probably better to not do
- this, so that messages for small overruns are described in terms of the
- small block rather than the superblock (but if you have a big overrun
- that skips over a redzone, you could miss an error this way). See
- memcheck/tests/custom_alloc.c for an example.
-
- WARNING: if your allocator uses malloc() or 'new' to allocate
- superblocks, rather than mmap() or brk(), this will not work properly --
- you'll likely get assertion failures during leak detection. This is
- because Valgrind doesn't like seeing overlapping heap blocks. Sorry.
-
- Nb: block must be freed via a free()-like function specified
- with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
+ 'addr' is the start of the usable block (ie. after any
+ redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
+ can apply redzones -- these are blocks of padding at the start and end of
+ each block. Adding redzones is recommended as it makes it much more likely
+ Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
+ zeroed (or filled with another predictable value), as is the case for
+ calloc().
+
+ VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+ heap block -- that will be used by the client program -- is allocated.
+ It's best to put it at the outermost level of the allocator if possible;
+ for example, if you have a function my_alloc() which calls
+ internal_alloc(), and the client request is put inside internal_alloc(),
+ stack traces relating to the heap block will contain entries for both
+ my_alloc() and internal_alloc(), which is probably not what you want.
+
+ For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+ custom blocks from within a heap block, B, that has been allocated with
+ malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+ -- the custom blocks will take precedence.
+
+ VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
+ Memcheck, it does two things:
+
+ - It records that the block has been deallocated. This assumes that the
+ block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - It marks the block as being unaddressable.
+
+ VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+ heap block is deallocated.
+
+ VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
+ Memcheck, it does four things:
+
+ - It records that the size of a block has been changed. This assumes that
+ the block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - If the block shrunk, it marks the freed memory as being unaddressable.
+
+ - If the block grew, it marks the new area as undefined and defines a red
+ zone past the end of the new block.
+
+ - The V-bits of the overlap between the old and the new block are preserved.
+
+ VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
+ and before deallocation of the old block.
+
+ In many cases, these three client requests will not be enough to get your
+ allocator working well with Memcheck. More specifically, if your allocator
+ writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+ will be necessary to mark the memory as addressable just before the zeroing
+ occurs, otherwise you'll get a lot of invalid write errors. For example,
+ you'll need to do this if your allocator recycles freed blocks, but it
+ zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+ Alternatively, if your allocator reuses freed blocks for allocator-internal
+ data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+ Really, what's happening is a blurring of the lines between the client
+ program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+ memory should be considered unaddressable to the client program, but the
+ allocator knows more than the rest of the client program and so may be able
+ to safely access it. Extra client requests are necessary for Valgrind to
+ understand the distinction between the allocator and the rest of the
+ program.
+
+ Ignored if addr == 0.
+*/
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MALLOCLIKE_BLOCK, \
- addr, sizeB, rzB, is_zeroed, 0); \
- }
+ addr, sizeB, rzB, is_zeroed, 0)
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__RESIZEINPLACE_BLOCK, \
+ addr, oldSizeB, newSizeB, rzB, 0)
-/* Mark a block of memory as having been freed by a free()-like function.
- `rzB' is redzone size; it must match that given to
- VALGRIND_MALLOCLIKE_BLOCK. Memory not freed will be detected by the leak
- checker. Put it immediately after the point where the block is freed. */
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__FREELIKE_BLOCK, \
- addr, rzB, 0, 0, 0); \
- }
+ addr, rzB, 0, 0, 0)
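A minimal sketch of how these three requests fit together in practice (not
part of this patch): my_alloc/my_free and the one-slot free list below are
invented for illustration, and VALGRIND_MAKE_MEM_UNDEFINED comes from
Memcheck's companion header memcheck.h.

    #include <stdlib.h>
    #include <string.h>
    #include <valgrind/valgrind.h>
    #include <valgrind/memcheck.h>

    #define RZ 16                    /* redzone size reported to Memcheck */
    static void*  free_slot = NULL;  /* toy one-entry free list */
    static size_t slot_size = 0;

    void* my_alloc(size_t n) {
      if (free_slot != NULL && slot_size >= n) {  /* recycle a freed block */
        char* p = (char*)free_slot;
        free_slot = NULL;
        /* The block became unaddressable at VALGRIND_FREELIKE_BLOCK time,
           so make it writable again before zeroing it -- exactly the case
           described in the comment above. */
        VALGRIND_MAKE_MEM_UNDEFINED(p, n);
        memset(p, 0, n);
        VALGRIND_MALLOCLIKE_BLOCK(p, n, RZ, /*is_zeroed=*/1);
        return p;
      }
      char* p = (char*)malloc(n + 2 * RZ);
      if (p == NULL) return NULL;
      VALGRIND_MALLOCLIKE_BLOCK(p + RZ, n, RZ, /*is_zeroed=*/0);
      return p + RZ;
    }

    void my_free(void* p, size_t n) {
      VALGRIND_FREELIKE_BLOCK(p, RZ);  /* block is now unaddressable */
      free_slot = p;                   /* kept for recycling, never unmapped */
      slot_size = n;
    }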
/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, 0, 0); \
- }
+ pool, rzB, is_zeroed, 0, 0)
/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__DESTROY_MEMPOOL, \
- pool, 0, 0, 0, 0); \
- }
+ pool, 0, 0, 0, 0)
/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_ALLOC, \
- pool, addr, size, 0, 0); \
- }
+ pool, addr, size, 0, 0)
/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE(pool, addr) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_FREE, \
- pool, addr, 0, 0, 0); \
- }
+ pool, addr, 0, 0, 0)
/* Disassociate any pieces outside a particular range. */
#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_TRIM, \
- pool, addr, size, 0, 0); \
- }
+ pool, addr, size, 0, 0)
/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MOVE_MEMPOOL, \
- poolA, poolB, 0, 0, 0); \
- }
+ poolA, poolB, 0, 0, 0)
/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_CHANGE, \
- pool, addrA, addrB, size, 0); \
- }
+ pool, addrA, addrB, size, 0)
/* Return 1 if a mempool exists, else 0. */
#define VALGRIND_MEMPOOL_EXISTS(pool) \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_EXISTS, \
- pool, 0, 0, 0, 0); \
- _qzz_res; \
- })
+ pool, 0, 0, 0, 0)
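For instance, a hypothetical bump-pointer pool carved from one malloc'd
superblock might be annotated like this (a real pool would typically also
mark the unused part of the superblock as no-access):

    #include <stdlib.h>
    #include <valgrind/valgrind.h>

    typedef struct { char* base; size_t used; size_t cap; } Pool;

    Pool* pool_create(size_t cap) {
      Pool* pool = (Pool*)malloc(sizeof(Pool));
      pool->base = (char*)malloc(cap);
      pool->used = 0;
      pool->cap  = cap;
      VALGRIND_CREATE_MEMPOOL(pool, 0, 0);  /* anchor; rzB 0, not zeroed */
      return pool;
    }

    void* pool_alloc(Pool* pool, size_t n) {
      if (pool->used + n > pool->cap) return NULL;
      void* p = pool->base + pool->used;
      pool->used += n;
      VALGRIND_MEMPOOL_ALLOC(pool, p, n);   /* associate chunk with pool */
      return p;
    }

    void pool_destroy(Pool* pool) {
      VALGRIND_DESTROY_MEMPOOL(pool);  /* all chunks become unaddressable */
      free(pool->base);
      free(pool);
    }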
/* Mark a piece of memory as being a stack. Returns a stack id. */
#define VALGRIND_STACK_REGISTER(start, end) \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_REGISTER, \
- start, end, 0, 0, 0); \
- _qzz_res; \
- })
+ start, end, 0, 0, 0)
/* Unmark the piece of memory associated with a stack id as being a
stack. */
#define VALGRIND_STACK_DEREGISTER(id) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_DEREGISTER, \
- id, 0, 0, 0, 0); \
- }
+ id, 0, 0, 0, 0)
/* Change the start and end address of the stack id. */
#define VALGRIND_STACK_CHANGE(id, start, end) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_CHANGE, \
- id, start, end, 0, 0); \
- }
-
-
+ id, start, end, 0, 0)
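A short sketch of the stack requests in use, for example around a coroutine
stack (the fiber switch itself is elided):

    #include <stdlib.h>
    #include <valgrind/valgrind.h>

    enum { FIBER_STACK_SIZE = 64 * 1024 };

    int main(void) {
      char* stack = (char*)malloc(FIBER_STACK_SIZE);
      /* Tell Valgrind this heap range will be used as a stack; keep the id. */
      unsigned id = VALGRIND_STACK_REGISTER(stack, stack + FIBER_STACK_SIZE);
      /* ... switch to a fiber running on this stack, e.g. via swapcontext ... */
      VALGRIND_STACK_DEREGISTER(id);
      free(stack);
      return 0;
    }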
+
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__LOAD_PDB_DEBUGINFO, \
+ fd, ptr, total_size, delta, 0)
+
+/* Map a code address to a source file name and line number. buf64
+ must point to a 64-byte buffer in the caller's address space. The
+ result will be dumped in there and is guaranteed to be zero
+ terminated. If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__MAP_IP_TO_SRCLOC, \
+ addr, buf64, 0, 0, 0)
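For example (a sketch; any code address works, the program's own entry point
is just convenient):

    #include <stdio.h>
    #include <valgrind/valgrind.h>

    int main(void) {
      char buf64[64];  /* the request requires exactly a 64-byte buffer */
      VALGRIND_MAP_IP_TO_SRCLOC((void*)&main, buf64);
      if (buf64[0] != '\0') printf("main is at %s\n", buf64);
      return 0;
    }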
+
+
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
+#undef PLAT_arm_linux
+#undef PLAT_s390x_linux
#endif /* __VALGRIND_H */
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index defb1ae966..e10c5f40e4 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -439,6 +439,12 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(
}
+byte TypeFeedbackOracle::ToBooleanTypes(unsigned ast_id) {
+ Handle<Object> object = GetInfo(ast_id);
+ return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
+}
+
+
// Things are a bit tricky here: The iterator for the RelocInfos and the infos
// themselves are not GC-safe, so we first get all infos, then we create the
// dictionary (possibly triggering GC), and finally we relocate the collected
@@ -523,6 +529,7 @@ void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
case Code::UNARY_OP_IC:
case Code::BINARY_OP_IC:
case Code::COMPARE_IC:
+ case Code::TO_BOOLEAN_IC:
SetInfo(ast_id, target);
break;
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 0a8c935dfa..dee4c34c92 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -238,6 +238,11 @@ class TypeFeedbackOracle BASE_EMBEDDED {
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
+  // TODO(1571) We can't use ToBooleanStub::Types as the return value because
+  // of various cycles in our headers. Death to tons of implementations in
+  // headers!! :-P
+ byte ToBooleanTypes(unsigned ast_id);
+
// Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr);
TypeInfo BinaryType(BinaryOperation* expr);
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index ecdf1c70e7..785bc4373c 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -30,6 +30,7 @@
#include <stdlib.h>
#include <string.h>
+#include <climits>
#include "globals.h"
#include "checks.h"
@@ -885,6 +886,30 @@ class SimpleStringBuilder {
DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
};
+
+// A poor man's version of STL's bitset: A bit set of enums E (without explicit
+// values), fitting into an integral type T.
+template <class E, class T = int>
+class EnumSet {
+ public:
+ explicit EnumSet(T bits = 0) : bits_(bits) {}
+ bool IsEmpty() const { return bits_ == 0; }
+ bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
+ void Add(E element) { bits_ |= Mask(element); }
+ void Remove(E element) { bits_ &= ~Mask(element); }
+ T ToIntegral() const { return bits_; }
+
+ private:
+ T Mask(E element) const {
+ // The strange typing in ASSERT is necessary to avoid stupid warnings, see:
+ // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
+ ASSERT(element < static_cast<int>(sizeof(T) * CHAR_BIT));
+    return static_cast<T>(1) << element;
+ }
+
+ T bits_;
+};
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
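As a standalone illustration of the new EnumSet (with ASSERT swapped for the
standard assert so it compiles outside the tree; the Type enum below is
invented here), including the byte round-trip that ToBooleanStub::Types and
TypeFeedbackOracle::ToBooleanTypes rely on elsewhere in this patch:

    #include <cassert>
    #include <climits>
    #include <cstdio>

    template <class E, class T = int>
    class EnumSet {
     public:
      explicit EnumSet(T bits = 0) : bits_(bits) {}
      bool IsEmpty() const { return bits_ == 0; }
      bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
      void Add(E element) { bits_ |= Mask(element); }
      void Remove(E element) { bits_ &= ~Mask(element); }
      T ToIntegral() const { return bits_; }

     private:
      T Mask(E element) const {
        assert(element < static_cast<int>(sizeof(T) * CHAR_BIT));
        return static_cast<T>(1) << element;
      }
      T bits_;
    };

    enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING };

    int main() {
      EnumSet<Type, unsigned char> seen;
      seen.Add(SMI);
      seen.Add(STRING);
      unsigned char raw = seen.ToIntegral();      // travels as a plain byte
      EnumSet<Type, unsigned char> decoded(raw);  // reconstructed losslessly
      std::printf("%d %d\n", decoded.Contains(SMI), decoded.Contains(BOOLEAN));
      return 0;
    }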
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 0df546863e..be9b297f81 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -157,17 +157,6 @@ function GlobalEval(x) {
}
-// execScript for IE compatibility.
-function GlobalExecScript(expr, lang) {
- // NOTE: We don't care about the character casing.
- if (!lang || /javascript/i.test(lang)) {
- var f = %CompileString(ToString(expr));
- f.call(%GlobalReceiver(global));
- }
- return null;
-}
-
-
// ----------------------------------------------------------------------------
@@ -187,8 +176,7 @@ function SetupGlobal() {
"isFinite", GlobalIsFinite,
"parseInt", GlobalParseInt,
"parseFloat", GlobalParseFloat,
- "eval", GlobalEval,
- "execScript", GlobalExecScript
+ "eval", GlobalEval
));
}
@@ -221,7 +209,7 @@ function ObjectToString() {
if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
return '[object Undefined]';
}
- if (IS_NULL(this)) return '[object Null]';
+ if (IS_NULL(this)) return '[object Null]';
return "[object " + %_ClassOf(ToObject(this)) + "]";
}
@@ -244,6 +232,10 @@ function ObjectValueOf() {
// ECMA-262 - 15.2.4.5
function ObjectHasOwnProperty(V) {
+ if (%IsJSProxy(this)) {
+ var handler = %GetHandler(this);
+ return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, TO_STRING_INLINE(V));
+ }
return %HasLocalProperty(TO_OBJECT_INLINE(this), TO_STRING_INLINE(V));
}
@@ -261,7 +253,12 @@ function ObjectIsPrototypeOf(V) {
// ECMA-262 - 15.2.4.6
function ObjectPropertyIsEnumerable(V) {
- return %IsPropertyEnumerable(ToObject(this), ToString(V));
+ var P = ToString(V);
+ if (%IsJSProxy(this)) {
+ var desc = GetOwnProperty(this, P);
+ return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
+ }
+ return %IsPropertyEnumerable(ToObject(this), P);
}
@@ -322,9 +319,7 @@ function ObjectKeys(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var keys = handler.keys;
- if (IS_UNDEFINED(keys)) keys = DerivedKeysTrap;
- var names = %_CallFunction(handler, keys);
+ var names = CallTrap0(handler, "keys", DerivedKeysTrap);
return ToStringArray(names);
}
return %LocalKeys(obj);
@@ -595,16 +590,41 @@ function ConvertDescriptorArrayToDescriptor(desc_array) {
}
+// For Harmony proxies.
+function GetTrap(handler, name, defaultTrap) {
+ var trap = handler[name];
+ if (IS_UNDEFINED(trap)) {
+ if (IS_UNDEFINED(defaultTrap)) {
+ throw MakeTypeError("handler_trap_missing", [handler, name]);
+ }
+ trap = defaultTrap;
+ } else if (!IS_FUNCTION(trap)) {
+ throw MakeTypeError("handler_trap_must_be_callable", [handler, name]);
+ }
+ return trap;
+}
+
+
+function CallTrap0(handler, name, defaultTrap) {
+ return %_CallFunction(handler, GetTrap(handler, name, defaultTrap));
+}
+
+
+function CallTrap1(handler, name, defaultTrap, x) {
+ return %_CallFunction(handler, x, GetTrap(handler, name, defaultTrap));
+}
+
+
+function CallTrap2(handler, name, defaultTrap, x, y) {
+ return %_CallFunction(handler, x, y, GetTrap(handler, name, defaultTrap));
+}
+
+
// ES5 section 8.12.2.
function GetProperty(obj, p) {
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var getProperty = handler.getPropertyDescriptor;
- if (IS_UNDEFINED(getProperty)) {
- throw MakeTypeError("handler_trap_missing",
- [handler, "getPropertyDescriptor"]);
- }
- var descriptor = %_CallFunction(handler, p, getProperty);
+    var descriptor = CallTrap1(handler, "getPropertyDescriptor", void 0, p);
if (IS_UNDEFINED(descriptor)) return descriptor;
var desc = ToCompletePropertyDescriptor(descriptor);
if (!desc.isConfigurable()) {
@@ -625,9 +645,7 @@ function GetProperty(obj, p) {
function HasProperty(obj, p) {
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var has = handler.has;
- if (IS_UNDEFINED(has)) has = DerivedHasTrap;
- return ToBoolean(%_CallFunction(handler, obj, p, has));
+ return ToBoolean(CallTrap1(handler, "has", DerivedHasTrap, p));
}
var desc = GetProperty(obj, p);
return IS_UNDEFINED(desc) ? false : true;
@@ -635,15 +653,11 @@ function HasProperty(obj, p) {
// ES5 section 8.12.1.
-function GetOwnProperty(obj, p) {
+function GetOwnProperty(obj, v) {
+ var p = ToString(v);
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var getOwnProperty = handler.getOwnPropertyDescriptor;
- if (IS_UNDEFINED(getOwnProperty)) {
- throw MakeTypeError("handler_trap_missing",
- [handler, "getOwnPropertyDescriptor"]);
- }
- var descriptor = %_CallFunction(handler, p, getOwnProperty);
+ var descriptor = CallTrap1(handler, "getOwnPropertyDescriptor", void 0, p);
if (IS_UNDEFINED(descriptor)) return descriptor;
var desc = ToCompletePropertyDescriptor(descriptor);
if (!desc.isConfigurable()) {
@@ -656,7 +670,7 @@ function GetOwnProperty(obj, p) {
// GetOwnProperty returns an array indexed by the constants
// defined in macros.py.
// If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(ToObject(obj), ToString(p));
+ var props = %GetOwnProperty(ToObject(obj), ToString(v));
// A false value here means that access checks failed.
if (props === false) return void 0;
@@ -668,11 +682,7 @@ function GetOwnProperty(obj, p) {
// Harmony proxies.
function DefineProxyProperty(obj, p, attributes, should_throw) {
var handler = %GetHandler(obj);
- var defineProperty = handler.defineProperty;
- if (IS_UNDEFINED(defineProperty)) {
- throw MakeTypeError("handler_trap_missing", [handler, "defineProperty"]);
- }
- var result = %_CallFunction(handler, p, attributes, defineProperty);
+ var result = CallTrap2(handler, "defineProperty", void 0, p, attributes);
if (!ToBoolean(result)) {
if (should_throw) {
throw MakeTypeError("handler_returned_false",
@@ -901,12 +911,7 @@ function ObjectGetOwnPropertyNames(obj) {
// Special handling for proxies.
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var getOwnPropertyNames = handler.getOwnPropertyNames;
- if (IS_UNDEFINED(getOwnPropertyNames)) {
- throw MakeTypeError("handler_trap_missing",
- [handler, "getOwnPropertyNames"]);
- }
- var names = %_CallFunction(handler, getOwnPropertyNames);
+ var names = CallTrap0(handler, "getOwnPropertyNames", void 0);
return ToStringArray(names, "getOwnPropertyNames");
}
@@ -1036,11 +1041,7 @@ function ObjectDefineProperties(obj, properties) {
// Harmony proxies.
function ProxyFix(obj) {
var handler = %GetHandler(obj);
- var fix = handler.fix;
- if (IS_UNDEFINED(fix)) {
- throw MakeTypeError("handler_trap_missing", [handler, "fix"]);
- }
- var props = %_CallFunction(handler, fix);
+ var props = CallTrap0(handler, "fix", void 0);
if (IS_UNDEFINED(props)) {
throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
}
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index b8ae218792..16579df620 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 4
-#define BUILD_NUMBER 14
+#define MINOR_VERSION 5
+#define BUILD_NUMBER 3
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 1a6efcbd61..56fbf9a339 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -230,68 +230,151 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument on the stack and returns its result in tos_:
+// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
+ Label patch;
+ const Register argument = rax;
const Register map = rdx;
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ if (!types_.IsEmpty()) {
+ __ movq(argument, Operand(rsp, 1 * kPointerSize));
+ }
// undefined -> false
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, &false_result);
+ CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false, &patch);
// Boolean -> its value
- __ CompareRoot(rax, Heap::kFalseValueRootIndex);
- __ j(equal, &false_result);
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, &true_result);
-
- // Smis: 0 -> false, all other -> true
- __ Cmp(rax, Smi::FromInt(0));
- __ j(equal, &false_result);
- __ JumpIfSmi(rax, &true_result);
+ CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false, &patch);
+ CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true, &patch);
// 'null' -> false.
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, &false_result, Label::kNear);
+ CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false, &patch);
+
+ if (types_.Contains(SMI)) {
+ // Smis: 0 -> false, all other -> true
+ Label not_smi;
+ __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
+ // argument contains the correct return value already
+ if (!tos_.is(argument)) {
+ __ movq(tos_, argument);
+ }
+ __ ret(1 * kPointerSize);
+ __ bind(&not_smi);
+ } else if (types_.NeedsMap()) {
+ // If we need a map later and have a Smi -> patch.
+ __ JumpIfSmi(argument, &patch, Label::kNear);
+ }
- // Get the map of the heap object.
- __ movq(map, FieldOperand(rax, HeapObject::kMapOffset));
+ if (types_.NeedsMap()) {
+ __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));
- // Undetectable -> false.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &false_result, Label::kNear);
+ // Everything with a map could be undetectable, so check this now.
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ // Undetectable -> false.
+ Label not_undetectable;
+ __ j(zero, &not_undetectable, Label::kNear);
+ __ Set(tos_, 0);
+ __ ret(1 * kPointerSize);
+ __ bind(&not_undetectable);
+ }
- // JavaScript object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, &true_result, Label::kNear);
+ if (types_.Contains(SPEC_OBJECT)) {
+ // spec object -> true.
+ Label not_js_object;
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, &not_js_object, Label::kNear);
+ __ Set(tos_, 1);
+ __ ret(1 * kPointerSize);
+ __ bind(&not_js_object);
+ } else if (types_.Contains(INTERNAL_OBJECT)) {
+ // We've seen a spec object for the first time -> patch.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &patch, Label::kNear);
+ }
- // String value -> false iff empty.
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(rax, String::kLengthOffset), Immediate(0));
- __ j(zero, &false_result, Label::kNear);
- __ jmp(&true_result, Label::kNear);
+ if (types_.Contains(STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string, Label::kNear);
+ __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
+ __ ret(1 * kPointerSize); // the string length is OK as the return value
+ __ bind(&not_string);
+ } else if (types_.Contains(INTERNAL_OBJECT)) {
+ // We've seen a string for the first time -> patch
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(below, &patch, Label::kNear);
+ }
- __ bind(&not_string);
- // HeapNumber -> false iff +0, -0, or NaN.
- // These three cases set the zero flag when compared to zero using ucomisd.
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ j(zero, &false_result, Label::kNear);
- // Fall through to |true_result|.
+ if (types_.Contains(HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number, false_result;
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ xorps(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
+ __ j(zero, &false_result, Label::kNear);
+ __ Set(tos_, 1);
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ Set(tos_, 0);
+ __ ret(1 * kPointerSize);
+ __ bind(&not_heap_number);
+ } else if (types_.Contains(INTERNAL_OBJECT)) {
+ // We've seen a heap number for the first time -> patch
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &patch, Label::kNear);
+ }
- // Return 1/0 for true/false in tos_.
- __ bind(&true_result);
- __ Set(tos_, 1);
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
+ if (types_.Contains(INTERNAL_OBJECT)) {
+ // internal objects -> true
+ __ Set(tos_, 1);
+ __ ret(1 * kPointerSize);
+ }
+
+ if (!types_.IsAll()) {
+ __ bind(&patch);
+ GenerateTypeTransition(masm);
+ }
+}
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+ Type type,
+ Heap::RootListIndex value,
+ bool result,
+ Label* patch) {
+ const Register argument = rax;
+ if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value in tos_.
+ Label different_value;
+ __ CompareRoot(argument, value);
+ __ j(not_equal, &different_value, Label::kNear);
+ __ Set(tos_, result ? 1 : 0);
+ __ ret(1 * kPointerSize);
+ __ bind(&different_value);
+ } else if (types_.Contains(INTERNAL_OBJECT)) {
+    // If we see an unexpected oddball and handle internal objects, we must
+    // patch because the code for internal objects doesn't handle it
+    // explicitly.
+ __ CompareRoot(argument, value);
+ __ j(equal, patch);
+ }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(rcx); // Get return address, operand is now on top of stack.
+ __ Push(Smi::FromInt(tos_.code()));
+ __ Push(Smi::FromInt(types_.ToByte()));
+ __ push(rcx); // Push return address.
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+ 3,
+ 1);
}
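To make the new control flow easier to follow, here is a hypothetical C++
model of the dispatch, not the emitted x64 code: each specialized stub
answers only for the types it has already seen, and an unseen type takes the
patch path (GenerateTypeTransition in the real stub), which widens the
recorded type set and recompiles:

    #include <cstdio>

    enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, STRING, HEAP_NUMBER,
                SPEC_OBJECT, INTERNAL_OBJECT };

    struct Value { Type type; double number; int length; bool boolean; };

    // Returns false when the value's type is not in the stub's set,
    // signalling that the caller must patch; otherwise stores the answer.
    bool SpecializedToBoolean(unsigned types, const Value& v, bool* result) {
      if ((types & (1u << v.type)) == 0) return false;  // unseen -> patch
      switch (v.type) {
        case UNDEFINED:
        case NULL_TYPE:   *result = false; break;
        case BOOLEAN:     *result = v.boolean; break;
        case SMI:
        case HEAP_NUMBER: *result = v.number != 0 && v.number == v.number;
                          break;  // false iff +0, -0, or NaN
        case STRING:      *result = v.length != 0; break;
        default:          *result = true; break;  // objects are truthy
      }
      return true;
    }

    int main() {
      unsigned types = 1u << SMI;  // stub specialized for Smis only
      Value str = { STRING, 0, 3, false };
      bool result = false;
      if (!SpecializedToBoolean(types, str, &result)) {
        types |= 1u << STRING;     // "patch": widen the set and retry
        SpecializedToBoolean(types, str, &result);
      }
      std::printf("%d\n", result); // 1: a non-empty string is truthy
      return 0;
    }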
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index f8f2d6e687..507bbd44c3 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -132,6 +132,7 @@ ModuloFunction CreateModuloFunction() {
CodeDesc desc;
masm.GetCode(&desc);
+ OS::ProtectCode(buffer, actual_size);
// Call the function from C++ through this pointer.
return FUNCTION_CAST<ModuloFunction>(buffer);
}
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index e637ba124d..ae5045f0df 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -67,7 +67,8 @@ void CPU::FlushICache(void* start, size_t size) {
// solution is to run valgrind with --smc-check=all, but this comes at a big
// performance cost. We can notify valgrind to invalidate its cache.
#ifdef VALGRIND_DISCARD_TRANSLATIONS
- VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ USE(res);
#endif
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 49361e4f34..ce6a9105f2 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -1402,39 +1402,119 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, false_label);
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ j(equal, false_label);
- __ Cmp(reg, Smi::FromInt(0));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
-
- // Test for double values. Plus/minus zero and NaN are false.
- Label call_stub;
- __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_stub, Label::kNear);
-
- // HeapNumber => false iff +0, -0, or NaN. These three cases set the
- // zero flag when compared to zero using ucomisd.
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, false_label);
- __ jmp(true_label);
-
- // The conversion stub doesn't cause garbage collections so it's
- // safe to not record a safepoint after the call.
- __ bind(&call_stub);
- ToBooleanStub stub(rax);
- __ Pushad();
- __ push(reg);
- __ CallStub(&stub);
- __ testq(rax, rax);
- __ Popad();
- EmitBranch(true_block, false_block, not_zero);
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ j(equal, false_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen undefined for the first time -> deopt.
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // true -> true.
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a boolean for the first time -> deopt.
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // false -> false.
+ __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+ __ j(equal, false_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a boolean for the first time -> deopt.
+ __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ __ j(equal, false_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen null for the first time -> deopt.
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ __ Cmp(reg, Smi::FromInt(0));
+ __ j(equal, false_label);
+ __ JumpIfSmi(reg, true_label);
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ testb(reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ const Register map = kScratchRegister;
+ if (expected.NeedsMap()) {
+ __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
+ // Everything with a map could be undetectable, so check this now.
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ // Undetectable -> false.
+ __ j(not_zero, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, true_label);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a spec object for the first time -> deopt.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ DeoptimizeIf(above_equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string, Label::kNear);
+ __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ j(not_zero, true_label);
+ __ jmp(false_label);
+ __ bind(&not_string);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a string for the first time -> deopt
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ DeoptimizeIf(below, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ xorps(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ j(zero, false_label);
+ __ jmp(true_label);
+ __ bind(&not_heap_number);
+ } else if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // We've seen a heap number for the first time -> deopt.
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ if (expected.Contains(ToBooleanStub::INTERNAL_OBJECT)) {
+ // internal objects -> true
+ __ jmp(true_label);
+ } else {
+ // We've seen something for the first time -> deopt.
+ DeoptimizeIf(no_condition, instr->environment());
+ }
}
}
}
@@ -2246,7 +2326,6 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
- Register elements = ToRegister(instr->elements());
XMMRegister result(ToDoubleRegister(instr->result()));
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3101,14 +3180,11 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
void LCodeGen::DoStoreKeyedFastDoubleElement(
LStoreKeyedFastDoubleElement* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
Label have_value;
__ ucomisd(value, value);
__ j(parity_odd, &have_value); // NaN.
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
__ Set(kScratchRegister, BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
__ movq(value, kScratchRegister);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 5b502a80c9..7f4490f6de 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -1036,7 +1036,11 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
}
- return new LBranch(UseRegisterAtStart(v));
+ LInstruction* branch = new LBranch(UseRegister(v));
+  // When all input types are handled we never deopt, so there is no need
+  // to assign an environment.
+ bool all_cases_handled = instr->expected_input_types().IsAll();
+ return all_cases_handled ? branch : AssignEnvironment(branch);
}
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index b8e5f22ed5..e195aecc38 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -3752,10 +3752,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&smi_value);
// Value is a smi. convert to a double and store.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
+ // Preserve original value.
+ __ SmiToInteger32(rdx, rax);
+ __ push(rdx);
__ fild_s(Operand(rsp, 0));
- __ pop(rax);
+ __ pop(rdx);
__ SmiToInteger32(rcx, rcx);
__ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
__ ret(0);
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index 51d852084d..ab9d40fec5 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -91,7 +91,7 @@ class BenchmarkTestConfiguration(test.TestConfiguration):
return [test]
def GetBuildRequirements(self):
- return ['sample', 'sample=shell']
+ return ['d8']
def GetTestStatus(self, sections, defs):
pass
diff --git a/deps/v8/test/cctest/SConscript b/deps/v8/test/cctest/SConscript
index b0a71664cc..a228ac1533 100644
--- a/deps/v8/test/cctest/SConscript
+++ b/deps/v8/test/cctest/SConscript
@@ -65,6 +65,7 @@ SOURCES = {
'test-debug.cc',
'test-decls.cc',
'test-deoptimization.cc',
+ 'test-dictionary.cc',
'test-diy-fp.cc',
'test-double.cc',
'test-dtoa.cc',
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 9cbcb9cfa4..07d12c899e 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -61,6 +61,7 @@
'test-debug.cc',
'test-decls.cc',
'test-deoptimization.cc',
+ 'test-dictionary.cc',
'test-diy-fp.cc',
'test-double.cc',
'test-dtoa.cc',
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index bc9a0e2ca2..dad5e1b803 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -3560,6 +3560,68 @@ THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
}
+static v8::Handle<Value> UnboxedDoubleIndexedPropertyGetter(
+ uint32_t index,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index < 25) {
+ return v8::Handle<Value>(v8_num(index));
+ }
+ return v8::Handle<Value>();
+}
+
+
+static v8::Handle<Value> UnboxedDoubleIndexedPropertySetter(
+ uint32_t index,
+ Local<Value> value,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index < 25) {
+ return v8::Handle<Value>(v8_num(index));
+ }
+ return v8::Handle<Value>();
+}
+
+
+Handle<v8::Array> UnboxedDoubleIndexedPropertyEnumerator(
+ const AccessorInfo& info) {
+ // Force the list of returned keys to be stored in a FastDoubleArray.
+ Local<Script> indexed_property_names_script = Script::Compile(v8_str(
+ "keys = new Array(); keys[125000] = 1;"
+ "for(i = 0; i < 80000; i++) { keys[i] = i; };"
+ "keys.length = 25; keys;"));
+ Local<Value> result = indexed_property_names_script->Run();
+ return Local<v8::Array>(::v8::Array::Cast(*result));
+}
+
+
+// Make sure that the interceptor code in the runtime properly handles
+// merging property name lists for double-array-backed arrays.
+THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(UnboxedDoubleIndexedPropertyGetter,
+ UnboxedDoubleIndexedPropertySetter,
+ 0,
+ 0,
+ UnboxedDoubleIndexedPropertyEnumerator);
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+  // When obj is created, force it to be stored in a FastDoubleArray.
+ Local<Script> create_unboxed_double_script = Script::Compile(v8_str(
+ "obj[125000] = 1; for(i = 0; i < 80000; i+=2) { obj[i] = i; } "
+ "key_count = 0; "
+ "for (x in obj) {key_count++;};"
+ "obj;"));
+ Local<Value> result = create_unboxed_double_script->Run();
+ CHECK(result->ToObject()->HasRealIndexedProperty(2000));
+ Local<Script> key_count_check = Script::Compile(v8_str(
+ "key_count;"));
+ result = key_count_check->Run();
+ CHECK_EQ(v8_num(40013), result);
+}
+
+
static v8::Handle<Value> IdentityIndexedPropertyGetter(
uint32_t index,
const AccessorInfo& info) {
@@ -6993,53 +7055,34 @@ THREADED_TEST(SetPrototype) {
}
-THREADED_TEST(SetPrototypeProperties) {
+THREADED_TEST(FunctionReadOnlyPrototype) {
v8::HandleScope handle_scope;
LocalContext context;
Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
- t1->SetPrototypeAttributes(v8::DontDelete);
+ t1->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(42));
+ t1->ReadOnlyPrototype();
context->Global()->Set(v8_str("func1"), t1->GetFunction());
+ // Configured value of ReadOnly flag.
CHECK(CompileRun(
"(function() {"
" descriptor = Object.getOwnPropertyDescriptor(func1, 'prototype');"
- " return (descriptor['writable'] == true) &&"
- " (descriptor['enumerable'] == true) &&"
- " (descriptor['configurable'] == false);"
+ " return (descriptor['writable'] == false);"
"})()")->BooleanValue());
+ CHECK_EQ(42, CompileRun("func1.prototype.x")->Int32Value());
+ CHECK_EQ(42,
+ CompileRun("func1.prototype = {}; func1.prototype.x")->Int32Value());
Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
- t2->SetPrototypeAttributes(v8::DontEnum);
+ t2->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(42));
context->Global()->Set(v8_str("func2"), t2->GetFunction());
+ // Default value of ReadOnly flag.
CHECK(CompileRun(
"(function() {"
" descriptor = Object.getOwnPropertyDescriptor(func2, 'prototype');"
- " return (descriptor['writable'] == true) &&"
- " (descriptor['enumerable'] == false) &&"
- " (descriptor['configurable'] == true);"
- "})()")->BooleanValue());
-
- Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New();
- t3->SetPrototypeAttributes(v8::ReadOnly);
- context->Global()->Set(v8_str("func3"), t3->GetFunction());
- CHECK(CompileRun(
- "(function() {"
- " descriptor = Object.getOwnPropertyDescriptor(func3, 'prototype');"
- " return (descriptor['writable'] == false) &&"
- " (descriptor['enumerable'] == true) &&"
- " (descriptor['configurable'] == true);"
- "})()")->BooleanValue());
-
- Local<v8::FunctionTemplate> t4 = v8::FunctionTemplate::New();
- t4->SetPrototypeAttributes(v8::ReadOnly | v8::DontEnum | v8::DontDelete);
- context->Global()->Set(v8_str("func4"), t4->GetFunction());
- CHECK(CompileRun(
- "(function() {"
- " descriptor = Object.getOwnPropertyDescriptor(func4, 'prototype');"
- " return (descriptor['writable'] == false) &&"
- " (descriptor['enumerable'] == false) &&"
- " (descriptor['configurable'] == false);"
+ " return (descriptor['writable'] == true);"
"})()")->BooleanValue());
+ CHECK_EQ(42, CompileRun("func2.prototype.x")->Int32Value());
}
@@ -10620,17 +10663,16 @@ TEST(PreCompileInvalidPreparseDataError) {
*exception_value);
try_catch.Reset();
+
// Overwrite function bar's start position with 200. The function entry
- // will not be found when searching for it by position.
+ // will not be found when searching for it by position and we should fall
+ // back on eager compilation.
sd = v8::ScriptData::PreCompile(script, i::StrLength(script));
sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
200;
compiled_script = Script::New(source, NULL, sd);
- CHECK(try_catch.HasCaught());
- String::AsciiValue second_exception_value(try_catch.Message()->Get());
- CHECK_EQ("Uncaught SyntaxError: Invalid preparser data for function bar",
- *second_exception_value);
+ CHECK(!try_catch.HasCaught());
delete sd;
}
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 786a54a117..2aa72078af 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -56,14 +56,3 @@ TEST(List) {
CHECK_EQ(0, list->length());
delete list;
}
-
-
-TEST(DeleteEmpty) {
- {
- List<int>* list = new List<int>(0);
- delete list;
- }
- {
- List<int> list(0);
- }
-}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 72907b6e9a..8f226f6cde 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -377,7 +377,7 @@ static void CheckCodeForUnsafeLiteral(Handle<JSFunction> f) {
while (pc < end) {
int num_const = d.ConstantPoolSizeAt(pc);
if (num_const >= 0) {
- pc += num_const * kPointerSize;
+ pc += (num_const + 1) * kPointerSize;
} else {
pc += d.InstructionDecode(decode_buffer, pc);
CHECK(strstr(decode_buffer.start(), "mov eax,0x178c29c") == NULL);
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
new file mode 100644
index 0000000000..15a854b363
--- /dev/null
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -0,0 +1,85 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "api.h"
+#include "debug.h"
+#include "execution.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "objects.h"
+#include "global-handles.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+TEST(ObjectHashTable) {
+ v8::HandleScope scope;
+ LocalContext context;
+ Handle<ObjectHashTable> table = FACTORY->NewObjectHashTable(23);
+ Handle<JSObject> a = FACTORY->NewJSArray(7);
+ Handle<JSObject> b = FACTORY->NewJSArray(11);
+ table = PutIntoObjectHashTable(table, a, b);
+ CHECK_EQ(table->NumberOfElements(), 1);
+ CHECK_EQ(table->Lookup(*a), *b);
+ CHECK_EQ(table->Lookup(*b), HEAP->undefined_value());
+
+ // Keys still have to be valid after objects were moved.
+ HEAP->CollectGarbage(NEW_SPACE);
+ CHECK_EQ(table->NumberOfElements(), 1);
+ CHECK_EQ(table->Lookup(*a), *b);
+ CHECK_EQ(table->Lookup(*b), HEAP->undefined_value());
+
+ // Keys that are overwritten should not change number of elements.
+ table = PutIntoObjectHashTable(table, a, FACTORY->NewJSArray(13));
+ CHECK_EQ(table->NumberOfElements(), 1);
+ CHECK_NE(table->Lookup(*a), *b);
+
+ // Keys mapped to undefined should be removed permanently.
+ table = PutIntoObjectHashTable(table, a, FACTORY->undefined_value());
+ CHECK_EQ(table->NumberOfElements(), 0);
+ CHECK_EQ(table->NumberOfDeletedElements(), 1);
+ CHECK_EQ(table->Lookup(*a), HEAP->undefined_value());
+
+ // Keys should map back to their respective values.
+ for (int i = 0; i < 100; i++) {
+ Handle<JSObject> key = FACTORY->NewJSArray(7);
+ Handle<JSObject> value = FACTORY->NewJSArray(11);
+ table = PutIntoObjectHashTable(table, key, value);
+ CHECK_EQ(table->NumberOfElements(), i + 1);
+ CHECK_NE(table->FindEntry(*key), ObjectHashTable::kNotFound);
+ CHECK_EQ(table->Lookup(*key), *value);
+ }
+
+ // Keys never added to the map should not be found.
+ for (int i = 0; i < 1000; i++) {
+ Handle<JSObject> o = FACTORY->NewJSArray(100);
+ CHECK_EQ(table->FindEntry(*o), ObjectHashTable::kNotFound);
+ CHECK_EQ(table->Lookup(*o), HEAP->undefined_value());
+ }
+}
diff --git a/deps/v8/test/cctest/test-list.cc b/deps/v8/test/cctest/test-list.cc
index e20ee8a360..7520b05fcb 100644
--- a/deps/v8/test/cctest/test-list.cc
+++ b/deps/v8/test/cctest/test-list.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -138,3 +138,14 @@ TEST(Clear) {
list.Clear();
CHECK_EQ(0, list.length());
}
+
+
+TEST(DeleteEmpty) {
+ {
+ List<int>* list = new List<int>(0);
+ delete list;
+ }
+ {
+ List<int> list(0);
+ }
+}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 730d72a95b..46f8ad6cd7 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -459,7 +459,9 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
CHECK(root->IsString());
}
v8::HandleScope handle_scope;
- Handle<Object>root_handle(root);
+ Handle<Object> root_handle(root);
+
+ ReserveSpaceForPartialSnapshot(file_name);
Object* root2;
{
@@ -542,7 +544,9 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
CHECK(root->IsContext());
}
v8::HandleScope handle_scope;
- Handle<Object>root_handle(root);
+ Handle<Object> root_handle(root);
+
+ ReserveSpaceForPartialSnapshot(file_name);
Object* root2;
{
diff --git a/deps/v8/test/es5conform/testcfg.py b/deps/v8/test/es5conform/testcfg.py
index af74b8c520..b6a17d9b69 100644
--- a/deps/v8/test/es5conform/testcfg.py
+++ b/deps/v8/test/es5conform/testcfg.py
@@ -97,7 +97,7 @@ class ES5ConformTestConfiguration(test.TestConfiguration):
return tests
def GetBuildRequirements(self):
- return ['sample', 'sample=shell']
+ return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'es5conform.status')
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index aabbfef9b0..af467e699e 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -125,7 +125,7 @@ class MessageTestConfiguration(test.TestConfiguration):
return result
def GetBuildRequirements(self):
- return ['sample', 'sample=shell']
+ return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'message.status')
diff --git a/deps/v8/test/mjsunit/compiler/regress-lbranch-double.js b/deps/v8/test/mjsunit/compiler/regress-lbranch-double.js
new file mode 100644
index 0000000000..dca6d5bace
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-lbranch-double.js
@@ -0,0 +1,40 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// ARM's code generator for LBranch had a bug, swapping the true/false
+// branches when the representation of the condition is a double.
+
+function foo() {
+ return Math.sqrt(2.6415) ? 88 : 99;
+}
+
+assertEquals(88, foo());
+assertEquals(88, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(88, foo());
diff --git a/deps/v8/test/mjsunit/function-names.js b/deps/v8/test/mjsunit/function-names.js
index c083f18f5d..5ed0b794e8 100644
--- a/deps/v8/test/mjsunit/function-names.js
+++ b/deps/v8/test/mjsunit/function-names.js
@@ -128,6 +128,6 @@ var globalFunctions = [
"encodeURI", "encodeURIComponent", "Error", "TypeError",
"RangeError", "SyntaxError", "ReferenceError", "EvalError",
"URIError", "isNaN", "isFinite", "parseInt", "parseFloat",
- "eval", "execScript"];
+ "eval"];
TestFunctionNames(this, globalFunctions);
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/harmony/proxies.js
index 84641d589e..640033d9cc 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/harmony/proxies.js
@@ -42,22 +42,27 @@ function TestGet(handler) {
TestGet({
get: function(r, k) { return 42 }
})
+
TestGet({
get: function(r, k) { return this.get2(r, k) },
get2: function(r, k) { return 42 }
})
+
TestGet({
getPropertyDescriptor: function(k) { return {value: 42} }
})
+
TestGet({
getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
getPropertyDescriptor2: function(k) { return {value: 42} }
})
+
TestGet({
getPropertyDescriptor: function(k) {
return {get value() { return 42 }}
}
})
+
TestGet({
get: undefined,
getPropertyDescriptor: function(k) { return {value: 42} }
@@ -83,32 +88,38 @@ function TestGetCall(handler) {
TestGetCall({
get: function(r, k) { return function() { return 55 } }
})
+
TestGetCall({
get: function(r, k) { return this.get2(r, k) },
get2: function(r, k) { return function() { return 55 } }
})
+
TestGetCall({
getPropertyDescriptor: function(k) {
return {value: function() { return 55 }}
}
})
+
TestGetCall({
getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
getPropertyDescriptor2: function(k) {
return {value: function() { return 55 }}
}
})
+
TestGetCall({
getPropertyDescriptor: function(k) {
return {get value() { return function() { return 55 } }}
}
})
+
TestGetCall({
get: undefined,
getPropertyDescriptor: function(k) {
return {value: function() { return 55 }}
}
})
+
TestGetCall({
get: function(r, k) {
if (k == "gg") {
@@ -146,14 +157,17 @@ function TestSet(handler) {
TestSet({
set: function(r, k, v) { key = k; val = v; return true }
})
+
TestSet({
set: function(r, k, v) { return this.set2(r, k, v) },
set2: function(r, k, v) { key = k; val = v; return true }
})
+
TestSet({
getOwnPropertyDescriptor: function(k) { return {writable: true} },
defineProperty: function(k, desc) { key = k; val = desc.value }
})
+
TestSet({
getOwnPropertyDescriptor: function(k) {
return this.getOwnPropertyDescriptor2(k)
@@ -162,22 +176,26 @@ TestSet({
defineProperty: function(k, desc) { this.defineProperty2(k, desc) },
defineProperty2: function(k, desc) { key = k; val = desc.value }
})
+
TestSet({
getOwnPropertyDescriptor: function(k) {
return {get writable() { return true }}
},
defineProperty: function(k, desc) { key = k; val = desc.value }
})
+
TestSet({
getOwnPropertyDescriptor: function(k) {
return {set: function(v) { key = k; val = v }}
}
})
+
TestSet({
getOwnPropertyDescriptor: function(k) { return null },
getPropertyDescriptor: function(k) { return {writable: true} },
defineProperty: function(k, desc) { key = k; val = desc.value }
})
+
TestSet({
getOwnPropertyDescriptor: function(k) { return null },
getPropertyDescriptor: function(k) {
@@ -185,12 +203,14 @@ TestSet({
},
defineProperty: function(k, desc) { key = k; val = desc.value }
})
+
TestSet({
getOwnPropertyDescriptor: function(k) { return null },
getPropertyDescriptor: function(k) {
return {set: function(v) { key = k; val = v }}
}
})
+
TestSet({
getOwnPropertyDescriptor: function(k) { return null },
getPropertyDescriptor: function(k) { return null },
@@ -279,10 +299,12 @@ function TestDefine(handler) {
TestDefine({
defineProperty: function(k, d) { key = k; desc = d; return true }
})
+
TestDefine({
defineProperty: function(k, d) { return this.defineProperty2(k, d) },
defineProperty2: function(k, d) { key = k; desc = d; return true }
})
+
TestDefine(Proxy.create({
get: function(pr, pk) {
return function(k, d) { key = k; desc = d; return true }
@@ -323,10 +345,12 @@ function TestDelete(handler) {
TestDelete({
'delete': function(k) { key = k; return k < "z" }
})
+
TestDelete({
'delete': function(k) { return this.delete2(k) },
delete2: function(k) { key = k; return k < "z" }
})
+
TestDelete(Proxy.create({
get: function(pr, pk) {
return function(k) { key = k; return k < "z" }
@@ -363,6 +387,7 @@ TestDescriptor({
defineProperty: function(k, d) { this["__" + k] = d; return true },
getOwnPropertyDescriptor: function(k) { return this["__" + k] }
})
+
TestDescriptor({
defineProperty: function(k, d) { this["__" + k] = d; return true },
getOwnPropertyDescriptor: function(k) {
@@ -404,7 +429,7 @@ assertTrue("object" == typeof Proxy.create({}))
-// Element (in).
+// Membership test (in).
var key
function TestIn(handler) {
@@ -442,26 +467,31 @@ function TestIn(handler) {
TestIn({
has: function(k) { key = k; return k < "z" }
})
+
TestIn({
has: function(k) { return this.has2(k) },
has2: function(k) { key = k; return k < "z" }
})
+
TestIn({
getPropertyDescriptor: function(k) {
key = k; return k < "z" ? {value: 42} : void 0
}
})
+
TestIn({
getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
getPropertyDescriptor2: function(k) {
key = k; return k < "z" ? {value: 42} : void 0
}
})
+
TestIn({
getPropertyDescriptor: function(k) {
key = k; return k < "z" ? {get value() { return 42 }} : void 0
}
})
+
TestIn({
get: undefined,
getPropertyDescriptor: function(k) {
@@ -477,7 +507,65 @@ TestIn(Proxy.create({
-// Instanceof (instanceof).
+// Own Properties (Object.prototype.hasOwnProperty).
+
+var key
+function TestHasOwn(handler) {
+ var o = Proxy.create(handler)
+ assertTrue(Object.prototype.hasOwnProperty.call(o, "a"))
+ assertEquals("a", key)
+ assertTrue(Object.prototype.hasOwnProperty.call(o, 99))
+ assertEquals("99", key)
+ assertFalse(Object.prototype.hasOwnProperty.call(o, "z"))
+ assertEquals("z", key)
+}
+
+TestHasOwn({
+ hasOwn: function(k) { key = k; return k < "z" }
+})
+
+TestHasOwn({
+ hasOwn: function(k) { return this.hasOwn2(k) },
+ hasOwn2: function(k) { key = k; return k < "z" }
+})
+
+TestHasOwn({
+ getOwnPropertyDescriptor: function(k) {
+ key = k; return k < "z" ? {value: 42} : void 0
+ }
+})
+
+TestHasOwn({
+ getOwnPropertyDescriptor: function(k) {
+ return this.getOwnPropertyDescriptor2(k)
+ },
+ getOwnPropertyDescriptor2: function(k) {
+ key = k; return k < "z" ? {value: 42} : void 0
+ }
+})
+
+TestHasOwn({
+ getOwnPropertyDescriptor: function(k) {
+ key = k; return k < "z" ? {get value() { return 42 }} : void 0
+ }
+})
+
+TestHasOwn({
+ hasOwn: undefined,
+ getOwnPropertyDescriptor: function(k) {
+ key = k; return k < "z" ? {value: 42} : void 0
+ }
+})
+
+TestHasOwn(Proxy.create({
+ get: function(pr, pk) {
+ return function(k) { key = k; return k < "z" }
+ }
+}))
+
+
+
+// Instanceof (instanceof)
function TestInstanceof() {
var o = {}
@@ -514,7 +602,7 @@ TestInstanceof()
-// Prototype (Object.getPrototypeOf).
+// Prototype (Object.getPrototypeOf, Object.prototype.isPrototypeOf).
function TestPrototype() {
var o = {}
@@ -528,6 +616,32 @@ function TestPrototype() {
assertSame(Object.getPrototypeOf(p2), o)
assertSame(Object.getPrototypeOf(p3), p2)
assertSame(Object.getPrototypeOf(p4), null)
+
+ assertTrue(Object.prototype.isPrototypeOf(o))
+ assertFalse(Object.prototype.isPrototypeOf(p1))
+ assertTrue(Object.prototype.isPrototypeOf(p2))
+ assertTrue(Object.prototype.isPrototypeOf(p3))
+ assertFalse(Object.prototype.isPrototypeOf(p4))
+ assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o))
+ assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p1))
+ assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p2))
+ assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p3))
+ assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p4))
+ assertFalse(Object.prototype.isPrototypeOf.call(o, o))
+ assertFalse(Object.prototype.isPrototypeOf.call(o, p1))
+ assertTrue(Object.prototype.isPrototypeOf.call(o, p2))
+ assertTrue(Object.prototype.isPrototypeOf.call(o, p3))
+ assertFalse(Object.prototype.isPrototypeOf.call(o, p4))
+ assertFalse(Object.prototype.isPrototypeOf.call(p1, p1))
+ assertFalse(Object.prototype.isPrototypeOf.call(p1, o))
+ assertFalse(Object.prototype.isPrototypeOf.call(p1, p2))
+ assertFalse(Object.prototype.isPrototypeOf.call(p1, p3))
+ assertFalse(Object.prototype.isPrototypeOf.call(p1, p4))
+ assertFalse(Object.prototype.isPrototypeOf.call(p2, p1))
+ assertFalse(Object.prototype.isPrototypeOf.call(p2, p2))
+ assertTrue(Object.prototype.isPrototypeOf.call(p2, p3))
+ assertFalse(Object.prototype.isPrototypeOf.call(p2, p4))
+ assertFalse(Object.prototype.isPrototypeOf.call(p3, p2))
}
TestPrototype()
@@ -544,13 +658,16 @@ function TestPropertyNames(names, handler) {
TestPropertyNames([], {
getOwnPropertyNames: function() { return [] }
})
+
TestPropertyNames(["a", "zz", " ", "0"], {
getOwnPropertyNames: function() { return ["a", "zz", " ", 0] }
})
+
TestPropertyNames(["throw", "function "], {
getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
getOwnPropertyNames2: function() { return ["throw", "function "] }
})
+
TestPropertyNames(["[object Object]"], {
get getOwnPropertyNames() {
return function() { return [{}] }
@@ -566,22 +683,27 @@ function TestKeys(names, handler) {
TestKeys([], {
keys: function() { return [] }
})
+
TestKeys(["a", "zz", " ", "0"], {
keys: function() { return ["a", "zz", " ", 0] }
})
+
TestKeys(["throw", "function "], {
keys: function() { return this.keys2() },
keys2: function() { return ["throw", "function "] }
})
+
TestKeys(["[object Object]"], {
get keys() {
return function() { return [{}] }
}
})
+
TestKeys(["a", "0"], {
getOwnPropertyNames: function() { return ["a", 23, "zz", "", 0] },
getOwnPropertyDescriptor: function(k) { return {enumerable: k.length == 1} }
})
+
TestKeys(["23", "zz", ""], {
getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
getOwnPropertyNames2: function() { return ["a", 23, "zz", "", 0] },
@@ -590,6 +712,7 @@ TestKeys(["23", "zz", ""], {
},
getOwnPropertyDescriptor2: function(k) { return {enumerable: k.length != 1} }
})
+
TestKeys(["a", "b", "c", "5"], {
get getOwnPropertyNames() {
return function() { return ["0", 4, "a", "b", "c", 5] }
@@ -598,6 +721,7 @@ TestKeys(["a", "b", "c", "5"], {
return function(k) { return {enumerable: k >= "44"} }
}
})
+
TestKeys([], {
get getOwnPropertyNames() {
return function() { return ["a", "b", "c"] }
@@ -661,6 +785,7 @@ function TestFix(names, handler) {
TestFix([], {
fix: function() { return {} }
})
+
TestFix(["a", "b", "c", "d", "zz"], {
fix: function() {
return {
@@ -672,12 +797,14 @@ TestFix(["a", "b", "c", "d", "zz"], {
}
}
})
+
TestFix(["a"], {
fix: function() { return this.fix2() },
fix2: function() {
return {a: {value: 4, writable: true, configurable: true, enumerable: true}}
}
})
+
TestFix(["b"], {
get fix() {
return function() {
@@ -685,3 +812,87 @@ TestFix(["b"], {
}
}
})
+
+
+
+// String conversion (Object.prototype.toString, Object.prototype.toLocaleString)
+
+var key
+function TestToString(handler) {
+ var o = Proxy.create(handler)
+ key = ""
+ assertEquals("[object Object]", Object.prototype.toString.call(o))
+ assertEquals("", key)
+ assertEquals("my_proxy", Object.prototype.toLocaleString.call(o))
+ assertEquals("toString", key)
+}
+
+TestToString({
+ get: function(r, k) { key = k; return function() { return "my_proxy" } }
+})
+
+TestToString({
+ get: function(r, k) { return this.get2(r, k) },
+ get2: function(r, k) { key = k; return function() { return "my_proxy" } }
+})
+
+TestToString(Proxy.create({
+ get: function(pr, pk) {
+ return function(r, k) { key = k; return function() { return "my_proxy" } }
+ }
+}))
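
The asymmetry being tested: Object.prototype.toString ignores the handler and reports the default "[object Object]", whereas Object.prototype.toLocaleString calls this.toString(), which does route through the proxy's get trap (hence key ends up as "toString"). Schematically:

var p = Proxy.create({
  get: function(r, k) { return function() { return "my_proxy" } }
})
print(Object.prototype.toString.call(p))        // "[object Object]", no trap hit
print(Object.prototype.toLocaleString.call(p))  // "my_proxy", via get("toString")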
+
+
+
+// Value conversion (Object.prototype.valueOf)
+
+function TestValueOf(handler) {
+ var o = Proxy.create(handler)
+ assertSame(o, Object.prototype.valueOf.call(o))
+}
+
+TestValueOf({})
+
+
+
+// Enumerability (Object.prototype.propertyIsEnumerable)
+
+var key
+function TestIsEnumerable(handler) {
+ var o = Proxy.create(handler)
+ assertTrue(Object.prototype.propertyIsEnumerable.call(o, "a"))
+ assertEquals("a", key)
+ assertTrue(Object.prototype.propertyIsEnumerable.call(o, 2))
+ assertEquals("2", key)
+ assertFalse(Object.prototype.propertyIsEnumerable.call(o, "z"))
+ assertEquals("z", key)
+}
+
+TestIsEnumerable({
+ getOwnPropertyDescriptor: function(k) {
+ key = k; return {enumerable: k < "z", configurable: true}
+ },
+})
+
+TestIsEnumerable({
+ getOwnPropertyDescriptor: function(k) {
+ return this.getOwnPropertyDescriptor2(k)
+ },
+ getOwnPropertyDescriptor2: function(k) {
+ key = k; return {enumerable: k < "z", configurable: true}
+ },
+})
+
+TestIsEnumerable({
+ getOwnPropertyDescriptor: function(k) {
+ key = k; return {get enumerable() { return k < "z" }, configurable: true}
+ },
+})
+
+TestIsEnumerable(Proxy.create({
+ get: function(pr, pk) {
+ return function(k) {
+ key = k; return {enumerable: k < "z", configurable: true}
+ }
+ }
+}))
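
propertyIsEnumerable is another derived trap: the result is presumably read off the enumerable attribute of whatever getOwnPropertyDescriptor returns, which is why every handler variant above only has to produce a descriptor. In short:

var p = Proxy.create({
  getOwnPropertyDescriptor: function(k) {
    return {enumerable: k < "z", configurable: true}
  }
})
print(Object.prototype.propertyIsEnumerable.call(p, "a"))  // true
print(Object.prototype.propertyIsEnumerable.call(p, "z"))  // false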
diff --git a/deps/v8/test/mjsunit/math-floor.js b/deps/v8/test/mjsunit/math-floor.js
index 11f4cd789c..c2992031f6 100644
--- a/deps/v8/test/mjsunit/math-floor.js
+++ b/deps/v8/test/mjsunit/math-floor.js
@@ -51,6 +51,17 @@ function test() {
testFloor(-Infinity, -Infinity);
testFloor(NaN, NaN);
+ // Ensure that a negative zero coming from Math.floor is properly handled
+ // by other operations.
+ function ifloor(x) {
+ return 1 / Math.floor(x);
+ }
+ assertEquals(-Infinity, ifloor(-0));
+ assertEquals(-Infinity, ifloor(-0));
+ assertEquals(-Infinity, ifloor(-0));
+ %OptimizeFunctionOnNextCall(ifloor);
+ assertEquals(-Infinity, ifloor(-0));
+
testFloor(0, 0.1);
testFloor(0, 0.49999999999999994);
testFloor(0, 0.5);
@@ -129,3 +140,19 @@ function test() {
for (var i = 0; i < 500; i++) {
test();
}
+
+
+// Regression test for a bug where a negative zero coming from Math.floor
+// was not properly handled by other operations.
+function floorsum(i, n) {
+ var ret = Math.floor(n);
+ while (--i > 0) {
+ ret += Math.floor(n);
+ }
+ return ret;
+}
+assertEquals(-0, floorsum(1, -0));
+%OptimizeFunctionOnNextCall(floorsum);
+// The optimized function will deopt. Run it with enough iterations to try
+// to optimize via OSR (triggering the bug).
+assertEquals(-0, floorsum(100000, -0));
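
Both additions lean on the one observable difference between -0 and +0: division. Math.floor(-0) must stay -0, and 1 / -0 is -Infinity while 1 / 0 is Infinity, which is exactly what ifloor probes. The standard detection idiom, as a standalone sketch:

function isMinusZero(x) {
  // -0 and +0 compare equal; only their reciprocals differ.
  return x === 0 && 1 / x === -Infinity;
}
print(isMinusZero(Math.floor(-0)));  // true
print(isMinusZero(Math.floor(0)));   // false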
diff --git a/deps/v8/test/mjsunit/math-round.js b/deps/v8/test/mjsunit/math-round.js
index 1366557f6b..973040d5c0 100644
--- a/deps/v8/test/mjsunit/math-round.js
+++ b/deps/v8/test/mjsunit/math-round.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,6 +44,21 @@ testRound(Infinity, Infinity);
testRound(-Infinity, -Infinity);
testRound(NaN, NaN);
+// Regression test for a bug where a negative zero coming from Math.round
+// was not properly handled by other operations.
+function roundsum(i, n) {
+ var ret = Math.round(n);
+ while (--i > 0) {
+ ret += Math.round(n);
+ }
+ return ret;
+}
+assertEquals(-0, roundsum(1, -0));
+%OptimizeFunctionOnNextCall(roundsum);
+// The optimized function will deopt. Run it with enough iterations to try
+// to optimize via OSR (triggering the bug).
+assertEquals(-0, roundsum(100000, -0));
+
testRound(1, 0.5);
testRound(1, 0.7);
testRound(1, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-1341167.js b/deps/v8/test/mjsunit/regress/regress-1563.js
index 194a7b886a..c25b6c7f63 100644
--- a/deps/v8/test/mjsunit/regress/regress-1341167.js
+++ b/deps/v8/test/mjsunit/regress/regress-1563.js
@@ -25,9 +25,20 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Make sure that 'this' is bound to the global object when using
-// execScript.
+// Flags: --allow-natives-syntax
-var result;
-execScript("result = this");
-assertTrue(result === this);
+obj = new PixelArray(10);
+
+// Test that undefined gets properly clamped in Crankshafted pixel array
+// assignments.
+function set_pixel(obj, arg) {
+ obj[0] = arg;
+}
+
+set_pixel(obj, 1.5);
+set_pixel(obj, NaN);
+%OptimizeFunctionOnNextCall(set_pixel);
+set_pixel(obj, undefined);
+set_pixel(obj, undefined);
+
+assertEquals(0, obj[0]);
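
For reference, the clamping rules the test depends on: pixel array elements are clamped bytes, so numeric stores are clamped to [0, 255] and rounded, while undefined and NaN must store as 0 even on the optimized path. Roughly (the rounding mode for ties is presumably round-half-to-even):

var px = new PixelArray(4);
px[0] = 300;        print(px[0]);  // 255, clamped from above
px[1] = -5;         print(px[1]);  // 0, clamped from below
px[2] = 1.5;        print(px[2]);  // 2, ties rounding to even
px[3] = undefined;  print(px[3]);  // 0, the case this test pins down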
diff --git a/deps/v8/test/mjsunit/regress/regress-1582.js b/deps/v8/test/mjsunit/regress/regress-1582.js
new file mode 100644
index 0000000000..346d68ac34
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1582.js
@@ -0,0 +1,47 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(restIsArray, rest) {
+ var arr;
+ if (typeof rest === "object" && (rest instanceof Array)) {
+ arr = rest;
+ } else {
+ arr = arguments;
+ }
+ var i = arr.length;
+ while (--i >= 0) arr[i];
+ var arrIsArguments = (arr[1] !== rest);
+ assertEquals(restIsArray, arrIsArguments);
+}
+
+f(false, 'b', 'c');
+f(false, 'b', 'c');
+f(false, 'b', 'c');
+%OptimizeFunctionOnNextCall(f);
+f(true, ['b', 'c']);
diff --git a/deps/v8/test/mjsunit/regress/regress-91008.js b/deps/v8/test/mjsunit/regress/regress-91008.js
new file mode 100644
index 0000000000..d7ea2df9ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-91008.js
@@ -0,0 +1,43 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function testsort(n) {
+ var numbers=new Array(n);
+ for (var i=0;i<n;i++) numbers[i]=i;
+ delete numbers[50];
+ delete numbers[150];
+ delete numbers[25000];
+ delete numbers[n-1];
+ delete numbers[n-2];
+ delete numbers[30];
+ delete numbers[2];
+ delete numbers[1];
+ delete numbers[0];
+ numbers.sort();
+}
+
+testsort(100000)
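
The regression is driven by sorting a large array riddled with holes; per the spec, Array.prototype.sort compacts defined values to the front, places undefineds after them, and deletes the trailing slots so the holes end up at the tail. A small illustration of that ordering:

var a = [3, , 1, undefined, 2];
a.sort();
print(a);       // "1,2,3,,": values, then undefined, then a hole
print(3 in a);  // true, index 3 holds a real undefined
print(4 in a);  // false, the hole migrated to the end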
diff --git a/deps/v8/test/mjsunit/execScript-case-insensitive.js b/deps/v8/test/mjsunit/regress/regress-91010.js
index 468d65747e..a0779999bd 100644
--- a/deps/v8/test/mjsunit/execScript-case-insensitive.js
+++ b/deps/v8/test/mjsunit/regress/regress-91010.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,10 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-var x = 0;
-execScript('x = 1', 'javascript');
-assertEquals(1, x);
-
-execScript('x = 2', 'JavaScript');
-assertEquals(2, x);
-
+try {
+ try {
+ var N = 100*1000;
+ var array = Array(N);
+ for (var i = 0; i != N; ++i)
+ array[i] = i;
+ } catch(ex) {}
+ array.unshift('Kibo');
+} catch(ex) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-91013.js b/deps/v8/test/mjsunit/regress/regress-91013.js
new file mode 100644
index 0000000000..c61e2b1eee
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-91013.js
@@ -0,0 +1,51 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that the KeyedStore stub for the unboxed double array backing store
+// correctly returns the stored value as the result.
+
+// Flags: --allow-natives-syntax --unbox-double-arrays
+
+// Create array with unboxed double array backing store.
+var i = 100000;
+var a = new Array(i);
+for (var j = 0; j < i; j++) {
+ a[j] = 0.5;
+}
+
+assertTrue(%HasFastDoubleElements(a));
+
+// Store some smis into it.
+for (var j = 0; j < 10; j++) {
+ assertEquals(j, a[j] = j);
+}
+
+// Store some heap numbers into it.
+for (var j = 0; j < 10; j++) {
+ var v = j + 0.5;
+ assertEquals(v, a[j] = v);
+}
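
The assertEquals(v, a[j] = v) pattern works because an assignment is an expression whose value is its right-hand side; the bug, per the comment above, was the unboxed-double KeyedStore stub producing something else as that value. In isolation:

var arr = [0.5];            // double-backed under --unbox-double-arrays
var result = (arr[0] = 7);  // the store expression itself must yield 7
print(result);              // 7
print(arr[0]);              // 7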
diff --git a/deps/v8/test/mjsunit/regress/regress-91120.js b/deps/v8/test/mjsunit/regress/regress-91120.js
new file mode 100644
index 0000000000..117acac6cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-91120.js
@@ -0,0 +1,48 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// We intend that the function declaration for g inside catch is hoisted to
+// function f's scope. Invoke it before try/catch, in the try block, in the
+// catch block, after try/catch, and outside f, and verify that it has
+// access to the proper binding of x.
+var x = 'global';
+
+function f() {
+ var x = 'function';
+ assertEquals('function', g());
+ try {
+ assertEquals('function', g());
+ throw 'catch';
+ } catch (x) {
+ function g() { return x; }
+ assertEquals('function', g());
+ }
+ assertEquals('function', g());
+ return g;
+}
+
+assertEquals('function', f()());
diff --git a/deps/v8/test/mjsunit/scope-calls-eval.js b/deps/v8/test/mjsunit/scope-calls-eval.js
new file mode 100644
index 0000000000..4a941aaa94
--- /dev/null
+++ b/deps/v8/test/mjsunit/scope-calls-eval.js
@@ -0,0 +1,65 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests if the information about eval calls in a function is
+// propagated correctly through catch and with blocks.
+
+
+function f1() {
+ var x = 5;
+ function g() {
+ try {
+ throw '';
+ } catch (e) {
+ eval('var x = 3;');
+ }
+ try {
+ throw '';
+ } catch (e) {
+ return x;
+ }
+ }
+ return g();
+}
+
+
+function f2() {
+ var x = 5;
+ function g() {
+ with ({e:42}) {
+ eval('var x = 3;');
+ }
+ with ({e:42}) {
+ return x;
+ }
+ }
+ return g();
+}
+
+
+assertEquals(3, f1());
+assertEquals(3, f2());
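
Both functions rely on the same sloppy-mode rule: a direct eval('var x = ...') declares x in the nearest enclosing function scope, not in the catch or with block it textually sits in, so the later read of x from a sibling block sees 3. Reduced to the essentials:

function h() {
  var x = 5;
  eval('var x = 3;');  // sloppy-mode direct eval declares into h's scope
  return x;
}
print(h());  // 3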
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index 7c6311b1a7..87ed4fae8f 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -145,7 +145,7 @@ class MjsunitTestConfiguration(test.TestConfiguration):
return result
def GetBuildRequirements(self):
- return ['sample', 'sample=shell']
+ return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'mjsunit.status')
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index 351765eec8..feecaec8f0 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -29,12 +29,12 @@
// Flags: --allow-natives-syntax --unbox-double-arrays --expose-gc
var large_array_size = 100000;
-var approx_dict_to_elements_threshold = 75000;
+var approx_dict_to_elements_threshold = 70000;
var name = 0;
function expected_array_value(i) {
- if ((i % 2) == 0) {
+ if ((i % 50) != 0) {
return i;
} else {
return i + 0.5;
@@ -466,3 +466,62 @@ test_for_in();
test_for_in();
test_for_in();
test_for_in();
+
+function test_get_property_names() {
+ names = %GetPropertyNames(large_array3);
+ property_name_count = 0;
+ for (x in names) { property_name_count++; };
+ assertEquals(26, property_name_count);
+}
+
+test_get_property_names();
+test_get_property_names();
+test_get_property_names();
+
+// Test elements getters.
+assertEquals(expected_array_value(10), large_array3[10]);
+assertEquals(expected_array_value(2), large_array3[2]);
+large_array3.__defineGetter__("2", function(){
+ return expected_array_value(10);
+});
+
+function test_getter() {
+ assertEquals(expected_array_value(10), large_array3[10]);
+ assertEquals(expected_array_value(10), large_array3[2]);
+}
+
+test_getter();
+test_getter();
+test_getter();
+%OptimizeFunctionOnNextCall(test_getter);
+test_getter();
+test_getter();
+test_getter();
+
+// Test element setters.
+large_array4 = new Array(large_array_size);
+force_to_fast_double_array(large_array4);
+
+var setter_called = false;
+
+assertEquals(expected_array_value(10), large_array4[10]);
+assertEquals(expected_array_value(2), large_array4[2]);
+large_array4.__defineSetter__("10", function(value){
+ setter_called = true;
+ });
+
+function test_setter() {
+ setter_called = false;
+ large_array4[10] = 119;
+ assertTrue(setter_called);
+ assertEquals(undefined, large_array4[10]);
+ assertEquals(expected_array_value(2), large_array4[2]);
+}
+
+test_setter();
+test_setter();
+test_setter();
+%OptimizeFunctionOnNextCall(test_setter);
+test_setter();
+test_setter();
+test_setter();
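
What the new getter/setter coverage checks: once an element index gains an accessor, the accessor must win over the raw double slot on both the interpreted and the Crankshafted path (installing one presumably forces the array out of its fast double representation internally). The observable contract is just:

var a = [1.5, 2.5];
a.__defineGetter__("0", function() { return 42; });
print(a[0]);  // 42, the accessor wins over the old double slot
print(a[1]);  // 2.5, other elements are unaffected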
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 3728f7900b..587781d11f 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -125,7 +125,7 @@ class MozillaTestConfiguration(test.TestConfiguration):
return tests
def GetBuildRequirements(self):
- return ['sample', 'sample=shell']
+ return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'mozilla.status')
diff --git a/deps/v8/test/sputnik/testcfg.py b/deps/v8/test/sputnik/testcfg.py
index c9eb4f2150..1032c134f6 100644
--- a/deps/v8/test/sputnik/testcfg.py
+++ b/deps/v8/test/sputnik/testcfg.py
@@ -101,7 +101,7 @@ class SputnikTestConfiguration(test.TestConfiguration):
return result
def GetBuildRequirements(self):
- return ['sample', 'sample=shell']
+ return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'sputnik.status')
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index aa1212e48f..9482046034 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -111,7 +111,7 @@ class Test262TestConfiguration(test.TestConfiguration):
return tests
def GetBuildRequirements(self):
- return ['sample', 'sample=shell']
+ return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'test262.status')
diff --git a/deps/v8/tools/oom_dump/README b/deps/v8/tools/oom_dump/README
index 0be75116a0..1d840b9a9c 100644
--- a/deps/v8/tools/oom_dump/README
+++ b/deps/v8/tools/oom_dump/README
@@ -16,7 +16,9 @@ put a soft link into /usr/lib directory).
The next step is to build v8. Note: you should build the x64 version of v8
if you're on a 64-bit platform; otherwise you will get a link error when
-building oom_dump.
+building oom_dump. Also, if you are testing against an older version of Chrome,
+you should build the corresponding version of V8 to make sure that the type-id
+enum has the correct values.
The last step is to build oom_dump itself. The following command should work:
diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py
index ec21ffed94..613261447d 100755
--- a/deps/v8/tools/test.py
+++ b/deps/v8/tools/test.py
@@ -1208,7 +1208,7 @@ def BuildOptions():
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
- result.add_option("--shell", help="Path to V8 shell", default="shell")
+ result.add_option("--shell", help="Path to V8 shell", default="d8")
result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",