author    Ben Noordhuis <info@bnoordhuis.nl>  2015-01-07 18:38:38 +0100
committer Ben Noordhuis <info@bnoordhuis.nl>  2015-01-07 22:11:18 +0100
commit    dad73f645cde6920e79db956e7ef82ed640d7615 (patch)
tree      7ba3f3fc7e0722c5f130065461b7c56f571af383 /deps/v8/src/ic
parent    53ba494537259b18b346dc6150d6a100c557e08f (diff)
deps: upgrade v8 to 3.31.74.1
PR-URL: https://github.com/iojs/io.js/pull/243
Reviewed-By: Fedor Indutny <fedor@indutny.com>
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Diffstat (limited to 'deps/v8/src/ic')
-rw-r--r--  deps/v8/src/ic/access-compiler.h                 |   13
-rw-r--r--  deps/v8/src/ic/arm/handler-compiler-arm.cc       |  120
-rw-r--r--  deps/v8/src/ic/arm/ic-arm.cc                     |   44
-rw-r--r--  deps/v8/src/ic/arm/ic-compiler-arm.cc            |   19
-rw-r--r--  deps/v8/src/ic/arm/stub-cache-arm.cc             |   42
-rw-r--r--  deps/v8/src/ic/arm64/handler-compiler-arm64.cc   |  113
-rw-r--r--  deps/v8/src/ic/arm64/ic-arm64.cc                 |   41
-rw-r--r--  deps/v8/src/ic/arm64/ic-compiler-arm64.cc        |   18
-rw-r--r--  deps/v8/src/ic/arm64/stub-cache-arm64.cc         |   32
-rw-r--r--  deps/v8/src/ic/handler-compiler.cc               |   96
-rw-r--r--  deps/v8/src/ic/handler-compiler.h                |   26
-rw-r--r--  deps/v8/src/ic/ia32/handler-compiler-ia32.cc     |  133
-rw-r--r--  deps/v8/src/ic/ia32/ic-compiler-ia32.cc          |   21
-rw-r--r--  deps/v8/src/ic/ia32/ic-ia32.cc                   |   55
-rw-r--r--  deps/v8/src/ic/ia32/stub-cache-ia32.cc           |   39
-rw-r--r--  deps/v8/src/ic/ic-compiler.cc                    |   14
-rw-r--r--  deps/v8/src/ic/ic-inl.h                          |    9
-rw-r--r--  deps/v8/src/ic/ic-state.cc                       |   13
-rw-r--r--  deps/v8/src/ic/ic-state.h                        |    4
-rw-r--r--  deps/v8/src/ic/ic.cc                             |  428
-rw-r--r--  deps/v8/src/ic/ic.h                              |   86
-rw-r--r--  deps/v8/src/ic/mips/handler-compiler-mips.cc     |  117
-rw-r--r--  deps/v8/src/ic/mips/ic-compiler-mips.cc          |   25
-rw-r--r--  deps/v8/src/ic/mips/ic-mips.cc                   |   43
-rw-r--r--  deps/v8/src/ic/mips/stub-cache-mips.cc           |   42
-rw-r--r--  deps/v8/src/ic/mips64/handler-compiler-mips64.cc |  116
-rw-r--r--  deps/v8/src/ic/mips64/ic-compiler-mips64.cc      |   25
-rw-r--r--  deps/v8/src/ic/mips64/ic-mips64.cc               |   43
-rw-r--r--  deps/v8/src/ic/mips64/stub-cache-mips64.cc       |   42
-rw-r--r--  deps/v8/src/ic/ppc/access-compiler-ppc.cc        |   46
-rw-r--r--  deps/v8/src/ic/ppc/handler-compiler-ppc.cc       |  698
-rw-r--r--  deps/v8/src/ic/ppc/ic-compiler-ppc.cc            |  130
-rw-r--r--  deps/v8/src/ic/ppc/ic-ppc.cc                     | 1047
-rw-r--r--  deps/v8/src/ic/ppc/stub-cache-ppc.cc             |  191
-rw-r--r--  deps/v8/src/ic/stub-cache.h                      |    8
-rw-r--r--  deps/v8/src/ic/x64/handler-compiler-x64.cc       |  121
-rw-r--r--  deps/v8/src/ic/x64/ic-compiler-x64.cc            |   20
-rw-r--r--  deps/v8/src/ic/x64/ic-x64.cc                     |   76
-rw-r--r--  deps/v8/src/ic/x64/stub-cache-x64.cc             |   32
-rw-r--r--  deps/v8/src/ic/x87/handler-compiler-x87.cc       |  133
-rw-r--r--  deps/v8/src/ic/x87/ic-compiler-x87.cc            |   19
-rw-r--r--  deps/v8/src/ic/x87/ic-x87.cc                     |   55
-rw-r--r--  deps/v8/src/ic/x87/stub-cache-x87.cc             |   40
43 files changed, 3731 insertions, 704 deletions
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index 928b70b749..fe4369a1ad 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -40,7 +40,10 @@ class PropertyAccessCompiler BASE_EMBEDDED {
kind_(kind),
cache_holder_(cache_holder),
isolate_(isolate),
- masm_(isolate, NULL, 256) {}
+ masm_(isolate, NULL, 256) {
+ // TODO(yangguo): remove this once we can serialize IC stubs.
+ masm_.enable_serializer();
+ }
Code::Kind kind() const { return kind_; }
CacheHolderFlag cache_holder() const { return cache_holder_; }
@@ -51,6 +54,14 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Register receiver() const { return registers_[0]; }
Register name() const { return registers_[1]; }
+ Register slot() const {
+ DCHECK(FLAG_vector_ics);
+ return VectorLoadICDescriptor::SlotRegister();
+ }
+ Register vector() const {
+ DCHECK(FLAG_vector_ics);
+ return VectorLoadICDescriptor::VectorRegister();
+ }
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
Register scratch3() const { return registers_[4]; }
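
The hunk above adds accessors for the two extra registers that vector-based load ICs carry: the feedback vector and the slot index into it, both pinned to fixed registers by the calling-convention descriptor. A standalone sketch of the pattern (plain C++, not V8's MacroAssembler API; the register assignments are hypothetical):

#include <cassert>

enum Register { r0, r1, r2, r3, r4, r5 };

// Stand-in for VectorLoadICDescriptor: the calling convention pins the
// feedback vector and slot to fixed registers.
struct VectorLoadDescriptor {
  static Register SlotRegister() { return r4; }    // hypothetical assignment
  static Register VectorRegister() { return r5; }  // hypothetical assignment
};

struct AccessCompiler {
  bool vector_ics_enabled;  // stand-in for FLAG_vector_ics
  Register slot() const {
    assert(vector_ics_enabled);  // mirrors DCHECK(FLAG_vector_ics)
    return VectorLoadDescriptor::SlotRegister();
  }
  Register vector() const {
    assert(vector_ics_enabled);
    return VectorLoadDescriptor::VectorRegister();
  }
};
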
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index b29e78677f..9905e4eb7d 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -92,6 +92,28 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ push(vector);
+ __ push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ pop(slot);
+ __ pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -140,26 +162,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
+ MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ ldr(scratch, MemOperand(cp, offset));
- __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
- __ Move(ip, function);
- __ cmp(ip, scratch);
- __ b(ne, miss);
-
+ __ ldr(result, MemOperand(cp, offset));
+ __ ldr(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ ldr(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
+ __ ldr(result,
+ FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+ __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
}
@@ -326,18 +338,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ mov(this->name(), Operand(name));
- __ mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ ldr(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ tst(scratch, Operand(Map::Deprecated::kMask));
+ __ b(ne, miss);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ Move(scratch1(), handle(constant, isolate()));
- __ cmp(value_reg, scratch1());
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ ldr(scratch,
+ FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ cmp(value_reg, scratch);
__ b(ne, miss_label);
}
@@ -416,11 +448,11 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
+ __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- } else {
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ b(ne, miss);
}
// Check access rights to the global object. This has to happen after
@@ -438,17 +470,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
- if (load_prototype_from_map) {
- __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ mov(reg, Operand(prototype));
- }
+ __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
@@ -461,7 +483,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ b(ne, miss);
}
// Perform security check for access to the global object.
@@ -481,6 +506,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -579,6 +608,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
@@ -596,6 +626,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
@@ -625,7 +656,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ push(receiver()); // receiver
__ push(holder_reg);
@@ -666,11 +697,15 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- __ mov(result, Operand(cell));
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
__ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -682,6 +717,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret();
FrontendFooter(name, &miss);
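
Two themes run through the ARM changes above: handlers stop embedding heap constants (functions, maps, prototypes) directly in code, and instead either walk to them at run time or reference them through weak cells. GenerateDirectLoadGlobalFunctionPrototype is the clearest example: rather than baking in the global function and its initial map (guarded by a context check), it now chases the chain current context -> native context -> global function -> initial map -> prototype. A standalone sketch of that walk (plain structs, not V8's heap layout):

struct Object {};
struct Map { Object* prototype; };
struct JSFunction { Map* initial_map; };  // global functions always have one
struct NativeContext { JSFunction* slots[64]; };
struct Context { NativeContext* native_context; };

// Mirrors the four dependent loads in the rewritten helper; no function or
// map handle is embedded in the generated code.
Object* DirectLoadGlobalFunctionPrototype(Context* cp, int index) {
  NativeContext* native = cp->native_context;   // GLOBAL_OBJECT + kNativeContextOffset
  JSFunction* function = native->slots[index];  // Context::SlotOffset(index)
  Map* map = function->initial_map;             // kPrototypeOrInitialMapOffset
  return map->prototype;                        // Map::kPrototypeOffset
}
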
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 52aafca7c1..70a5d84e93 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -265,18 +265,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static const Register LoadIC_TempRegister() { return r3; }
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ if (FLAG_vector_ics) {
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+
+ __ Push(receiver, name, slot, vector);
+ } else {
+ __ Push(receiver, name);
+ }
+}
+
+
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, r4, r5);
- __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -403,15 +420,18 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r4, r5);
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -804,8 +824,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(r4, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, r3, r4, r5, r6);
// Cache miss.
__ b(&miss);
@@ -866,8 +886,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
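
The new LoadIC_PushArgs helper above decides how many arguments travel to the miss handler: two (receiver, name) classically, four (receiver, name, slot, vector) when --vector-ics is on, and the tail call's argument count must agree. A trivial standalone sketch of that contract (hypothetical names, ints standing in for tagged values):

#include <vector>

void PushMissArgs(std::vector<int>& stack, bool vector_ics,
                  int receiver, int name, int slot, int vector) {
  stack.push_back(receiver);
  stack.push_back(name);
  if (vector_ics) {  // mirrors FLAG_vector_ics
    stack.push_back(slot);
    stack.push_back(vector);
  }
  // The runtime entry must pop exactly this many: vector_ics ? 4 : 2,
  // matching `int arg_count = FLAG_vector_ics ? 4 : 2;` above.
}
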
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
index 7bef56e94d..b44702ba3d 100644
--- a/deps/v8/src/ic/arm/ic-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc
@@ -41,9 +41,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -72,8 +75,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
- __ mov(ip, Operand(map));
- __ cmp(map_reg, ip);
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
@@ -100,16 +103,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
__ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ Register map_reg = scratch1();
+ __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
- __ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(scratch1(), ip);
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (transitioned_maps->at(i).is_null()) {
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
} else {
Label next_map;
__ b(ne, &next_map);
- __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
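
The polymorphic stubs above now compare the receiver's map against weak cells (CmpWeakValue) instead of raw map handles, so a dead map can never match and the compiled handler no longer keeps maps alive. A standalone sketch of the dispatch loop, with std::weak_ptr standing in for V8's WeakCell:

#include <memory>
#include <utility>
#include <vector>

struct Map {};
using Handler = void (*)();

void PolymorphicDispatch(
    const Map* receiver_map,
    const std::vector<std::pair<std::weak_ptr<Map>, Handler>>& cases,
    Handler miss) {
  for (const auto& c : cases) {
    std::shared_ptr<Map> map = c.first.lock();  // CmpWeakValue: a cleared
    if (map && map.get() == receiver_map) {     // cell compares unequal
      c.second();                               // __ Jump(handler, eq)
      return;
    }
  }
  miss();  // no map matched: fall through to the miss label
}
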
diff --git a/deps/v8/src/ic/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/stub-cache-arm.cc
index bc8b0fba84..1d6bd30b76 100644
--- a/deps/v8/src/ic/arm/stub-cache-arm.cc
+++ b/deps/v8/src/ic/arm/stub-cache-arm.cc
@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_ARM
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@@ -94,10 +96,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -109,15 +112,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check scratch, extra and extra2 registers are valid.
DCHECK(!scratch.is(no_reg));
@@ -125,6 +120,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
@@ -147,8 +153,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ and_(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
@@ -157,8 +163,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ and_(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
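
GenerateProbe now also receives the IC kind so that, in debug builds, it can assert the scratch registers don't clobber the vector and slot registers a vector IC still needs on a miss. The probe itself is unchanged: hash into a primary table, rehash into a secondary one, then give up. A standalone sketch of the two-table lookup (table sizes and hash mixing are illustrative, not V8's):

#include <cstdint>

constexpr uint32_t kPrimarySize = 2048;   // illustrative
constexpr uint32_t kSecondarySize = 512;  // illustrative

struct Entry { uint32_t key = 0; void* code = nullptr; };
Entry primary_table[kPrimarySize];
Entry secondary_table[kSecondarySize];

void* Probe(uint32_t name_hash, uint32_t map_hash, uint32_t flags) {
  // Primary probe: mix name, map and flags, mask to the table size.
  uint32_t primary_key = (name_hash + map_hash + flags) % kPrimarySize;
  if (primary_table[primary_key].key == primary_key)
    return primary_table[primary_key].code;
  // Secondary probe: derived from the primary key minus the name hash,
  // mirroring the sub/add rehash in the assembly above.
  uint32_t secondary_key = (primary_key - name_hash + flags) % kSecondarySize;
  if (secondary_table[secondary_key].key == secondary_key)
    return secondary_table[secondary_key].code;
  return nullptr;  // cache miss: caller enters the runtime system
}
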
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 8cef505caa..1c28bf51a2 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -15,6 +15,27 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Push(vector);
+ __ Push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(slot);
+ __ Pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ Drop(2);
+}
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -57,24 +78,15 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
- __ Ldr(scratch, GlobalObjectMemOperand());
- __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ Ldr(scratch, ContextMemOperand(scratch, index));
- __ Cmp(scratch, Operand(function));
- __ B(ne, miss);
-
+ MacroAssembler* masm, int index, Register result, Label* miss) {
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ Ldr(result, ContextMemOperand(result, index));
// Load its initial map. The global functions all have initial maps.
- __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+ __ Ldr(result,
+ FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+ __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
}
@@ -315,11 +327,15 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- __ Mov(result, Operand(cell));
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -329,6 +345,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret();
FrontendFooter(name, &miss);
@@ -370,18 +389,37 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ Mov(this->name(), Operand(name));
- __ Mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ Ldrsw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, miss);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ LoadObject(scratch1(), handle(constant, isolate()));
- __ Cmp(value_reg, scratch1());
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ Ldr(scratch,
+ FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ Cmp(value_reg, scratch);
__ B(ne, miss_label);
}
@@ -457,17 +495,13 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
__ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
Register map_reg = scratch1;
__ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ B(ne, miss);
}
// Check access rights to the global object. This has to happen after
@@ -486,11 +520,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
- if (load_prototype_from_map) {
- __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ Mov(reg, Operand(prototype));
- }
+ __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
@@ -504,7 +534,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Check the holder map.
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ B(ne, miss);
}
// Perform security check for access to the global object.
@@ -525,6 +558,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ B(&success);
__ Bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ Bind(&success);
@@ -637,6 +674,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
@@ -653,6 +691,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();
__ Bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(this->name(), holder_reg, receiver());
} else {
@@ -683,7 +722,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
// Stub never generated for non-global objects that require access checks.
DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 4804a23be7..a01015c186 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -354,12 +354,23 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ASM_LOCATION("LoadIC::GenerateMiss");
- __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(x4, x5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);
// Perform tail call to the entry.
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ if (FLAG_vector_ics) {
+ __ Push(VectorLoadICDescriptor::ReceiverRegister(),
+ VectorLoadICDescriptor::NameRegister(),
+ VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister());
+ } else {
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ }
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -420,15 +431,25 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(x10, x11, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ if (FLAG_vector_ics) {
+ __ Push(VectorLoadICDescriptor::ReceiverRegister(),
+ VectorLoadICDescriptor::NameRegister(),
+ VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister());
+ } else {
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ }
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -837,8 +858,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(x10, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, x3, x4, x5, x6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, x3, x4, x5, x6);
// Cache miss.
__ B(&miss);
@@ -897,8 +918,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, x3, x4, x5, x6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, x3, x4, x5, x6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
index ffc1069f23..a3d0d481fe 100644
--- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -42,9 +42,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -71,8 +74,9 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ __ CmpWeakValue(map_reg, cell, scratch2());
Label try_next;
- __ Cmp(map_reg, Operand(map));
__ B(ne, &try_next);
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
@@ -104,16 +108,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
__ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ Register map_reg = scratch1();
+ __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; i++) {
- __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
-
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+ __ CmpWeakValue(map_reg, cell, scratch2());
Label skip;
__ B(&skip, ne);
if (!transitioned_maps->at(i).is_null()) {
// This argument is used by the handler stub. For example, see
// ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
- __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
}
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ Bind(&skip);
diff --git a/deps/v8/src/ic/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
index 4d31d49882..a9c56a31e7 100644
--- a/deps/v8/src/ic/arm64/stub-cache-arm64.cc
+++ b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_ARM64
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -23,7 +25,7 @@ namespace internal {
//
// 'receiver', 'name' and 'offset' registers are preserved on miss.
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
Register offset, Register scratch, Register scratch2,
Register scratch3) {
@@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -108,6 +111,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
@@ -125,8 +139,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
CountTrailingZeros(kPrimaryTableSize, 64));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary table.
__ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
@@ -134,8 +148,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, kSecondaryTableSize - 1);
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 7f440c07ca..ae977c3915 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -129,11 +129,17 @@ Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
}
-Register PropertyHandlerCompiler::Frontend(Register object_reg,
- Handle<Name> name) {
+Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
Label miss;
- Register reg = FrontendHeader(object_reg, name, &miss);
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
+ Register reg = FrontendHeader(receiver(), name, &miss);
FrontendFooter(name, &miss);
+ // The footer consumes the vector and slot from the stack if miss occurs.
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
return reg;
}
@@ -179,7 +185,7 @@ void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
FieldIndex field) {
- Register reg = Frontend(receiver(), name);
+ Register reg = Frontend(name);
__ Move(receiver(), reg);
LoadFieldStub stub(isolate(), field);
GenerateTailCall(masm(), stub.GetCode());
@@ -189,7 +195,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
int constant_index) {
- Register reg = Frontend(receiver(), name);
+ Register reg = Frontend(name);
__ Move(receiver(), reg);
LoadConstantStub stub(isolate(), constant_index);
GenerateTailCall(masm(), stub.GetCode());
@@ -200,7 +206,14 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
Handle<Name> name) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PushVectorAndSlot();
+ }
NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
GenerateLoadConstant(isolate()->factory()->undefined_value());
FrontendFooter(name, &miss);
return GetCode(kind(), Code::FAST, name);
@@ -209,7 +222,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
- Register reg = Frontend(receiver(), name);
+ Register reg = Frontend(name);
GenerateLoadCallback(reg, callback);
return GetCode(kind(), Code::FAST, name);
}
@@ -218,7 +231,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, const CallOptimization& call_optimization) {
DCHECK(call_optimization.is_simple_api_call());
- Frontend(receiver(), name);
+ Frontend(name);
Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(),
scratch1(), false, 0, NULL);
@@ -226,6 +239,35 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
}
+void NamedLoadHandlerCompiler::InterceptorVectorSlotPush(Register holder_reg) {
+ if (IC::ICUseVector(kind())) {
+ if (holder_reg.is(receiver())) {
+ PushVectorAndSlot();
+ } else {
+ DCHECK(holder_reg.is(scratch1()));
+ PushVectorAndSlot(scratch2(), scratch3());
+ }
+ }
+}
+
+
+void NamedLoadHandlerCompiler::InterceptorVectorSlotPop(Register holder_reg,
+ PopMode mode) {
+ if (IC::ICUseVector(kind())) {
+ if (mode == DISCARD) {
+ DiscardVectorAndSlot();
+ } else {
+ if (holder_reg.is(receiver())) {
+ PopVectorAndSlot();
+ } else {
+ DCHECK(holder_reg.is(scratch1()));
+ PopVectorAndSlot(scratch2(), scratch3());
+ }
+ }
+ }
+}
+
+
Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
LookupIterator* it) {
// So far the most popular follow ups for interceptor loads are FIELD and
@@ -241,7 +283,8 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
case LookupIterator::NOT_FOUND:
break;
case LookupIterator::DATA:
- inline_followup = it->property_details().type() == FIELD;
+ inline_followup =
+ it->property_details().type() == FIELD && !it->is_dictionary_holder();
break;
case LookupIterator::ACCESSOR: {
Handle<Object> accessors = it->GetAccessors();
@@ -255,7 +298,12 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
}
}
- Register reg = Frontend(receiver(), it->name());
+ Label miss;
+ InterceptorVectorSlotPush(receiver());
+ Register reg = FrontendHeader(receiver(), it->name(), &miss);
+ FrontendFooter(it->name(), &miss);
+ InterceptorVectorSlotPop(reg);
+
if (inline_followup) {
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
@@ -273,7 +321,13 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
set_type_for_object(holder());
set_holder(real_named_property_holder);
- Register reg = Frontend(interceptor_reg, it->name());
+
+ Label miss;
+ InterceptorVectorSlotPush(interceptor_reg);
+ Register reg = FrontendHeader(interceptor_reg, it->name(), &miss);
+ FrontendFooter(it->name(), &miss);
+ // We discard the vector and slot now because we don't miss below this point.
+ InterceptorVectorSlotPop(reg, DISCARD);
switch (it->state()) {
case LookupIterator::ACCESS_CHECK:
@@ -300,7 +354,7 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
Handle<Name> name, Handle<JSFunction> getter) {
- Frontend(receiver(), name);
+ Frontend(name);
GenerateLoadViaGetter(masm(), type(), receiver(), getter);
return GetCode(kind(), Code::FAST, name);
}
@@ -311,9 +365,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
Handle<Map> transition, Handle<Name> name) {
Label miss;
- // Ensure no transitions to deprecated maps are followed.
- __ CheckMapDeprecated(transition, scratch1(), &miss);
-
// Check that we are allowed to write this.
bool is_nonexistent = holder()->map() == transition->GetBackPointer();
if (is_nonexistent) {
@@ -332,7 +383,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
}
int descriptor = transition->LastAdded();
- DescriptorArray* descriptors = transition->instance_descriptors();
+ Handle<DescriptorArray> descriptors(transition->instance_descriptors());
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
DCHECK(!representation.IsNone());
@@ -342,9 +393,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
// Call to respective StoreTransitionStub.
if (details.type() == CONSTANT) {
- GenerateConstantCheck(descriptors->GetValue(descriptor), value(), &miss);
-
- GenerateRestoreNameAndMap(name, transition);
+ GenerateRestoreMap(transition, scratch2(), &miss);
+ DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ GenerateConstantCheck(map_reg, descriptor, value(), scratch2(), &miss);
+ GenerateRestoreName(name);
StoreTransitionStub stub(isolate());
GenerateTailCall(masm(), stub.GetCode());
@@ -358,7 +411,8 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
: StoreTransitionStub::StoreMapAndValue;
- GenerateRestoreNameAndMap(name, transition);
+ GenerateRestoreMap(transition, scratch2(), &miss);
+ GenerateRestoreName(name);
StoreTransitionStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
representation, store_mode);
@@ -388,7 +442,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
- Frontend(receiver(), name);
+ Frontend(name);
GenerateStoreViaSetter(masm(), type(), receiver(), setter);
return GetCode(kind(), Code::FAST, name);
@@ -398,7 +452,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
const CallOptimization& call_optimization) {
- Frontend(receiver(), name);
+ Frontend(name);
Register values[] = {value()};
GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
receiver(), scratch1(), true, 1, values);
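
The platform-independent thread of these changes is the stack discipline around Frontend(): when vector ICs are enabled, the vector and slot are pushed before any check that can miss, discarded on the success path, and popped (or discarded) on the miss path so the miss handler sees them again. A standalone sketch of that bracket (plain C++, hypothetical types):

#include <stack>

struct CompilerState {
  std::stack<int> sp;        // stand-in for the machine stack
  int vector = 0, slot = 0;  // stand-ins for the two registers
  bool vector_ics = true;    // stand-in for IC::ICUseVector(kind())
};

// Returns true when the frontend checks succeed.
bool Frontend(CompilerState& s, bool (*frontend_checks)(CompilerState&)) {
  if (s.vector_ics) {        // PushVectorAndSlot()
    s.sp.push(s.vector);
    s.sp.push(s.slot);
  }
  bool ok = frontend_checks(s);
  if (s.vector_ics) {
    if (ok) {                // DiscardVectorAndSlot(): drop both values
      s.sp.pop();
      s.sp.pop();
    } else {                 // PopVectorAndSlot() before the miss builtin
      s.slot = s.sp.top(); s.sp.pop();
      s.vector = s.sp.top(); s.sp.pop();
    }
  }
  return ok;
}
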
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 4fedd4e8d7..bed65775c8 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -38,10 +38,21 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
- Register Frontend(Register object_reg, Handle<Name> name);
+ // Frontend loads from receiver(), returns holder register which may be
+ // different.
+ Register Frontend(Handle<Name> name);
void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
Register scratch1, Register scratch2);
+ // When FLAG_vector_ics is true, handlers that have the possibility of missing
+ // will need to save and pass these to miss handlers.
+ void PushVectorAndSlot() { PushVectorAndSlot(vector(), slot()); }
+ void PushVectorAndSlot(Register vector, Register slot);
+ void PopVectorAndSlot() { PopVectorAndSlot(vector(), slot()); }
+ void PopVectorAndSlot(Register vector, Register slot);
+
+ void DiscardVectorAndSlot();
+
// TODO(verwaest): Make non-static.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
@@ -170,6 +181,12 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
Handle<ExecutableAccessorInfo> callback);
void GenerateLoadCallback(const CallOptimization& call_optimization,
Handle<Map> receiver_map);
+
+ // Helper emits no code if vector-ics are disabled.
+ void InterceptorVectorSlotPush(Register holder_reg);
+ enum PopMode { POP, DISCARD };
+ void InterceptorVectorSlotPop(Register holder_reg, PopMode mode = POP);
+
void GenerateLoadInterceptor(Register holder_reg);
void GenerateLoadInterceptorWithFollowup(LookupIterator* it,
Register holder_reg);
@@ -230,9 +247,12 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
void GenerateRestoreName(Label* label, Handle<Name> name);
private:
- void GenerateRestoreNameAndMap(Handle<Name> name, Handle<Map> transition);
+ void GenerateRestoreName(Handle<Name> name);
+ void GenerateRestoreMap(Handle<Map> transition, Register scratch,
+ Label* miss);
- void GenerateConstantCheck(Object* constant, Register value_reg,
+ void GenerateConstantCheck(Register map_reg, int descriptor,
+ Register value_reg, Register scratch,
Label* miss_label);
void GenerateFieldTypeChecks(HeapType* field_type, Register value_reg,
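
The header now splits GenerateRestoreNameAndMap into GenerateRestoreName plus GenerateRestoreMap, and GenerateConstantCheck no longer takes the constant object itself: the expected value is fetched from the transition map's descriptor array at run time, so nothing object-specific is embedded in the handler. A standalone sketch of that check (plain structs, not V8's layout):

#include <vector>

struct Object {};
struct Map {
  // Stand-in for the instance descriptors' value slots.
  std::vector<Object*> descriptor_values;
};

// Mirrors LoadInstanceDescriptors + the load at GetValueOffset(descriptor):
// compare the incoming value against the descriptor's recorded constant.
bool ConstantCheck(const Map* map, int descriptor, const Object* value) {
  return map->descriptor_values[descriptor] == value;  // ne -> miss_label
}
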
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 3df2140f57..90512e9bc8 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -47,6 +47,28 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ push(vector);
+ __ push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ pop(slot);
+ __ pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ add(esp, Immediate(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -88,28 +110,23 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(masm->isolate()->native_context()->get(index)));
- // Check we're still in the same context.
- Register scratch = prototype;
+ MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(scratch, Operand(esi, offset));
- __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
- __ j(not_equal, miss);
-
+ __ mov(result, Operand(esi, offset));
+ __ mov(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+ __ mov(result, Operand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+ __ mov(result,
+ FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
}
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
+ DCHECK(!FLAG_vector_ics);
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(eax, scratch1);
__ ret(0);
@@ -329,17 +346,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ mov(this->name(), Immediate(name));
- __ mov(StoreTransitionDescriptor::MapRegister(), Immediate(transition));
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
+ __ and_(scratch, Immediate(Map::Deprecated::kMask));
+ __ j(not_zero, miss);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ CmpObject(value_reg, handle(constant, isolate()));
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ mov(scratch,
+ FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ cmp(value_reg, scratch);
__ j(not_equal, miss_label);
}
@@ -415,14 +453,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map = in_new_space || depth == 1;
+ Register map_reg = scratch1;
+ __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ j(not_equal, miss);
}
// Check access rights to the global object. This has to happen after
@@ -432,24 +468,15 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// global proxy (as opposed to using slow ICs). See corresponding code
// in LookupForRead().
if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ __ CheckAccessGlobalProxy(reg, map_reg, scratch2, miss);
+ // Restore map_reg.
+ __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
} else if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
}
-
- if (load_prototype_from_map) {
- // Save the map in scratch1 for later.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
-
reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (load_prototype_from_map) {
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- __ mov(reg, prototype);
- }
+ __ mov(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
@@ -462,7 +489,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ j(not_equal, miss);
}
// Perform security check for access to the global object.
@@ -482,6 +512,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -581,7 +615,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
}
__ push(holder_reg);
__ push(this->name());
-
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
@@ -605,6 +639,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
}
+ InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
@@ -637,7 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
@@ -683,16 +718,15 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
-
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- if (masm()->serializer_enabled()) {
- __ mov(result, Immediate(cell));
- __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
- } else {
- __ mov(result, Operand::ForCell(cell));
- }
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
+ __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (is_configurable) {
@@ -706,6 +740,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
// The code above already loads the result into the return register.
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ ret(0);
FrontendFooter(name, &miss);
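For context on the WeakCell pattern this hunk introduces: the handler no longer embeds the PropertyCell (or reads it through Operand::ForCell); it loads the cell through a weak reference and falls back to the miss label once the GC has cleared that reference. A minimal standalone model, assuming simplified heap types (PropertyCell and WeakCell here are illustrative structs, not the V8 classes):

    #include <cstdio>

    struct PropertyCell { int value; };

    struct WeakCell {
      PropertyCell* target;  // null once the GC clears it
      bool cleared() const { return target == nullptr; }
    };

    // Returns false to signal "miss": the IC must take the slow path.
    bool LoadGlobal(const WeakCell& cell, int* out) {
      if (cell.cleared()) return false;  // __ LoadWeakValue(..., &miss)
      *out = cell.target->value;         // PropertyCell::kValueOffset load
      return true;
    }

    int main() {
      PropertyCell cell{42};
      WeakCell weak{&cell};
      int v;
      if (LoadGlobal(weak, &v)) printf("loaded %d\n", v);
      weak.target = nullptr;             // simulate GC clearing the cell
      if (!LoadGlobal(weak, &v)) printf("miss: cell was collected\n");
    }

The same LoadWeakValue/miss shape recurs in the MIPS CompileLoadGlobal hunk further down.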
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
index ac42f30bf5..f43b641134 100644
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -44,10 +44,13 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Label miss;
if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
@@ -75,7 +78,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
- __ cmp(map_reg, map);
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
@@ -99,16 +103,19 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_maps->length(); ++i) {
- __ cmp(scratch1(), receiver_maps->at(i));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (transitioned_maps->at(i).is_null()) {
__ j(equal, handler_stubs->at(i));
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
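The transitioning-store dispatch above has the same shape on every architecture touched by this patch. A rough C++ model of its control flow, assuming stand-in types (Map, Entry and DispatchKeyedStore are illustrative, not V8 identifiers): a cleared weak map cell simply never matches, while a cleared transition cell must jump to miss because the handler cannot run without it.

    #include <vector>

    struct Map {};
    using Handler = void (*)();

    struct Entry {
      Map* map;               // weak: nullptr once collected, never matches
      Map** transition_slot;  // weak transition target, or nullptr if none
      Handler handler;
    };

    // Returns false on miss: no map matched, or a required transition map
    // was collected (LoadWeakValue jumps to &miss in that case).
    bool DispatchKeyedStore(Map* receiver_map, const std::vector<Entry>& entries,
                            Map** transition_map_reg) {
      for (const Entry& e : entries) {
        if (e.map != receiver_map) continue;           // CmpWeakValue mismatch
        if (e.transition_slot != nullptr) {
          if (*e.transition_slot == nullptr) return false;
          *transition_map_reg = *e.transition_slot;    // fill transition_map()
        }
        e.handler();                                   // tail-jump to handler
        return true;
      }
      return false;                                    // fall through to miss
    }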
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index a622ba4faf..9822f26ced 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -692,8 +692,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, ebx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
// Cache miss.
__ jmp(&miss);
@@ -767,31 +767,52 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
+ if (FLAG_vector_ics) {
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
+ !edi.is(vector));
+
+ __ pop(edi);
+ __ push(receiver);
+ __ push(name);
+ __ push(slot);
+ __ push(vector);
+ __ push(edi);
+ } else {
+ DCHECK(!ebx.is(receiver) && !ebx.is(name));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(ebx);
+ }
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
__ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
- LoadIC_PushArgs(masm);
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(ebx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
@@ -807,13 +828,21 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
- LoadIC_PushArgs(masm);
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(ebx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
@@ -825,7 +854,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
index c1f7c9ad31..cb560f12d7 100644
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register name, Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {
@@ -56,6 +58,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
+ if (IC::ICUseVector(ic_kind)) {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ __ pop(VectorLoadICDescriptor::VectorRegister());
+ __ pop(VectorLoadICDescriptor::SlotRegister());
+ }
+
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
@@ -100,6 +109,17 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+ if (IC::ICUseVector(ic_kind)) {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!offset.is(vector) && !offset.is(slot));
+
+ __ pop(vector);
+ __ pop(slot);
+ }
+
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
@@ -113,10 +133,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
@@ -159,8 +180,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
- offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
+ receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
@@ -172,8 +193,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
- ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
- offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
+ receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
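A schematic of the two-level probe these hunks thread the IC kind through, as self-contained C++. The hashing below is deliberately simplified (the real code mixes the name hash, receiver map, and code flags with specific shifts and masks), so treat the arithmetic as illustrative, not as the actual stub-cache formula:

    #include <cstdint>

    struct Entry { uint32_t key; void* handler; };

    static const int kPrimarySize = 2048;
    static const int kSecondarySize = 512;
    static Entry primary[kPrimarySize];
    static Entry secondary[kSecondarySize];

    // nullptr means "fall through and let the caller enter the runtime".
    void* Probe(uint32_t name_hash, uint32_t map_bits, uint32_t flags) {
      uint32_t key = name_hash + map_bits;                // simplified mixing
      Entry* p = &primary[(key ^ flags) % kPrimarySize];  // primary probe
      if (p->key == key) return p->handler;
      uint32_t key2 = key - name_hash + flags;            // simplified re-hash
      Entry* s = &secondary[key2 % kSecondarySize];       // secondary probe
      if (s->key == key2) return s->handler;
      return nullptr;
    }

The new ic_kind parameter exists only so the hit paths know whether a vector and slot are sitting on the stack and must be popped before jumping to the handler.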
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index 1f6eb4e079..e087acfcb0 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -63,6 +63,9 @@ Handle<Code> PropertyICCompiler::ComputeMonomorphic(
KeyedStoreIC::IcCheckTypeField::update(extra_ic_state, PROPERTY);
DCHECK(STANDARD_STORE ==
KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
+ } else if (kind == Code::KEYED_LOAD_IC) {
+ extra_ic_state = KeyedLoadIC::IcCheckTypeField::update(extra_ic_state,
+ PROPERTY);
}
Handle<Code> ic;
@@ -86,6 +89,7 @@ Handle<Code> PropertyICCompiler::ComputeMonomorphic(
Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
Handle<Map> receiver_map) {
Isolate* isolate = receiver_map->GetIsolate();
+ DCHECK(KeyedLoadIC::GetKeyType(kNoExtraICState) == ELEMENT);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string();
@@ -240,7 +244,8 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
}
Code::FindAndReplacePattern pattern;
- pattern.Add(isolate->factory()->meta_map(), receiver_map);
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ pattern.Add(isolate->factory()->meta_map(), cell);
Handle<Code> ic = stub->GetCodeCopy(pattern);
if (!receiver_map->is_dictionary_map()) {
@@ -255,6 +260,7 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
MapHandleList* receiver_maps) {
Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+ DCHECK(KeyedLoadIC::GetKeyType(kNoExtraICState) == ELEMENT);
Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
Handle<PolymorphicCodeCache> cache =
isolate->factory()->polymorphic_code_cache();
@@ -374,7 +380,6 @@ Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
Code::Flags flags =
Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
- IC::RegisterWeakMapDependency(code);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
return code;
}
@@ -445,7 +450,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
stub = StoreElementStub(isolate(), elements_kind).GetCode();
}
- __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+
+ __ DispatchWeakMap(receiver(), scratch1(), scratch2(), cell, stub,
+ DO_SMI_CHECK);
TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
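DispatchWeakMap replaces DispatchMap here: same smi check and map comparison, but the expected map is held weakly, so a collected map now routes to the miss builtin instead of being kept alive by the stub. Assumed semantics as a sketch (HeapObject and Map are stand-ins; a null pointer models both a smi receiver and a cleared cell):

    struct Map {};
    struct HeapObject { Map* map; };

    // Returns false when the caller should TailCallBuiltin(..., Miss).
    bool DispatchWeakMap(HeapObject* receiver, Map* weak_expected,
                         void (*stub)(HeapObject*)) {
      if (receiver == nullptr) return false;       // DO_SMI_CHECK stand-in
      if (weak_expected == nullptr) return false;  // cleared weak cell
      if (receiver->map != weak_expected) return false;
      stub(receiver);                              // jump to element store stub
      return true;
    }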
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 22f66d0409..58d7d4608f 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -96,6 +96,12 @@ Code* IC::GetTargetAtAddress(Address address,
void IC::SetTargetAtAddress(Address address, Code* target,
ConstantPoolArray* constant_pool) {
DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
+
+ // Don't use this for load_ics when --vector-ics is turned on.
+ DCHECK(!(FLAG_vector_ics && target->is_inline_cache_stub()) ||
+ (target->kind() != Code::LOAD_IC &&
+ target->kind() != Code::KEYED_LOAD_IC));
+
Heap* heap = target->GetHeap();
Code* old_target = GetTargetAtAddress(address, constant_pool);
#ifdef DEBUG
@@ -119,9 +125,6 @@ void IC::SetTargetAtAddress(Address address, Code* target,
void IC::set_target(Code* code) {
-#ifdef VERIFY_HEAP
- code->VerifyEmbeddedObjectsDependency();
-#endif
SetTargetAtAddress(address(), code, constant_pool());
target_set_ = true;
}
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index 18ea7f3105..9c883ad5e3 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -17,19 +17,6 @@ void ICUtility::Clear(Isolate* isolate, Address address,
}
-// static
-template <class Nexus>
-void ICUtility::Clear(Isolate* isolate, Code::Kind kind, Code* host,
- Nexus* nexus) {
- IC::Clear<Nexus>(isolate, kind, host, nexus);
-}
-
-
-// Force instantiation of template instances for vector-based IC clearing.
-template void ICUtility::Clear<CallICNexus>(Isolate*, Code::Kind, Code*,
- CallICNexus*);
-
-
CallICState::CallICState(ExtraICState extra_ic_state)
: argc_(ArgcBits::decode(extra_ic_state)),
call_type_(CallTypeBits::decode(extra_ic_state)) {}
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 9bb877a383..72fc865c68 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -19,10 +19,6 @@ class ICUtility : public AllStatic {
// Clear the inline cache to initial state.
static void Clear(Isolate* isolate, Address address,
ConstantPoolArray* constant_pool);
- // Clear a vector-based inline cache to initial state.
- template <class Nexus>
- static void Clear(Isolate* isolate, Code::Kind kind, Code* host,
- Nexus* nexus);
};
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index a9369ed4e0..48cef68f5a 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -258,7 +258,11 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
Handle<String> name) {
if (!IsNameCompatibleWithPrototypeFailure(name)) return false;
Handle<Map> receiver_map = TypeToMap(*receiver_type(), isolate());
- maybe_handler_ = target()->FindHandlerForMap(*receiver_map);
+ if (UseVector()) {
+ maybe_handler_ = nexus()->FindHandlerForMap(receiver_map);
+ } else {
+ maybe_handler_ = target()->FindHandlerForMap(*receiver_map);
+ }
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
@@ -310,7 +314,8 @@ bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
if (target()->is_keyed_stub()) {
// Determine whether the failure is due to a name failure.
if (!name->IsName()) return false;
- Name* stub_name = target()->FindFirstName();
+ Name* stub_name =
+ UseVector() ? nexus()->FindFirstName() : target()->FindFirstName();
if (*name != stub_name) return false;
}
@@ -339,7 +344,7 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
// an inline cache miss for the builtins object after lazily loading
// JavaScript builtins, we return uninitialized as the state to
// force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
+ if (receiver->IsJSBuiltinsObject()) state_ = PREMONOMORPHIC;
}
@@ -452,7 +457,7 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host,
void IC::PostPatching(Address address, Code* target, Code* old_target) {
// Type vector based ICs update these statistics at a different time because
// they don't always patch on state change.
- if (target->kind() == Code::CALL_IC) return;
+ if (ICUseVector(target->kind())) return;
Isolate* isolate = target->GetHeap()->isolate();
State old_state = UNINITIALIZED;
@@ -469,42 +474,6 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
-void IC::RegisterWeakMapDependency(Handle<Code> stub) {
- if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_ic &&
- stub->CanBeWeakStub()) {
- DCHECK(!stub->is_weak_stub());
- MapHandleList maps;
- stub->FindAllMaps(&maps);
- if (maps.length() == 1 && stub->IsWeakObjectInIC(*maps.at(0))) {
- Map::AddDependentIC(maps.at(0), stub);
- stub->mark_as_weak_stub();
- if (FLAG_enable_ool_constant_pool) {
- stub->constant_pool()->set_weak_object_state(
- ConstantPoolArray::WEAK_OBJECTS_IN_IC);
- }
- }
- }
-}
-
-
-void IC::InvalidateMaps(Code* stub) {
- DCHECK(stub->is_weak_stub());
- stub->mark_as_invalidated_weak_stub();
- Isolate* isolate = stub->GetIsolate();
- Heap* heap = isolate->heap();
- Object* undefined = heap->undefined_value();
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(stub, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
- }
- }
- CpuFeatures::FlushICache(stub->instruction_start(), stub->instruction_size());
-}
-
-
void IC::Clear(Isolate* isolate, Address address,
ConstantPoolArray* constant_pool) {
Code* target = GetTargetAtAddress(address, constant_pool);
@@ -514,8 +483,10 @@ void IC::Clear(Isolate* isolate, Address address,
switch (target->kind()) {
case Code::LOAD_IC:
+ if (FLAG_vector_ics) return;
return LoadIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_LOAD_IC:
+ if (FLAG_vector_ics) return;
return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
case Code::STORE_IC:
return StoreIC::Clear(isolate, address, target, constant_pool);
@@ -537,23 +508,9 @@ void IC::Clear(Isolate* isolate, Address address,
}
-template <class Nexus>
-void IC::Clear(Isolate* isolate, Code::Kind kind, Code* host, Nexus* nexus) {
- switch (kind) {
- case Code::CALL_IC:
- return CallIC::Clear(isolate, host, nexus);
- default:
- UNREACHABLE();
- }
-}
-
-
-// Force instantiation of template instances for vector-based IC clearing.
-template void IC::Clear(Isolate*, Code::Kind, Code*, CallICNexus*);
-
-
void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
+ DCHECK(!FLAG_vector_ics);
if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
@@ -563,6 +520,17 @@ void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
}
+void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) {
+ if (IsCleared(nexus)) return;
+ // Make sure to also clear the map used in inline fast cases. If we
+ // do not clear these maps, cached code can keep objects alive
+ // through the embedded maps.
+ State state = nexus->StateFromFeedback();
+ nexus->ConfigurePremonomorphic();
+ OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+}
+
+
void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
// Determine our state.
Object* feedback = nexus->vector()->Get(nexus->slot());
@@ -578,6 +546,7 @@ void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
void LoadIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
+ DCHECK(!FLAG_vector_ics);
if (IsCleared(target)) return;
Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC,
target->extra_ic_state());
@@ -585,6 +554,14 @@ void LoadIC::Clear(Isolate* isolate, Address address, Code* target,
}
+void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
+ if (IsCleared(nexus)) return;
+ State state = nexus->StateFromFeedback();
+ nexus->ConfigurePremonomorphic();
+ OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+}
+
+
void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
@@ -635,6 +612,69 @@ static bool MigrateDeprecated(Handle<Object> object) {
}
+void IC::ConfigureVectorState(IC::State new_state) {
+ DCHECK(UseVector());
+ if (kind() == Code::LOAD_IC) {
+ LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+ if (new_state == PREMONOMORPHIC) {
+ nexus->ConfigurePremonomorphic();
+ } else if (new_state == MEGAMORPHIC) {
+ nexus->ConfigureMegamorphic();
+ } else {
+ UNREACHABLE();
+ }
+ } else if (kind() == Code::KEYED_LOAD_IC) {
+ KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+ if (new_state == GENERIC) {
+ nexus->ConfigureGeneric();
+ } else if (new_state == PREMONOMORPHIC) {
+ nexus->ConfigurePremonomorphic();
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
+ new_state);
+}
+
+
+void IC::ConfigureVectorState(Handle<Name> name, Handle<HeapType> type,
+ Handle<Code> handler) {
+ DCHECK(UseVector());
+ if (kind() == Code::LOAD_IC) {
+ LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+ nexus->ConfigureMonomorphic(type, handler);
+ } else {
+ DCHECK(kind() == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+ nexus->ConfigureMonomorphic(name, type, handler);
+ }
+
+ OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
+ MONOMORPHIC);
+}
+
+
+void IC::ConfigureVectorState(Handle<Name> name, TypeHandleList* types,
+ CodeHandleList* handlers) {
+ DCHECK(UseVector());
+ if (kind() == Code::LOAD_IC) {
+ LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+ nexus->ConfigurePolymorphic(types, handlers);
+ } else {
+ DCHECK(kind() == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+ nexus->ConfigurePolymorphic(name, types, handlers);
+ }
+
+ OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
+ POLYMORPHIC);
+}
+
+
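The three ConfigureVectorState overloads encode a small state machine over the feedback vector. Condensed into plain C++ for reference (the enum names mirror the states used above; the predicate mirrors the UNREACHABLE branches of the State-only overload): LOAD_IC slots may move to PREMONOMORPHIC or MEGAMORPHIC, KEYED_LOAD_IC slots to PREMONOMORPHIC or GENERIC, and both kinds take the monomorphic and polymorphic overloads.

    enum State { PREMONOMORPHIC, MONOMORPHIC, POLYMORPHIC, MEGAMORPHIC, GENERIC };
    enum Kind { LOAD_IC, KEYED_LOAD_IC };

    // Which targets the State-only ConfigureVectorState accepts per kind.
    bool IsAllowedTransition(Kind kind, State s) {
      if (kind == LOAD_IC) return s == PREMONOMORPHIC || s == MEGAMORPHIC;
      return s == PREMONOMORPHIC || s == GENERIC;  // KEYED_LOAD_IC
    }

Every transition funnels through OnTypeFeedbackChanged so the profiler sees vector-based ICs change state even though no code object is patched.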
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -648,7 +688,11 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
// Rewrite to the generic keyed load stub.
if (FLAG_use_ic) {
- set_target(*KeyedLoadIC::generic_stub(isolate()));
+ if (UseVector()) {
+ ConfigureVectorState(GENERIC);
+ } else {
+ set_target(*KeyedLoadIC::generic_stub(isolate()));
+ }
TRACE_IC("LoadIC", name);
TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
}
@@ -661,6 +705,25 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
+ if (FLAG_harmony_scoping && object->IsGlobalObject() && name->IsString()) {
+ // Look up in script context table.
+ Handle<String> str_name = Handle<String>::cast(name);
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+ Handle<ScriptContextTable> script_contexts(
+ global->native_context()->script_context_table());
+
+ ScriptContextTable::LookupResult lookup_result;
+ if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
+ if (use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
+ LoadScriptContextFieldStub stub(isolate(), &lookup_result);
+ PatchCache(name, stub.GetCode());
+ }
+ return FixedArray::get(ScriptContextTable::GetContext(
+ script_contexts, lookup_result.context_index),
+ lookup_result.slot_index);
+ }
+ }
+
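The script-context fast path added here runs before the ordinary named lookup whenever the receiver is the global object and --harmony-scoping is on. A rough model of the lookup it performs, with container types standing in for ScriptContextTable and Context (the std:: containers and LoadScriptContextField are illustrative, not V8 API):

    #include <string>
    #include <unordered_map>
    #include <vector>

    struct LookupResult { int context_index; int slot_index; };

    struct ScriptContextTable {
      std::unordered_map<std::string, LookupResult> names;
      std::vector<std::vector<int>> contexts;  // context_index -> slot values
      bool Lookup(const std::string& name, LookupResult* out) const {
        auto it = names.find(name);
        if (it == names.end()) return false;
        *out = it->second;
        return true;
      }
    };

    // Returns false when the name is not a script-scoped binding, in which
    // case the IC falls through to the normal LookupIterator path.
    bool LoadScriptContextField(const ScriptContextTable& table,
                                const std::string& name, int* value) {
      LookupResult r;
      if (!table.Lookup(name, &r)) return false;
      *value = table.contexts[r.context_index][r.slot_index];
      return true;
    }

When the stub can accept the slot, PatchCache installs a LoadScriptContextFieldStub so subsequent loads skip the runtime entirely.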
// Named lookup in the object.
LookupIterator it(object, name);
LookupForRead(&it);
@@ -733,15 +796,26 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
number_of_types - deprecated_types - (handler_to_overwrite != -1);
if (number_of_valid_types >= 4) return false;
- if (number_of_types == 0) return false;
- if (!target()->FindHandlers(&handlers, types.length())) return false;
+ if (number_of_types == 0 && state() != MONOMORPHIC &&
+ state() != POLYMORPHIC) {
+ return false;
+ }
+ if (UseVector()) {
+ if (!nexus()->FindHandlers(&handlers, types.length())) return false;
+ } else {
+ if (!target()->FindHandlers(&handlers, types.length())) return false;
+ }
number_of_valid_types++;
if (number_of_valid_types > 1 && target()->is_keyed_stub()) return false;
Handle<Code> ic;
if (number_of_valid_types == 1) {
- ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code,
- extra_ic_state());
+ if (UseVector()) {
+ ConfigureVectorState(name, receiver_type(), code);
+ } else {
+ ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code,
+ extra_ic_state());
+ }
} else {
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
@@ -752,11 +826,17 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
types.Add(type);
handlers.Add(code);
}
- ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers,
- number_of_valid_types, name,
- extra_ic_state());
+
+ if (UseVector()) {
+ ConfigureVectorState(name, &types, &handlers);
+ } else {
+ ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers,
+ number_of_valid_types, name,
+ extra_ic_state());
+ }
}
- set_target(*ic);
+
+ if (!UseVector()) set_target(*ic);
return true;
}
@@ -804,9 +884,13 @@ template Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map,
void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
DCHECK(handler->is_handler());
- Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
- kind(), name, receiver_type(), handler, extra_ic_state());
- set_target(*ic);
+ if (UseVector()) {
+ ConfigureVectorState(name, receiver_type(), handler);
+ } else {
+ Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
+ kind(), name, receiver_type(), handler, extra_ic_state());
+ set_target(*ic);
+ }
}
@@ -851,7 +935,12 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
// same key.
CopyICToMegamorphicCache(name);
}
- set_target(*megamorphic_stub());
+ if (UseVector()) {
+ ConfigureVectorState(kind() == Code::KEYED_LOAD_IC ? GENERIC
+ : MEGAMORPHIC);
+ } else {
+ set_target(*megamorphic_stub());
+ }
// Fall through.
case MEGAMORPHIC:
UpdateMegamorphicCache(*receiver_type(), *name, *code);
@@ -874,6 +963,10 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
ExtraICState extra_state) {
+ if (FLAG_vector_ics) {
+ return LoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
+ }
+
return PropertyICCompiler::ComputeLoad(isolate, UNINITIALIZED, extra_state);
}
@@ -917,6 +1010,7 @@ Handle<Code> LoadIC::megamorphic_stub() {
Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
ExtraICState extra_state) {
+ DCHECK(!FLAG_vector_ics);
return PropertyICCompiler::ComputeLoad(isolate, PREMONOMORPHIC, extra_state);
}
@@ -946,7 +1040,11 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
- set_target(*pre_monomorphic_stub());
+ if (UseVector()) {
+ ConfigureVectorState(PREMONOMORPHIC);
+ } else {
+ set_target(*pre_monomorphic_stub());
+ }
TRACE_IC("LoadIC", lookup->name());
return;
}
@@ -1209,11 +1307,19 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
+ Handle<Code> null_handle;
Handle<Map> receiver_map(receiver->map(), isolate());
MapHandleList target_receiver_maps;
TargetMaps(&target_receiver_maps);
+
if (target_receiver_maps.length() == 0) {
+ if (FLAG_vector_ics) {
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(receiver_map);
+ ConfigureVectorState(Handle<Name>::null(), receiver_type(), handler);
+ return null_handle;
+ }
return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
}
@@ -1228,6 +1334,12 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
IsMoreGeneralElementsKindTransition(
target_receiver_maps.at(0)->elements_kind(),
Handle<JSObject>::cast(receiver)->GetElementsKind())) {
+ if (FLAG_vector_ics) {
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(receiver_map);
+ ConfigureVectorState(Handle<Name>::null(), receiver_type(), handler);
+ return null_handle;
+ }
return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
}
@@ -1239,6 +1351,10 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
// If the miss wasn't due to an unseen map, a polymorphic stub
  // won't help; use the generic stub.
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
+ if (FLAG_vector_ics) {
+ ConfigureVectorState(GENERIC);
+ return null_handle;
+ }
return generic_stub();
}
@@ -1246,9 +1362,25 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
+ if (FLAG_vector_ics) {
+ ConfigureVectorState(GENERIC);
+ return null_handle;
+ }
return generic_stub();
}
+ if (FLAG_vector_ics) {
+ CodeHandleList handlers(target_receiver_maps.length());
+ ElementHandlerCompiler compiler(isolate());
+ compiler.CompileElementHandlers(&target_receiver_maps, &handlers);
+ TypeHandleList types(target_receiver_maps.length());
+ for (int i = 0; i < target_receiver_maps.length(); i++) {
+ types.Add(HeapType::Class(target_receiver_maps.at(i), isolate()));
+ }
+ ConfigureVectorState(Handle<Name>::null(), &types, &handlers);
+ return null_handle;
+ }
+
return PropertyICCompiler::ComputeKeyedLoadPolymorphic(&target_receiver_maps);
}
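LoadElementStub's decision logic is unchanged by this hunk; what changes is the output channel: with vector ICs the chosen handler or state is written into the feedback vector via ConfigureVectorState and null_handle is returned instead of a code object to patch. The choice itself, as a sketch (ChooseElementHandler and its parameters are illustrative names):

    enum Outcome { MONOMORPHIC_HANDLER, POLYMORPHIC_HANDLERS, GENERIC_STATE };

    Outcome ChooseElementHandler(int known_maps, bool more_general_transition,
                                 bool map_already_seen, int max_polymorphism) {
      if (known_maps == 0) return MONOMORPHIC_HANDLER;  // first execution
      if (known_maps == 1 && more_general_transition)
        return MONOMORPHIC_HANDLER;                     // stay monomorphic
      if (map_already_seen) return GENERIC_STATE;       // same map added twice
      if (known_maps + 1 > max_polymorphism) return GENERIC_STATE;
      return POLYMORPHIC_HANDLERS;
    }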
@@ -1284,11 +1416,14 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
}
if (!is_target_set()) {
- Code* generic = *generic_stub();
- if (*stub == generic) {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+ if (!FLAG_vector_ics) {
+ Code* generic = *generic_stub();
+ if (*stub == generic) {
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+ }
+
+ set_target(*stub);
}
- set_target(*stub);
TRACE_IC("LoadIC", key);
}
@@ -1363,6 +1498,32 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
+ if (FLAG_harmony_scoping && object->IsGlobalObject() && name->IsString()) {
+ // Look up in script context table.
+ Handle<String> str_name = Handle<String>::cast(name);
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+ Handle<ScriptContextTable> script_contexts(
+ global->native_context()->script_context_table());
+
+ ScriptContextTable::LookupResult lookup_result;
+ if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ script_contexts, lookup_result.context_index);
+ if (lookup_result.mode == CONST) {
+ return TypeError("harmony_const_assign", object, name);
+ }
+
+ if (FLAG_use_ic &&
+ StoreScriptContextFieldStub::Accepted(&lookup_result)) {
+ StoreScriptContextFieldStub stub(isolate(), &lookup_result);
+ PatchCache(name, stub.GetCode());
+ }
+
+ script_context->set(lookup_result.slot_index, *value);
+ return value;
+ }
+ }
+
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
if (MigrateDeprecated(object) || object->IsJSProxy()) {
@@ -2113,13 +2274,38 @@ RUNTIME_FUNCTION(CallIC_Customization_Miss) {
RUNTIME_FUNCTION(LoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
- ic.UpdateState(receiver, key);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+
+ if (FLAG_vector_ics) {
+ DCHECK(args.length() == 4);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
+ // LoadIC miss handler if the handler misses. Since the vector Nexus is
+ // set up outside the IC, handle that here.
+ if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ LoadICNexus nexus(vector, vector_slot);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Load(receiver, key));
+ } else {
+ DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Load(receiver, key));
+ }
+ } else {
+ DCHECK(args.length() == 2);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ }
return *result;
}
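Worth noting in this runtime stub: with vector ICs the same miss entry can be reached from both LOAD_IC and KEYED_LOAD_IC feedback slots (a keyed load with a string key funnels into the LoadIC handler), so the slot's recorded kind decides which nexus to construct. The dispatch in miniature (FeedbackVector here is a stand-in, and the two callables model the ic.Load paths):

    enum Kind { LOAD_IC, KEYED_LOAD_IC };

    struct FeedbackVector {
      const Kind* kinds;
      Kind GetKind(int slot) const { return kinds[slot]; }
    };

    template <typename LoadFn, typename KeyedFn>
    void MissDispatch(const FeedbackVector& vector, int slot,
                      LoadFn handle_load, KeyedFn handle_keyed) {
      if (vector.GetKind(slot) == LOAD_IC) {
        handle_load(slot);   // build LoadICNexus + LoadIC
      } else {
        handle_keyed(slot);  // build KeyedLoadICNexus + KeyedLoadIC
      }
    }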
@@ -2128,13 +2314,26 @@ RUNTIME_FUNCTION(LoadIC_Miss) {
RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+
+ if (FLAG_vector_ics) {
+ DCHECK(args.length() == 4);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ } else {
+ DCHECK(args.length() == 2);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ }
+
return *result;
}
@@ -2142,13 +2341,26 @@ RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+
+ if (FLAG_vector_ics) {
+ DCHECK(args.length() == 4);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ } else {
+ DCHECK(args.length() == 2);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ }
+
return *result;
}
@@ -2598,19 +2810,17 @@ RUNTIME_FUNCTION(StoreCallbackProperty) {
*/
RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
- Handle<Name> name_handle =
+ Handle<Name> name =
args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(
NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex);
- // TODO(rossberg): Support symbols in the API.
- if (name_handle->IsSymbol())
+ if (name->IsSymbol() && !interceptor_info->can_intercept_symbols())
return isolate->heap()->no_interceptor_result_sentinel();
- Handle<String> name = Handle<String>::cast(name_handle);
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetterCallback getter =
- FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
+ v8::GenericNamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::GenericNamedPropertyGetterCallback>(getter_address);
DCHECK(getter != NULL);
Handle<JSObject> receiver =
@@ -2640,7 +2850,7 @@ static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here.
HandleScope scope(isolate);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate, true);
if (ic.contextual_mode() != CONTEXTUAL) {
return isolate->heap()->undefined_value();
}
@@ -2722,13 +2932,39 @@ RUNTIME_FUNCTION(LoadElementWithInterceptor) {
RUNTIME_FUNCTION(LoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- LoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
- ic.UpdateState(receiver, key);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+
+ if (FLAG_vector_ics) {
+ DCHECK(args.length() == 4);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
+ // LoadIC miss handler if the handler misses. Since the vector Nexus is
+ // set up outside the IC, handle that here.
+ if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ LoadICNexus nexus(vector, vector_slot);
+ LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Load(receiver, key));
+ } else {
+ DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Load(receiver, key));
+ }
+ } else {
+ DCHECK(args.length() == 2);
+ LoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ }
+
return *result;
}
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 5ed8082ed1..541fa0c7dc 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -76,23 +76,10 @@ class IC {
state_ = PROTOTYPE_FAILURE;
}
- // If the stub contains weak maps then this function adds the stub to
- // the dependent code array of each weak map.
- static void RegisterWeakMapDependency(Handle<Code> stub);
-
- // This function is called when a weak map in the stub is dying,
- // invalidates the stub by setting maps in it to undefined.
- static void InvalidateMaps(Code* stub);
-
// Clear the inline cache to initial state.
static void Clear(Isolate* isolate, Address address,
ConstantPoolArray* constant_pool);
- // Clear the vector-based inline cache to initial state.
- template <class Nexus>
- static void Clear(Isolate* isolate, Code::Kind kind, Code* host,
- Nexus* nexus);
-
#ifdef DEBUG
bool IsLoadStub() const {
return target()->is_load_stub() || target()->is_keyed_load_stub();
@@ -138,6 +125,12 @@ class IC {
static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
Isolate* isolate);
+ static bool ICUseVector(Code::Kind kind) {
+ return (FLAG_vector_ics &&
+ (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC)) ||
+ kind == Code::CALL_IC;
+ }
+
protected:
// Get the call-site target; used for determining the state.
Handle<Code> target() const { return target_; }
@@ -158,14 +151,21 @@ class IC {
bool is_target_set() { return target_set_; }
bool UseVector() const {
- bool use = (FLAG_vector_ics &&
- (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC)) ||
- kind() == Code::CALL_IC;
+ bool use = ICUseVector(kind());
// If we are supposed to use the nexus, verify the nexus is non-null.
DCHECK(!use || nexus_ != NULL);
return use;
}
+ // Configure for most states.
+ void ConfigureVectorState(IC::State new_state);
+ // Configure the vector for MONOMORPHIC.
+ void ConfigureVectorState(Handle<Name> name, Handle<HeapType> type,
+ Handle<Code> handler);
+ // Configure the vector for POLYMORPHIC.
+ void ConfigureVectorState(Handle<Name> name, TypeHandleList* types,
+ CodeHandleList* handlers);
+
char TransitionMarkFromState(IC::State state);
void TraceIC(const char* type, Handle<Object> name);
void TraceIC(const char* type, Handle<Object> name, State old_state,
@@ -272,11 +272,15 @@ class IC {
void FindTargetMaps() {
if (target_maps_set_) return;
target_maps_set_ = true;
- if (state_ == MONOMORPHIC) {
- Map* map = target_->FindFirstMap();
- if (map != NULL) target_maps_.Add(handle(map));
- } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
- target_->FindAllMaps(&target_maps_);
+ if (UseVector()) {
+ nexus()->ExtractMaps(&target_maps_);
+ } else {
+ if (state_ == MONOMORPHIC) {
+ Map* map = target_->FindFirstMap();
+ if (map != NULL) target_maps_.Add(handle(map));
+ } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
+ target_->FindAllMaps(&target_maps_);
+ }
}
}
@@ -364,7 +368,18 @@ class LoadIC : public IC {
return LoadICState::GetContextualMode(extra_ic_state());
}
- explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+ LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
+ : IC(depth, isolate, nexus) {
+ DCHECK(!FLAG_vector_ics || nexus != NULL);
+ DCHECK(IsLoadStub());
+ }
+
+  // TODO(mvstanton): The for_queries_only flag exists because we have a case
+ // construct an IC only to gather the contextual mode, and we don't have
+ // vector/slot information. for_queries_only is a temporary hack to enable the
+ // strong DCHECK protection around vector/slot.
+ LoadIC(FrameDepth depth, Isolate* isolate, bool for_queries_only)
+ : IC(depth, isolate, NULL, for_queries_only) {
DCHECK(IsLoadStub());
}
@@ -396,6 +411,8 @@ class LoadIC : public IC {
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Name> name);
+ static void Clear(Isolate* isolate, Code* host, LoadICNexus* nexus);
+
protected:
inline void set_target(Code* code);
@@ -408,7 +425,7 @@ class LoadIC : public IC {
}
}
- virtual Handle<Code> megamorphic_stub() OVERRIDE;
+ Handle<Code> megamorphic_stub() OVERRIDE;
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -434,8 +451,23 @@ class LoadIC : public IC {
class KeyedLoadIC : public LoadIC {
public:
- explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
- : LoadIC(depth, isolate) {
+ // ExtraICState bits (building on IC)
+ class IcCheckTypeField : public BitField<IcCheckType, 1, 1> {};
+
+ static ExtraICState ComputeExtraICState(ContextualMode contextual_mode,
+ IcCheckType key_type) {
+ return LoadICState(contextual_mode).GetExtraICState() |
+ IcCheckTypeField::encode(key_type);
+ }
+
+ static IcCheckType GetKeyType(ExtraICState extra_state) {
+ return IcCheckTypeField::decode(extra_state);
+ }
+
+ KeyedLoadIC(FrameDepth depth, Isolate* isolate,
+ KeyedLoadICNexus* nexus = NULL)
+ : LoadIC(depth, isolate, nexus) {
+ DCHECK(!FLAG_vector_ics || nexus != NULL);
DCHECK(target()->is_keyed_load_stub());
}
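The IcCheckTypeField declaration above packs the key type into bit 1 of the extra IC state, next to the contextual mode bit. A standalone re-implementation of that encoding for illustration (V8's real BitField template has more machinery; this one keeps just encode/decode), which also shows why GetKeyType(kNoExtraICState) is ELEMENT: the zero state decodes to the first enumerator.

    #include <cassert>

    template <typename T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1u << size) - 1) << shift;
      static unsigned encode(T v) { return static_cast<unsigned>(v) << shift; }
      static T decode(unsigned state) {
        return static_cast<T>((state & kMask) >> shift);
      }
    };

    enum IcCheckType { ELEMENT, PROPERTY };
    using IcCheckTypeField = BitField<IcCheckType, 1, 1>;  // bit 1, width 1

    int main() {
      unsigned state = IcCheckTypeField::encode(PROPERTY);
      assert(IcCheckTypeField::decode(state) == PROPERTY);
      assert(IcCheckTypeField::decode(0) == ELEMENT);  // default key type
    }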
@@ -463,6 +495,8 @@ class KeyedLoadIC : public LoadIC {
static Handle<Code> generic_stub(Isolate* isolate);
static Handle<Code> pre_monomorphic_stub(Isolate* isolate);
+ static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
+
protected:
// receiver is HeapObject because it could be a String or a JSObject
Handle<Code> LoadElementStub(Handle<HeapObject> receiver);
@@ -525,7 +559,7 @@ class StoreIC : public IC {
JSReceiver::StoreFromKeyed store_mode);
protected:
- virtual Handle<Code> megamorphic_stub() OVERRIDE;
+ Handle<Code> megamorphic_stub() OVERRIDE;
// Stub accessors.
Handle<Code> generic_stub() const;
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index ceff593c36..75032e1915 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -92,6 +92,26 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Push(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -138,25 +158,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
+ MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ lw(scratch, MemOperand(cp, offset));
- __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index)));
- __ li(at, function);
- __ Branch(miss, ne, at, Operand(scratch));
-
+ __ lw(result, MemOperand(cp, offset));
+ __ lw(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ lw(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
- __ li(prototype, Handle<Map>(function->initial_map()));
+ __ lw(result,
+ FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+ __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
}
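The rewritten prototype load above is a pure pointer chase: current context to global object to native context to the function slot, then through the function's initial map to its prototype, with no embedded JSFunction handle and no context equality check left to fail. The chain, modeled with stand-in structs (field names only suggest the V8 heap layout):

    struct Map { void* prototype; };
    struct JSFunction { Map* initial_map; };
    struct NativeContext { JSFunction* slots[64]; };
    struct GlobalObject { NativeContext* native_context; };
    struct Context { GlobalObject* global_object; };

    void* LoadGlobalFunctionPrototype(Context* cp, int index) {
      NativeContext* native = cp->global_object->native_context;
      JSFunction* fn = native->slots[index];  // Context::SlotOffset(index)
      return fn->initial_map->prototype;      // initial map -> prototype
    }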
@@ -321,18 +332,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ li(this->name(), Operand(name));
- __ li(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ lw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ And(at, scratch, Operand(Map::Deprecated::kMask));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ li(scratch1(), handle(constant, isolate()));
- __ Branch(miss_label, ne, value_reg, Operand(scratch1()));
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ lw(scratch,
+ FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ Branch(miss_label, ne, value_reg, Operand(scratch));
}
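GenerateConstantCheck now reads the expected constant out of the map's descriptor array at the given descriptor index instead of embedding the constant in the handler, so the handler no longer keeps the constant alive. A minimal model of the comparison (DescriptorArray and Map are stand-ins, and the flat values array simplifies the real layout):

    struct DescriptorArray { void* values[8]; };
    struct Map { DescriptorArray* descriptors; };

    // false -> branch to miss_label.
    bool ConstantMatches(const Map* map, int descriptor, const void* value) {
      return map->descriptors->values[descriptor] == value;
    }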
@@ -412,11 +443,11 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
+ __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- } else {
- __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ GetWeakValue(scratch2, cell);
+ __ Branch(miss, ne, scratch2, Operand(map_reg));
}
// Check access rights to the global object. This has to happen after
@@ -434,17 +465,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
- if (load_prototype_from_map) {
- __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ li(reg, Operand(prototype));
- }
+ __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
@@ -457,7 +478,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ GetWeakValue(scratch2, cell);
+ __ Branch(miss, ne, scratch2, Operand(scratch1));
}
// Perform security check for access to the global object.
@@ -477,6 +501,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -578,6 +606,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
  // interceptor's holder have been compiled before (see a caller
// of this method).
@@ -594,6 +623,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(receiver(), holder_reg, this->name());
} else {
@@ -623,7 +653,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // Receiver.
__ li(at, Operand(callback)); // Callback info.
@@ -663,12 +693,16 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- __ li(result, Operand(cell));
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
__ lw(result, FieldMemOperand(result, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -679,6 +713,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
index c1e67f9ab6..000e3266c4 100644
--- a/deps/v8/src/ic/mips/ic-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc
@@ -24,9 +24,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -57,13 +60,14 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
number_of_handled_maps++;
// Check map and tail call if there's a match.
// Separate compare from branch, to provide path for above JumpIfSmi().
- __ Subu(match, map_reg, Operand(map));
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ __ GetWeakValue(match, cell);
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
- Operand(zero_reg));
+ Operand(map_reg));
}
}
DCHECK(number_of_handled_maps != 0);
@@ -85,15 +89,20 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
__ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ Register map_reg = scratch1();
+ Register match = scratch2();
+ __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+ __ GetWeakValue(match, cell);
if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, scratch1(),
- Operand(receiver_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match,
+ Operand(map_reg));
} else {
Label next_map;
- __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
- __ li(transition_map(), Operand(transitioned_maps->at(i)));
+ __ Branch(&next_map, ne, match, Operand(map_reg));
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index 0984490fc3..7c8a5eacab 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -272,18 +272,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static const Register LoadIC_TempRegister() { return a3; }
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ if (FLAG_vector_ics) {
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+
+ __ Push(receiver, name, slot, vector);
+ } else {
+ __ Push(receiver, name);
+ }
+}
+
+
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);
- __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
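
The miss stub must report to the runtime exactly as many arguments as LoadIC_PushArgs pushed, or the callee would pop the wrong number of stack slots. The invariant, as a one-line sketch rather than V8 code:

    // Must agree with LoadIC_PushArgs: (receiver, name), plus (slot, vector)
    // when vector-based ICs are enabled.
    int MissArgCount(bool vector_ics) { return vector_ics ? 4 : 2; }
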
@@ -412,15 +429,19 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -801,8 +822,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(t0, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, a3, t0, t1, t2);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, a3, t0, t1, t2);
// Cache miss.
__ Branch(&miss);
@@ -871,8 +892,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, a3, t0, t1, t2);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
index e538712d3f..fab66d8963 100644
--- a/deps/v8/src/ic/mips/stub-cache-mips.cc
+++ b/deps/v8/src/ic/mips/stub-cache-mips.cc
@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_MIPS
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -105,15 +108,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check register validity.
DCHECK(!scratch.is(no_reg));
@@ -121,6 +116,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
@@ -140,8 +146,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ srl(at, name, kCacheIndexShift);
@@ -151,8 +157,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
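
For orientation: GenerateProbe hashes the name and code flags into a primary table and, on a miss, re-hashes into a secondary table before giving up to the runtime. A minimal two-table model (illustrative only; the real hash mixing, masks, and entry layout differ):

    #include <cstdint>

    struct Entry { uint32_t key; void* code; };

    void* Probe(Entry* primary, Entry* secondary, uint32_t mask,
                uint32_t hash, uint32_t key) {
      Entry* p = &primary[hash & mask];
      if (p->key == key) return p->code;          // primary hit
      Entry* s = &secondary[(hash >> 2) & mask];  // re-hash for table two
      if (s->key == key) return s->code;          // secondary hit
      return nullptr;  // caller tail-calls the runtime miss handler
    }
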
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 8251b2ab9e..d3b861bc25 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -92,6 +92,26 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Push(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ Daddu(sp, sp, Operand(2 * kPointerSize));
+}
+
+
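
These three helpers implement a strict pairing discipline: vector-based ICs spill the feedback vector and slot together, and every exit path either restores both (the miss path) or drops both (the fast path, via the Daddu on sp). A small stack model of that discipline, assuming Push(vector, slot) leaves the slot on top:

    #include <cstdint>
    #include <vector>

    struct VectorSlotStack {
      std::vector<uintptr_t> slots;
      void Push(uintptr_t vector, uintptr_t slot) {
        slots.push_back(vector);
        slots.push_back(slot);  // slot ends up on top
      }
      void Pop(uintptr_t* vector, uintptr_t* slot) {  // miss path restores both
        *slot = slots.back(); slots.pop_back();
        *vector = slots.back(); slots.pop_back();
      }
      void Discard() { slots.resize(slots.size() - 2); }  // fast path drops both
    };
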
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -138,25 +158,17 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
+ MacroAssembler* masm, int index, Register result, Label* miss) {
// Check we're still in the same context.
- Register scratch = prototype;
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ ld(scratch, MemOperand(cp, offset));
- __ ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ ld(scratch, MemOperand(scratch, Context::SlotOffset(index)));
- __ li(at, function);
- __ Branch(miss, ne, at, Operand(scratch));
-
+ __ ld(result, MemOperand(cp, offset));
+ __ ld(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ ld(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
- __ li(prototype, Handle<Map>(function->initial_map()));
+ __ ld(result,
+ FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ ld(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+ __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
}
@@ -321,18 +333,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ li(this->name(), Operand(name));
- __ li(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ ld(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ And(at, scratch, Operand(Map::Deprecated::kMask));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ li(scratch1(), handle(constant, isolate()));
- __ Branch(miss_label, ne, value_reg, Operand(scratch1()));
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ ld(scratch,
+ FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ Branch(miss_label, ne, value_reg, Operand(scratch));
}
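
GenerateRestoreMap now reaches the transition target through a weak cell and additionally misses when the map has been deprecated since the handler was compiled. The combined validity condition, sketched with stand-in field names (not V8's):

    #include <cstdint>

    struct Map { uint32_t bit_field3; };
    struct WeakCell { const Map* map; };  // cleared to nullptr by GC

    bool TransitionStillValid(const WeakCell& wc, uint32_t deprecated_mask) {
      return wc.map != nullptr && (wc.map->bit_field3 & deprecated_mask) == 0;
    }
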
@@ -411,18 +443,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
__ ld(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
Register map_reg = scratch1;
+ __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- } else {
- __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ GetWeakValue(scratch2, cell);
+ __ Branch(miss, ne, scratch2, Operand(map_reg));
}
// Check access rights to the global object. This has to happen after
@@ -440,11 +466,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
- if (load_prototype_from_map) {
- __ ld(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ li(reg, Operand(prototype));
- }
+ __ ld(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
@@ -457,7 +479,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ GetWeakValue(scratch2, cell);
+ __ Branch(miss, ne, scratch2, Operand(scratch1));
}
// Perform security check for access to the global object.
@@ -477,6 +502,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -578,6 +607,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
  // interceptor's holder have been compiled before (see a caller
// of this method).
@@ -594,6 +624,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(receiver(), holder_reg, this->name());
} else {
@@ -623,7 +654,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // Receiver.
__ li(at, Operand(callback)); // Callback info.
@@ -663,12 +694,16 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- __ li(result, Operand(cell));
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
__ ld(result, FieldMemOperand(result, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -679,6 +714,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
index 796ed87f7e..1e1880f88d 100644
--- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -24,9 +24,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here; they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -57,13 +60,14 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
number_of_handled_maps++;
// Check map and tail call if there's a match.
// Separate compare from branch, to provide path for above JumpIfSmi().
- __ Dsubu(match, map_reg, Operand(map));
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ __ GetWeakValue(match, cell);
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
- Operand(zero_reg));
+ Operand(map_reg));
}
}
DCHECK(number_of_handled_maps != 0);
@@ -85,15 +89,20 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
__ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ ld(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ Register map_reg = scratch1();
+ Register match = scratch2();
+ __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+ __ GetWeakValue(match, cell);
if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, scratch1(),
- Operand(receiver_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match,
+ Operand(map_reg));
} else {
Label next_map;
- __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
- __ li(transition_map(), Operand(transitioned_maps->at(i)));
+ __ Branch(&next_map, ne, match, Operand(map_reg));
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index b4055b2367..7ac191c395 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -270,18 +270,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static const Register LoadIC_TempRegister() { return a3; }
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ if (FLAG_vector_ics) {
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+
+ __ Push(receiver, name, slot, vector);
+ } else {
+ __ Push(receiver, name);
+ }
+}
+
+
void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);
- __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -410,15 +427,19 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -810,8 +831,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(a4, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, a3, a4, a5, a6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, a3, a4, a5, a6);
// Cache miss.
__ Branch(&miss);
@@ -880,8 +901,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, a3, a4, a5, a6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, a3, a4, a5, a6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
index 272e5bea9b..04883d7bc9 100644
--- a/deps/v8/src/ic/mips64/stub-cache-mips64.cc
+++ b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -106,15 +109,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check register validity.
DCHECK(!scratch.is(no_reg));
@@ -122,6 +117,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
@@ -141,8 +147,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ dsrl(at, name, kCacheIndexShift);
@@ -152,8 +158,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ And(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
new file mode 100644
index 0000000000..e98f5172f8
--- /dev/null
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, r6, r3, r7, r8};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(r6.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ static Register registers[] = {receiver, name, r6, r7, r8};
+ return registers;
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
new file mode 100644
index 0000000000..2f29c83412
--- /dev/null
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -0,0 +1,698 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r3 : receiver
+ // -- r5 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ LoadP(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ LoadP(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(r3);
+
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm, Label* miss_label, Register receiver,
+ Handle<Name> name, Register scratch0, Register scratch1) {
+ DCHECK(name->IsUniqueName());
+ DCHECK(!receiver.is(scratch0));
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lbz(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ andi(r0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ bne(miss_label, cr0);
+
+ // Check that receiver is a JSObject.
+ __ lbz(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ cmpi(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ blt(miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ Register tmp = properties;
+ __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+ __ cmp(map, tmp);
+ __ bne(miss_label);
+
+ // Restore the temporarily used register.
+ __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(
+ masm, miss_label, &done, receiver, properties, name, scratch1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ LoadP(scratch, MemOperand(cp, offset));
+ __ LoadP(scratch,
+ FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ LoadP(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+ __ Move(ip, function);
+ __ cmp(ip, scratch);
+ __ bne(miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Move(prototype, Handle<Map>(function->initial_map()));
+ // Load the prototype from the initial map.
+ __ LoadP(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+ MacroAssembler* masm, Register receiver, Register scratch1,
+ Register scratch2, Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ mr(r3, scratch1);
+ __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+ MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+ Register scratch, Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ DCHECK(cell->value()->IsTheHole());
+ __ mov(scratch, Operand(cell));
+ __ LoadP(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch, ip);
+ __ bne(miss);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ __ push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ mov(scratch, Operand(interceptor));
+ __ push(scratch);
+ __ push(receiver);
+ __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+ NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+ MacroAssembler* masm, const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver, Register scratch_in,
+ bool is_store, int argc, Register* values) {
+ DCHECK(!receiver.is(scratch_in));
+ __ push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc - 1 - i];
+ DCHECK(!receiver.is(arg));
+ DCHECK(!scratch_in.is(arg));
+ __ push(arg);
+ }
+ DCHECK(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = r3;
+ Register call_data = r7;
+ Register holder = r5;
+ Register api_function_address = r4;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ Move(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ Move(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ Move(call_data, api_call_info);
+ __ LoadP(call_data,
+ FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ Move(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
+ __ mov(api_function_address, Operand(ref));
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ mov(this->name(), Operand(name));
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
+ Handle<Name> name, Handle<Map> transition) {
+ __ mov(this->name(), Operand(name));
+ __ mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+ Register value_reg,
+ Label* miss_label) {
+ __ Move(scratch1(), handle(constant, isolate()));
+ __ cmp(value_reg, scratch1());
+ __ bne(miss_label);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+ Register value_reg,
+ Label* miss_label) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ LoadP(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1(), it.Current(), &do_store);
+ it.Advance();
+ if (it.Done()) {
+ __ bne(miss_label);
+ break;
+ }
+ __ beq(&do_store);
+ }
+ __ bind(&do_store);
+ }
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+ Register object_reg, Register holder_reg, Register scratch1,
+ Register scratch2, Handle<Name> name, Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+ // Make sure there's no overlap between holder and object registers.
+ DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type()->IsConstant()) {
+ current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder()->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (!name->IsUniqueName()) {
+ DCHECK(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ DCHECK(current.is_null() ||
+ current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
+
+ __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ LoadP(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ Register map_reg = scratch1;
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ // CheckMap implicitly loads the map of |reg| into |map_reg|.
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ } else {
+ __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ // Two possible reasons for loading the prototype from the map:
+ // (1) Can't store references to new space in code.
+ // (2) Handler is shared for all receivers with the same prototype
+ // map (but not necessarily the same prototype instance).
+ bool load_prototype_from_map =
+ heap()->InNewSpace(*prototype) || depth == 1;
+ if (load_prototype_from_map) {
+ __ LoadP(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ } else {
+ __ mov(reg, Operand(prototype));
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
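
CheckPrototypes bakes the expected map of every hop on the prototype chain into the handler and misses if any of them has changed; global hops additionally re-validate access rights and empty property cells. The map-walk portion, modeled with hypothetical types:

    #include <vector>

    struct Map;
    struct Obj { const Map* map; const Obj* prototype; };

    // One expected map per object on the chain was recorded at compile
    // time; any mismatch invalidates the whole fast path.
    bool ChainStillValid(const Obj* receiver,
                         const std::vector<const Map*>& expected) {
      const Obj* cur = receiver;
      for (const Map* m : expected) {
        if (cur == nullptr || cur->map != m) return false;
        cur = cur->prototype;
      }
      return true;
    }
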
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ b(&success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ b(&success);
+ GenerateRestoreName(miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ Move(r3, value);
+ __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<ExecutableAccessorInfo> callback) {
+ // Build AccessorInfo::args_ list on the stack and push property name below
+ // the exit frame to make GC aware of them and store pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ DCHECK(!scratch2().is(reg));
+ DCHECK(!scratch3().is(reg));
+ DCHECK(!scratch4().is(reg));
+ __ push(receiver());
+ if (heap()->InNewSpace(callback->data())) {
+ __ Move(scratch3(), callback);
+ __ LoadP(scratch3(),
+ FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
+ }
+ __ push(scratch3());
+ __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+ __ mr(scratch4(), scratch3());
+ __ Push(scratch3(), scratch4());
+ __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch4(), reg);
+ __ push(name());
+
+ // Abi for CallApiGetter
+ Register getter_address_reg = ApiGetterDescriptor::function_address();
+
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ mov(getter_address_reg, Operand(ref));
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder have been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r3, scratch1());
+ __ beq(&interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
+ }
+ // Leave the internal frame.
+ }
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
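
The frame dance above encodes a simple protocol: the interceptor either produces a value or returns the distinguished NoInterceptorResultSentinel, in which case the compiled code falls through to the regular lookup. As a sketch with hypothetical signatures:

    // Only the sentinel protocol is the point here.
    const void* LoadWithInterceptor(const void* (*interceptor)(),
                                    const void* no_result_sentinel,
                                    const void* (*post_interceptor_load)()) {
      const void* result = interceptor();
      if (result != no_result_sentinel) return result;  // interceptor handled it
      return post_interceptor_load();  // fall back to the normal lookup
    }
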
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Register holder_reg = Frontend(receiver(), name);
+
+ __ Push(receiver(), holder_reg); // receiver
+ __ mov(ip, Operand(callback)); // callback info
+ __ push(ip);
+ __ mov(ip, Operand(name));
+ __ Push(ip, value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+ Handle<Name> name) {
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property = ExternalReference(
+ IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+ Label miss;
+ FrontendHeader(receiver(), name, &miss);
+
+ // Get the value from the cell.
+ Register result = StoreDescriptor::ValueRegister();
+ __ mov(result, Operand(cell));
+ __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (is_configurable) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, ip);
+ __ beq(&miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, r4, r6);
+ __ Ret();
+
+ FrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
new file mode 100644
index 0000000000..c86845646e
--- /dev/null
+++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -0,0 +1,130 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(r0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ Register tmp = scratch1();
+ __ JumpIfSmi(this->name(), &miss);
+ __ LoadP(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+ __ lbz(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+ } else {
+ __ Cmpi(this->name(), Operand(name), r0);
+ __ bne(&miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ // Polymorphic keyed stores may use the map register
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ mov(ip, Operand(map));
+ __ cmp(map_reg, ip);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ LoadP(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; ++i) {
+ __ mov(ip, Operand(receiver_maps->at(i)));
+ __ cmp(scratch1(), ip);
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+ } else {
+ Label next_map;
+ __ bne(&next_map);
+ __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+ __ bind(&next_map);
+ }
+ }
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
new file mode 100644
index 0000000000..97c3e2fcc9
--- /dev/null
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -0,0 +1,1047 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ beq(global_object);
+ __ cmpi(type, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ beq(global_object);
+ __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ beq(global_object);
+}
+
+
+// Helper function used from LoadIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as elements or name, clobbering
+// one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register result, Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
+ __ bind(&done); // scratch2 == elements + 4 * index
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
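+  // Preserve the entry address (scratch2) in r0 while scratch2 holds the
+  // type mask; any set TypeField bit means the property is not NORMAL.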
+ __ mr(r0, scratch2);
+ __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
+ __ and_(scratch2, scratch1, scratch2, SetRC);
+ __ bne(miss, cr0);
+ __ mr(scratch2, r0);
+
+ // Get the value at the masked, scaled index and return.
+ __ LoadP(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register value, Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + kPointerSize * index
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ int kTypeAndReadOnlyMask =
+ PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY);
+ __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ mr(r0, scratch2);
+ __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
+ __ and_(scratch2, scratch1, scratch2, SetRC);
+ __ bne(miss, cr0);
+ __ mr(scratch2, r0);
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ StoreP(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ mr(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver, Register map,
+ Register scratch,
+ int interceptor_bit, Label* slow) {
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
+ __ andi(r0, scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ bne(slow, cr0);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+ DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ cmpi(scratch, Operand(JS_OBJECT_TYPE));
+ __ blt(slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register scratch1, Register scratch2,
+ Register result, Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+  //          Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used to hold elements map and elements length.
+ // Holds the elements map if not_fast_array branch is taken.
+ //
+ // scratch2 - used to hold the loaded value.
+
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ LoadP(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch1, ip);
+ __ bne(not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
+ // Check that the key (index) is within bounds.
+ __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmpl(key, scratch1);
+ __ bge(out_of_range);
+ // Fast case: Do the load.
+ __ addi(scratch1, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // The key is a smi.
+ __ SmiToPtrArrayOffset(scratch2, key);
+ __ LoadPX(scratch2, MemOperand(scratch2, scratch1));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, ip);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ beq(out_of_range);
+ __ mr(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
+ // The key is not a smi.
+ Label unique;
+ // Is it a name?
+ __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
+ __ bgt(not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ beq(&unique);
+
+ // Is the string an array index, with cached numeric value?
+ __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask));
+ __ and_(r0, hash, r8, SetRC);
+ __ beq(index_string, cr0);
+
+ // Is the string internalized? We know it's a string, so a single
+ // bit test is enough.
+ // map: key map
+ __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ andi(r0, hash, Operand(kIsNotInternalizedMask));
+ __ bne(not_unique, cr0);
+
+ __ bind(&unique);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ Register dictionary = r3;
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+ Label slow;
+
+ __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
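+  // r3 serves as both the dictionary and the result register, which
+  // GenerateDictionaryLoad explicitly allows.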
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), r3, r6, r7);
+ __ Ret();
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+}
+
+
+// A register that isn't one of the parameters to the load ic.
+static const Register LoadIC_TempRegister() { return r6; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in lr.
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, r6, r7);
+
+ __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+
+ __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+static MemOperand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Register scratch3, Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
+ __ blt(slow_case);
+
+ // Check that the key is a positive smi.
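+  // The mask below covers the smi tag bit and the sign bit, so the branch
+  // is taken for non-smis and for negative keys alike.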
+ __ mov(scratch1, Operand(0x80000001));
+ __ and_(r0, key, scratch1, SetRC);
+ __ bne(slow_case, cr0);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ LoadP(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ LoadP(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ SubSmiLiteral(scratch2, scratch2, Smi::FromInt(2), r0);
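+  // The first two elements of the parameter map are the context and the
+  // backing store, hence the subtraction of 2 from the length.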
+ __ cmpl(key, scratch2);
+ __ bge(unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ SmiToPtrArrayOffset(scratch3, key);
+ __ addi(scratch3, scratch3, Operand(kOffset));
+
+ __ LoadPX(scratch2, MemOperand(scratch1, scratch3));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, scratch3);
+ __ beq(unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ LoadP(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ SmiToPtrArrayOffset(scratch3, scratch2);
+ __ addi(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ return MemOperand(scratch1, scratch3);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ LoadP(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ DONT_DO_SMI_CHECK);
+ __ LoadP(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmpl(key, scratch);
+ __ bge(slow_case);
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ addi(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ return MemOperand(backing_store, scratch);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ DCHECK(receiver.is(r4));
+ DCHECK(key.is(r5));
+ DCHECK(value.is(r3));
+
+ Label slow, notin;
+ MemOperand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, r6, r7, r8, &notin, &slow);
+ Register mapped_base = mapped_location.ra();
+ Register mapped_offset = mapped_location.rb();
+ __ StorePX(value, mapped_location);
+ __ add(r9, mapped_base, mapped_offset);
+ __ mr(r11, value);
+ __ RecordWrite(mapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r6.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, r6, r7, &slow);
+ Register unmapped_base = unmapped_location.ra();
+ Register unmapped_offset = unmapped_location.rb();
+ __ StorePX(value, unmapped_location);
+ __ add(r9, unmapped_base, unmapped_offset);
+ __ mr(r11, value);
+ __ RecordWrite(unmapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in lr.
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r6, r7);
+
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // The return address is in lr.
+ Label slow, check_name, index_smi, index_name, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register key = LoadDescriptor::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ DCHECK(key.is(r5));
+ DCHECK(receiver.is(r4));
+
+ Isolate* isolate = masm->isolate();
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_name);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
+ Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(r3, r6, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, NULL, &slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6);
+ __ Ret();
+
+ __ bind(&check_number_dictionary);
+ __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset));
+
+ // Check whether the elements is a number dictionary.
+ // r6: elements map
+ // r7: elements
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r6, ip);
+ __ bne(&slow);
+ __ SmiUntag(r3, key);
+ __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8);
+ __ Ret();
+
+  // Slow case: the key and receiver are still in r5 and r4.
+ __ bind(&slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7,
+ r6);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
+ Map::kHasNamedInterceptor, &slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r7, ip);
+ __ beq(&probe_dictionary);
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the name hash.
+ __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ srawi(r6, r3, KeyedLookupCache::kMapHashShift);
+ __ lwz(r7, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ srawi(r7, r7, Name::kHashShift);
+ __ xor_(r6, r6, r7);
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ mov(r7, Operand(mask));
+ __ and_(r6, r6, r7, LeaveRC);
+
+ // Load the key (consisting of map and unique name) from the cache and
+ // check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+
+ __ mov(r7, Operand(cache_keys));
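+  // Each cache entry is a (map, name) pair of pointers, so the bucket
+  // index is scaled by 2 * kPointerSize; r5 is parked in r0 meanwhile.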
+ __ mr(r0, r5);
+ __ ShiftLeftImm(r5, r6, Operand(kPointerSizeLog2 + 1));
+ __ add(r7, r7, r5);
+ __ mr(r5, r0);
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ // Load map and move r7 to next entry.
+ __ LoadP(r8, MemOperand(r7));
+ __ addi(r7, r7, Operand(kPointerSize * 2));
+ __ cmp(r3, r8);
+ __ bne(&try_next_entry);
+ __ LoadP(r8, MemOperand(r7, -kPointerSize)); // Load name
+ __ cmp(key, r8);
+ __ beq(&hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ // Last entry: Load map and move r7 to name.
+ __ LoadP(r8, MemOperand(r7));
+ __ addi(r7, r7, Operand(kPointerSize));
+ __ cmp(r3, r8);
+ __ bne(&slow);
+ __ LoadP(r8, MemOperand(r7));
+ __ cmp(key, r8);
+ __ bne(&slow);
+
+ // Get field offset.
+ // r3 : receiver's map
+ // r6 : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ __ mov(r7, Operand(cache_field_offsets));
+ if (i != 0) {
+ __ addi(r6, r6, Operand(i));
+ }
+ __ ShiftLeftImm(r8, r6, Operand(2));
+ __ lwzx(r8, MemOperand(r8, r7));
+ __ lbz(r9, FieldMemOperand(r3, Map::kInObjectPropertiesOffset));
+ __ sub(r8, r8, r9);
+ __ cmpi(r8, Operand::Zero());
+ __ bge(&property_array_property);
+ if (i != 0) {
+ __ b(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ bind(&load_in_object_property);
+ __ lbz(r9, FieldMemOperand(r3, Map::kInstanceSizeOffset));
+ __ add(r9, r9, r8); // Index from start of object.
+ __ subi(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
+ __ ShiftLeftImm(r3, r9, Operand(kPointerSizeLog2));
+ __ LoadPX(r3, MemOperand(r3, receiver));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ r7, r6);
+ __ Ret();
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ LoadP(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ addi(receiver, receiver,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ShiftLeftImm(r3, r8, Operand(kPointerSizeLog2));
+ __ LoadPX(r3, MemOperand(r3, receiver));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ r7, r6);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // r6: elements
+ __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
+ // Load the property to r3.
+ GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r7,
+ r6);
+ __ Ret();
+
+ __ bind(&index_name);
+ __ IndexFromHash(r6, key);
+ // Now jump to the place where smi keys are handled.
+ __ b(&index_smi);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
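+// Generates the fast paths of KeyedStoreIC::GenerateMegamorphic for both
+// fast object and fast double elements, optionally growing the array by
+// one element for stores at array[array.length].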
+static void KeyedStoreGenerateMegamorphicHelper(
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+ Register value, Register key, Register receiver, Register receiver_map,
+ Register elements_map, Register elements) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+
+ // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ Register scratch_value = r7;
+ Register address = r8;
+ if (check_map == kCheckMap) {
+ __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ mov(scratch_value,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ cmp(elements_map, scratch_value);
+ __ bne(fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(scratch_value, key);
+ __ LoadPX(scratch_value, MemOperand(address, scratch_value));
+ __ Cmpi(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()),
+ r0);
+ __ bne(&holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&holecheck_passed1);
+
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
+ r0);
+ }
+ // It's irrelevant whether array is smi-only or not when writing a smi.
+ __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(scratch_value, key);
+ __ StorePX(value, MemOperand(address, scratch_value));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch_value,
+ &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
+ r0);
+ }
+ __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(scratch_value, key);
+ __ StorePUX(value, MemOperand(address, scratch_value));
+ // Update write barrier for the elements array address.
+ __ mr(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+ __ bne(slow);
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
+ __ addi(address, elements,
+ Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
+ kHeapObjectTag)));
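+  // Loading just the exponent word is sufficient: the hole NaN is
+  // identified by its upper 32 bits.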
+ __ SmiToDoubleArrayOffset(scratch_value, key);
+ __ lwzx(scratch_value, MemOperand(address, scratch_value));
+ __ Cmpi(scratch_value, Operand(kHoleNanUpper32), r0);
+ __ bne(&fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value, key, elements, r6, d0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
+ r0);
+ }
+ __ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex);
+ __ bne(&non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r7, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ receiver_map, mode, slow);
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ b(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, r7, slow);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, receiver, key, value, receiver_map, mode, slow);
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ b(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
+ // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, r7, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(
+ masm, receiver, key, value, receiver_map, mode, slow);
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ b(&finish_object_store);
+}
+
+
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ---------- S t a t e --------------
+ // -- r3 : value
+  //  -- r4     : receiver
+  //  -- r5     : key
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array, maybe_name_key, miss;
+
+ // Register usage.
+ Register value = StoreDescriptor::ValueRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ DCHECK(receiver.is(r4));
+ DCHECK(key.is(r5));
+ DCHECK(value.is(r3));
+ Register receiver_map = r6;
+ Register elements_map = r9;
+ Register elements = r10; // Elements array of the receiver.
+ // r7 and r8 are used as general scratch registers.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &maybe_name_key);
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+ // Get the map of the object.
+ __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ andi(r0, ip,
+ Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ bne(&slow, cr0);
+ // Check if the object is a JS array or not.
+ __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+ __ cmpi(r7, Operand(JS_ARRAY_TYPE));
+ __ beq(&array);
+ // Check that the object is some kind of JSObject.
+ __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE));
+ __ blt(&slow);
+
+ // Object case: Check key against length in the elements array.
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmpl(key, ip);
+ __ blt(&fast_object);
+
+ // Slow case, handle jump to runtime.
+ __ bind(&slow);
+ // Entry registers are intact.
+ // r3: value.
+  // r4: receiver.
+  // r5: key.
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+  // Never returns here.
+
+ __ bind(&maybe_name_key);
+ __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(r7, &slow);
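+  // The key is a unique name: treat this like a named store and probe the
+  // stub cache for a STORE_IC handler.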
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+ key, r6, r7, r8, r9);
+ // Cache miss.
+ __ b(&miss);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // Condition code from comparing key and array length is still available.
+  __ bne(&slow);  // Only support writing to array[array.length].
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmpl(key, ip);
+ __ bge(&slow);
+ __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ cmp(elements_map, ip);  // ip can safely be reused for this compare.
+ __ bne(&check_if_double_array);
+ __ b(&fast_object_grow);
+
+ __ bind(&check_if_double_array);
+ __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ cmp(elements_map, ip);  // ip is reused again for the map compare.
+ __ bne(&slow);
+ __ b(&fast_double_grow);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+ __ bind(&array);
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ cmpl(key, ip);
+ __ bge(&extra);
+
+ KeyedStoreGenerateMegamorphicHelper(
+ masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map, elements_map, elements);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength, value, key, receiver,
+ receiver_map, elements_map, elements);
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(receiver.is(r4));
+ DCHECK(name.is(r5));
+ DCHECK(StoreDescriptor::ValueRegister().is(r3));
+
+  // The receiver is in a register; probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+ name, r6, r7, r8, r9);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ Label miss;
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Register dictionary = r6;
+ DCHECK(receiver.is(r4));
+ DCHECK(name.is(r5));
+ DCHECK(value.is(r3));
+
+ __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, r7, r8);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, r7, r8);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, r7, r8);
+ GenerateMiss(masm);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
+
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ return Assembler::IsCmpImmediate(instr);
+}
+
+
+//
+// This code is paired with the JumpPatchSite class in full-codegen-ppc.cc.
+//
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
+
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ if (!Assembler::IsCmpImmediate(instr)) {
+ return;
+ }
+
+  // The delta to the start of the map check instruction is encoded in the
+  // cmp instruction at the patched call site.
+ int delta = Assembler::GetCmpImmediateRawImmediate(instr);
+ delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff16Mask;
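+  // The delta is encoded as the 16-bit cmp immediate plus the cmp source
+  // register code scaled by kOff16Mask, which extends the encodable range.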
+ // If the delta is 0 the instruction is cmp r0, #0 which also signals that
+ // nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
+ cmp_instruction_address, delta);
+ }
+
+ Address patch_address =
+ cmp_instruction_address - delta * Instruction::kInstrSize;
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
+ // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling is done by changing from
+  //   cmp cr0, rx, rx
+  // to
+  //   rlwinm(r0, value, 0, 31, 31, SetRC);
+  //   bc(label, BT/BF, 2)
+  // and vice-versa to disable it again.
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRA(instr_at_patch);
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ DCHECK(Assembler::IsCmpRegister(instr_at_patch));
+ DCHECK_EQ(Assembler::GetRA(instr_at_patch).code(),
+ Assembler::GetRB(instr_at_patch).code());
+ patcher.masm()->TestIfSmi(reg, r0);
+ } else {
+ DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+#if V8_TARGET_ARCH_PPC64
+ DCHECK(Assembler::IsRldicl(instr_at_patch));
+#else
+ DCHECK(Assembler::IsRlwinm(instr_at_patch));
+#endif
+ patcher.masm()->cmp(reg, reg, cr0);
+ }
+ DCHECK(Assembler::IsBranch(branch_instr));
+
+ // Invert the logic of the branch
+ if (Assembler::GetCondition(branch_instr) == eq) {
+ patcher.EmitCondition(ne);
+ } else {
+ DCHECK(Assembler::GetCondition(branch_instr) == ne);
+ patcher.EmitCondition(eq);
+ }
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
new file mode 100644
index 0000000000..816a2ae649
--- /dev/null
+++ b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
@@ -0,0 +1,191 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, bool leave_frame,
+ StubCache::Table table, Register receiver, Register name,
+ // Number of the cache entry, not scaled.
+ Register offset, Register scratch, Register scratch2,
+ Register offset_scratch) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+ uintptr_t value_off_addr =
+ reinterpret_cast<uintptr_t>(value_offset.address());
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+ // Check the relative positions of the address fields.
+ DCHECK(value_off_addr > key_off_addr);
+ DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+ DCHECK(map_off_addr > key_off_addr);
+ DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ ShiftLeftImm(offset_scratch, offset, Operand(1));
+ __ add(offset_scratch, offset, offset_scratch);
+
+ // Calculate the base address of the entry.
+ __ mov(base_addr, Operand(key_offset));
+ __ ShiftLeftImm(scratch2, offset_scratch, Operand(kPointerSizeLog2));
+ __ add(base_addr, base_addr, scratch2);
+
+ // Check that the key in the entry matches the name.
+ __ LoadP(ip, MemOperand(base_addr, 0));
+ __ cmp(name, ip);
+ __ bne(&miss);
+
+ // Check the map matches.
+ __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(ip, scratch2);
+ __ bne(&miss);
+
+ // Get the code entry from the cache.
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ lwz(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+
+ DCHECK(!r0.is(flags_reg));
+ __ li(r0, Operand(Code::kFlagsNotUsedInLookup));
+ __ andc(flags_reg, flags_reg, r0);
+ __ mov(r0, Operand(flags));
+ __ cmpl(flags_reg, r0);
+ __ bne(&miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ b(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ b(&miss);
+ }
+#endif
+
+ if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+ // Jump to the first instruction in the code stub.
+ __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mtctr(r0);
+ __ bctr();
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+#if V8_TARGET_ARCH_PPC64
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 24.
+ DCHECK(sizeof(Entry) == 24);
+#else
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 12.
+ DCHECK(sizeof(Entry) == 12);
+#endif
+
+  // Make sure that the flags do not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+ DCHECK(!extra.is(receiver));
+ DCHECK(!extra.is(name));
+ DCHECK(!extra.is(scratch));
+ DCHECK(!extra2.is(receiver));
+ DCHECK(!extra2.is(name));
+ DCHECK(!extra2.is(scratch));
+ DCHECK(!extra2.is(extra));
+
+  // Check that the scratch, extra, extra2 and extra3 registers are valid.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+ extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ add(scratch, scratch, ip);
+#if V8_TARGET_ARCH_PPC64
+ // Use only the low 32 bits of the map pointer.
+ __ rldicl(scratch, scratch, 0, 32);
+#endif
+ uint32_t mask = kPrimaryTableSize - 1;
+ // We shift out the last two bits because they are not part of the hash and
+ // they are always 01 for maps.
+ __ ShiftRightImm(scratch, scratch, Operand(kCacheIndexShift));
+  // Mask down the xori argument to the minimum to keep the immediate
+  // encodable.
+  __ xori(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+  // Mask the hash down to the primary table capacity.
+  __ andi(scratch, scratch, Operand(mask));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ ShiftRightImm(extra, name, Operand(kCacheIndexShift));
+ __ sub(scratch, scratch, extra);
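+  // Subtracting the (shifted) name pointer spreads out entries that
+  // collided in the primary table.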
+ uint32_t mask2 = kSecondaryTableSize - 1;
+ __ addi(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+ __ andi(scratch, scratch, Operand(mask2));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+ extra3);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 7aee6f16ad..4223b28dda 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -52,10 +52,10 @@ class StubCache {
// Arguments extra, extra2 and extra3 may be used to pass additional scratch
// registers. Set to no_reg if not needed.
// If leave_frame is true, then exit a frame before the tail call.
- void GenerateProbe(MacroAssembler* masm, Code::Flags flags, bool leave_frame,
- Register receiver, Register name, Register scratch,
- Register extra, Register extra2 = no_reg,
- Register extra3 = no_reg);
+ void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2 = no_reg, Register extra3 = no_reg);
enum Table { kPrimary, kSecondary };
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index a782b088ed..46fa8cc337 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -15,6 +15,28 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Push(vector);
+ __ Push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(slot);
+ __ Pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ addp(rsp, Immediate(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -56,24 +78,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
+ MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ movp(scratch, Operand(rsi, offset));
- __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
- __ j(not_equal, miss);
-
+ __ movp(result, Operand(rsi, offset));
+ __ movp(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+ __ movp(result, Operand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
+ __ movp(result,
+ FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
}
@@ -324,17 +338,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ Move(this->name(), name);
- __ Move(StoreTransitionDescriptor::MapRegister(), transition);
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ movl(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
+ __ andl(scratch, Immediate(Map::Deprecated::kMask));
+ __ j(not_zero, miss);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ Cmp(value_reg, handle(constant, isolate()));
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ movp(scratch,
+ FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ cmpp(value_reg, scratch);
__ j(not_equal, miss_label);
}
@@ -413,18 +448,13 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
__ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map = in_new_space || depth == 1;
- if (load_prototype_from_map) {
- // Save the map in scratch1 for later.
- __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
+ Register map_reg = scratch1;
+ __ movp(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
+
if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ j(not_equal, miss);
}
// Check access rights to the global object. This has to happen after
@@ -441,11 +471,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
}
reg = holder_reg; // From now on the object will be in holder_reg.
- if (load_prototype_from_map) {
- __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- __ Move(reg, prototype);
- }
+ __ movp(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
@@ -457,8 +483,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
if (depth != 0 || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ j(not_equal, miss);
}
// Perform security check for access to the global object.
@@ -478,6 +506,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -576,6 +608,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
}
__ Push(holder_reg);
__ Push(this->name());
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
@@ -593,6 +626,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ ret(0);
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
__ Pop(this->name());
__ Pop(holder_reg);
if (must_preserve_receiver_reg) {
@@ -625,7 +659,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ PopReturnAddressTo(scratch1());
__ Push(receiver());
@@ -671,11 +705,15 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- __ Move(result, cell);
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
__ movp(result, FieldOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -689,6 +727,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ ret(0);
FrontendFooter(name, &miss);
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
index a5848b6dda..5be9c465cc 100644
--- a/deps/v8/src/ic/x64/ic-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc
@@ -42,20 +42,22 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
+ __ JumpIfSmi(receiver(), &miss);
- __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ Register map_reg = scratch1();
+ __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int i = 0; i < receiver_count; ++i) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
// Check map and tail call if there's a match
- __ Cmp(scratch1(), receiver_maps->at(i));
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (transitioned_maps->at(i).is_null()) {
__ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ Move(transition_map(), transitioned_maps->at(i),
- RelocInfo::EMBEDDED_OBJECT);
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
@@ -79,9 +81,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
@@ -109,8 +114,9 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
// Check map and tail call if there's a match
- __ Cmp(map_reg, map);
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index f125322645..2709a85718 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -119,9 +119,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY))
- << kSmiTagSize;
+ PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY);
__ Test(Operand(elements, scratch1, times_pointer_size,
kDetailsOffset - kHeapObjectTag),
Smi::FromInt(kTypeAndReadOnlyMask));
@@ -590,8 +589,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, rbx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, rbx, no_reg);
// Cache miss.
__ jmp(&miss);
@@ -763,11 +762,30 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
}
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return rbx; }
-
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ if (FLAG_vector_ics) {
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ DCHECK(!rdi.is(receiver) && !rdi.is(name) && !rdi.is(slot) &&
+ !rdi.is(vector));
+
+ __ PopReturnAddressTo(rdi);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(slot);
+ __ Push(vector);
+ __ PushReturnAddressFrom(rdi);
+ } else {
+ DCHECK(!rbx.is(receiver) && !rbx.is(name));
-static const Register KeyedLoadIC_TempRegister() { return rbx; }
+ __ PopReturnAddressTo(rbx);
+ __ Push(receiver);
+ __ Push(name);
+ __ PushReturnAddressFrom(rbx);
+ }
+}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
@@ -776,25 +794,26 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->load_miss(), 1);
- __ PopReturnAddressTo(LoadIC_TempRegister());
- __ Push(LoadDescriptor::ReceiverRegister()); // receiver
- __ Push(LoadDescriptor::NameRegister()); // name
- __ PushReturnAddressFrom(LoadIC_TempRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is on the stack.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ DCHECK(!rbx.is(receiver) && !rbx.is(name));
- __ PopReturnAddressTo(LoadIC_TempRegister());
- __ Push(LoadDescriptor::ReceiverRegister()); // receiver
- __ Push(LoadDescriptor::NameRegister()); // name
- __ PushReturnAddressFrom(LoadIC_TempRegister());
+ __ PopReturnAddressTo(rbx);
+ __ Push(receiver);
+ __ Push(name);
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
@@ -806,25 +825,26 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_load_miss(), 1);
- __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
- __ Push(LoadDescriptor::ReceiverRegister()); // receiver
- __ Push(LoadDescriptor::NameRegister()); // name
- __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is on the stack.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ DCHECK(!rbx.is(receiver) && !rbx.is(name));
- __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
- __ Push(LoadDescriptor::ReceiverRegister()); // receiver
- __ Push(LoadDescriptor::NameRegister()); // name
- __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
+ __ PopReturnAddressTo(rbx);
+ __ Push(receiver);
+ __ Push(name);
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
@@ -838,7 +858,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), rbx, no_reg);
// Cache miss: Jump to runtime.
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
index 8aff1ea48c..f15635c6b9 100644
--- a/deps/v8/src/ic/x64/stub-cache-x64.cc
+++ b/deps/v8/src/ic/x64/stub-cache-x64.cc
@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
                       StubCache::Table table, Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits
                       Register offset) {
@@ -82,10 +84,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
@@ -107,6 +110,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(extra2.is(no_reg));
DCHECK(extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch doesn't conflict with
+ // the vector and slot registers, which need to be preserved for a handler
+ // call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
@@ -123,8 +137,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
@@ -136,8 +150,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
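For orientation, the probe being extended with ic_kind hashes the name and receiver map into the primary table and, on a miss, rehashes into the secondary table before falling through to the runtime. A hedged C++ sketch of the indexing scheme, simplified from the assembly above (the table sizes are assumptions):

    #include <cstdint>

    constexpr uint32_t kCacheIndexShift = 2;       // matches the 4-byte scaling
    constexpr uint32_t kPrimaryTableSize = 2048;   // assumption
    constexpr uint32_t kSecondaryTableSize = 512;  // assumption

    uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits,
                           uint32_t flags) {
      uint32_t key = (name_hash + map_bits) ^ flags;
      return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    uint32_t SecondaryOffset(uint32_t primary, uint32_t name_bits,
                             uint32_t flags) {
      uint32_t key = (primary - name_bits) + flags;  // rehash after a miss
      return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }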
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index ae637d11b7..2ff35954c8 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -47,6 +47,28 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ push(vector);
+ __ push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ pop(slot);
+ __ pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ add(esp, Immediate(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
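The three helpers added above park the feedback vector and slot on the stack across code that clobbers registers. The contract is strictly LIFO: PopVectorAndSlot must mirror PushVectorAndSlot in reverse, and DiscardVectorAndSlot drops both words without reading them. A compact model, with the stack simulated by a std::vector:

    #include <vector>

    struct StackModel {
      std::vector<void*> stack;
      void PushVectorAndSlot(void* vector, void* slot) {
        stack.push_back(vector);  // pushed first ...
        stack.push_back(slot);    // ... so it must be popped last
      }
      void PopVectorAndSlot(void** vector, void** slot) {
        *slot = stack.back();   stack.pop_back();
        *vector = stack.back(); stack.pop_back();
      }
      void DiscardVectorAndSlot() {  // __ add(esp, 2 * kPointerSize)
        stack.pop_back();
        stack.pop_back();
      }
    };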
@@ -88,28 +110,23 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(masm->isolate()->native_context()->get(index)));
- // Check we're still in the same context.
- Register scratch = prototype;
+ MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(scratch, Operand(esi, offset));
- __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
- __ j(not_equal, miss);
-
+ __ mov(result, Operand(esi, offset));
+ __ mov(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+ __ mov(result, Operand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+ __ mov(result,
+ FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
}
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
+ DCHECK(!FLAG_vector_ics);
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(eax, scratch1);
__ ret(0);
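The rewritten prototype load above stops embedding the function and its initial map as handles; everything is reloaded from the context each time, which keeps the handler serializable and valid even if the global function is replaced. A self-contained C++ rendering of the load chain; the structs are stand-ins for V8's layouts, keyed to the field offsets used above:

    struct Map          { Map* prototype; };        // Map::kPrototypeOffset
    struct JSFunction   { Map* initial_map; };      // kPrototypeOrInitialMapOffset
    struct GlobalObject;
    struct Context {
      GlobalObject* global_object;                  // GLOBAL_OBJECT_INDEX slot
      JSFunction* slots[64];
      JSFunction* get(int i) { return slots[i]; }   // Context::SlotOffset(i)
    };
    struct GlobalObject { Context* native_context; };  // kNativeContextOffset

    Map* DirectLoadGlobalFunctionPrototype(Context* esi, int index) {
      GlobalObject* global = esi->global_object;
      Context* native = global->native_context;
      JSFunction* function = native->get(index);
      return function->initial_map->prototype;
    }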
@@ -329,17 +346,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ mov(this->name(), Immediate(name));
- __ mov(StoreTransitionDescriptor::MapRegister(), Immediate(transition));
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
+ __ and_(scratch, Immediate(Map::Deprecated::kMask));
+ __ j(not_zero, miss);
+ }
+}
+
+
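GenerateRestoreMap loads the transition map through a weak cell (missing if it was collected) and, when the map can be deprecated, also tests the deprecation bit so a store never transitions onto a stale map. The guard reduces to a masked test, assuming Deprecated is a bit field within bit_field3 as Map::Deprecated::kMask implies:

    #include <cstdint>

    // The emitted 'and' + 'j(not_zero, miss)' pair is this predicate.
    bool IsDeprecated(uint32_t bit_field3, uint32_t deprecated_mask) {
      return (bit_field3 & deprecated_mask) != 0;  // nonzero -> jump to miss
    }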
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ CmpObject(value_reg, handle(constant, isolate()));
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ mov(scratch,
+ FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ cmp(value_reg, scratch);
__ j(not_equal, miss_label);
}
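Likewise, GenerateConstantCheck no longer bakes the expected constant into the code object; it re-reads it from the map's descriptor array at the given index and compares registers. A hedged sketch with stand-in types, mirroring the LoadInstanceDescriptors / GetValueOffset sequence above:

    struct DescriptorArray {
      void* values[32];
      void* GetValue(int i) { return values[i]; }  // GetValueOffset(descriptor)
    };
    struct MapView { DescriptorArray* instance_descriptors; };

    bool ConstantFieldMatches(MapView* map, int descriptor, void* value_reg) {
      void* expected = map->instance_descriptors->GetValue(descriptor);
      return value_reg == expected;  // a mismatch branches to miss_label
    }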
@@ -415,14 +453,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map = in_new_space || depth == 1;
+ Register map_reg = scratch1;
+ __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ j(not_equal, miss);
}
// Check access rights to the global object. This has to happen after
@@ -432,24 +468,15 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// global proxy (as opposed to using slow ICs). See corresponding code
// in LookupForRead().
if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ __ CheckAccessGlobalProxy(reg, map_reg, scratch2, miss);
+ // Restore map_reg.
+ __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
} else if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
}
-
- if (load_prototype_from_map) {
- // Save the map in scratch1 for later.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
-
reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (load_prototype_from_map) {
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- __ mov(reg, prototype);
- }
+ __ mov(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
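CheckPrototypes drops the new-space special case entirely: every hop now loads the current object's map, compares it against a weak cell, and advances through Map::kPrototypeOffset, so one handler serves all receivers sharing the same map chain. A condensed model of the per-hop body (the types are stand-ins):

    struct HeapObj;
    struct MapT    { HeapObj* prototype; };
    struct HeapObj { MapT* map; };

    // One hop of the walk; 'expected' comes from a weak cell per map.
    HeapObj* CheckHop(HeapObj* object, MapT* expected, bool* miss) {
      MapT* map = object->map;       // mov map_reg, [reg + kMapOffset]
      if (map != expected) {         // CmpWeakValue + j(not_equal, miss)
        *miss = true;
        return nullptr;
      }
      return map->prototype;         // mov reg, [map_reg + kPrototypeOffset]
    }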
@@ -462,7 +489,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ j(not_equal, miss);
}
// Perform security check for access to the global object.
@@ -482,6 +512,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -581,7 +615,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
}
__ push(holder_reg);
__ push(this->name());
-
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
@@ -605,6 +639,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
}
+ InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
@@ -637,7 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
@@ -683,16 +718,15 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
-
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- if (masm()->serializer_enabled()) {
- __ mov(result, Immediate(cell));
- __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
- } else {
- __ mov(result, Operand::ForCell(cell));
- }
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
+ __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (is_configurable) {
@@ -706,6 +740,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
// The code above already loads the result into the return register.
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ ret(0);
FrontendFooter(name, &miss);
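CompileLoadGlobal drops the serializer special case: the PropertyCell is now always reached through a freshly allocated WeakCell, LoadWeakValue branches to miss if that cell has been cleared, and only then is the cell's value read and checked for the deleted-property hole. A sketch with an assumed null sentinel standing in for a cleared cell:

    struct PropertyCellT { void* value; };
    struct WeakCellT     { void* target; };  // weakly holds the PropertyCell

    // Returns the global's value, or nullptr to signal the miss path.
    void* LoadGlobalViaWeakCell(WeakCellT* cell, bool is_configurable,
                                void* the_hole) {
      void* target = cell->target;
      if (target == nullptr) return nullptr;  // LoadWeakValue: cleared -> miss
      void* result = static_cast<PropertyCellT*>(target)->value;
      if (is_configurable && result == the_hole) return nullptr;  // deleted
      return result;
    }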
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
index 20b47e726e..89bd937480 100644
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -44,10 +44,13 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Label miss;
if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
@@ -75,7 +78,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
- __ cmp(map_reg, map);
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
@@ -100,15 +104,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
MapHandleList* transitioned_maps) {
Label miss;
__ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ Register map_reg = scratch1();
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_maps->length(); ++i) {
- __ cmp(scratch1(), receiver_maps->at(i));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (transitioned_maps->at(i).is_null()) {
__ j(equal, handler_stubs->at(i));
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
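CompileKeyedStorePolymorphic now compares the receiver map per case through a weak cell and, for transitioning stores, materializes the transition map with LoadWeakValue, so a collected transition map routes the store to miss instead of being resurrected. A simplified dispatch model (std::vector stands in for the handle lists):

    #include <vector>

    struct KeyedStoreCase {
      void* map;          // weak cell value; nullptr once the map is collected
      bool transitioning;
      void* transition;   // weak cell value for the transition target
      int handler;        // index of the handler stub to tail-call
    };

    // Returns the handler index, or -1 for the miss path.
    int DispatchKeyedStore(void* receiver_map,
                           const std::vector<KeyedStoreCase>& cases,
                           void** transition_map_out) {
      for (const KeyedStoreCase& c : cases) {
        if (c.map != receiver_map) continue;       // CmpWeakValue per case
        if (c.transitioning) {
          if (c.transition == nullptr) return -1;  // LoadWeakValue -> miss
          *transition_map_out = c.transition;
        }
        return c.handler;
      }
      return -1;  // no map matched: fall through to &miss
    }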
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 959b8b628e..1004ac036b 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -694,8 +694,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, ebx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
// Cache miss.
__ jmp(&miss);
@@ -769,31 +769,52 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
+ if (FLAG_vector_ics) {
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
+ !edi.is(vector));
+
+ __ pop(edi);
+ __ push(receiver);
+ __ push(name);
+ __ push(slot);
+ __ push(vector);
+ __ push(edi);
+ } else {
+ DCHECK(!ebx.is(receiver) && !ebx.is(name));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(ebx);
+ }
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
__ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
- LoadIC_PushArgs(masm);
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(ebx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
@@ -809,13 +830,21 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
- LoadIC_PushArgs(masm);
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(ebx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
@@ -827,7 +856,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
index 0291ef3d82..be456ce95c 100644
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register name, Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {
@@ -56,6 +58,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
+ if (IC::ICUseVector(ic_kind)) {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ __ pop(VectorLoadICDescriptor::VectorRegister());
+ __ pop(VectorLoadICDescriptor::SlotRegister());
+ }
+
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
@@ -100,6 +109,18 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+ if (IC::ICUseVector(ic_kind)) {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!offset.is(vector) && !offset.is(slot));
+
+ __ pop(vector);
+ __ pop(slot);
+ }
+
+
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
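On x87, as on ia32, there are not enough spare registers to keep the vector and slot live through the probe, so the IC parks them on the stack before calling GenerateProbe, and ProbeTable drops them just before jumping to a handler, as the added code shows. A model of the balance requirement (which registers receive the popped words follows the code above):

    #include <vector>

    // Stack balance across a vector-IC stub cache probe on ia32/x87.
    struct ProbeStack {
      std::vector<void*> stack;
      void BeforeProbe(void* vector, void* slot) {  // done by the calling IC
        stack.push_back(vector);
        stack.push_back(slot);
      }
      void BeforeJumpToHandler() {  // the pops added inside ProbeTable
        stack.pop_back();
        stack.pop_back();
        // The frame is balanced again, so the handler sees a normal stack.
      }
    };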
@@ -113,10 +134,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
@@ -159,8 +181,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
- offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
+ receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
@@ -172,8 +194,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
- ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
- offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
+ receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.