author     Ali Ijaz Sheikh <ofrobots@google.com>  2015-11-30 21:22:40 -0800
committer  Ali Ijaz Sheikh <ofrobots@google.com>  2015-12-04 00:06:01 -0800
commit     8a43a3d7619fde59f0d1f2fad05d8ae7d1732b02 (patch)
tree       8698af91526d0eac90840dcba1e5b565160105c4 /deps/v8/src/ic
parent     8a2acd4cc9807510786b4b6f7ad3a947aeb3a14c (diff)
deps: upgrade V8 to 4.7.80.24
Pick up the latest branch head for V8 4.7:
https://github.com/v8/v8/commit/be169f8df059040e6a53ec1dd4579d8bca2167b5

Full change history for the 4.7 branch:
https://chromium.googlesource.com/v8/v8.git/+log/branch-heads/4.7

V8 blog post about what is new on V8 4.7:
http://v8project.blogspot.de/2015/10/v8-release-47.html

PR-URL: https://github.com/nodejs/node/pull/4106
Reviewed-By: bnoordhuis - Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: targos - Michaël Zasso <mic.besace@gmail.com>
Reviewed-By: rvagg - Rod Vagg <rod@vagg.org>
Diffstat (limited to 'deps/v8/src/ic')
-rw-r--r--  deps/v8/src/ic/access-compiler.cc | 20
-rw-r--r--  deps/v8/src/ic/access-compiler.h | 9
-rw-r--r--  deps/v8/src/ic/arm/access-compiler-arm.cc | 2
-rw-r--r--  deps/v8/src/ic/arm/handler-compiler-arm.cc | 28
-rw-r--r--  deps/v8/src/ic/arm/ic-arm.cc | 24
-rw-r--r--  deps/v8/src/ic/arm/ic-compiler-arm.cc | 5
-rw-r--r--  deps/v8/src/ic/arm/stub-cache-arm.cc | 10
-rw-r--r--  deps/v8/src/ic/arm64/access-compiler-arm64.cc | 2
-rw-r--r--  deps/v8/src/ic/arm64/handler-compiler-arm64.cc | 29
-rw-r--r--  deps/v8/src/ic/arm64/ic-arm64.cc | 16
-rw-r--r--  deps/v8/src/ic/arm64/ic-compiler-arm64.cc | 5
-rw-r--r--  deps/v8/src/ic/arm64/stub-cache-arm64.cc | 10
-rw-r--r--  deps/v8/src/ic/handler-compiler.cc | 43
-rw-r--r--  deps/v8/src/ic/handler-compiler.h | 1
-rw-r--r--  deps/v8/src/ic/ia32/access-compiler-ia32.cc | 3
-rw-r--r--  deps/v8/src/ic/ia32/handler-compiler-ia32.cc | 42
-rw-r--r--  deps/v8/src/ic/ia32/ic-compiler-ia32.cc | 5
-rw-r--r--  deps/v8/src/ic/ia32/ic-ia32.cc | 26
-rw-r--r--  deps/v8/src/ic/ia32/stub-cache-ia32.cc | 50
-rw-r--r--  deps/v8/src/ic/ic-compiler.cc | 109
-rw-r--r--  deps/v8/src/ic/ic-compiler.h | 15
-rw-r--r--  deps/v8/src/ic/ic-inl.h | 1
-rw-r--r--  deps/v8/src/ic/ic-state.cc | 20
-rw-r--r--  deps/v8/src/ic/ic-state.h | 2
-rw-r--r--  deps/v8/src/ic/ic.cc | 515
-rw-r--r--  deps/v8/src/ic/ic.h | 32
-rw-r--r--  deps/v8/src/ic/mips/access-compiler-mips.cc | 2
-rw-r--r--  deps/v8/src/ic/mips/handler-compiler-mips.cc | 28
-rw-r--r--  deps/v8/src/ic/mips/ic-compiler-mips.cc | 5
-rw-r--r--  deps/v8/src/ic/mips/ic-mips.cc | 14
-rw-r--r--  deps/v8/src/ic/mips/stub-cache-mips.cc | 10
-rw-r--r--  deps/v8/src/ic/mips64/access-compiler-mips64.cc | 2
-rw-r--r--  deps/v8/src/ic/mips64/handler-compiler-mips64.cc | 30
-rw-r--r--  deps/v8/src/ic/mips64/ic-compiler-mips64.cc | 5
-rw-r--r--  deps/v8/src/ic/mips64/ic-mips64.cc | 23
-rw-r--r--  deps/v8/src/ic/mips64/stub-cache-mips64.cc | 10
-rw-r--r--  deps/v8/src/ic/ppc/access-compiler-ppc.cc | 2
-rw-r--r--  deps/v8/src/ic/ppc/handler-compiler-ppc.cc | 28
-rw-r--r--  deps/v8/src/ic/ppc/ic-compiler-ppc.cc | 5
-rw-r--r--  deps/v8/src/ic/ppc/ic-ppc.cc | 14
-rw-r--r--  deps/v8/src/ic/ppc/stub-cache-ppc.cc | 10
-rw-r--r--  deps/v8/src/ic/x64/access-compiler-x64.cc | 3
-rw-r--r--  deps/v8/src/ic/x64/handler-compiler-x64.cc | 32
-rw-r--r--  deps/v8/src/ic/x64/ic-compiler-x64.cc | 5
-rw-r--r--  deps/v8/src/ic/x64/ic-x64.cc | 14
-rw-r--r--  deps/v8/src/ic/x64/stub-cache-x64.cc | 13
-rw-r--r--  deps/v8/src/ic/x87/access-compiler-x87.cc | 3
-rw-r--r--  deps/v8/src/ic/x87/handler-compiler-x87.cc | 42
-rw-r--r--  deps/v8/src/ic/x87/ic-compiler-x87.cc | 5
-rw-r--r--  deps/v8/src/ic/x87/ic-x87.cc | 26
-rw-r--r--  deps/v8/src/ic/x87/stub-cache-x87.cc | 50
51 files changed, 912 insertions, 493 deletions
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index 0dc9ab6e8d..951966e7de 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -49,5 +49,25 @@ Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
return store_calling_convention();
}
+
+
+Register PropertyAccessCompiler::slot() const {
+ if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+ return LoadDescriptor::SlotRegister();
+ }
+ DCHECK(FLAG_vector_stores &&
+ (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC));
+ return VectorStoreICDescriptor::SlotRegister();
+}
+
+
+Register PropertyAccessCompiler::vector() const {
+ if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+ return LoadWithVectorDescriptor::VectorRegister();
+ }
+ DCHECK(FLAG_vector_stores &&
+ (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC));
+ return VectorStoreICDescriptor::VectorRegister();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index 61567a2224..a5beb714f8 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -54,15 +54,12 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Register receiver() const { return registers_[0]; }
Register name() const { return registers_[1]; }
- Register slot() const { return LoadDescriptor::SlotRegister(); }
- Register vector() const { return LoadWithVectorDescriptor::VectorRegister(); }
+ Register slot() const;
+ Register vector() const;
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
Register scratch3() const { return registers_[4]; }
- // Calling convention between indexed store IC and handler.
- Register transition_map() const { return scratch1(); }
-
static Register* GetCallingConvention(Code::Kind);
static Register* load_calling_convention();
static Register* store_calling_convention();
@@ -81,6 +78,8 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Isolate* isolate_;
MacroAssembler masm_;
+ // Ensure that MacroAssembler has a reasonable size.
+ STATIC_ASSERT(sizeof(MacroAssembler) < 128 * kPointerSize);
};
}
} // namespace v8::internal
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
index 3b0c0c26c7..62f554792f 100644
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(r3.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || r3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r3, r4, r5};
return registers;
}
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 6f4ddcf98a..e2585fe222 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -305,25 +306,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -566,6 +577,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index a805f4ccee..de219ae72f 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -692,12 +692,20 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r4, &slow);
+ // We use register r8 when FLAG_vector_stores is enabled, because otherwise
+ // probing the megamorphic stub cache would require pushing temporaries on
+ // the stack.
+ // TODO(mvstanton): quit using register r8 when
+ // FLAG_enable_embedded_constant_pool is turned on.
+ DCHECK(!FLAG_vector_stores || !FLAG_enable_embedded_constant_pool);
+ Register temporary2 = FLAG_vector_stores ? r8 : r4;
if (FLAG_vector_stores) {
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r3, r4, r5, r6));
+
+ DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -708,8 +716,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, receiver, key, r5, temporary2, r6, r9);
// Cache miss.
__ b(&miss);
@@ -792,20 +800,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = r3;
+ Register dictionary = r5;
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
DCHECK(value.is(r0));
+ DCHECK(VectorStoreICDescriptor::VectorRegister().is(r3));
+ DCHECK(VectorStoreICDescriptor::SlotRegister().is(r4));
__ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
+ __ IncrementCounter(counters->store_normal_hit(), 1, r6, r9);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
+ __ IncrementCounter(counters->store_normal_miss(), 1, r6, r9);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
index ff2bcf05b1..9b8abd3298 100644
--- a/deps/v8/src/ic/arm/ic-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc
@@ -111,7 +111,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ b(ne, &next_map);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/stub-cache-arm.cc
index cdd04faf38..86710eb29a 100644
--- a/deps/v8/src/ic/arm/stub-cache-arm.cc
+++ b/deps/v8/src/ic/arm/stub-cache-arm.cc
@@ -120,8 +120,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
index 14b0fa7f16..13b0887a82 100644
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -38,7 +38,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, value, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(x3.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || x3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, x3, x4, x5};
return registers;
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 71c70da7a4..10ea1d72ff 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -298,27 +299,36 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, name and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
ASM_LOCATION("ElementHandlerCompiler::GenerateStoreSlow");
-
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -617,6 +627,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ B(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ Bind(&success);
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 27c4f71431..c4c856aab7 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -696,7 +696,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, x3, x4, x5, x6));
+ DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -708,7 +708,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, x3, x4, x5, x6);
+ receiver, key, x5, x6, x7, x8);
// Cache miss.
__ B(&miss);
@@ -789,19 +789,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register value = StoreDescriptor::ValueRegister();
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- Register dictionary = x3;
- DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));
+ Register dictionary = x5;
+ DCHECK(!AreAliased(value, receiver, name,
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister(), x5, x6, x7));
__ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+ __ IncrementCounter(counters->store_normal_hit(), 1, x6, x7);
__ Ret();
// Cache miss: Jump to runtime.
__ Bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+ __ IncrementCounter(counters->store_normal_miss(), 1, x6, x7);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
index a86b5e53b5..b4a4163fed 100644
--- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -116,7 +116,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
// This argument is used by the handler stub. For example, see
// ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
}
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ Bind(&skip);
diff --git a/deps/v8/src/ic/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
index ecd7fe1534..eb82f2af86 100644
--- a/deps/v8/src/ic/arm64/stub-cache-arm64.cc
+++ b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
@@ -111,8 +111,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 98b30aa54d..77e0fb5e43 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -4,10 +4,11 @@
#include "src/ic/handler-compiler.h"
-#include "src/cpu-profiler.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/ic/ic-inl.h"
+#include "src/isolate-inl.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -329,7 +330,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
PrototypeIterator iter(isolate(), last);
while (!iter.IsAtEnd()) {
lost_holder_register = true;
- last = JSObject::cast(iter.GetCurrent());
+ last = iter.GetCurrent<JSObject>();
iter.Advance();
}
auto last_handle = handle(last);
@@ -423,6 +424,8 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
Handle<Map> transition, Handle<Name> name) {
Label miss;
+ if (FLAG_vector_stores) PushVectorAndSlot();
+
// Check that we are allowed to write this.
bool is_nonexistent = holder()->map() == transition->GetBackPointer();
if (is_nonexistent) {
@@ -433,7 +436,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
: PrototypeIterator::END_AT_NULL;
PrototypeIterator iter(isolate(), holder());
while (!iter.IsAtEnd(end)) {
- last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ last = PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
}
if (!last.is_null()) set_holder(last);
@@ -453,16 +456,19 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
DCHECK(!transition->is_access_check_needed());
// Call to respective StoreTransitionStub.
- Register transition_map_reg = StoreTransitionDescriptor::MapRegister();
- bool push_map_on_stack = transition_map_reg.is(no_reg);
- Register map_reg = push_map_on_stack ? scratch1() : transition_map_reg;
+ Register transition_map_reg = StoreTransitionHelper::MapRegister();
+ bool stack_args = StoreTransitionHelper::UsesStackArgs();
+ Register map_reg = stack_args ? scratch1() : transition_map_reg;
if (details.type() == DATA_CONSTANT) {
DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
GenerateConstantCheck(map_reg, descriptor, value(), scratch2(), &miss);
- if (push_map_on_stack) {
+ if (stack_args) {
+ // Also pushes vector and slot.
GeneratePushMap(map_reg, scratch2());
+ } else if (FLAG_vector_stores) {
+ PopVectorAndSlot();
}
GenerateRestoreName(name);
StoreTransitionStub stub(isolate());
@@ -479,8 +485,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
: StoreTransitionStub::StoreMapAndValue;
GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
- if (push_map_on_stack) {
+ if (stack_args) {
+ // Also pushes vector and slot.
GeneratePushMap(map_reg, scratch2());
+ } else if (FLAG_vector_stores) {
+ PopVectorAndSlot();
}
GenerateRestoreName(name);
StoreTransitionStub stub(isolate(),
@@ -490,21 +499,37 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
}
GenerateRestoreName(&miss, name);
+ if (FLAG_vector_stores) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
return GetCode(kind(), Code::FAST, name);
}
+bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
+ HeapType* field_type) const {
+ return !field_type->Classes().Done();
+}
+
+
Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
Label miss;
DCHECK(it->representation().IsHeapObject());
- GenerateFieldTypeChecks(*it->GetFieldType(), value(), &miss);
+ HeapType* field_type = *it->GetFieldType();
+ bool need_save_restore = false;
+ if (RequiresFieldTypeChecks(field_type)) {
+ need_save_restore = IC::ICUseVector(kind());
+ if (need_save_restore) PushVectorAndSlot();
+ GenerateFieldTypeChecks(field_type, value(), &miss);
+ if (need_save_restore) PopVectorAndSlot();
+ }
+
StoreFieldStub stub(isolate(), it->GetFieldIndex(), it->representation());
GenerateTailCall(masm(), stub.GetCode());
__ bind(&miss);
+ if (need_save_restore) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
return GetCode(kind(), Code::FAST, it->name());
}
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 05c973a625..f5dafe9038 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -262,6 +262,7 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
Register value_reg, Register scratch,
Label* miss_label);
+ bool RequiresFieldTypeChecks(HeapType* field_type) const;
void GenerateFieldTypeChecks(HeapType* field_type, Register value_reg,
Label* miss_label);
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
index 81579e5dc3..acb3526d9d 100644
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -30,7 +30,8 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(ebx.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores ||
+ ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index c45821fe8b..1d019092c7 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -303,13 +304,24 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // which contains the return address.
+ } else {
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+ }
}
@@ -318,7 +330,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
@@ -327,7 +339,8 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -351,10 +364,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
Register scratch) {
- // Get the return address, push the argument and then continue.
- __ pop(scratch);
+  // current       after GeneratePushMap
+  // -------------------------------------------------
+  //  ret addr      slot
+  //  vector        vector
+  // sp -> slot     map
+  //               sp -> ret addr
+ //
+ __ xchg(map_reg, Operand(esp, 0));
+ __ xchg(map_reg, Operand(esp, 2 * kPointerSize));
__ push(map_reg);
- __ push(scratch);
}
@@ -574,6 +593,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
index a1e2cbcefe..d0a2e0bd54 100644
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -112,7 +112,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index d683264e13..7a6a41541c 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -577,7 +577,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, ebx, no_reg);
+ receiver, key, edi, no_reg);
if (FLAG_vector_stores) {
__ pop(VectorStoreICDescriptor::VectorRegister());
@@ -734,6 +734,12 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ // This shouldn't be called.
+ __ int3();
+ return;
+ }
+
// Return address is on the stack.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -787,22 +793,32 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = ebx;
-
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
// A lot of registers are needed for storing to slow case
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(receiver);
+ if (FLAG_vector_stores) {
+ __ push(vector);
+ __ push(slot);
+ }
+
+ Register dictionary = ebx;
+ __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
receiver, edi);
- __ Drop(1);
+ __ Drop(FLAG_vector_stores ? 3 : 1);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
+ if (FLAG_vector_stores) {
+ __ pop(slot);
+ __ pop(vector);
+ }
__ pop(receiver);
__ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
index 68b30e7bdb..7366ebe15f 100644
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -23,8 +23,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
Label miss;
+ bool is_vector_store =
+ IC::ICUseVector(ic_kind) &&
+ (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
@@ -56,19 +61,29 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (IC::ICUseVector(ic_kind)) {
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ if (is_vector_store) {
+ // The overlap here is rather embarrassing. One does what one must.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(extra.is(VectorStoreICDescriptor::SlotRegister()));
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ pop(vector);
+ __ mov(Operand::StaticVariable(virtual_register), extra);
+ __ pop(extra); // Pop "slot".
+ // Jump to the first instruction in the code stub.
+ __ jmp(Operand::StaticVariable(virtual_register));
+ } else {
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
}
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
-
__ bind(&miss);
} else {
+ DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
+
// Save the offset on the stack.
__ push(offset);
@@ -105,21 +120,22 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
- if (IC::ICUseVector(ic_kind)) {
+ // Jump to the first instruction in the code stub.
+ if (is_vector_store) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!offset.is(vector) && !offset.is(slot));
-
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(offset.is(VectorStoreICDescriptor::SlotRegister()));
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(Operand::StaticVariable(virtual_register), offset);
__ pop(vector);
- __ pop(slot);
+ __ pop(offset); // Pop "slot".
+ __ jmp(Operand::StaticVariable(virtual_register));
+ } else {
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
}
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
// Pop at miss.
__ bind(&miss);
__ pop(offset);
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index d7b95dada2..20e4fedc23 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -4,9 +4,9 @@
#include "src/ic/ic-compiler.h"
-#include "src/cpu-profiler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic-inl.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -119,6 +119,25 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
}
+Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ Handle<Map> receiver_map, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
+
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+ PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ Handle<Code> code =
+ compiler.CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
+ return code;
+}
+
+
Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode) {
@@ -165,7 +184,7 @@ Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
static void FillCache(Isolate* isolate, Handle<Code> code) {
Handle<UnseededNumberDictionary> dictionary = UnseededNumberDictionary::Set(
isolate->factory()->non_monomorphic_cache(), code->flags(), code);
- isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
+ isolate->heap()->SetRootNonMonomorphicCache(*dictionary);
}
@@ -220,31 +239,6 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
}
-Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
- MapHandleList* receiver_maps, LanguageMode language_mode) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- DCHECK(KeyedLoadIC::GetKeyType(kNoExtraICState) == ELEMENT);
- Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
- Handle<PolymorphicCodeCache> cache =
- isolate->factory()->polymorphic_code_cache();
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CodeHandleList handlers(receiver_maps->length());
- ElementHandlerCompiler compiler(isolate);
- compiler.CompileElementHandlers(receiver_maps, &handlers, language_mode);
- PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code = ic_compiler.CompilePolymorphic(
- receiver_maps, &handlers, isolate->factory()->empty_string(),
- Code::NORMAL, ELEMENT);
-
- isolate->counters()->keyed_load_polymorphic_stubs()->Increment();
-
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
Handle<Code> PropertyICCompiler::ComputePolymorphic(
Code::Kind kind, MapHandleList* maps, CodeHandleList* handlers,
int valid_maps, Handle<Name> name, ExtraICState extra_ic_state) {
@@ -256,6 +250,23 @@ Handle<Code> PropertyICCompiler::ComputePolymorphic(
}
+void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
+ MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+ CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
+ LanguageMode language_mode) {
+ Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
+ PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ compiler.CompileKeyedStorePolymorphicHandlers(
+ receiver_maps, transitioned_maps, handlers, store_mode);
+}
+
+
Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
LanguageMode language_mode) {
@@ -338,11 +349,9 @@ Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
}
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
- // Collect MONOMORPHIC stubs for all |receiver_maps|.
- CodeHandleList handlers(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
+void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
+ MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+ CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
for (int i = 0; i < receiver_maps->length(); ++i) {
Handle<Map> receiver_map(receiver_maps->at(i));
Handle<Code> cached_stub;
@@ -362,22 +371,36 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
transitioned_map->elements_kind(),
is_js_array, store_mode).GetCode();
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ // TODO(mvstanton): Consider embedding store_mode in the state of the slow
+ // keyed store ic for uniformity.
cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
if (IsSloppyArgumentsElements(elements_kind)) {
- cached_stub = KeyedStoreSloppyArgumentsStub(isolate()).GetCode();
+ cached_stub =
+ KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
} else if (receiver_map->has_fast_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
cached_stub = StoreFastElementStub(isolate(), is_js_array,
elements_kind, store_mode).GetCode();
} else {
- cached_stub = StoreElementStub(isolate(), elements_kind).GetCode();
+ cached_stub =
+ StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
}
}
DCHECK(!cached_stub.is_null());
- handlers.Add(cached_stub);
- transitioned_maps.Add(transitioned_map);
+ handlers->Add(cached_stub);
+ transitioned_maps->Add(transitioned_map);
}
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
+ // Collect MONOMORPHIC stubs for all |receiver_maps|.
+ CodeHandleList handlers(receiver_maps->length());
+ MapHandleList transitioned_maps(receiver_maps->length());
+ CompileKeyedStorePolymorphicHandlers(receiver_maps, &transitioned_maps,
+ &handlers, store_mode);
Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
&transitioned_maps);
@@ -390,20 +413,28 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
+Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
if (receiver_map->has_sloppy_arguments_elements()) {
- stub = KeyedStoreSloppyArgumentsStub(isolate()).GetCode();
+ stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
} else if (receiver_map->has_fast_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
store_mode).GetCode();
} else {
- stub = StoreElementStub(isolate(), elements_kind).GetCode();
+ stub = StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
}
+ return stub;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
+ Handle<Code> stub =
+ CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index b5226e9a6e..ee6597d59d 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -34,11 +34,16 @@ class PropertyICCompiler : public PropertyAccessCompiler {
static Handle<Code> ComputeKeyedLoadMonomorphicHandler(
Handle<Map> receiver_map, ExtraICState extra_ic_state);
+ static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
+ Handle<Map> receiver_map, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode);
static Handle<Code> ComputeKeyedStoreMonomorphic(
Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
- static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps,
- LanguageMode language_mode);
+ static void ComputeKeyedStorePolymorphicHandlers(
+ MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+ CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
+ LanguageMode language_mode);
static Handle<Code> ComputeKeyedStorePolymorphic(
MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
LanguageMode language_mode);
@@ -78,10 +83,16 @@ class PropertyICCompiler : public PropertyAccessCompiler {
Handle<Name> name, Code::StubType type,
IcCheckType check);
+ Handle<Code> CompileKeyedStoreMonomorphicHandler(
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode);
+ void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
+ MapHandleList* transitioned_maps,
+ CodeHandleList* handlers,
+ KeyedAccessStoreMode store_mode);
Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index bce3c1206d..646b73d641 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -60,6 +60,7 @@ void IC::SetTargetAtAddress(Address address, Code* target,
DCHECK(!target->is_inline_cache_stub() ||
(target->kind() != Code::LOAD_IC &&
target->kind() != Code::KEYED_LOAD_IC &&
+ target->kind() != Code::CALL_IC &&
(!FLAG_vector_stores || (target->kind() != Code::STORE_IC &&
target->kind() != Code::KEYED_STORE_IC))));
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index fc33c80487..bc03d7d487 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -358,6 +358,8 @@ const char* CompareICState::GetStateName(State state) {
switch (state) {
case UNINITIALIZED:
return "UNINITIALIZED";
+ case BOOLEAN:
+ return "BOOLEAN";
case SMI:
return "SMI";
case NUMBER:
@@ -384,6 +386,8 @@ Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
switch (state) {
case UNINITIALIZED:
return Type::None(zone);
+ case BOOLEAN:
+ return Type::Boolean(zone);
case SMI:
return Type::SignedSmall(zone);
case NUMBER:
@@ -410,6 +414,7 @@ CompareICState::State CompareICState::NewInputState(State old_state,
Handle<Object> value) {
switch (old_state) {
case UNINITIALIZED:
+ if (value->IsBoolean()) return BOOLEAN;
if (value->IsSmi()) return SMI;
if (value->IsHeapNumber()) return NUMBER;
if (value->IsInternalizedString()) return INTERNALIZED_STRING;
@@ -417,6 +422,9 @@ CompareICState::State CompareICState::NewInputState(State old_state,
if (value->IsSymbol()) return UNIQUE_NAME;
if (value->IsJSObject()) return OBJECT;
break;
+ case BOOLEAN:
+ if (value->IsBoolean()) return BOOLEAN;
+ break;
case SMI:
if (value->IsSmi()) return SMI;
if (value->IsHeapNumber()) return NUMBER;
@@ -454,6 +462,7 @@ CompareICState::State CompareICState::TargetState(
bool has_inlined_smi_code, Handle<Object> x, Handle<Object> y) {
switch (old_state) {
case UNINITIALIZED:
+ if (x->IsBoolean() && y->IsBoolean()) return BOOLEAN;
if (x->IsSmi() && y->IsSmi()) return SMI;
if (x->IsNumber() && y->IsNumber()) return NUMBER;
if (Token::IsOrderedRelationalCompareOp(op)) {
@@ -470,16 +479,16 @@ CompareICState::State CompareICState::TargetState(
return Token::IsEqualityOp(op) ? INTERNALIZED_STRING : STRING;
}
if (x->IsString() && y->IsString()) return STRING;
- if (!Token::IsEqualityOp(op)) return GENERIC;
- if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
Handle<JSObject>::cast(y)->map()) {
return KNOWN_OBJECT;
} else {
- return OBJECT;
+ return Token::IsEqualityOp(op) ? OBJECT : GENERIC;
}
}
+ if (!Token::IsEqualityOp(op)) return GENERIC;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
return GENERIC;
case SMI:
return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
@@ -496,11 +505,11 @@ CompareICState::State CompareICState::TargetState(
if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
return GENERIC;
case KNOWN_OBJECT:
- DCHECK(Token::IsEqualityOp(op));
if (x->IsJSObject() && y->IsJSObject()) {
- return OBJECT;
+ return Token::IsEqualityOp(op) ? OBJECT : GENERIC;
}
return GENERIC;
+ case BOOLEAN:
case STRING:
case UNIQUE_NAME:
case OBJECT:
@@ -510,5 +519,6 @@ CompareICState::State CompareICState::TargetState(
UNREACHABLE();
return GENERIC; // Make the compiler happy.
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 0b4b9cdc99..b529b8c54d 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -174,9 +174,11 @@ class CompareICState {
// ... < GENERIC
// SMI < NUMBER
// INTERNALIZED_STRING < STRING
+ // INTERNALIZED_STRING < UNIQUE_NAME
// KNOWN_OBJECT < OBJECT
enum State {
UNINITIALIZED,
+ BOOLEAN,
SMI,
NUMBER,
STRING,
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 4ed85d5842..f0d571bed6 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -17,6 +17,7 @@
#include "src/ic/ic-inl.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
+#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/prototype.h"
#include "src/runtime/runtime.h"
@@ -122,8 +123,11 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ExtraICState extra_state = new_target->extra_ic_state();
const char* modifier = "";
if (new_target->kind() == Code::KEYED_STORE_IC) {
- modifier = GetTransitionMarkModifier(
- KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
+ KeyedAccessStoreMode mode =
+ FLAG_vector_stores
+ ? casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode()
+ : KeyedStoreIC::GetKeyedAccessStoreMode(extra_state);
+ modifier = GetTransitionMarkModifier(mode);
}
PrintF(" (%c->%c%s) ", TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state), modifier);
@@ -271,11 +275,8 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
if (first_map == NULL) return false;
Handle<Map> old_map(first_map);
if (old_map->is_deprecated()) return true;
- if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
- receiver_map()->elements_kind())) {
- return true;
- }
- return false;
+ return IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
+ receiver_map()->elements_kind());
}
CacheHolderFlag flag;
@@ -665,6 +666,20 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
}
+void IC::ConfigureVectorState(MapHandleList* maps,
+ MapHandleList* transitioned_maps,
+ CodeHandleList* handlers) {
+ DCHECK(UseVector());
+ DCHECK(kind() == Code::KEYED_STORE_IC);
+ KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+ nexus->ConfigurePolymorphic(maps, transitioned_maps, handlers);
+
+ vector_set_ = true;
+ OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
+ POLYMORPHIC);
+}
+
+
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -966,7 +981,7 @@ static Handle<Code> KeyedStoreICInitializeStubHelper(
Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
LanguageMode language_mode,
State initialization_state) {
- if (FLAG_vector_stores) {
+ if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
}
@@ -988,6 +1003,13 @@ Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code(
}
+Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
+ ExtraICState extra_state) {
+ LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
+ return KeyedStoreICInitializeStubHelper(isolate, mode, MEGAMORPHIC);
+}
+
+
Handle<Code> LoadIC::megamorphic_stub() {
DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
return KeyedLoadIC::ChooseMegamorphicStub(isolate(), extra_ic_state());
@@ -1137,7 +1159,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
// Use specialized code for getting prototype of functions.
if (receiver->IsJSFunction() &&
Name::Equals(isolate()->factory()->prototype_string(), lookup->name()) &&
- Handle<JSFunction>::cast(receiver)->should_have_prototype() &&
+ receiver->IsConstructor() &&
!Handle<JSFunction>::cast(receiver)
->map()
->has_non_instance_prototype()) {
@@ -1851,8 +1873,9 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
}
-Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
+Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode) {
+ Handle<Code> null_handle;
// Don't handle megamorphic property accesses for INTERCEPTORS or
// ACCESSOR_CONSTANT
// via megamorphic stubs, since they don't have a map in their relocation info
@@ -1862,13 +1885,19 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
return megamorphic_stub();
}
- Handle<Map> receiver_map(receiver->map(), isolate());
MapHandleList target_receiver_maps;
TargetMaps(&target_receiver_maps);
if (target_receiver_maps.length() == 0) {
Handle<Map> monomorphic_map =
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
+ if (FLAG_vector_stores) {
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ monomorphic_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler);
+ return null_handle;
+ }
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
monomorphic_map, language_mode(), store_mode);
}
@@ -1878,7 +1907,9 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// superset of the original IC. Handle those here if the receiver map hasn't
// changed or it has transitioned to a more general kind.
KeyedAccessStoreMode old_store_mode =
- KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
+ FLAG_vector_stores
+ ? GetKeyedAccessStoreMode()
+ : KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
if (state() == MONOMORPHIC) {
Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1894,9 +1925,17 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
+ if (FLAG_vector_stores) {
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ transitioned_receiver_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map,
+ handler);
+ return null_handle;
+ }
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
transitioned_receiver_map, language_mode(), store_mode);
- } else if (*previous_receiver_map == receiver->map() &&
+ } else if (receiver_map.is_identical_to(previous_receiver_map) &&
old_store_mode == STANDARD_STORE &&
(store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
@@ -1904,6 +1943,13 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
+ if (FLAG_vector_stores) {
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ receiver_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
+ return null_handle;
+ }
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
receiver_map, language_mode(), store_mode);
}
@@ -1964,6 +2010,16 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
}
}
+ if (FLAG_vector_stores) {
+ MapHandleList transitioned_maps(target_receiver_maps.length());
+ CodeHandleList handlers(target_receiver_maps.length());
+ PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
+ &target_receiver_maps, &transitioned_maps, &handlers, store_mode,
+ language_mode());
+ ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
+ return null_handle;
+ }
+
return PropertyICCompiler::ComputeKeyedStorePolymorphic(
&target_receiver_maps, store_mode, language_mode());
}
@@ -1972,22 +2028,20 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
Handle<Map> map, KeyedAccessStoreMode store_mode) {
switch (store_mode) {
- case STORE_TRANSITION_SMI_TO_OBJECT:
- case STORE_TRANSITION_DOUBLE_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
- return Map::TransitionElementsTo(map, FAST_ELEMENTS);
- case STORE_TRANSITION_SMI_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
- return Map::TransitionElementsTo(map, FAST_DOUBLE_ELEMENTS);
- case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- return Map::TransitionElementsTo(map, FAST_HOLEY_ELEMENTS);
- case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
+ case STORE_TRANSITION_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_TO_OBJECT: {
+ ElementsKind kind = IsFastHoleyElementsKind(map->elements_kind())
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ return Map::TransitionElementsTo(map, kind);
+ }
+ case STORE_TRANSITION_TO_DOUBLE:
+ case STORE_AND_GROW_TRANSITION_TO_DOUBLE: {
+ ElementsKind kind = IsFastHoleyElementsKind(map->elements_kind())
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
+ return Map::TransitionElementsTo(map, kind);
+ }
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
DCHECK(map->has_fixed_typed_array_elements());
// Fall through
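Aside (not part of the commit): the many SMI/DOUBLE/HOLEY-specific store modes collapse into four transition modes because the holeyness of the target kind can now be read off the source map. A minimal sketch of that derivation, assuming only the ElementsKind helpers already visible in the hunk above:

    // Illustrative only -- mirrors the switch in ComputeTransitionedMap.
    ElementsKind TargetElementsKind(Handle<Map> map,
                                    KeyedAccessStoreMode mode) {
      bool holey = IsFastHoleyElementsKind(map->elements_kind());
      switch (mode) {
        case STORE_TRANSITION_TO_OBJECT:
        case STORE_AND_GROW_TRANSITION_TO_OBJECT:
          return holey ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
        case STORE_TRANSITION_TO_DOUBLE:
        case STORE_AND_GROW_TRANSITION_TO_DOUBLE:
          return holey ? FAST_HOLEY_DOUBLE_ELEMENTS : FAST_DOUBLE_ELEMENTS;
        default:
          return map->elements_kind();  // no elements-kind change
      }
    }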
@@ -2023,26 +2077,14 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
- }
+ return STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
- }
+ return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
- }
+ return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
}
return STORE_AND_GROW_NO_TRANSITION;
@@ -2050,25 +2092,13 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
// Handle only in-bounds elements accesses.
if (receiver->HasFastSmiElements()) {
if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_TRANSITION_SMI_TO_DOUBLE;
- }
+ return STORE_TRANSITION_TO_DOUBLE;
} else if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_TRANSITION_SMI_TO_OBJECT;
- }
+ return STORE_TRANSITION_TO_OBJECT;
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_TRANSITION_DOUBLE_TO_OBJECT;
- }
+ return STORE_TRANSITION_TO_OBJECT;
}
}
if (!FLAG_trace_external_array_abuse &&
@@ -2085,6 +2115,44 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
}
+void KeyedStoreIC::ValidateStoreMode(Handle<Code> stub) {
+#ifdef DEBUG
+ DCHECK(!FLAG_vector_stores);
+ if (stub.is_null() || *stub == *megamorphic_stub() || *stub == *slow_stub()) {
+ return;
+ }
+
+ // Query the keyed store mode.
+ ExtraICState state = stub->extra_ic_state();
+ KeyedAccessStoreMode stub_mode = GetKeyedAccessStoreMode(state);
+
+ MapHandleList map_list;
+ stub->FindAllMaps(&map_list);
+ CodeHandleList list;
+ stub->FindHandlers(&list, map_list.length());
+ for (int i = 0; i < list.length(); i++) {
+ Handle<Code> handler = list.at(i);
+ CHECK(handler->is_handler());
+ CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
+ uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
+ // Ensure that we only see handlers we know have the store mode embedded.
+ CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
+ major_key == CodeStub::StoreFastElement ||
+ major_key == CodeStub::StoreElement ||
+ major_key == CodeStub::ElementsTransitionAndStore ||
+ *handler == *isolate()->builtins()->KeyedStoreIC_Slow());
+ // Ensure that the store mode matches that of the IC.
+ CHECK(major_key == CodeStub::NoCache ||
+ stub_mode == CommonStoreModeBits::decode(minor_key));
+ // The one exception is the keyed store slow builtin, which doesn't include
+ // store mode.
+ CHECK(major_key != CodeStub::NoCache ||
+ *handler == *isolate()->builtins()->KeyedStoreIC_Slow());
+ }
+#endif // DEBUG
+}
+
+
MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
Handle<Object> value) {
@@ -2148,23 +2216,47 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
}
+ Handle<Map> old_receiver_map;
+ bool sloppy_arguments_elements = false;
+ bool key_is_valid_index = false;
+ KeyedAccessStoreMode store_mode = STANDARD_STORE;
+ if (use_ic && object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ old_receiver_map = handle(receiver->map(), isolate());
+ sloppy_arguments_elements =
+ !is_sloppy(language_mode()) &&
+ receiver->elements()->map() ==
+ isolate()->heap()->sloppy_arguments_elements_map();
+ if (!sloppy_arguments_elements) {
+ key_is_valid_index = key->IsSmi() && Smi::cast(*key)->value() >= 0;
+ if (key_is_valid_index) {
+ uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
+ store_mode = GetStoreMode(receiver, index, value);
+ }
+ }
+ }
+
+ DCHECK(store_handle.is_null());
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), store_handle,
+ Runtime::SetObjectProperty(isolate(), object, key,
+ value, language_mode()),
+ Object);
+
if (use_ic) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->sloppy_arguments_elements_map() &&
- !is_sloppy(language_mode())) {
+ if (!old_receiver_map.is_null()) {
+ if (sloppy_arguments_elements) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
- } else if (key->IsSmi() && Smi::cast(*key)->value() >= 0) {
- uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
+ } else if (key_is_valid_index) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
// from fast path keyed stores.
- if (!receiver->map()->DictionaryElementsInPrototypeChainOnly()) {
- KeyedAccessStoreMode store_mode =
- GetStoreMode(receiver, index, value);
- stub = StoreElementStub(receiver, store_mode);
+ if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
+ stub = StoreElementStub(old_receiver_map, store_mode);
+
+ // Validate that the store_mode in the stub can also be derived
+ // from peeking in the code bits of the handlers.
+ if (!FLAG_vector_stores) ValidateStoreMode(stub);
} else {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype");
}
@@ -2176,14 +2268,6 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
}
- if (store_handle.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), store_handle,
- Runtime::SetObjectProperty(isolate(), object, key, value,
- language_mode()),
- Object);
- }
-
if (FLAG_vector_stores) {
if (!is_vector_set() || stub.is_null()) {
Code* megamorphic = *megamorphic_stub();
@@ -2213,73 +2297,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
-bool CallIC::DoCustomHandler(Handle<Object> function,
- const CallICState& callic_state) {
- DCHECK(FLAG_use_ic && function->IsJSFunction());
-
- // Are we the array function?
- Handle<JSFunction> array_function =
- Handle<JSFunction>(isolate()->native_context()->array_function());
- if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
- // Alter the slot.
- CallICNexus* nexus = casted_nexus<CallICNexus>();
- nexus->ConfigureMonomorphicArray();
-
- // Vector-based ICs have a different calling convention in optimized code
- // than full code so the correct stub has to be chosen.
- if (AddressIsOptimizedCode()) {
- CallIC_ArrayStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
- } else {
- CallIC_ArrayTrampolineStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
- }
-
- Handle<String> name;
- if (array_function->shared()->name()->IsString()) {
- name = Handle<String>(String::cast(array_function->shared()->name()),
- isolate());
- }
- TRACE_IC("CallIC", name);
- OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(),
- MONOMORPHIC);
- return true;
- }
- return false;
-}
-
-
-void CallIC::PatchMegamorphic(Handle<Object> function) {
- CallICState callic_state(target()->extra_ic_state());
-
- // We are going generic.
- CallICNexus* nexus = casted_nexus<CallICNexus>();
- nexus->ConfigureMegamorphic();
-
- // Vector-based ICs have a different calling convention in optimized code
- // than full code so the correct stub has to be chosen.
- if (AddressIsOptimizedCode()) {
- CallICStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
- } else {
- CallICTrampolineStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
- }
-
- Handle<Object> name = isolate()->factory()->empty_string();
- if (function->IsJSFunction()) {
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
- name = handle(js_function->shared()->name(), isolate());
- }
-
- TRACE_IC("CallIC", name);
- OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(),
- GENERIC);
-}
-
-
void CallIC::HandleMiss(Handle<Object> function) {
- CallICState callic_state(target()->extra_ic_state());
Handle<Object> name = isolate()->factory()->empty_string();
CallICNexus* nexus = casted_nexus<CallICNexus>();
Object* feedback = nexus->GetFeedback();
@@ -2287,25 +2305,22 @@ void CallIC::HandleMiss(Handle<Object> function) {
// Hand-coded MISS handling is easier if CallIC slots don't contain smis.
DCHECK(!feedback->IsSmi());
- if (feedback->IsWeakCell() || !function->IsJSFunction()) {
+ if (feedback->IsWeakCell() || !function->IsJSFunction() ||
+ feedback->IsAllocationSite()) {
// We are going generic.
nexus->ConfigureMegamorphic();
} else {
- // The feedback is either uninitialized or an allocation site.
- // It might be an allocation site because if we re-compile the full code
- // to add deoptimization support, we call with the default call-ic, and
- // merely need to patch the target to match the feedback.
- // TODO(mvstanton): the better approach is to dispense with patching
- // altogether, which is in progress.
- DCHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()) ||
- feedback->IsAllocationSite());
-
- // Do we want to install a custom handler?
- if (FLAG_use_ic && DoCustomHandler(function, callic_state)) {
- return;
- }
+ DCHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()));
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
- nexus->ConfigureMonomorphic(Handle<JSFunction>::cast(function));
+ Handle<JSFunction> array_function =
+ Handle<JSFunction>(isolate()->native_context()->array_function());
+ if (array_function.is_identical_to(js_function)) {
+ // Alter the slot.
+ nexus->ConfigureMonomorphicArray();
+ } else {
+ nexus->ConfigureMonomorphic(js_function);
+ }
}
if (function->IsJSFunction()) {
@@ -2342,22 +2357,6 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
}
-RUNTIME_FUNCTION(Runtime_CallIC_Customization_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- Handle<Object> function = args.at<Object>(0);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
- Handle<Smi> slot = args.at<Smi>(2);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- CallICNexus nexus(vector, vector_slot);
- // A miss on a custom call ic always results in going megamorphic.
- CallIC ic(isolate, &nexus);
- ic.PatchMegamorphic(function);
- return *function;
-}
-
-
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
@@ -2373,13 +2372,14 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
- if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::LOAD_IC) {
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
} else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
+ vector->GetKind(vector_slot));
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2439,18 +2439,19 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
Handle<Object> result;
if (FLAG_vector_stores) {
- DCHECK(args.length() == 5);
+ DCHECK(args.length() == 5 || args.length() == 6);
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- if (vector->GetKind(vector_slot) == Code::STORE_IC) {
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
StoreICNexus nexus(vector, vector_slot);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Store(receiver, key, value));
} else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_STORE_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2481,14 +2482,15 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- if (vector->GetKind(vector_slot) == Code::STORE_IC) {
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
StoreICNexus nexus(vector, vector_slot);
StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Store(receiver, key, value));
} else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_STORE_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2567,12 +2569,19 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3));
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- LanguageMode language_mode = ic.language_mode();
+ LanguageMode language_mode;
+ if (FLAG_vector_stores) {
+ StoreICNexus nexus(isolate);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
+ } else {
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ language_mode = ic.language_mode();
+ }
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2583,12 +2592,19 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3));
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- LanguageMode language_mode = ic.language_mode();
+ LanguageMode language_mode;
+ if (FLAG_vector_stores) {
+ KeyedStoreICNexus nexus(isolate);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
+ } else {
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ language_mode = ic.language_mode();
+ }
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2600,14 +2616,20 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ DCHECK(args.length() == (FLAG_vector_stores ? 6 : 4));
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- Handle<Map> map = args.at<Map>(3);
-
- LanguageMode language_mode = ic.language_mode();
+ Handle<Map> map = args.at<Map>(FLAG_vector_stores ? 5 : 3);
+ LanguageMode language_mode;
+ if (FLAG_vector_stores) {
+ KeyedStoreICNexus nexus(isolate);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
+ } else {
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ language_mode = ic.language_mode();
+ }
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
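Aside (not part of the commit): under FLAG_vector_stores this runtime entry is called with six arguments, but the hunk above only reads the map at index 5. The layout sketched below assumes the slot and vector sit at indices 3 and 4, by analogy with Runtime_StoreIC_Miss; treat those two positions as an assumption.

    // Illustrative only: reading back the assumed six-argument layout.
    Handle<Object> object = args.at<Object>(0);
    Handle<Object> key = args.at<Object>(1);
    Handle<Object> value = args.at<Object>(2);
    Handle<Smi> slot = args.at<Smi>(3);                                  // assumed
    Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);  // assumed
    Handle<Map> map = args.at<Map>(5);  // index 3 in the legacy 4-argument call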
@@ -2626,13 +2648,67 @@ MaybeHandle<Object> BinaryOpIC::Transition(
BinaryOpICState state(isolate(), target()->extra_ic_state());
// Compute the actual result using the builtin for the binary operation.
- Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
- TokenToJSBuiltin(state.op(), state.strength()));
- Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Execution::Call(isolate(), function, left, 1, &right),
- Object);
+ switch (state.op()) {
+ default:
+ UNREACHABLE();
+ case Token::ADD:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Add(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::SUB:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Subtract(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::MUL:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Multiply(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::DIV:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Divide(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::MOD:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Modulus(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::BIT_OR:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::BitwiseOr(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::BIT_AND:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::BitwiseAnd(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::BIT_XOR:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::BitwiseXor(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::SAR:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::ShiftRight(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::SHR:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::ShiftRightLogical(isolate(), left, right, state.strength()),
+ Object);
+ break;
+ case Token::SHL:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::ShiftLeft(isolate(), left, right, state.strength()), Object);
+ break;
+ }
// Do not try to update the target if the code was marked for lazy
// deoptimization. (Since we do not relocate addresses in these
@@ -2866,42 +2942,6 @@ RUNTIME_FUNCTION(Runtime_Unreachable) {
}
-Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op,
- Strength strength) {
- if (is_strong(strength)) {
- switch (op) {
- default: UNREACHABLE();
- case Token::ADD: return Builtins::ADD_STRONG;
- case Token::SUB: return Builtins::SUB_STRONG;
- case Token::MUL: return Builtins::MUL_STRONG;
- case Token::DIV: return Builtins::DIV_STRONG;
- case Token::MOD: return Builtins::MOD_STRONG;
- case Token::BIT_OR: return Builtins::BIT_OR_STRONG;
- case Token::BIT_AND: return Builtins::BIT_AND_STRONG;
- case Token::BIT_XOR: return Builtins::BIT_XOR_STRONG;
- case Token::SAR: return Builtins::SAR_STRONG;
- case Token::SHR: return Builtins::SHR_STRONG;
- case Token::SHL: return Builtins::SHL_STRONG;
- }
- } else {
- switch (op) {
- default: UNREACHABLE();
- case Token::ADD: return Builtins::ADD;
- case Token::SUB: return Builtins::SUB;
- case Token::MUL: return Builtins::MUL;
- case Token::DIV: return Builtins::DIV;
- case Token::MOD: return Builtins::MOD;
- case Token::BIT_OR: return Builtins::BIT_OR;
- case Token::BIT_AND: return Builtins::BIT_AND;
- case Token::BIT_XOR: return Builtins::BIT_XOR;
- case Token::SAR: return Builtins::SAR;
- case Token::SHR: return Builtins::SHR;
- case Token::SHL: return Builtins::SHL;
- }
- }
-}
-
-
Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
ToBooleanStub stub(isolate(), target()->extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
@@ -3073,13 +3113,14 @@ RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
- if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::LOAD_IC) {
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
} else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
+ vector->GetKind(vector_slot));
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index ee5fd261dc..d65d7a8c1b 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -122,6 +122,11 @@ class IC {
// Configure the vector for POLYMORPHIC.
void ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
CodeHandleList* handlers);
+ // Configure the vector for POLYMORPHIC with transitions (only for element
+ // keyed stores).
+ void ConfigureVectorState(MapHandleList* maps,
+ MapHandleList* transitioned_maps,
+ CodeHandleList* handlers);
char TransitionMarkFromState(IC::State state);
void TraceIC(const char* type, Handle<Object> name);
@@ -280,14 +285,8 @@ class CallIC : public IC {
DCHECK(nexus != NULL);
}
- void PatchMegamorphic(Handle<Object> function);
-
void HandleMiss(Handle<Object> function);
- // Returns true if a custom handler was installed.
- bool DoCustomHandler(Handle<Object> function,
- const CallICState& callic_state);
-
// Code generator routines.
static Handle<Code> initialize_stub(Isolate* isolate, int argc,
CallICState::CallType call_type);
@@ -324,6 +323,7 @@ class LoadIC : public IC {
}
// Code generator routines.
+
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm,
@@ -525,9 +525,9 @@ class KeyedStoreIC : public StoreIC {
// When more language modes are added, these BitFields need to move too.
STATIC_ASSERT(i::LANGUAGE_END == 3);
class ExtraICStateKeyedAccessStoreMode
- : public BitField<KeyedAccessStoreMode, 3, 4> {}; // NOLINT
+ : public BitField<KeyedAccessStoreMode, 3, 3> {}; // NOLINT
- class IcCheckTypeField : public BitField<IcCheckType, 7, 1> {};
+ class IcCheckTypeField : public BitField<IcCheckType, 6, 1> {};
static ExtraICState ComputeExtraICState(LanguageMode flag,
KeyedAccessStoreMode mode) {
@@ -538,10 +538,17 @@ class KeyedStoreIC : public StoreIC {
static KeyedAccessStoreMode GetKeyedAccessStoreMode(
ExtraICState extra_state) {
+ DCHECK(!FLAG_vector_stores);
return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
}
+ KeyedAccessStoreMode GetKeyedAccessStoreMode() {
+ DCHECK(FLAG_vector_stores);
+ return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+ }
+
static IcCheckType GetKeyType(ExtraICState extra_state) {
+ DCHECK(!FLAG_vector_stores);
return IcCheckTypeField::decode(extra_state);
}
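Aside (not part of the commit): with vector stores the keyed store mode is carried by the KeyedStoreICNexus in the feedback vector rather than by the code object's extra IC state, so callers choose the source by flag, exactly as the StoreElementStub hunk earlier in this patch does:

    // Illustrative only -- same selection as in KeyedStoreIC::StoreElementStub.
    KeyedAccessStoreMode mode =
        FLAG_vector_stores
            ? GetKeyedAccessStoreMode()                               // nexus
            : GetKeyedAccessStoreMode(target()->extra_ic_state());    // code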
@@ -571,6 +578,8 @@ class KeyedStoreIC : public StoreIC {
static Handle<Code> initialize_stub_in_optimized_code(
Isolate* isolate, LanguageMode language_mode, State initialization_state);
+ static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
+ ExtraICState extra_state);
static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
@@ -587,7 +596,7 @@ class KeyedStoreIC : public StoreIC {
}
}
- Handle<Code> StoreElementStub(Handle<JSObject> receiver,
+ Handle<Code> StoreElementStub(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
private:
@@ -599,6 +608,8 @@ class KeyedStoreIC : public StoreIC {
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
+ void ValidateStoreMode(Handle<Code> stub);
+
friend class IC;
};
@@ -608,9 +619,6 @@ class BinaryOpIC : public IC {
public:
explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
- static Builtins::JavaScript TokenToJSBuiltin(Token::Value op,
- Strength strength);
-
MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
Handle<Object> left,
Handle<Object> right) WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
index 9aba385497..f2f6c62c71 100644
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(a3.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, t0, t1};
return registers;
}
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index e3d4ae3adc..8c135e4088 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -295,25 +296,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -556,6 +567,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
index 80f5c4783f..64f1662880 100644
--- a/deps/v8/src/ic/mips/ic-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc
@@ -100,7 +100,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ Branch(&next_map, ne, match, Operand(map_reg));
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index a673dbf254..a1a118135b 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -681,7 +681,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, a3, t0, t1, t2));
+ DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -693,7 +693,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, a3, t0, t1, t2);
+ receiver, key, t1, t2, t4, t5);
// Cache miss.
__ Branch(&miss);
@@ -794,20 +794,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = a3;
+ Register dictionary = t1;
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));
DCHECK(value.is(a0));
+ DCHECK(VectorStoreICDescriptor::VectorRegister().is(a3));
+ DCHECK(VectorStoreICDescriptor::SlotRegister().is(t0));
__ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, t0, t1);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+ __ IncrementCounter(counters->store_normal_hit(), 1, t2, t5);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+ __ IncrementCounter(counters->store_normal_miss(), 1, t2, t5);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
index 12cacc8f4f..1a9897e8f3 100644
--- a/deps/v8/src/ic/mips/stub-cache-mips.cc
+++ b/deps/v8/src/ic/mips/stub-cache-mips.cc
@@ -116,8 +116,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
index a2e7aed4dc..500a6d65c7 100644
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(a3.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, a4, a5};
return registers;
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 49e9265aee..9c3a5b3e70 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -296,25 +297,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -350,7 +361,7 @@ void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
- __ ld(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ lwu(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
__ And(at, scratch, Operand(Map::Deprecated::kMask));
__ Branch(miss, ne, at, Operand(zero_reg));
}
@@ -557,6 +568,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
index a834430e1e..8cdd8f03bc 100644
--- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -100,7 +100,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ Branch(&next_map, ne, match, Operand(map_reg));
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 6f3916dd2e..0d7af56071 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -551,7 +551,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// We have to see if the double version of the hole is present. If so
// go to the runtime.
__ Daddu(address, elements,
- Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) -
+ Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
kHeapObjectTag));
__ SmiScale(at, key, kPointerSizeLog2);
__ daddu(address, address, at);
@@ -677,9 +677,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
if (FLAG_vector_stores) {
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, a3, a4, a5, a6));
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+
+ DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -691,7 +692,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, a3, a4, a5, a6);
+ receiver, key, a5, a6, a7, t0);
// Cache miss.
__ Branch(&miss);
@@ -792,18 +793,20 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = a3;
- DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5));
+ Register dictionary = a5;
+ DCHECK(!AreAliased(
+ value, receiver, name, VectorStoreICDescriptor::VectorRegister(),
+ VectorStoreICDescriptor::SlotRegister(), dictionary, a6, a7));
__ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, a3, name, value, a4, a5);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
+ __ IncrementCounter(counters->store_normal_hit(), 1, a6, a7);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
+ __ IncrementCounter(counters->store_normal_miss(), 1, a6, a7);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
index b1ec640719..4ab9f8e5b2 100644
--- a/deps/v8/src/ic/mips64/stub-cache-mips64.cc
+++ b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
@@ -119,8 +119,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
index 2021b80fd8..fcbbc66121 100644
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(r6.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || r6.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r6, r7, r8};
return registers;
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 9ec2f5ff3f..52efcf91a4 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -304,25 +305,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -564,6 +575,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
index 59054b2058..578b73d40e 100644
--- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -112,7 +112,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ bne(&next_map);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 7cac3058bb..09117179ea 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -711,7 +711,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
+ DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -723,7 +723,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, r6, r7, r8, r9);
+ receiver, key, r8, r9, r10, r11);
// Cache miss.
__ b(&miss);
@@ -806,20 +806,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = r6;
+ Register dictionary = r8;
DCHECK(receiver.is(r4));
DCHECK(name.is(r5));
DCHECK(value.is(r3));
+ DCHECK(VectorStoreICDescriptor::VectorRegister().is(r6));
+ DCHECK(VectorStoreICDescriptor::SlotRegister().is(r7));
__ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r7, r8);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, r7, r8);
+ __ IncrementCounter(counters->store_normal_hit(), 1, r9, r10);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r7, r8);
+ __ IncrementCounter(counters->store_normal_miss(), 1, r9, r10);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
index ed703fb21e..6030b2cbc8 100644
--- a/deps/v8/src/ic/ppc/stub-cache-ppc.cc
+++ b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
@@ -137,8 +137,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index 63e60f0b91..85b44ef475 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -31,7 +31,8 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(rbx.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores ||
+ rbx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, rbx, rdi, r8};
return registers;
}
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 920d06c541..1490c921fc 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -303,13 +304,26 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+
+ __ PopReturnAddressTo(r11);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(value);
+ __ Push(slot);
+ __ Push(vector);
+ __ PushReturnAddressFrom(r11);
+ } else {
+ DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
- __ PopReturnAddressTo(rbx);
- __ Push(receiver);
- __ Push(name);
- __ Push(value);
- __ PushReturnAddressFrom(rbx);
+ __ PopReturnAddressTo(rbx);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(value);
+ __ PushReturnAddressFrom(rbx);
+ }
}
@@ -318,7 +332,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
@@ -327,7 +341,8 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -574,6 +589,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
index d5e548412c..fd92cca570 100644
--- a/deps/v8/src/ic/x64/ic-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc
@@ -55,7 +55,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 8d334809cb..ff74a965e4 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -582,7 +582,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, rbx, no_reg);
+ receiver, key, r9, no_reg);
// Cache miss.
__ jmp(&miss);
@@ -735,8 +735,13 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
+ if (FLAG_vector_stores) {
+ // This shouldn't be called.
+ __ int3();
+ return;
+ }
+ // The return address is on the stack.
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -785,7 +790,10 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = rbx;
+ Register dictionary = r11;
+ DCHECK(!FLAG_vector_stores ||
+ !AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(),
+ VectorStoreICDescriptor::SlotRegister()));
Label miss;
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
index 3908018927..9a9dfe9f4b 100644
--- a/deps/v8/src/ic/x64/stub-cache-x64.cc
+++ b/deps/v8/src/ic/x64/stub-cache-x64.cc
@@ -110,9 +110,16 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// the vector and slot registers, which need to be preserved for a handler
// call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, scratch));
+ if (ic_kind == Code::LOAD_IC || ic_kind == Code::KEYED_LOAD_IC) {
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch));
+ } else {
+ DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch));
+ }
}
#endif
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
index bdcbb166b9..a80c649e45 100644
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -30,7 +30,8 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(ebx.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores ||
+ ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index c0d5fd8234..d9f7e8012d 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -303,13 +304,24 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // which contains the return address.
+ } else {
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+ }
}
@@ -318,7 +330,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
@@ -327,7 +339,8 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -351,10 +364,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
Register scratch) {
- // Get the return address, push the argument and then continue.
- __ pop(scratch);
+  // current              after GeneratePushMap
+  // -------------------------------------------------
+  //    ret addr           slot
+  //    vector             vector
+  // sp-> slot             map
+  //                      sp-> ret addr
+ //
+ __ xchg(map_reg, Operand(esp, 0));
+ __ xchg(map_reg, Operand(esp, 2 * kPointerSize));
__ push(map_reg);
- __ push(scratch);
}
@@ -574,6 +593,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
index 4d5fc6a712..d29e32108b 100644
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -112,7 +112,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
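
[Note on the hunk above] Each polymorphic case keeps its transition map behind a WeakCell (Map::WeakCellForMap), so the compiled stub does not keep otherwise-dead maps alive; LoadWeakValue loads the cell's value into the register and branches to the miss path if the cell has been cleared by the GC. A rough C++ sketch of that check, with WeakCell reduced to a nullable pointer and LoadWeakValueOrMiss as a purely hypothetical stand-in for the macro-assembler helper:

    #include <cstdio>

    struct Map { int instance_type; };

    // Illustrative stand-in for a WeakCell: the GC clears `value` when the
    // referenced map dies, instead of the stub keeping it alive.
    struct WeakCell { Map* value; };

    // Mirrors the shape of __ LoadWeakValue(reg, cell, &miss): either
    // produce the map or take the miss path.
    Map* LoadWeakValueOrMiss(const WeakCell& cell, bool* miss) {
      if (cell.value == nullptr) { *miss = true; return nullptr; }
      return cell.value;
    }

    int main() {
      Map live_map{7};
      WeakCell live{&live_map}, cleared{nullptr};
      bool miss = false;
      Map* m = LoadWeakValueOrMiss(live, &miss);
      std::printf("live: miss=%d type=%d\n", miss, m ? m->instance_type : -1);
      miss = false;
      LoadWeakValueOrMiss(cleared, &miss);
      std::printf("cleared: miss=%d\n", miss);
      return 0;
    }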
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index f9a94bc5b8..53e7a5ca0c 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -577,7 +577,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, ebx, no_reg);
+ receiver, key, edi, no_reg);
if (FLAG_vector_stores) {
__ pop(VectorStoreICDescriptor::VectorRegister());
@@ -734,6 +734,12 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ // This shouldn't be called.
+ __ int3();
+ return;
+ }
+
// Return address is on the stack.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -787,22 +793,32 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = ebx;
-
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
// A lot of registers are needed for storing to slow case
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(receiver);
+ if (FLAG_vector_stores) {
+ __ push(vector);
+ __ push(slot);
+ }
+
+ Register dictionary = ebx;
+ __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
receiver, edi);
- __ Drop(1);
+ __ Drop(FLAG_vector_stores ? 3 : 1);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
+ if (FLAG_vector_stores) {
+ __ pop(slot);
+ __ pop(vector);
+ }
__ pop(receiver);
__ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
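
[Note on the hunk above] GenerateDictionaryStore needs more registers than ia32/x87 has free once the vector-store descriptor claims registers for the slot and vector, so the diff spills them (together with the receiver) across the dictionary store: the fast path discards all three with Drop(3), and the miss path pops them back before jumping to the miss handler. A trivial sketch of that save/restore bracket, where ClobberEverything is a hypothetical stand-in for GenerateDictionaryStore:

    #include <cassert>
    #include <vector>

    // Hypothetical stand-in for GenerateDictionaryStore: assume it may
    // clobber every value we still need on the miss path.
    void ClobberEverything(int* receiver, int* vector, int* slot) {
      *receiver = *vector = *slot = -1;
    }

    int main() {
      int receiver = 1, vector = 2, slot = 3;
      std::vector<int> stack;

      // __ push(receiver); __ push(vector); __ push(slot);
      stack.push_back(receiver);
      stack.push_back(vector);
      stack.push_back(slot);

      ClobberEverything(&receiver, &vector, &slot);

      // Miss path: __ pop(slot); __ pop(vector); __ pop(receiver);
      slot = stack.back();     stack.pop_back();
      vector = stack.back();   stack.pop_back();
      receiver = stack.back(); stack.pop_back();

      assert(receiver == 1 && vector == 2 && slot == 3);
      return 0;
    }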
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
index d76d0a26b7..2522223ead 100644
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -23,8 +23,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
Label miss;
+ bool is_vector_store =
+ IC::ICUseVector(ic_kind) &&
+ (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
@@ -56,19 +61,29 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (IC::ICUseVector(ic_kind)) {
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ if (is_vector_store) {
+ // The overlap here is rather embarrassing. One does what one must.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(extra.is(VectorStoreICDescriptor::SlotRegister()));
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ pop(vector);
+ __ mov(Operand::StaticVariable(virtual_register), extra);
+ __ pop(extra); // Pop "slot".
+ // Jump to the first instruction in the code stub.
+ __ jmp(Operand::StaticVariable(virtual_register));
+ } else {
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
}
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
-
__ bind(&miss);
} else {
+ DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
+
// Save the offset on the stack.
__ push(offset);
@@ -105,21 +120,22 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
- if (IC::ICUseVector(ic_kind)) {
+ // Jump to the first instruction in the code stub.
+ if (is_vector_store) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!offset.is(vector) && !offset.is(slot));
-
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(offset.is(VectorStoreICDescriptor::SlotRegister()));
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(Operand::StaticVariable(virtual_register), offset);
__ pop(vector);
- __ pop(slot);
+ __ pop(offset); // Pop "slot".
+ __ jmp(Operand::StaticVariable(virtual_register));
+ } else {
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
}
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
// Pop at miss.
__ bind(&miss);
__ pop(offset);
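
[Note on the stub-cache hunks above] When a vector store probe hits, every usable ia32/x87 register is already claimed by the store descriptor (receiver, name, value, slot, vector), so the probe cannot hold the handler's entry address in a register while it also pops the vector and slot off the stack. The diff therefore parks the computed entry address in an isolate-level memory cell (vector_store_virtual_register) and jumps indirectly through it after the pops. A standalone sketch of the idea, with the "virtual register" reduced to a file-scope function pointer (illustrative only, not V8's representation):

    #include <cstdio>

    // Stand-in for ExternalReference::vector_store_virtual_register: a fixed
    // memory slot the generated code can address without tying up a register.
    static void (*virtual_register)() = nullptr;

    static void Handler() { std::printf("handler entry reached\n"); }

    int main() {
      // All "registers" are busy, so stash the handler entry in memory ...
      virtual_register = &Handler;  // __ mov(Operand::StaticVariable(...), extra)

      // ... pop the vector and slot here (freeing their registers) ...

      // ... then jump through the memory slot instead of a register.
      virtual_register();           // __ jmp(Operand::StaticVariable(...))
      return 0;
    }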