author     Michaël Zasso <targos@protonmail.com>  2017-09-12 11:34:59 +0200
committer  Anna Henningsen <anna@addaleax.net>    2017-09-13 16:15:18 +0200
commit     d82e1075dbc2cec2d6598ade10c1f43805f690fd (patch)
tree       ccd242b9b491dfc341d1099fe11b0ef528839877 /deps/v8/src/ic
parent     b4b7ac6ae811b2b5a3082468115dfb5a5246fe3f (diff)
deps: update V8 to 6.1.534.36
PR-URL: https://github.com/nodejs/node/pull/14730
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src/ic')
-rw-r--r--  deps/v8/src/ic/OWNERS                            |    2
-rw-r--r--  deps/v8/src/ic/accessor-assembler.cc             |   82
-rw-r--r--  deps/v8/src/ic/accessor-assembler.h              |    4
-rw-r--r--  deps/v8/src/ic/arm/handler-compiler-arm.cc       |   43
-rw-r--r--  deps/v8/src/ic/arm/ic-arm.cc                     |    1
-rw-r--r--  deps/v8/src/ic/arm64/handler-compiler-arm64.cc   |   11
-rw-r--r--  deps/v8/src/ic/arm64/ic-arm64.cc                 |    1
-rw-r--r--  deps/v8/src/ic/binary-op-assembler.cc            |  813
-rw-r--r--  deps/v8/src/ic/binary-op-assembler.h             |   26
-rw-r--r--  deps/v8/src/ic/call-optimization.cc              |   11
-rw-r--r--  deps/v8/src/ic/call-optimization.h               |    3
-rw-r--r--  deps/v8/src/ic/handler-compiler.cc               |    2
-rw-r--r--  deps/v8/src/ic/handler-compiler.h                |    1
-rw-r--r--  deps/v8/src/ic/handler-configuration-inl.h       |    1
-rw-r--r--  deps/v8/src/ic/ia32/handler-compiler-ia32.cc     |   10
-rw-r--r--  deps/v8/src/ic/ia32/ic-ia32.cc                   |    1
-rw-r--r--  deps/v8/src/ic/ic-inl.h                          |    5
-rw-r--r--  deps/v8/src/ic/ic-state.cc                       |  363
-rw-r--r--  deps/v8/src/ic/ic-state.h                        |  129
-rw-r--r--  deps/v8/src/ic/ic-stats.cc                       |    8
-rw-r--r--  deps/v8/src/ic/ic.cc                             |  306
-rw-r--r--  deps/v8/src/ic/ic.h                              |   24
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc            |   58
-rw-r--r--  deps/v8/src/ic/mips/handler-compiler-mips.cc     |   15
-rw-r--r--  deps/v8/src/ic/mips/ic-mips.cc                   |    1
-rw-r--r--  deps/v8/src/ic/mips64/handler-compiler-mips64.cc |   14
-rw-r--r--  deps/v8/src/ic/mips64/ic-mips64.cc               |    1
-rw-r--r--  deps/v8/src/ic/ppc/handler-compiler-ppc.cc       |   15
-rw-r--r--  deps/v8/src/ic/ppc/ic-ppc.cc                     |    1
-rw-r--r--  deps/v8/src/ic/s390/handler-compiler-s390.cc     |   14
-rw-r--r--  deps/v8/src/ic/s390/ic-s390.cc                   |    1
-rw-r--r--  deps/v8/src/ic/stub-cache.cc                     |   15
-rw-r--r--  deps/v8/src/ic/stub-cache.h                      |    1
-rw-r--r--  deps/v8/src/ic/x64/handler-compiler-x64.cc       |   11
-rw-r--r--  deps/v8/src/ic/x64/ic-x64.cc                     |    1
-rw-r--r--  deps/v8/src/ic/x87/OWNERS                        |    2
-rw-r--r--  deps/v8/src/ic/x87/access-compiler-x87.cc        |   40
-rw-r--r--  deps/v8/src/ic/x87/handler-compiler-x87.cc       |  456
-rw-r--r--  deps/v8/src/ic/x87/ic-x87.cc                     |   85
39 files changed, 450 insertions(+), 2128 deletions(-)
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 3581afece3..fa1291f6f3 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -5,3 +5,5 @@ ishell@chromium.org
jkummerow@chromium.org
mvstanton@chromium.org
verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 6508169558..f3d9f09ca4 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -34,8 +34,7 @@ Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
// Adding |header_size| with a separate IntPtrAdd rather than passing it
// into ElementOffsetFromIndex() allows it to be folded into a single
// [base, index, offset] indirect memory access on x64.
- Node* offset =
- ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
+ Node* offset = ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, SMI_PARAMETERS);
Node* feedback = Load(MachineType::AnyTagged(), vector,
IntPtrAdd(offset, IntPtrConstant(header_size)));
@@ -250,7 +249,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
GotoIfNot(
WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ SmiConstant(Isolate::kProtectorValid)),
miss);
exit_point->Return(UndefinedConstant());
}
@@ -408,8 +407,7 @@ void AccessorAssembler::HandleLoadICProtoHandlerCase(
GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
&validity_cell_check_done);
Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value,
- SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
miss);
Goto(&validity_cell_check_done);
@@ -712,8 +710,7 @@ void AccessorAssembler::HandleStoreICElementHandlerCase(
Comment("HandleStoreICElementHandlerCase");
Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value,
- SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
miss);
Node* code_handler = LoadObjectField(handler, Tuple2::kValue2Offset);
@@ -742,8 +739,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
&validity_cell_check_done);
Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value,
- SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
miss);
Goto(&validity_cell_check_done);
@@ -1062,7 +1058,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
// capacity even for a map that thinks it doesn't have any unused fields.
// Perform a bounds check to see if we actually have to grow the array.
Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
- Node* size = ElementOffsetFromIndex(length, FAST_ELEMENTS, mode,
+ Node* size = ElementOffsetFromIndex(length, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize);
GotoIf(UintPtrLessThan(offset, size), &done);
@@ -1070,9 +1066,8 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
// Grow properties array.
- ElementsKind kind = FAST_ELEMENTS;
DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
- FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
+ FixedArrayBase::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
// The size of a new properties backing store is guaranteed to be small
// enough that the new backing store will be allocated in new space.
CSA_ASSERT(this,
@@ -1082,17 +1077,16 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
kMaxNumberOfDescriptors + JSObject::kFieldsAdded, mode),
mode));
- Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
+ Node* new_properties = AllocatePropertyArray(new_capacity, mode);
- FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
- Heap::kUndefinedValueRootIndex, mode);
+ FillPropertyArrayWithUndefined(new_properties, length, new_capacity, mode);
// |new_properties| is guaranteed to be in new space, so we can skip
// the write barrier.
- CopyFixedArrayElements(kind, properties, new_properties, length,
- SKIP_WRITE_BARRIER, mode);
+ CopyPropertyArrayValues(properties, new_properties, length,
+ SKIP_WRITE_BARRIER, mode);
- StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
+ StoreObjectField(object, JSObject::kPropertiesOrHashOffset, new_properties);
Comment("] Extend storage");
Goto(&done);
@@ -1195,20 +1189,20 @@ void AccessorAssembler::EmitElementLoad(
EmitFastElementsBoundsCheck(object, elements, intptr_index,
is_jsarray_condition, out_of_bounds);
int32_t kinds[] = {// Handled by if_fast_packed.
- FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
// Handled by if_fast_holey.
- FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS,
+ HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
// Handled by if_fast_double.
- FAST_DOUBLE_ELEMENTS,
+ PACKED_DOUBLE_ELEMENTS,
// Handled by if_fast_holey_double.
- FAST_HOLEY_DOUBLE_ELEMENTS};
+ HOLEY_DOUBLE_ELEMENTS};
Label* labels[] = {// FAST_{SMI,}_ELEMENTS
&if_fast_packed, &if_fast_packed,
// FAST_HOLEY_{SMI,}_ELEMENTS
&if_fast_holey, &if_fast_holey,
- // FAST_DOUBLE_ELEMENTS
+ // PACKED_DOUBLE_ELEMENTS
&if_fast_double,
- // FAST_HOLEY_DOUBLE_ELEMENTS
+ // HOLEY_DOUBLE_ELEMENTS
&if_fast_holey_double};
Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
arraysize(kinds));
@@ -1469,7 +1463,7 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
}
void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
- Node* instance_type, Node* key,
+ Node* instance_type,
const LoadICParameters* p,
Label* slow,
UseStubCache use_stub_cache) {
@@ -1502,7 +1496,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
VARIABLE(var_name_index, MachineType::PointerRepresentation());
Label* notfound =
use_stub_cache == kUseStubCache ? &stub_cache : &lookup_prototype_chain;
- DescriptorLookup(key, descriptors, bitfield3, &if_descriptor_found,
+ DescriptorLookup(p->name, descriptors, bitfield3, &if_descriptor_found,
&var_name_index, notfound);
BIND(&if_descriptor_found);
@@ -1518,7 +1512,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
Comment("stub cache probe for fast property load");
VARIABLE(var_handler, MachineRepresentation::kTagged);
Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
+ TryProbeStubCache(isolate()->load_stub_cache(), receiver, p->name,
&found_handler, &var_handler, &stub_cache_miss);
BIND(&found_handler);
{
@@ -1544,7 +1538,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
VARIABLE(var_name_index, MachineType::PointerRepresentation());
Label dictionary_found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+ NameDictionaryLookup<NameDictionary>(properties, p->name, &dictionary_found,
&var_name_index,
&lookup_prototype_chain);
BIND(&dictionary_found);
@@ -1574,7 +1568,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
var_holder_map.Bind(receiver_map);
var_holder_instance_type.Bind(instance_type);
// Private symbols must not be looked up on the prototype chain.
- GotoIf(IsPrivateSymbol(key), &return_undefined);
+ GotoIf(IsPrivateSymbol(p->name), &return_undefined);
Goto(&loop);
BIND(&loop);
{
@@ -1590,7 +1584,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
var_holder_instance_type.Bind(proto_instance_type);
Label next_proto(this), return_value(this, &var_value), goto_slow(this);
TryGetOwnProperty(p->context, receiver, proto, proto_map,
- proto_instance_type, key, &return_value, &var_value,
+ proto_instance_type, p->name, &return_value, &var_value,
&next_proto, &goto_slow);
// This trampoline and the next are required to appease Turbofan's
@@ -1776,7 +1770,8 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
Comment("LoadIC_BytecodeHandler_noninlined");
// Call into the stub that implements the non-inlined parts of LoadIC.
- Callable ic = CodeFactory::LoadICInOptimizedCode_Noninlined(isolate());
+ Callable ic =
+ Builtins::CallableFor(isolate(), Builtins::kLoadIC_Noninlined);
Node* code_target = HeapConstant(ic.code());
exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context,
p->receiver, p->name, p->slot, p->vector);
@@ -1859,9 +1854,9 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
GotoIfNot(
WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
miss);
- exit_point->ReturnCallStub(CodeFactory::LoadIC_Uninitialized(isolate()),
- p->context, p->receiver, p->name, p->slot,
- p->vector);
+ exit_point->ReturnCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
+ p->context, p->receiver, p->name, p->slot, p->vector);
}
}
@@ -1891,7 +1886,7 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
BIND(&not_function_prototype);
}
- GenericPropertyLoad(receiver, receiver_map, instance_type, p->name, p, &miss,
+ GenericPropertyLoad(receiver, receiver_map, instance_type, p, &miss,
kDontUseStubCache);
BIND(&miss);
@@ -2061,8 +2056,9 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
&try_polymorphic_name);
// TODO(jkummerow): Inline this? Or some of it?
- TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
- p->receiver, p->name, p->slot, p->vector);
+ TailCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC_Megamorphic),
+ p->context, p->receiver, p->name, p->slot, p->vector);
}
BIND(&try_polymorphic_name);
{
@@ -2106,8 +2102,9 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
BIND(&if_unique_name);
{
- GenericPropertyLoad(receiver, receiver_map, instance_type,
- var_unique.value(), p, &slow);
+ LoadICParameters pp = *p;
+ pp.name = var_unique.value();
+ GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, &slow);
}
BIND(&if_notunique);
@@ -2326,8 +2323,7 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
- TailCallStub(callable, context, receiver, name, slot, vector);
+ TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
}
void AccessorAssembler::GenerateLoadICProtoArray(
@@ -2416,8 +2412,8 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
- TailCallStub(callable, context, receiver, name, slot, vector);
+ TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
+ vector);
}
void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 5644fa8ae8..c771b2ff5a 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -182,8 +182,8 @@ class AccessorAssembler : public CodeStubAssembler {
enum UseStubCache { kUseStubCache, kDontUseStubCache };
void GenericPropertyLoad(Node* receiver, Node* receiver_map,
- Node* instance_type, Node* key,
- const LoadICParameters* p, Label* slow,
+ Node* instance_type, const LoadICParameters* p,
+ Label* slow,
UseStubCache use_stub_cache = kUseStubCache);
// Low-level helpers.
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 317a95146f..c17670d921 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -134,7 +134,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
@@ -143,8 +144,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ b(ne, miss_label);
// Restore the temporarily used register.
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -165,8 +166,7 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
+ __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
__ b(ne, miss);
}
@@ -199,9 +199,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -209,10 +207,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ ldr(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ ldr(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -402,17 +396,22 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(receiver()); // receiver
__ push(holder_reg);
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ mov(ip, Operand(callback));
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ mov(ip, Operand(cell));
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
+ __ mov(scratch, Operand(callback));
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ mov(scratch, Operand(cell));
+ }
+ __ push(scratch);
+ __ mov(scratch, Operand(name));
+ __ Push(scratch, value());
}
- __ push(ip);
- __ mov(ip, Operand(name));
- __ Push(ip, value());
__ Push(Smi::FromInt(language_mode));
// Do tail-call to the runtime system.
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 5cf9dc46ae..7eddd42298 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -29,7 +29,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index db6dc639a1..cc42c030e3 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -74,7 +74,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
__ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
@@ -134,9 +135,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Mov(holder, receiver);
@@ -144,10 +143,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ Ldr(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ Ldr(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ Ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index f77bb8af5b..8e9a7f5d2b 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -29,7 +29,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return al;
}
}
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index 29df4bf082..0690d8e528 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -13,7 +13,9 @@ using compiler::Node;
Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Node* rhs, Node* slot_id,
- Node* feedback_vector) {
+ Node* feedback_vector,
+ Node* function,
+ bool rhs_is_smi) {
// Shared entry for floating point addition.
Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred),
check_rhsisoddball(this, Label::kDeferred),
@@ -25,24 +27,51 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
VARIABLE(var_result, MachineRepresentation::kTagged);
// Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_lhsissmi(this);
+ // If rhs is known to be an Smi we want to fast path Smi operation. This is
+ // for AddSmi operation. For the normal Add operation, we want to fast path
+ // both Smi and Number operations, so this path should not be marked as
+ // Deferred.
+ Label if_lhsisnotsmi(this,
+ rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
+ Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi);
BIND(&if_lhsissmi);
{
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Comment("lhs is Smi");
+ if (!rhs_is_smi) {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ BIND(&if_rhsisnotsmi);
+ {
+ // Check if the {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+
+ var_fadd_lhs.Bind(SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fadd);
+ }
+
+ BIND(&if_rhsissmi);
+ }
- BIND(&if_rhsissmi);
{
+ Comment("perform smi operation");
// Try fast Smi addition first.
Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(lhs),
BitcastTaggedToWord(rhs));
Node* overflow = Projection(1, pair);
// Check if the Smi addition overflowed.
- Label if_overflow(this), if_notoverflow(this);
+ // If rhs is known to be an Smi we want to fast path Smi operation. This
+ // is for AddSmi operation. For the normal Add operation, we want to fast
+ // path both Smi and Number operations, so this path should not be marked
+ // as Deferred.
+ Label if_overflow(this,
+ rhs_is_smi ? Label::kDeferred : Label::kNonDeferred),
+ if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_overflow);
@@ -60,50 +89,33 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Goto(&end);
}
}
-
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
-
- var_fadd_lhs.Bind(SmiToFloat64(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fadd);
- }
}
BIND(&if_lhsisnotsmi);
{
- // Load the map of {lhs}.
- Node* lhs_map = LoadMap(lhs);
-
// Check if {lhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(lhs_map), &if_lhsisnotnumber);
+ GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber);
- // Check if the {rhs} is Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- {
- var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fadd);
- }
+ if (!rhs_is_smi) {
+ // Check if the {rhs} is Smi.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ BIND(&if_rhsisnotsmi);
+ {
+ // Check if the {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
- // Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
+ var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fadd);
+ }
+ BIND(&if_rhsissmi);
+ }
+ {
var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ var_fadd_rhs.Bind(SmiToFloat64(rhs));
Goto(&do_fadd);
}
}
@@ -130,11 +142,8 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
{
GotoIf(TaggedIsSmi(rhs), &call_with_oddball_feedback);
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
// Check if {rhs} is a HeapNumber.
- Branch(IsHeapNumberMap(rhs_map), &call_with_oddball_feedback,
+ Branch(IsHeapNumber(rhs), &call_with_oddball_feedback,
&check_rhsisoddball);
}
@@ -189,122 +198,105 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
BIND(&call_add_stub);
{
- Callable callable = CodeFactory::Add(isolate());
- var_result.Bind(CallStub(callable, context, lhs, rhs));
+ var_result.Bind(CallBuiltin(Builtins::kAdd, context, lhs, rhs));
Goto(&end);
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id, function);
return var_result.value();
}
-Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
- Node* rhs, Node* slot_id,
- Node* feedback_vector) {
- // Shared entry for floating point subtraction.
- Label do_fsub(this), end(this), call_subtract_stub(this),
- if_lhsisnotnumber(this), check_rhsisoddball(this),
- call_with_any_feedback(this);
- VARIABLE(var_fsub_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fsub_rhs, MachineRepresentation::kFloat64);
+Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
+ Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
+ Node* function, const SmiOperation& smiOperation,
+ const FloatOperation& floatOperation, Token::Value opcode,
+ bool rhs_is_smi) {
+ Label do_float_operation(this), end(this), call_stub(this),
+ check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
+ if_lhsisnotnumber(this, Label::kDeferred);
+ VARIABLE(var_float_lhs, MachineRepresentation::kFloat64);
+ VARIABLE(var_float_rhs, MachineRepresentation::kFloat64);
VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
VARIABLE(var_result, MachineRepresentation::kTagged);
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_lhsissmi(this);
+ // If rhs is known to be an Smi (in the SubSmi, MulSmi, DivSmi, ModSmi
+ // bytecode handlers) we want to fast path Smi operation. For the normal
+ // operation, we want to fast path both Smi and Number operations, so this
+ // path should not be marked as Deferred.
+ Label if_lhsisnotsmi(this,
+ rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
+ Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi);
+ // Check if the {lhs} is a Smi or a HeapObject.
BIND(&if_lhsissmi);
{
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- {
- // Try a fast Smi subtraction first.
- Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
- BitcastTaggedToWord(rhs));
- Node* overflow = Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_overflow(this), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
-
- BIND(&if_overflow);
+ Comment("lhs is Smi");
+ if (!rhs_is_smi) {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ BIND(&if_rhsisnotsmi);
{
- // lhs, rhs - smi and result - number. combined - number.
- // The result doesn't fit into Smi range.
- var_fsub_lhs.Bind(SmiToFloat64(lhs));
- var_fsub_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fsub);
+ // Check if {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+
+ // Perform a floating point operation.
+ var_float_lhs.Bind(SmiToFloat64(lhs));
+ var_float_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_float_operation);
}
- BIND(&if_notoverflow);
- // lhs, rhs, result smi. combined - smi.
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kSignedSmall));
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
+ BIND(&if_rhsissmi);
}
- BIND(&if_rhsisnotsmi);
{
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
-
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(SmiToFloat64(lhs));
- var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fsub);
+ Comment("perform smi operation");
+ var_result.Bind(smiOperation(lhs, rhs, &var_type_feedback));
+ Goto(&end);
}
}
BIND(&if_lhsisnotsmi);
{
- // Load the map of the {lhs}.
- Node* lhs_map = LoadMap(lhs);
-
+ Comment("lhs is not Smi");
// Check if the {lhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(lhs_map), &if_lhsisnotnumber);
+ GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber);
- // Check if the {rhs} is a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ if (!rhs_is_smi) {
+ // Check if the {rhs} is a Smi.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- BIND(&if_rhsissmi);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fsub);
- }
+ BIND(&if_rhsisnotsmi);
+ {
+ // Check if the {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ // Perform a floating point operation.
+ var_float_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_float_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_float_operation);
+ }
- // Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
+ BIND(&if_rhsissmi);
+ }
+ {
// Perform a floating point subtraction.
- var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fsub);
+ var_float_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_float_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_float_operation);
}
}
- BIND(&do_fsub);
+ BIND(&do_float_operation);
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* lhs_value = var_fsub_lhs.value();
- Node* rhs_value = var_fsub_rhs.value();
- Node* value = Float64Sub(lhs_value, rhs_value);
+ Node* lhs_value = var_float_lhs.value();
+ Node* rhs_value = var_float_rhs.value();
+ Node* value = floatOperation(lhs_value, rhs_value);
var_result.Bind(AllocateHeapNumberWithValue(value));
Goto(&end);
}
@@ -325,20 +317,17 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
{
var_type_feedback.Bind(
SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_subtract_stub);
+ Goto(&call_stub);
}
BIND(&if_rhsisnotsmi);
{
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
// Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
var_type_feedback.Bind(
SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_subtract_stub);
+ Goto(&call_stub);
}
}
@@ -353,474 +342,160 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
var_type_feedback.Bind(
SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_subtract_stub);
+ Goto(&call_stub);
}
BIND(&call_with_any_feedback);
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&call_subtract_stub);
- }
-
- BIND(&call_subtract_stub);
- {
- Callable callable = CodeFactory::Subtract(isolate());
- var_result.Bind(CallStub(callable, context, lhs, rhs));
- Goto(&end);
- }
-
- BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
- return var_result.value();
-}
-
-Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
- Node* rhs, Node* slot_id,
- Node* feedback_vector) {
- // Shared entry point for floating point multiplication.
- Label do_fmul(this), if_lhsisnotnumber(this, Label::kDeferred),
- check_rhsisoddball(this, Label::kDeferred),
- call_with_oddball_feedback(this), call_with_any_feedback(this),
- call_multiply_stub(this), end(this);
- VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
-
- Label lhs_is_smi(this), lhs_is_not_smi(this);
- Branch(TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
-
- BIND(&lhs_is_smi);
- {
- Label rhs_is_smi(this), rhs_is_not_smi(this);
- Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
-
- BIND(&rhs_is_smi);
- {
- // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
- // in case of overflow.
- var_result.Bind(SmiMul(lhs, rhs));
- var_type_feedback.Bind(
- SelectSmiConstant(TaggedIsSmi(var_result.value()),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber));
- Goto(&end);
- }
-
- BIND(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
-
- // Convert {lhs} to a double and multiply it with the value of {rhs}.
- var_lhs_float64.Bind(SmiToFloat64(lhs));
- var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fmul);
+ Goto(&call_stub);
+ }
+
+ BIND(&call_stub);
+ {
+ Node* result;
+ switch (opcode) {
+ case Token::SUB:
+ result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs);
+ break;
+ case Token::MUL:
+ result = CallBuiltin(Builtins::kMultiply, context, lhs, rhs);
+ break;
+ case Token::DIV:
+ result = CallBuiltin(Builtins::kDivide, context, lhs, rhs);
+ break;
+ case Token::MOD:
+ result = CallBuiltin(Builtins::kModulus, context, lhs, rhs);
+ break;
+ default:
+ UNREACHABLE();
}
- }
-
- BIND(&lhs_is_not_smi);
- {
- Node* lhs_map = LoadMap(lhs);
-
- // Check if {lhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(lhs_map), &if_lhsisnotnumber);
-
- // Check if {rhs} is a Smi.
- Label rhs_is_smi(this), rhs_is_not_smi(this);
- Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
-
- BIND(&rhs_is_smi);
- {
- // Convert {rhs} to a double and multiply it with the value of {lhs}.
- var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(SmiToFloat64(rhs));
- Goto(&do_fmul);
- }
-
- BIND(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
-
- // Both {lhs} and {rhs} are HeapNumbers. Load their values and
- // multiply them.
- var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fmul);
- }
- }
-
- BIND(&do_fmul);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
Goto(&end);
}
- BIND(&if_lhsisnotnumber);
- {
- // No checks on rhs are done yet. We just know lhs is not a number or Smi.
- // Check if lhs is an oddball.
- Node* lhs_instance_type = LoadInstanceType(lhs);
- Node* lhs_is_oddball =
- Word32Equal(lhs_instance_type, Int32Constant(ODDBALL_TYPE));
- GotoIfNot(lhs_is_oddball, &call_with_any_feedback);
-
- GotoIf(TaggedIsSmi(rhs), &call_with_oddball_feedback);
-
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Branch(IsHeapNumberMap(rhs_map), &call_with_oddball_feedback,
- &check_rhsisoddball);
- }
-
- BIND(&check_rhsisoddball);
- {
- // Check if rhs is an oddball. At this point we know lhs is either a
- // Smi or number or oddball and rhs is not a number or Smi.
- Node* rhs_instance_type = LoadInstanceType(rhs);
- Node* rhs_is_oddball =
- Word32Equal(rhs_instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(rhs_is_oddball, &call_with_oddball_feedback,
- &call_with_any_feedback);
- }
-
- BIND(&call_with_oddball_feedback);
- {
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_multiply_stub);
- }
-
- BIND(&call_with_any_feedback);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&call_multiply_stub);
- }
-
- BIND(&call_multiply_stub);
- {
- Callable callable = CodeFactory::Multiply(isolate());
- var_result.Bind(CallStub(callable, context, lhs, rhs));
- Goto(&end);
- }
-
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id, function);
return var_result.value();
}
-Node* BinaryOpAssembler::Generate_DivideWithFeedback(Node* context,
- Node* dividend,
- Node* divisor,
- Node* slot_id,
- Node* feedback_vector) {
- // Shared entry point for floating point division.
- Label do_fdiv(this), dividend_is_not_number(this, Label::kDeferred),
- check_divisor_for_oddball(this, Label::kDeferred),
- call_with_oddball_feedback(this), call_with_any_feedback(this),
- call_divide_stub(this), end(this);
- VARIABLE(var_dividend_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_divisor_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
-
- Label dividend_is_smi(this), dividend_is_not_smi(this);
- Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
-
- BIND(&dividend_is_smi);
- {
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
+Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
+ Node* rhs, Node* slot_id,
+ Node* feedback_vector,
+ Node* function,
+ bool rhs_is_smi) {
+ auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ // Try a fast Smi subtraction first.
+ Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
+ BitcastTaggedToWord(rhs));
+ Node* overflow = Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_notoverflow(this), end(this);
+ // If rhs is known to be an Smi (for SubSmi) we want to fast path Smi
+ // operation. For the normal Sub operation, we want to fast path both
+ // Smi and Number operations, so this path should not be marked as Deferred.
+ Label if_overflow(this,
+ rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ BIND(&if_notoverflow);
{
- Label bailout(this);
-
- // Try to perform Smi division if possible.
- var_result.Bind(TrySmiDiv(dividend, divisor, &bailout));
- var_type_feedback.Bind(
+ var_type_feedback->Bind(
SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
Goto(&end);
-
- // Bailout: convert {dividend} and {divisor} to double and do double
- // division.
- BIND(&bailout);
- {
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fdiv);
- }
}
- BIND(&divisor_is_not_smi);
+ BIND(&if_overflow);
{
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(divisor_map), &check_divisor_for_oddball);
-
- // Convert {dividend} to a double and divide it with the value of
- // {divisor}.
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fdiv);
- }
-
- BIND(&dividend_is_not_smi);
- {
- Node* dividend_map = LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(dividend_map), &dividend_is_not_number);
-
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Convert {divisor} to a double and use it for a floating point
- // division.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fdiv);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(divisor_map), &check_divisor_for_oddball);
-
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and divide them.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fdiv);
- }
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ Node* value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
+ var_result.Bind(AllocateHeapNumberWithValue(value));
+ Goto(&end);
}
- }
-
- BIND(&do_fdiv);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* value =
- Float64Div(var_dividend_float64.value(), var_divisor_float64.value());
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&end);
- }
-
- BIND(&dividend_is_not_number);
- {
- // We just know dividend is not a number or Smi. No checks on divisor yet.
- // Check if dividend is an oddball.
- Node* dividend_instance_type = LoadInstanceType(dividend);
- Node* dividend_is_oddball =
- Word32Equal(dividend_instance_type, Int32Constant(ODDBALL_TYPE));
- GotoIfNot(dividend_is_oddball, &call_with_any_feedback);
-
- GotoIf(TaggedIsSmi(divisor), &call_with_oddball_feedback);
-
- // Load the map of the {divisor}.
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Branch(IsHeapNumberMap(divisor_map), &call_with_oddball_feedback,
- &check_divisor_for_oddball);
- }
-
- BIND(&check_divisor_for_oddball);
- {
- // Check if divisor is an oddball. At this point we know dividend is either
- // a Smi or number or oddball and divisor is not a number or Smi.
- Node* divisor_instance_type = LoadInstanceType(divisor);
- Node* divisor_is_oddball =
- Word32Equal(divisor_instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(divisor_is_oddball, &call_with_oddball_feedback,
- &call_with_any_feedback);
- }
-
- BIND(&call_with_oddball_feedback);
- {
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_divide_stub);
- }
- BIND(&call_with_any_feedback);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&call_divide_stub);
- }
-
- BIND(&call_divide_stub);
- {
- Callable callable = CodeFactory::Divide(isolate());
- var_result.Bind(CallStub(callable, context, dividend, divisor));
- Goto(&end);
- }
-
- BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
- return var_result.value();
+ BIND(&end);
+ return var_result.value();
+ };
+ auto floatFunction = [=](Node* lhs, Node* rhs) {
+ return Float64Sub(lhs, rhs);
+ };
+ return Generate_BinaryOperationWithFeedback(
+ context, lhs, rhs, slot_id, feedback_vector, function, smiFunction,
+ floatFunction, Token::SUB, rhs_is_smi);
}
-Node* BinaryOpAssembler::Generate_ModulusWithFeedback(Node* context,
- Node* dividend,
- Node* divisor,
- Node* slot_id,
- Node* feedback_vector) {
- // Shared entry point for floating point division.
- Label do_fmod(this), dividend_is_not_number(this, Label::kDeferred),
- check_divisor_for_oddball(this, Label::kDeferred),
- call_with_oddball_feedback(this), call_with_any_feedback(this),
- call_modulus_stub(this), end(this);
- VARIABLE(var_dividend_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_divisor_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
-
- Label dividend_is_smi(this), dividend_is_not_smi(this);
- Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
+Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
+ Node* rhs, Node* slot_id,
+ Node* feedback_vector,
+ Node* function,
+ bool rhs_is_smi) {
+ auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+ Node* result = SmiMul(lhs, rhs);
+ var_type_feedback->Bind(SelectSmiConstant(
+ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber));
+ return result;
+ };
+ auto floatFunction = [=](Node* lhs, Node* rhs) {
+ return Float64Mul(lhs, rhs);
+ };
+ return Generate_BinaryOperationWithFeedback(
+ context, lhs, rhs, slot_id, feedback_vector, function, smiFunction,
+ floatFunction, Token::MUL, rhs_is_smi);
+}
- BIND(&dividend_is_smi);
- {
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
+Node* BinaryOpAssembler::Generate_DivideWithFeedback(
+ Node* context, Node* dividend, Node* divisor, Node* slot_id,
+ Node* feedback_vector, Node* function, bool rhs_is_smi) {
+ auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ // If rhs is known to be an Smi (for DivSmi) we want to fast path Smi
+ // operation. For the normal Div operation, we want to fast path both
+ // Smi and Number operations, so this path should not be marked as Deferred.
+ Label bailout(this, rhs_is_smi ? Label::kDeferred : Label::kNonDeferred),
+ end(this);
+ var_result.Bind(TrySmiDiv(lhs, rhs, &bailout));
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ Goto(&end);
- BIND(&divisor_is_smi);
+ BIND(&bailout);
{
- var_result.Bind(SmiMod(dividend, divisor));
- var_type_feedback.Bind(
- SelectSmiConstant(TaggedIsSmi(var_result.value()),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber));
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ Node* value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
+ var_result.Bind(AllocateHeapNumberWithValue(value));
Goto(&end);
}
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(divisor_map), &check_divisor_for_oddball);
-
- // Convert {dividend} to a double and divide it with the value of
- // {divisor}.
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fmod);
- }
- }
-
- BIND(&dividend_is_not_smi);
- {
- Node* dividend_map = LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(dividend_map), &dividend_is_not_number);
-
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Convert {divisor} to a double and use it for a floating point
- // division.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fmod);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(divisor_map), &check_divisor_for_oddball);
-
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and divide them.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fmod);
- }
- }
-
- BIND(&do_fmod);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* value =
- Float64Mod(var_dividend_float64.value(), var_divisor_float64.value());
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&end);
- }
-
- BIND(&dividend_is_not_number);
- {
- // No checks on divisor yet. We just know dividend is not a number or Smi.
- // Check if dividend is an oddball.
- Node* dividend_instance_type = LoadInstanceType(dividend);
- Node* dividend_is_oddball =
- Word32Equal(dividend_instance_type, Int32Constant(ODDBALL_TYPE));
- GotoIfNot(dividend_is_oddball, &call_with_any_feedback);
-
- GotoIf(TaggedIsSmi(divisor), &call_with_oddball_feedback);
-
- // Load the map of the {divisor}.
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Branch(IsHeapNumberMap(divisor_map), &call_with_oddball_feedback,
- &check_divisor_for_oddball);
- }
-
- BIND(&check_divisor_for_oddball);
- {
- // Check if divisor is an oddball. At this point we know dividend is either
- // a Smi or number or oddball and divisor is not a number or Smi.
- Node* divisor_instance_type = LoadInstanceType(divisor);
- Node* divisor_is_oddball =
- Word32Equal(divisor_instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(divisor_is_oddball, &call_with_oddball_feedback,
- &call_with_any_feedback);
- }
-
- BIND(&call_with_oddball_feedback);
- {
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_modulus_stub);
- }
-
- BIND(&call_with_any_feedback);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&call_modulus_stub);
- }
-
- BIND(&call_modulus_stub);
- {
- Callable callable = CodeFactory::Modulus(isolate());
- var_result.Bind(CallStub(callable, context, dividend, divisor));
- Goto(&end);
- }
+ BIND(&end);
+ return var_result.value();
+ };
+ auto floatFunction = [=](Node* lhs, Node* rhs) {
+ return Float64Div(lhs, rhs);
+ };
+ return Generate_BinaryOperationWithFeedback(
+ context, dividend, divisor, slot_id, feedback_vector, function,
+ smiFunction, floatFunction, Token::DIV, rhs_is_smi);
+}
- BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
- return var_result.value();
+Node* BinaryOpAssembler::Generate_ModulusWithFeedback(
+ Node* context, Node* dividend, Node* divisor, Node* slot_id,
+ Node* feedback_vector, Node* function, bool rhs_is_smi) {
+ auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+ Node* result = SmiMod(lhs, rhs);
+ var_type_feedback->Bind(SelectSmiConstant(
+ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber));
+ return result;
+ };
+ auto floatFunction = [=](Node* lhs, Node* rhs) {
+ return Float64Mod(lhs, rhs);
+ };
+ return Generate_BinaryOperationWithFeedback(
+ context, dividend, divisor, slot_id, feedback_vector, function,
+ smiFunction, floatFunction, Token::MOD, rhs_is_smi);
}
} // namespace internal
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index 849dfc29dc..bb37298447 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -5,6 +5,7 @@
#ifndef V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
#define V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
+#include <functional>
#include "src/code-stub-assembler.h"
namespace v8 {
@@ -22,21 +23,36 @@ class BinaryOpAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
Node* Generate_AddWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector);
+ Node* slot_id, Node* feedback_vector,
+ Node* function, bool rhs_is_smi);
Node* Generate_SubtractWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector);
+ Node* slot_id, Node* feedback_vector,
+ Node* function, bool rhs_is_smi);
Node* Generate_MultiplyWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector);
+ Node* slot_id, Node* feedback_vector,
+ Node* function, bool rhs_is_smi);
Node* Generate_DivideWithFeedback(Node* context, Node* dividend,
Node* divisor, Node* slot_id,
- Node* feedback_vector);
+ Node* feedback_vector, Node* function,
+ bool rhs_is_smi);
Node* Generate_ModulusWithFeedback(Node* context, Node* dividend,
Node* divisor, Node* slot_id,
- Node* feedback_vector);
+ Node* feedback_vector, Node* function,
+ bool rhs_is_smi);
+
+ private:
+ typedef std::function<Node*(Node*, Node*, Variable*)> SmiOperation;
+ typedef std::function<Node*(Node*, Node*)> FloatOperation;
+
+ Node* Generate_BinaryOperationWithFeedback(
+ Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
+ Node* function, const SmiOperation& smiOperation,
+ const FloatOperation& floatOperation, Token::Value opcode,
+ bool rhs_is_smi);
};
} // namespace internal
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 6780ac4ca4..975f789596 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -20,10 +20,8 @@ CallOptimization::CallOptimization(Handle<Object> function) {
}
}
-
Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
- Handle<Map> object_map, HolderLookup* holder_lookup,
- int* holder_depth_in_prototype_chain) const {
+ Handle<Map> object_map, HolderLookup* holder_lookup) const {
DCHECK(is_simple_api_call());
if (!object_map->IsJSObjectMap()) {
*holder_lookup = kHolderNotFound;
@@ -34,15 +32,11 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
*holder_lookup = kHolderIsReceiver;
return Handle<JSObject>::null();
}
- for (int depth = 1; true; depth++) {
- if (!object_map->has_hidden_prototype()) break;
+ if (object_map->has_hidden_prototype()) {
Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
object_map = handle(prototype->map());
if (expected_receiver_type_->IsTemplateFor(*object_map)) {
*holder_lookup = kHolderFound;
- if (holder_depth_in_prototype_chain != NULL) {
- *holder_depth_in_prototype_chain = depth;
- }
return prototype;
}
}
@@ -84,7 +78,6 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
break;
}
UNREACHABLE();
- return false;
}
void CallOptimization::Initialize(
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index efabd3387c..8ca8cde112 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -38,8 +38,7 @@ class CallOptimization BASE_EMBEDDED {
enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
Handle<JSObject> LookupHolderOfExpectedType(
- Handle<Map> receiver_map, HolderLookup* holder_lookup,
- int* holder_depth_in_prototype_chain = NULL) const;
+ Handle<Map> receiver_map, HolderLookup* holder_lookup) const;
// Check if the api holder is between the receiver and the holder.
bool IsCompatibleReceiver(Handle<Object> receiver,
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 69ba39768a..b4aff8ec55 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -30,7 +30,7 @@ Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
// Create code object in the heap.
CodeDesc desc;
- masm()->GetCode(&desc);
+ masm()->GetCode(isolate(), &desc);
Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject());
if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
#ifdef ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 4eb1b464c3..788e4bc0ed 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -27,7 +27,6 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
Label* miss) {
UNREACHABLE();
- return receiver();
}
virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 5f31d15d46..6c75b76ac7 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -122,7 +122,6 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
break;
default:
UNREACHABLE();
- return Handle<Smi>::null();
}
DCHECK(kind == kStoreField || kind == kTransitionToField ||
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 2cfa49b15b..e65f2ea8ff 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -84,7 +84,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ cmp(FieldOperand(properties, HeapObject::kMapOffset),
@@ -139,9 +139,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -149,10 +147,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ mov(holder, FieldOperand(receiver, HeapObject::kMapOffset));
__ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ mov(holder, FieldOperand(holder, HeapObject::kMapOffset));
- __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index c4b4cdcc2b..2d2017d595 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return greater_equal;
default:
UNREACHABLE();
- return no_condition;
}
}
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 8ac3bd99da..fb86528ff8 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -58,9 +58,8 @@ void IC::SetTargetAtAddress(Address address, Code* target,
Address constant_pool) {
if (AddressIsDeoptimizedCode(target->GetIsolate(), address)) return;
- // Only these three old-style ICs still do code patching.
- DCHECK(target->is_binary_op_stub() || target->is_compare_ic_stub() ||
- target->is_to_boolean_ic_stub());
+ // Only one old-style IC still does code patching.
+ DCHECK(target->is_compare_ic_stub());
Heap* heap = target->GetHeap();
Code* old_target = GetTargetAtAddress(address, constant_pool);
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index a217b115fd..74a59d8f25 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -4,7 +4,6 @@
#include "src/ic/ic-state.h"
-#include "src/ast/ast-types.h"
#include "src/feedback-vector.h"
#include "src/ic/ic.h"
#include "src/objects-inl.h"
@@ -19,339 +18,6 @@ void ICUtility::Clear(Isolate* isolate, Address address,
}
-// static
-STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::FIRST_TOKEN;
-
-
-// static
-STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::LAST_TOKEN;
-
-
-BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
- : fixed_right_arg_(
- HasFixedRightArgField::decode(extra_ic_state)
- ? Just(1 << FixedRightArgValueField::decode(extra_ic_state))
- : Nothing<int>()),
- isolate_(isolate) {
- op_ =
- static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
- left_kind_ = LeftKindField::decode(extra_ic_state);
- right_kind_ = fixed_right_arg_.IsJust()
- ? (Smi::IsValid(fixed_right_arg_.FromJust()) ? SMI : INT32)
- : RightKindField::decode(extra_ic_state);
- result_kind_ = ResultKindField::decode(extra_ic_state);
- DCHECK_LE(FIRST_TOKEN, op_);
- DCHECK_LE(op_, LAST_TOKEN);
-}
-
-
-ExtraICState BinaryOpICState::GetExtraICState() const {
- ExtraICState extra_ic_state =
- OpField::encode(op_ - FIRST_TOKEN) | LeftKindField::encode(left_kind_) |
- ResultKindField::encode(result_kind_) |
- HasFixedRightArgField::encode(fixed_right_arg_.IsJust());
- if (fixed_right_arg_.IsJust()) {
- extra_ic_state = FixedRightArgValueField::update(
- extra_ic_state, WhichPowerOf2(fixed_right_arg_.FromJust()));
- } else {
- extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
- }
- return extra_ic_state;
-}
-
-std::string BinaryOpICState::ToString() const {
- std::string ret = "(";
- ret += Token::Name(op_);
- if (CouldCreateAllocationMementos()) ret += "_CreateAllocationMementos";
- ret += ":";
- ret += BinaryOpICState::KindToString(left_kind_);
- ret += "*";
- if (fixed_right_arg_.IsJust()) {
- ret += fixed_right_arg_.FromJust();
- } else {
- ret += BinaryOpICState::KindToString(right_kind_);
- }
- ret += "->";
- ret += BinaryOpICState::KindToString(result_kind_);
- ret += ")";
- return ret;
-}
-
-// static
-void BinaryOpICState::GenerateAheadOfTime(
- Isolate* isolate, void (*Generate)(Isolate*, const BinaryOpICState&)) {
-// TODO(olivf) We should investigate why adding stubs to the snapshot is so
-// expensive at runtime. When solved we should be able to add most binops to
-// the snapshot instead of hand-picking them.
-// Generated list of commonly used stubs
-#define GENERATE(op, left_kind, right_kind, result_kind) \
- do { \
- BinaryOpICState state(isolate, op); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_ = Nothing<int>(); \
- state.right_kind_ = right_kind; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
- } while (false)
- GENERATE(Token::ADD, INT32, INT32, INT32);
- GENERATE(Token::ADD, INT32, INT32, NUMBER);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER);
- GENERATE(Token::ADD, INT32, SMI, INT32);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER);
- GENERATE(Token::ADD, SMI, INT32, INT32);
- GENERATE(Token::ADD, SMI, INT32, NUMBER);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER);
- GENERATE(Token::ADD, SMI, SMI, INT32);
- GENERATE(Token::ADD, SMI, SMI, SMI);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32);
- GENERATE(Token::BIT_AND, INT32, INT32, SMI);
- GENERATE(Token::BIT_AND, INT32, SMI, INT32);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI);
- GENERATE(Token::BIT_AND, NUMBER, INT32, INT32);
- GENERATE(Token::BIT_AND, NUMBER, SMI, SMI);
- GENERATE(Token::BIT_AND, SMI, INT32, INT32);
- GENERATE(Token::BIT_AND, SMI, INT32, SMI);
- GENERATE(Token::BIT_AND, SMI, NUMBER, SMI);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI);
- GENERATE(Token::BIT_OR, INT32, INT32, INT32);
- GENERATE(Token::BIT_OR, INT32, INT32, SMI);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32);
- GENERATE(Token::BIT_OR, INT32, SMI, SMI);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32);
- GENERATE(Token::BIT_OR, NUMBER, SMI, SMI);
- GENERATE(Token::BIT_OR, SMI, INT32, INT32);
- GENERATE(Token::BIT_OR, SMI, INT32, SMI);
- GENERATE(Token::BIT_OR, SMI, SMI, SMI);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32);
- GENERATE(Token::BIT_XOR, INT32, INT32, SMI);
- GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32);
- GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI);
- GENERATE(Token::BIT_XOR, SMI, INT32, INT32);
- GENERATE(Token::BIT_XOR, SMI, INT32, SMI);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI);
- GENERATE(Token::DIV, INT32, INT32, INT32);
- GENERATE(Token::DIV, INT32, INT32, NUMBER);
- GENERATE(Token::DIV, INT32, NUMBER, NUMBER);
- GENERATE(Token::DIV, INT32, SMI, INT32);
- GENERATE(Token::DIV, INT32, SMI, NUMBER);
- GENERATE(Token::DIV, NUMBER, INT32, NUMBER);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER);
- GENERATE(Token::DIV, NUMBER, SMI, NUMBER);
- GENERATE(Token::DIV, SMI, INT32, INT32);
- GENERATE(Token::DIV, SMI, INT32, NUMBER);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER);
- GENERATE(Token::DIV, SMI, SMI, NUMBER);
- GENERATE(Token::DIV, SMI, SMI, SMI);
- GENERATE(Token::MOD, NUMBER, SMI, NUMBER);
- GENERATE(Token::MOD, SMI, SMI, SMI);
- GENERATE(Token::MUL, INT32, INT32, INT32);
- GENERATE(Token::MUL, INT32, INT32, NUMBER);
- GENERATE(Token::MUL, INT32, NUMBER, NUMBER);
- GENERATE(Token::MUL, INT32, SMI, INT32);
- GENERATE(Token::MUL, INT32, SMI, NUMBER);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER);
- GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER);
- GENERATE(Token::MUL, SMI, INT32, INT32);
- GENERATE(Token::MUL, SMI, INT32, NUMBER);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER);
- GENERATE(Token::MUL, SMI, SMI, INT32);
- GENERATE(Token::MUL, SMI, SMI, NUMBER);
- GENERATE(Token::MUL, SMI, SMI, SMI);
- GENERATE(Token::SAR, INT32, SMI, INT32);
- GENERATE(Token::SAR, INT32, SMI, SMI);
- GENERATE(Token::SAR, NUMBER, SMI, SMI);
- GENERATE(Token::SAR, SMI, SMI, SMI);
- GENERATE(Token::SHL, INT32, SMI, INT32);
- GENERATE(Token::SHL, INT32, SMI, SMI);
- GENERATE(Token::SHL, NUMBER, SMI, SMI);
- GENERATE(Token::SHL, SMI, SMI, INT32);
- GENERATE(Token::SHL, SMI, SMI, SMI);
- GENERATE(Token::SHR, INT32, SMI, SMI);
- GENERATE(Token::SHR, NUMBER, SMI, INT32);
- GENERATE(Token::SHR, NUMBER, SMI, SMI);
- GENERATE(Token::SHR, SMI, SMI, SMI);
- GENERATE(Token::SUB, INT32, INT32, INT32);
- GENERATE(Token::SUB, INT32, NUMBER, NUMBER);
- GENERATE(Token::SUB, INT32, SMI, INT32);
- GENERATE(Token::SUB, NUMBER, INT32, NUMBER);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER);
- GENERATE(Token::SUB, SMI, INT32, INT32);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER);
- GENERATE(Token::SUB, SMI, SMI, SMI);
-#undef GENERATE
-#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind) \
- do { \
- BinaryOpICState state(isolate, op); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_ = Just(fixed_right_arg_value); \
- state.right_kind_ = SMI; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
- } while (false)
- GENERATE(Token::MOD, SMI, 2, SMI);
- GENERATE(Token::MOD, SMI, 4, SMI);
- GENERATE(Token::MOD, SMI, 8, SMI);
- GENERATE(Token::MOD, SMI, 16, SMI);
- GENERATE(Token::MOD, SMI, 32, SMI);
- GENERATE(Token::MOD, SMI, 2048, SMI);
-#undef GENERATE
-}
-
-AstType* BinaryOpICState::GetResultType() const {
- Kind result_kind = result_kind_;
- if (HasSideEffects()) {
- result_kind = NONE;
- } else if (result_kind == GENERIC && op_ == Token::ADD) {
- return AstType::NumberOrString();
- } else if (result_kind == NUMBER && op_ == Token::SHR) {
- return AstType::Unsigned32();
- }
- DCHECK_NE(GENERIC, result_kind);
- return KindToType(result_kind);
-}
-
-
-std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s) {
- os << "(" << Token::Name(s.op_);
- if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
- os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
- if (s.fixed_right_arg_.IsJust()) {
- os << s.fixed_right_arg_.FromJust();
- } else {
- os << BinaryOpICState::KindToString(s.right_kind_);
- }
- return os << "->" << BinaryOpICState::KindToString(s.result_kind_) << ")";
-}
-
-
-void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
- Handle<Object> result) {
- ExtraICState old_extra_ic_state = GetExtraICState();
-
- left_kind_ = UpdateKind(left, left_kind_);
- right_kind_ = UpdateKind(right, right_kind_);
-
- int32_t fixed_right_arg_value = 0;
- bool has_fixed_right_arg =
- op_ == Token::MOD && right->ToInt32(&fixed_right_arg_value) &&
- fixed_right_arg_value > 0 &&
- base::bits::IsPowerOfTwo32(fixed_right_arg_value) &&
- FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
- (left_kind_ == SMI || left_kind_ == INT32) &&
- (result_kind_ == NONE || !fixed_right_arg_.IsJust());
- fixed_right_arg_ =
- has_fixed_right_arg ? Just(fixed_right_arg_value) : Nothing<int32_t>();
- result_kind_ = UpdateKind(result, result_kind_);
-
- if (!Token::IsTruncatingBinaryOp(op_)) {
- Kind input_kind = Max(left_kind_, right_kind_);
- if (result_kind_ < input_kind && input_kind <= NUMBER) {
- result_kind_ = input_kind;
- }
- }
-
- // We don't want to distinguish INT32 and NUMBER for string add (because
- // NumberToString can't make use of this anyway).
- if (left_kind_ == STRING && right_kind_ == INT32) {
- DCHECK_EQ(STRING, result_kind_);
- DCHECK_EQ(Token::ADD, op_);
- right_kind_ = NUMBER;
- } else if (right_kind_ == STRING && left_kind_ == INT32) {
- DCHECK_EQ(STRING, result_kind_);
- DCHECK_EQ(Token::ADD, op_);
- left_kind_ = NUMBER;
- }
-
- if (old_extra_ic_state == GetExtraICState()) {
- // Tagged operations can lead to non-truncating HChanges
- if (left->IsOddball()) {
- left_kind_ = GENERIC;
- } else {
- DCHECK(right->IsOddball());
- right_kind_ = GENERIC;
- }
- }
-}
-
-
-BinaryOpICState::Kind BinaryOpICState::UpdateKind(Handle<Object> object,
- Kind kind) const {
- Kind new_kind = GENERIC;
- bool is_truncating = Token::IsTruncatingBinaryOp(op());
- if (object->IsOddball() && is_truncating) {
- // Oddballs will be automatically truncated by HChange.
- new_kind = INT32;
- } else if (object->IsUndefined(isolate_)) {
- // Undefined will be automatically truncated by HChange.
- new_kind = is_truncating ? INT32 : NUMBER;
- } else if (object->IsSmi()) {
- new_kind = SMI;
- } else if (object->IsHeapNumber()) {
- double value = Handle<HeapNumber>::cast(object)->value();
- new_kind = IsInt32Double(value) ? INT32 : NUMBER;
- } else if (object->IsString() && op() == Token::ADD) {
- new_kind = STRING;
- }
- if (new_kind == INT32 && SmiValuesAre32Bits()) {
- new_kind = NUMBER;
- }
- if (kind != NONE && ((new_kind <= NUMBER && kind > NUMBER) ||
- (new_kind > NUMBER && kind <= NUMBER))) {
- new_kind = GENERIC;
- }
- return Max(kind, new_kind);
-}
-
-
-// static
-const char* BinaryOpICState::KindToString(Kind kind) {
- switch (kind) {
- case NONE:
- return "None";
- case SMI:
- return "Smi";
- case INT32:
- return "Int32";
- case NUMBER:
- return "Number";
- case STRING:
- return "String";
- case GENERIC:
- return "Generic";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-// static
-AstType* BinaryOpICState::KindToType(Kind kind) {
- switch (kind) {
- case NONE:
- return AstType::None();
- case SMI:
- return AstType::SignedSmall();
- case INT32:
- return AstType::Signed32();
- case NUMBER:
- return AstType::Number();
- case STRING:
- return AstType::String();
- case GENERIC:
- return AstType::Any();
- }
- UNREACHABLE();
- return NULL;
-}
-
-
const char* CompareICState::GetStateName(State state) {
switch (state) {
case UNINITIALIZED:
@@ -376,34 +42,6 @@ const char* CompareICState::GetStateName(State state) {
return "GENERIC";
}
UNREACHABLE();
- return NULL;
-}
-
-AstType* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
- switch (state) {
- case UNINITIALIZED:
- return AstType::None();
- case BOOLEAN:
- return AstType::Boolean();
- case SMI:
- return AstType::SignedSmall();
- case NUMBER:
- return AstType::Number();
- case STRING:
- return AstType::String();
- case INTERNALIZED_STRING:
- return AstType::InternalizedString();
- case UNIQUE_NAME:
- return AstType::UniqueName();
- case RECEIVER:
- return AstType::Receiver();
- case KNOWN_RECEIVER:
- return map.is_null() ? AstType::Receiver() : AstType::Class(map, zone);
- case GENERIC:
- return AstType::Any();
- }
- UNREACHABLE();
- return NULL;
}
@@ -522,7 +160,6 @@ CompareICState::State CompareICState::TargetState(
return GENERIC;
}
UNREACHABLE();
- return GENERIC; // Make the compiler happy.
}
} // namespace internal
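The large deletion above retires BinaryOpICState together with its ExtraICState bit packing. For reference, a self-contained re-creation of the encoding that the removed BitField declarations describe (field widths match the declarations deleted from ic-state.h below; the function name and types are local to this sketch):

    #include <cstdint>

    using ExtraICState = uint32_t;

    // 4-bit opcode, 3-bit result/left kinds, then either a 3-bit right kind
    // or, when has_fixed_right_arg is set, 4 bits holding log2 of the fixed
    // power-of-two right operand (the two fields overlap starting at bit 11).
    constexpr ExtraICState EncodeBinaryOpState(int op, int left_kind,
                                               int result_kind,
                                               bool has_fixed_right_arg,
                                               unsigned overlapped_bits) {
      return (op & 0xF)                                 // OpField: bits 0-3
             | ((result_kind & 0x7) << 4)               // ResultKindField: 4-6
             | ((left_kind & 0x7) << 7)                 // LeftKindField: 7-9
             | ((has_fixed_right_arg ? 1u : 0u) << 10)  // HasFixedRightArgField
             | ((overlapped_bits & 0xF) << 11);         // right kind or log2(arg)
    }

The deleted GetExtraICState did exactly this via BitField::encode/update; the overlap is why the right kind never needed storing alongside a fixed right argument.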
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 16651c5623..7a7c7578e5 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -11,8 +11,6 @@
namespace v8 {
namespace internal {
-class AstType;
-
const int kMaxKeyedPolymorphism = 4;
@@ -23,130 +21,6 @@ class ICUtility : public AllStatic {
};
-class BinaryOpICState final BASE_EMBEDDED {
- public:
- BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
- BinaryOpICState(Isolate* isolate, Token::Value op)
- : op_(op),
- left_kind_(NONE),
- right_kind_(NONE),
- result_kind_(NONE),
- fixed_right_arg_(Nothing<int>()),
- isolate_(isolate) {
- DCHECK_LE(FIRST_TOKEN, op);
- DCHECK_LE(op, LAST_TOKEN);
- }
-
- InlineCacheState GetICState() const {
- if (Max(left_kind_, right_kind_) == NONE) {
- return ::v8::internal::UNINITIALIZED;
- }
- if (Max(left_kind_, right_kind_) == GENERIC) {
- return ::v8::internal::MEGAMORPHIC;
- }
- if (Min(left_kind_, right_kind_) == GENERIC) {
- return ::v8::internal::GENERIC;
- }
- return ::v8::internal::MONOMORPHIC;
- }
-
- ExtraICState GetExtraICState() const;
- std::string ToString() const;
-
- static void GenerateAheadOfTime(Isolate*,
- void (*Generate)(Isolate*,
- const BinaryOpICState&));
-
- // Returns true if the IC _could_ create allocation mementos.
- bool CouldCreateAllocationMementos() const {
- if (left_kind_ == STRING || right_kind_ == STRING) {
- DCHECK_EQ(Token::ADD, op_);
- return true;
- }
- return false;
- }
-
- // Returns true if the IC _should_ create allocation mementos.
- bool ShouldCreateAllocationMementos() const {
- return FLAG_allocation_site_pretenuring && CouldCreateAllocationMementos();
- }
-
- bool HasSideEffects() const {
- return Max(left_kind_, right_kind_) == GENERIC;
- }
-
- // Returns true if the IC should enable the inline smi code (i.e. if either
- // parameter may be a smi).
- bool UseInlinedSmiCode() const {
- return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
- }
-
- static const int FIRST_TOKEN = Token::BIT_OR;
- static const int LAST_TOKEN = Token::MOD;
-
- Token::Value op() const { return op_; }
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
-
- AstType* GetLeftType() const { return KindToType(left_kind_); }
- AstType* GetRightType() const { return KindToType(right_kind_); }
- AstType* GetResultType() const;
-
- void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result);
-
- Isolate* isolate() const { return isolate_; }
-
- enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
- Kind kind() const {
- return KindGeneralize(KindGeneralize(left_kind_, right_kind_),
- result_kind_);
- }
-
- private:
- friend std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s);
-
- Kind UpdateKind(Handle<Object> object, Kind kind) const;
-
- static const char* KindToString(Kind kind);
- static AstType* KindToType(Kind kind);
- static bool KindMaybeSmi(Kind kind) {
- return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
- }
- static bool KindLessGeneralThan(Kind kind1, Kind kind2) {
- if (kind1 == NONE) return true;
- if (kind1 == kind2) return true;
- if (kind2 == GENERIC) return true;
- if (kind2 == STRING) return false;
- return kind1 <= kind2;
- }
- static Kind KindGeneralize(Kind kind1, Kind kind2) {
- if (KindLessGeneralThan(kind1, kind2)) return kind2;
- if (KindLessGeneralThan(kind2, kind1)) return kind1;
- return GENERIC;
- }
-
- // We truncate the last bit of the token.
- STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
- class OpField : public BitField<int, 0, 4> {};
- class ResultKindField : public BitField<Kind, 4, 3> {};
- class LeftKindField : public BitField<Kind, 7, 3> {};
- // When fixed right arg is set, we don't need to store the right kind.
- // Thus the two fields can overlap.
- class HasFixedRightArgField : public BitField<bool, 10, 1> {};
- class FixedRightArgValueField : public BitField<int, 11, 4> {};
- class RightKindField : public BitField<Kind, 11, 3> {};
-
- Token::Value op_;
- Kind left_kind_;
- Kind right_kind_;
- Kind result_kind_;
- Maybe<int> fixed_right_arg_;
- Isolate* isolate_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s);
-
-
class CompareICState {
public:
// The type/state lattice is defined by the following inequations:
@@ -169,9 +43,6 @@ class CompareICState {
GENERIC
};
- static AstType* StateToType(Zone* zone, State state,
- Handle<Map> map = Handle<Map>());
-
static State NewInputState(State old_state, Handle<Object> value);
static const char* GetStateName(CompareICState::State state);
diff --git a/deps/v8/src/ic/ic-stats.cc b/deps/v8/src/ic/ic-stats.cc
index de2529fcd9..c305209d48 100644
--- a/deps/v8/src/ic/ic-stats.cc
+++ b/deps/v8/src/ic/ic-stats.cc
@@ -17,21 +17,21 @@ base::LazyInstance<ICStats>::type ICStats::instance_ =
LAZY_INSTANCE_INITIALIZER;
ICStats::ICStats() : ic_infos_(MAX_IC_INFO), pos_(0) {
- base::NoBarrier_Store(&enabled_, 0);
+ base::Relaxed_Store(&enabled_, 0);
}
void ICStats::Begin() {
if (V8_LIKELY(!FLAG_ic_stats)) return;
- base::NoBarrier_Store(&enabled_, 1);
+ base::Relaxed_Store(&enabled_, 1);
}
void ICStats::End() {
- if (base::NoBarrier_Load(&enabled_) != 1) return;
+ if (base::Relaxed_Load(&enabled_) != 1) return;
++pos_;
if (pos_ == MAX_IC_INFO) {
Dump();
}
- base::NoBarrier_Store(&enabled_, 0);
+ base::Relaxed_Store(&enabled_, 0);
}
void ICStats::Reset() {
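The NoBarrier_* to Relaxed_* rename above is terminological: both name atomic accesses with no ordering guarantees, matching std::memory_order_relaxed. A minimal stand-alone equivalent of the enabled_ flag handling, assuming nothing beyond the standard library:

    #include <atomic>

    std::atomic<int> enabled{0};

    void Begin(bool flag_ic_stats) {
      if (!flag_ic_stats) return;                   // the V8_LIKELY fast-out
      enabled.store(1, std::memory_order_relaxed);  // Relaxed_Store
    }

    bool Enabled() {
      return enabled.load(std::memory_order_relaxed) == 1;  // Relaxed_Load
    }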
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 5dca55ed3e..2684d0ba36 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -50,7 +50,6 @@ char IC::TransitionMarkFromState(IC::State state) {
return 'G';
}
UNREACHABLE();
- return 0;
}
@@ -190,9 +189,9 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- // If there's another JavaScript frame on the stack or a
- // StubFailureTrampoline, we need to look one frame further down the stack to
- // find the frame pointer and the return address stack slot.
+ // If there's another JavaScript frame on the stack we need to look one frame
+ // further down the stack to find the frame pointer and the return address
+ // stack slot.
if (depth == EXTRA_CALL_FRAME) {
if (FLAG_enable_embedded_constant_pool) {
constant_pool = reinterpret_cast<Address*>(
@@ -230,12 +229,8 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
} else {
Code* target = this->target();
Code::Kind kind = target->kind();
- if (kind == Code::BINARY_OP_IC) {
- kind_ = FeedbackSlotKind::kBinaryOp;
- } else if (kind == Code::COMPARE_IC) {
+ if (kind == Code::COMPARE_IC) {
kind_ = FeedbackSlotKind::kCompareOp;
- } else if (kind == Code::TO_BOOLEAN_IC) {
- kind_ = FeedbackSlotKind::kToBoolean;
} else {
UNREACHABLE();
kind_ = FeedbackSlotKind::kInvalid;
@@ -262,22 +257,13 @@ bool IC::ShouldPushPopSlotAndVector(Code::Kind kind) {
InlineCacheState IC::StateFromCode(Code* code) {
Isolate* isolate = code->GetIsolate();
switch (code->kind()) {
- case Code::BINARY_OP_IC: {
- BinaryOpICState state(isolate, code->extra_ic_state());
- return state.GetICState();
- }
case Code::COMPARE_IC: {
CompareICStub stub(isolate, code->extra_ic_state());
return stub.GetICState();
}
- case Code::TO_BOOLEAN_IC: {
- ToBooleanICStub stub(isolate, code->extra_ic_state());
- return stub.GetICState();
- }
default:
if (code->is_debug_stub()) return UNINITIALIZED;
UNREACHABLE();
- return UNINITIALIZED;
}
}
@@ -428,23 +414,15 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
// static
void IC::OnFeedbackChanged(Isolate* isolate, JSFunction* host_function) {
- Code* host = host_function->shared()->code();
-
- if (host->kind() == Code::FUNCTION) {
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
- host->set_profiler_ticks(0);
- } else if (host_function->IsInterpreted()) {
- if (FLAG_trace_opt_verbose) {
- if (host_function->shared()->profiler_ticks() != 0) {
- PrintF("[resetting ticks for ");
- host_function->PrintName();
- PrintF(" due from %d due to IC change]\n",
- host_function->shared()->profiler_ticks());
- }
+ if (FLAG_trace_opt_verbose) {
+ if (host_function->shared()->profiler_ticks() != 0) {
+ PrintF("[resetting ticks for ");
+ host_function->PrintName();
+ PrintF(" due from %d due to IC change]\n",
+ host_function->shared()->profiler_ticks());
}
- host_function->shared()->set_profiler_ticks(0);
}
+ host_function->shared()->set_profiler_ticks(0);
isolate->runtime_profiler()->NotifyICChanged();
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -454,9 +432,7 @@ void IC::OnFeedbackChanged(Isolate* isolate, JSFunction* host_function) {
void IC::PostPatching(Address address, Code* target, Code* old_target) {
// Type vector based ICs update these statistics at a different time because
// they don't always patch on state change.
- DCHECK(target->kind() == Code::BINARY_OP_IC ||
- target->kind() == Code::COMPARE_IC ||
- target->kind() == Code::TO_BOOLEAN_IC);
+ DCHECK(target->kind() == Code::COMPARE_IC);
DCHECK(old_target->is_inline_cache_stub());
DCHECK(target->is_inline_cache_stub());
@@ -480,10 +456,14 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
info->change_ic_with_type_info_count(polymorphic_delta);
info->change_ic_generic_count(generic_delta);
}
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
}
- host->set_profiler_ticks(0);
+
+ // TODO(leszeks): Normally we would reset profiler ticks here -- but we don't
+ // currently have access to the feedback vector from the IC. In practice,
+ // this is not an issue, as these ICs are only used by asm.js, which shouldn't
+ // have too many IC changes. This inconsistency should go away once these
+ // Crankshaft/hydrogen code stubs go away.
+
isolate->runtime_profiler()->NotifyICChanged();
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -571,6 +551,10 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
PatchCache(name, slow_stub());
TRACE_IC("LoadIC", name);
}
+
+ if (*name == isolate()->heap()->iterator_symbol()) {
+ return Runtime::ThrowIteratorError(isolate(), object);
+ }
return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
}
@@ -882,8 +866,7 @@ Handle<WeakCell> HolderCell(Isolate* isolate, Handle<JSObject> holder,
GlobalDictionary* dict = global->global_dictionary();
int number = dict->FindEntry(name);
DCHECK_NE(NameDictionary::kNotFound, number);
- Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(number)),
- isolate);
+ Handle<PropertyCell> cell(dict->CellAt(number), isolate);
return isolate->factory()->NewWeakCell(cell);
}
return Map::GetOrCreatePrototypeWeakCell(holder, isolate);
@@ -1160,7 +1143,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
// When debugging we need to go the slow path to flood the accessor.
- if (GetHostFunction()->shared()->HasDebugInfo()) {
+ if (GetHostFunction()->shared()->HasBreakInfo()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
@@ -1296,7 +1279,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup) {
Handle<Object> accessors = lookup->GetAccessors();
DCHECK(accessors->IsAccessorPair());
DCHECK(holder->HasFastProperties());
- DCHECK(!GetHostFunction()->shared()->HasDebugInfo());
+ DCHECK(!GetHostFunction()->shared()->HasBreakInfo());
Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
isolate());
CallOptimization call_optimization(getter);
@@ -1316,7 +1299,7 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
if (key->IsHeapNumber()) {
double value = Handle<HeapNumber>::cast(key)->value();
if (std::isnan(value)) {
- key = isolate->factory()->nan_string();
+ key = isolate->factory()->NaN_string();
} else {
int int_value = FastD2I(value);
if (value == int_value && Smi::IsValid(int_value)) {
@@ -1430,9 +1413,9 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
}
DCHECK(IsFastElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind));
- // TODO(jkummerow): Use IsHoleyElementsKind(elements_kind).
+ // TODO(jkummerow): Use IsHoleyOrDictionaryElementsKind(elements_kind).
bool convert_hole_to_undefined =
- is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
+ is_js_array && elements_kind == HOLEY_ELEMENTS &&
*receiver_map == isolate()->get_initial_js_array_map(elements_kind);
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadElementDH);
return LoadHandler::LoadElement(isolate(), elements_kind,
@@ -2045,8 +2028,9 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
List<Handle<Object>> handlers(static_cast<int>(target_receiver_maps.size()));
StoreElementPolymorphicHandlers(&target_receiver_maps, &handlers, store_mode);
- DCHECK_LE(1, target_receiver_maps.size());
- if (target_receiver_maps.size() == 1) {
+ if (target_receiver_maps.size() == 0) {
+ ConfigureVectorState(PREMONOMORPHIC, Handle<Name>());
+ } else if (target_receiver_maps.size() == 1) {
ConfigureVectorState(Handle<Name>(), target_receiver_maps[0],
handlers.at(0));
} else {
@@ -2060,16 +2044,16 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
switch (store_mode) {
case STORE_TRANSITION_TO_OBJECT:
case STORE_AND_GROW_TRANSITION_TO_OBJECT: {
- ElementsKind kind = IsFastHoleyElementsKind(map->elements_kind())
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
+ ElementsKind kind = IsHoleyElementsKind(map->elements_kind())
+ ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS;
return Map::TransitionElementsTo(map, kind);
}
case STORE_TRANSITION_TO_DOUBLE:
case STORE_AND_GROW_TRANSITION_TO_DOUBLE: {
- ElementsKind kind = IsFastHoleyElementsKind(map->elements_kind())
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
+ ElementsKind kind = IsHoleyElementsKind(map->elements_kind())
+ ? HOLEY_DOUBLE_ELEMENTS
+ : PACKED_DOUBLE_ELEMENTS;
return Map::TransitionElementsTo(map, kind);
}
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
@@ -2081,7 +2065,6 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
return map;
}
UNREACHABLE();
- return MaybeHandle<Map>().ToHandleChecked();
}
Handle<Object> KeyedStoreIC::StoreElementHandler(
@@ -2205,14 +2188,14 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
!receiver->WouldConvertToSlowElements(index);
if (allow_growth) {
// Handle growing array in stub if necessary.
- if (receiver->HasFastSmiElements()) {
+ if (receiver->HasSmiElements()) {
if (value->IsHeapNumber()) {
return STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
if (value->IsHeapObject()) {
return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
- } else if (receiver->HasFastDoubleElements()) {
+ } else if (receiver->HasDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
@@ -2220,13 +2203,13 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
return STORE_AND_GROW_NO_TRANSITION;
} else {
// Handle only in-bounds elements accesses.
- if (receiver->HasFastSmiElements()) {
+ if (receiver->HasSmiElements()) {
if (value->IsHeapNumber()) {
return STORE_TRANSITION_TO_DOUBLE;
} else if (value->IsHeapObject()) {
return STORE_TRANSITION_TO_OBJECT;
}
- } else if (receiver->HasFastDoubleElements()) {
+ } else if (receiver->HasDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
return STORE_TRANSITION_TO_OBJECT;
}
@@ -2282,6 +2265,10 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
return store_handle;
}
+ if (state() != UNINITIALIZED) {
+ JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
+ }
+
bool use_ic = FLAG_use_ic && !object->IsStringWrapper() &&
!object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy();
if (use_ic && !object->IsSmi()) {
@@ -2304,9 +2291,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
old_receiver_map = handle(receiver->map(), isolate());
is_arguments = receiver->IsJSArgumentsObject();
if (!is_arguments) {
- key_is_valid_index = key->IsSmi() && Smi::cast(*key)->value() >= 0;
+ key_is_valid_index = key->IsSmi() && Smi::ToInt(*key) >= 0;
if (key_is_valid_index) {
- uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
+ uint32_t index = static_cast<uint32_t>(Smi::ToInt(*key));
store_mode = GetStoreMode(receiver, index, value);
}
}
@@ -2560,158 +2547,6 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
}
-MaybeHandle<Object> BinaryOpIC::Transition(
- Handle<AllocationSite> allocation_site, Handle<Object> left,
- Handle<Object> right) {
- BinaryOpICState state(isolate(), extra_ic_state());
-
- // Compute the actual result using the builtin for the binary operation.
- Handle<Object> result;
- switch (state.op()) {
- default:
- UNREACHABLE();
- case Token::ADD:
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::Add(isolate(), left, right), Object);
- break;
- case Token::SUB:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::Subtract(isolate(), left, right), Object);
- break;
- case Token::MUL:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::Multiply(isolate(), left, right), Object);
- break;
- case Token::DIV:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::Divide(isolate(), left, right), Object);
- break;
- case Token::MOD:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::Modulus(isolate(), left, right), Object);
- break;
- case Token::BIT_OR:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::BitwiseOr(isolate(), left, right), Object);
- break;
- case Token::BIT_AND:
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::BitwiseAnd(isolate(), left, right),
- Object);
- break;
- case Token::BIT_XOR:
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::BitwiseXor(isolate(), left, right),
- Object);
- break;
- case Token::SAR:
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::ShiftRight(isolate(), left, right),
- Object);
- break;
- case Token::SHR:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::ShiftRightLogical(isolate(), left, right),
- Object);
- break;
- case Token::SHL:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::ShiftLeft(isolate(), left, right), Object);
- break;
- }
-
- // Do not try to update the target if the code was marked for lazy
- // deoptimization. (Since we do not relocate addresses in these
- // code objects, an attempt to access the target could fail.)
- if (AddressIsDeoptimizedCode()) {
- return result;
- }
-
- // Compute the new state.
- BinaryOpICState old_state(isolate(), target()->extra_ic_state());
- state.Update(left, right, result);
-
- // Check if we have a string operation here.
- Handle<Code> new_target;
- if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
- // Setup the allocation site on-demand.
- if (allocation_site.is_null()) {
- allocation_site = isolate()->factory()->NewAllocationSite();
- }
-
- // Install the stub with an allocation site.
- BinaryOpICWithAllocationSiteStub stub(isolate(), state);
- new_target = stub.GetCodeCopyFromTemplate(allocation_site);
-
- // Sanity check the trampoline stub.
- DCHECK_EQ(*allocation_site, new_target->FindFirstAllocationSite());
- } else {
- // Install the generic stub.
- BinaryOpICStub stub(isolate(), state);
- new_target = stub.GetCode();
-
- // Sanity check the generic stub.
- DCHECK_NULL(new_target->FindFirstAllocationSite());
- }
- set_target(*new_target);
-
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- auto ic_stats = ICStats::instance();
- ic_stats->Begin();
- ICInfo& ic_info = ic_stats->Current();
- ic_info.type = "BinaryOpIC";
- ic_info.state = old_state.ToString();
- ic_info.state += " => ";
- ic_info.state += state.ToString();
- JavaScriptFrame::CollectTopFrameForICStats(isolate());
- ic_stats->End();
- } else if (FLAG_ic_stats) {
- int line;
- int column;
- Address pc = GetAbstractPC(&line, &column);
- LOG(isolate(),
- BinaryOpIC(pc, line, column, *new_target, old_state.ToString().c_str(),
- state.ToString().c_str(),
- allocation_site.is_null() ? nullptr : *allocation_site));
- }
-
- // Patch the inlined smi code as necessary.
- if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(isolate(), address(), ENABLE_INLINED_SMI_CHECK);
- } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(isolate(), address(), DISABLE_INLINED_SMI_CHECK);
- }
-
- return result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- typedef BinaryOpDescriptor Descriptor;
- Handle<Object> left = args.at(Descriptor::kLeft);
- Handle<Object> right = args.at(Descriptor::kRight);
- BinaryOpIC ic(isolate);
- RETURN_RESULT_OR_FAILURE(
- isolate, ic.Transition(Handle<AllocationSite>::null(), left, right));
-}
-
-
-RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
- Handle<AllocationSite> allocation_site =
- args.at<AllocationSite>(Descriptor::kAllocationSite);
- Handle<Object> left = args.at(Descriptor::kLeft);
- Handle<Object> right = args.at(Descriptor::kRight);
- BinaryOpIC ic(isolate);
- RETURN_RESULT_OR_FAILURE(isolate,
- ic.Transition(allocation_site, left, right));
-}
-
Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED,
@@ -2800,51 +2635,6 @@ RUNTIME_FUNCTION(Runtime_Unreachable) {
}
-Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanICStub stub(isolate(), extra_ic_state());
- ToBooleanHints old_hints = stub.hints();
- bool to_boolean_value = stub.UpdateStatus(object);
- ToBooleanHints new_hints = stub.hints();
- Handle<Code> code = stub.GetCode();
- set_target(*code);
-
- // Note: Although a no-op transition is semantically OK, it is hinting at a
- // bug somewhere in our state transition machinery.
- DCHECK_NE(old_hints, new_hints);
- if (V8_UNLIKELY(FLAG_ic_stats)) {
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- auto ic_stats = ICStats::instance();
- ic_stats->Begin();
- ICInfo& ic_info = ic_stats->Current();
- ic_info.type = "ToBooleanIC";
- ic_info.state = ToString(old_hints);
- ic_info.state += "=>";
- ic_info.state += ToString(new_hints);
- ic_stats->End();
- } else {
- int line;
- int column;
- Address pc = GetAbstractPC(&line, &column);
- LOG(isolate(),
- ToBooleanIC(pc, line, column, *code, ToString(old_hints).c_str(),
- ToString(new_hints).c_str()));
- }
- }
-
- return isolate()->factory()->ToBoolean(to_boolean_value);
-}
-
-
-RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
- DCHECK(args.length() == 1);
- HandleScope scope(isolate);
- Handle<Object> object = args.at(0);
- ToBooleanIC ic(isolate);
- return *ic.ToBoolean(object);
-}
-
-
RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<JSObject> holder = args.at<JSObject>(1);
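Among the smaller ic.cc changes above, the TryConvertKey hunk shows the key-normalization rule for keyed access: a heap-number key becomes the "NaN" string, an integral in-range double becomes a Smi, and anything else stays a number. A sketch of that rule with simplified types (std::variant stands in for tagged values; the Smi::IsValid range check is elided):

    #include <cmath>
    #include <cstdint>
    #include <string>
    #include <variant>

    using Key = std::variant<int32_t, double, std::string>;

    Key ConvertDoubleKey(double value) {
      if (std::isnan(value)) return std::string("NaN");  // NaN_string()
      int32_t int_value = static_cast<int32_t>(value);   // FastD2I
      if (value == int_value) return int_value;          // Smi case
      return value;                                      // stays a number
    }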
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 9ea8905757..bb8dca540c 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -122,11 +122,9 @@ class IC {
Handle<Object> ComputeHandler(LookupIterator* lookup);
virtual Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) {
UNREACHABLE();
- return Handle<Code>::null();
}
virtual Handle<Code> CompileHandler(LookupIterator* lookup) {
UNREACHABLE();
- return Handle<Code>::null();
}
void UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name);
@@ -423,17 +421,6 @@ class KeyedStoreIC : public StoreIC {
};
-// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class BinaryOpIC : public IC {
- public:
- explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
-
- MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
- Handle<Object> left,
- Handle<Object> right) WARN_UNUSED_RESULT;
-};
-
-
class CompareIC : public IC {
public:
CompareIC(Isolate* isolate, Token::Value op)
@@ -461,16 +448,7 @@ class CompareIC : public IC {
friend class IC;
};
-
-class ToBooleanIC : public IC {
- public:
- explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
-
- Handle<Object> ToBoolean(Handle<Object> object);
-};
-
-
-// Helper for BinaryOpIC and CompareIC.
+// Helper for CompareIC.
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
void PatchInlinedSmiCode(Isolate* isolate, Address address,
InlinedSmiCheck check);
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 19c7e47caa..79b7f83eaf 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -132,7 +132,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
DCHECK(IsFastPackedElementsKind(from_kind));
ElementsKind holey_from_kind = GetHoleyElementsKind(from_kind);
ElementsKind holey_to_kind = GetHoleyElementsKind(to_kind);
- if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(from_kind, to_kind)) {
TrapAllocationMemento(receiver, bailout);
}
Label perform_transition(this), check_holey_map(this);
@@ -161,8 +161,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
// Found a supported transition target map, perform the transition!
BIND(&perform_transition);
{
- if (IsFastDoubleElementsKind(from_kind) !=
- IsFastDoubleElementsKind(to_kind)) {
+ if (IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind)) {
Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
capacity, INTPTR_PARAMETERS, bailout);
@@ -178,8 +177,7 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
Node* packed_map =
LoadContextElement(native_context, Context::ArrayMapIndex(packed_kind));
GotoIf(WordNotEqual(receiver_map, packed_map), map_mismatch);
- if (AllocationSite::GetMode(packed_kind, holey_kind) ==
- TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(packed_kind, holey_kind)) {
TrapAllocationMemento(receiver, bailout);
}
Node* holey_map =
@@ -263,7 +261,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// FixedArray backing store -> Smi or object elements.
{
- Node* offset = ElementOffsetFromIndex(intptr_index, FAST_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(intptr_index, PACKED_ELEMENTS,
INTPTR_PARAMETERS, kHeaderSize);
// Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
@@ -288,7 +286,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// If we're about to introduce holes, ensure holey elements.
if (update_length == kBumpLengthWithGap) {
TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
- FAST_SMI_ELEMENTS, FAST_ELEMENTS, slow);
+ PACKED_SMI_ELEMENTS, PACKED_ELEMENTS, slow);
}
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
value);
@@ -300,14 +298,14 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// Check if we already have object elements; just do the store if so.
{
Label must_transition(this);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
GotoIf(Int32LessThanOrEqual(elements_kind,
- Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+ Int32Constant(HOLEY_SMI_ELEMENTS)),
&must_transition);
if (update_length == kBumpLengthWithGap) {
TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
- FAST_ELEMENTS, slow);
+ PACKED_ELEMENTS, slow);
}
Store(elements, offset, value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
@@ -326,14 +324,15 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// If we're adding holes at the end, always transition to a holey
// elements kind, otherwise try to remain packed.
ElementsKind target_kind = update_length == kBumpLengthWithGap
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
+ ? HOLEY_DOUBLE_ELEMENTS
+ : PACKED_DOUBLE_ELEMENTS;
TryRewriteElements(receiver, receiver_map, elements, native_context,
- FAST_SMI_ELEMENTS, target_kind, slow);
+ PACKED_SMI_ELEMENTS, target_kind, slow);
// Reload migrated elements.
Node* double_elements = LoadElements(receiver);
- Node* double_offset = ElementOffsetFromIndex(
- intptr_index, FAST_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ Node* double_offset =
+ ElementOffsetFromIndex(intptr_index, PACKED_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, kHeaderSize);
// Make sure we do not store signalling NaNs into double arrays.
Node* double_value = Float64SilenceNaN(LoadHeapNumberValue(value));
StoreNoWriteBarrier(MachineRepresentation::kFloat64, double_elements,
@@ -347,10 +346,10 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// If we're adding holes at the end, always transition to a holey
// elements kind, otherwise try to remain packed.
ElementsKind target_kind = update_length == kBumpLengthWithGap
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
+ ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS;
TryRewriteElements(receiver, receiver_map, elements, native_context,
- FAST_SMI_ELEMENTS, target_kind, slow);
+ PACKED_SMI_ELEMENTS, target_kind, slow);
// The elements backing store didn't change, no reload necessary.
CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
Store(elements, offset, value);
@@ -366,7 +365,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
&check_cow_elements);
// FixedDoubleArray backing store -> double elements.
{
- Node* offset = ElementOffsetFromIndex(intptr_index, FAST_DOUBLE_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(intptr_index, PACKED_DOUBLE_ELEMENTS,
INTPTR_PARAMETERS, kHeaderSize);
// Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
@@ -396,7 +395,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// If we're about to introduce holes, ensure holey elements.
if (update_length == kBumpLengthWithGap) {
TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
- FAST_DOUBLE_ELEMENTS, slow);
+ PACKED_DOUBLE_ELEMENTS, slow);
}
StoreNoWriteBarrier(MachineRepresentation::kFloat64, elements, offset,
double_value);
@@ -409,14 +408,14 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
{
Node* native_context = LoadNativeContext(context);
ElementsKind target_kind = update_length == kBumpLengthWithGap
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
+ ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS;
TryRewriteElements(receiver, receiver_map, elements, native_context,
- FAST_DOUBLE_ELEMENTS, target_kind, slow);
+ PACKED_DOUBLE_ELEMENTS, target_kind, slow);
// Reload migrated elements.
Node* fast_elements = LoadElements(receiver);
Node* fast_offset = ElementOffsetFromIndex(
- intptr_index, FAST_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ intptr_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
Store(fast_elements, fast_offset, value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
}
@@ -488,7 +487,8 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
Goto(slow);
}
- // Any ElementsKind > LAST_FAST_ELEMENTS_KIND jumps here for further dispatch.
+ // Any ElementsKind > LAST_FAST_ELEMENTS_KIND jumps here for further
+ // dispatch.
BIND(&if_nonfast);
{
STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
@@ -866,8 +866,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&not_callable);
{
if (language_mode == STRICT) {
- Node* message =
- SmiConstant(Smi::FromInt(MessageTemplate::kNoSetterInCallback));
+ Node* message = SmiConstant(MessageTemplate::kNoSetterInCallback);
TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
var_accessor_holder.value());
} else {
@@ -880,8 +879,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&readonly);
{
if (language_mode == STRICT) {
- Node* message =
- SmiConstant(Smi::FromInt(MessageTemplate::kStrictReadOnlyProperty));
+ Node* message = SmiConstant(MessageTemplate::kStrictReadOnlyProperty);
Node* type = Typeof(p->receiver);
TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
type, p->receiver);
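The mechanical renames in this file track the new elements-kind vocabulary: FAST_* became PACKED_* and FAST_HOLEY_* became HOLEY_*. The two STATIC_ASSERTs in the hunk above pin PACKED_SMI_ELEMENTS to 0 and HOLEY_SMI_ELEMENTS to 1; the remaining values in this sketch are assumed for illustration, as is the single-bit packed-to-holey mapping:

    enum ElementsKind {
      PACKED_SMI_ELEMENTS = 0,  // asserted in the hunk above
      HOLEY_SMI_ELEMENTS = 1,   // asserted in the hunk above
      PACKED_ELEMENTS = 2,      // ordering assumed from here on
      HOLEY_ELEMENTS = 3,
      PACKED_DOUBLE_ELEMENTS = 4,
      HOLEY_DOUBLE_ELEMENTS = 5,
    };

    // Under this numbering every holey kind is its packed kind plus one,
    // so "make holey" is a single bit-set, which is the kind of cheap
    // transition TryChangeToHoleyMap performs.
    constexpr ElementsKind GetHoleyElementsKind(ElementsKind packed) {
      return static_cast<ElementsKind>(packed | 1);
    }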
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 9f0174f44d..0f92191e4a 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -126,7 +126,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ lw(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
@@ -134,8 +135,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ Branch(miss_label, ne, map, Operand(tmp));
// Restore the temporarily used register.
- __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
+ __ lw(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -189,9 +190,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -199,10 +198,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ lw(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lw(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ lw(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ lw(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index fd39972f0e..d299fb52e0 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 99638f5493..171ed2eee8 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -126,7 +126,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ Ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
@@ -134,7 +135,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ Branch(miss_label, ne, map, Operand(tmp));
// Restore the temporarily used register.
- __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -188,9 +190,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -198,10 +198,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ Ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ Ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 0e2032a41d..41cb2c6dbc 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 877e3996e0..333cae8d68 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -128,7 +128,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
@@ -137,8 +138,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ bne(miss_label);
// Restore the temporarily used register.
- __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -194,9 +195,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -204,10 +203,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 0f25846870..14ad5c5b77 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index 718b24d608..26730a9ad5 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -125,14 +125,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
__ CompareRoot(map, Heap::kHashTableMapRootIndex);
__ bne(miss_label);
// Restore the temporarily used register.
- __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -186,9 +188,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -196,10 +196,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
index 494a4cd1d7..9be3878a58 100644
--- a/deps/v8/src/ic/s390/ic-s390.cc
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 6396c57061..46ac580a70 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -9,7 +9,6 @@
#include "src/counters.h"
#include "src/heap/heap.h"
#include "src/ic/ic-inl.h"
-#include "src/type-info.h"
namespace v8 {
namespace internal {
@@ -22,8 +21,8 @@ StubCache::StubCache(Isolate* isolate, Code::Kind ic_kind)
}
void StubCache::Initialize() {
- DCHECK(base::bits::IsPowerOfTwo32(kPrimaryTableSize));
- DCHECK(base::bits::IsPowerOfTwo32(kSecondaryTableSize));
+ DCHECK(base::bits::IsPowerOfTwo(kPrimaryTableSize));
+ DCHECK(base::bits::IsPowerOfTwo(kSecondaryTableSize));
Clear();
}
@@ -41,12 +40,10 @@ bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
if (handler) {
DCHECK(IC::IsHandler(handler));
if (handler->IsCode()) {
- Code* code = Code::cast(handler);
- Code::Flags expected_flags =
- Code::ComputeHandlerFlags(stub_cache->ic_kind());
- Code::Flags flags = code->flags();
- DCHECK_EQ(expected_flags, flags);
- DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
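+      // The handler's IC kind is now carried in the code's extra IC state,
+      // as the DCHECKs below verify.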
+ Code::Flags code_flags = Code::cast(handler)->flags();
+ Code::Kind ic_code_kind = stub_cache->ic_kind();
+ DCHECK_EQ(ic_code_kind, Code::ExtractExtraICStateFromFlags(code_flags));
+ DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code_flags));
}
}
return true;
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index ffb0a398ad..74b5715883 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -71,7 +71,6 @@ class StubCache {
return StubCache::secondary_;
}
UNREACHABLE();
- return nullptr;
}
Isolate* isolate() { return isolate_; }
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index eeddd55a7b..51bb791712 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -69,7 +69,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(properties,
+ FieldOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
@@ -117,9 +118,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -127,10 +126,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ movp(holder, FieldOperand(receiver, HeapObject::kMapOffset));
__ movp(holder, FieldOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ movp(holder, FieldOperand(holder, HeapObject::kMapOffset));
- __ movp(holder, FieldOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 3b87bc9b6a..96468b1a16 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -28,7 +28,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return greater_equal;
default:
UNREACHABLE();
- return no_condition;
}
}
diff --git a/deps/v8/src/ic/x87/OWNERS b/deps/v8/src/ic/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/ic/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
deleted file mode 100644
index d1867553cd..0000000000
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, ebx, eax, edi};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, ebx, edi};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
deleted file mode 100644
index dc572a19cc..0000000000
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ pop(esi);
- }
- __ ret(0);
-}
-
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ push(slot);
- __ push(vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ pop(vector);
- __ pop(slot);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ add(esp, Immediate(2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->hash_table_map()));
- __ j(not_equal, miss_label);
-
- Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
- properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-// Generate a call to an API accessor function. This uses push() to build
-// the frame directly, an optimization that will be removed once API call
-// ICs are generated in hydrogen.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(!accessor_holder.is(scratch));
- // Save the return address; it is pushed back once the arguments are in place.
- __ pop(scratch);
-
- if (is_store) {
- // Discard stack arguments.
- __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
- kPointerSize));
- }
- // Write the receiver and arguments to stack frame.
- __ push(receiver);
- if (is_store) {
- DCHECK(!AreAliased(receiver, scratch, store_parameter));
- __ push(store_parameter);
- }
- __ push(scratch);
- // Stack now matches JSFunction abi.
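- // That is: return address at esp[0], the value (for stores) above it,
- // then the receiver.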
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = edi;
- Register data = ebx;
- Register holder = ecx;
- Register api_function_address = edx;
- scratch = no_reg;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ mov(holder, FieldOperand(receiver, HeapObject::kMapOffset));
- __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ mov(holder, FieldOperand(holder, HeapObject::kMapOffset));
- __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- }
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
- __ mov(data, Immediate(isolate->factory()->undefined_value()));
- } else {
- if (optimization.is_constant_call()) {
- __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ mov(data, FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ mov(api_function_address, Immediate(function_address));
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(isolate->factory()->the_hole_value()));
- __ j(not_equal, miss);
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[12] : value
- // -- esp[8] : slot
- // -- esp[4] : vector
- // -- esp[0] : return address
- // -----------------------------------
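- // The value is passed on the stack; reload it into the value register.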
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save context register
- __ push(esi);
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- __ mov(scratch,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ push(receiver);
- __ push(value());
- __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
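- // The CallFunction builtin takes the argument count in eax.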
- __ Set(eax, 1);
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(eax);
- // Restore context register.
- __ pop(esi);
- }
- if (accessor_index >= 0) {
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
- } else {
- // If we generate a global code snippet for deoptimization only, don't try
- // to drop stack arguments for the StoreIC because they are not part of
- // the expression stack and the deoptimizer does not reconstruct them.
- __ ret(0);
- }
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ mov(this->name(), Immediate(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ mov(scratch1, NativeContextOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
- __ cmp(scratch1, scratch2);
-
- if (!compare_native_contexts_only) {
- __ j(equal, &done);
-
- // Compare security tokens of current and expected native contexts.
- __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- __ cmp(scratch1, scratch2);
- }
- __ j(not_equal, miss);
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- // Operand::ForCell(...) points to the cell's payload!
- __ cmp(Operand::ForCell(validity_cell),
- Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
- __ j(not_equal, miss);
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
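- // Reload the current prototype from its weak cell; a cleared cell sends
- // us to the miss label.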
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- __ bind(miss);
- if (IC::ShouldPushPopSlotAndVector(kind())) {
- DCHECK(kind() == Code::LOAD_IC);
- PopVectorAndSlot();
- }
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- GenerateRestoreName(miss, name);
- DCHECK(!IC::ShouldPushPopSlotAndVector(kind()));
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- // Zap register aliases of the arguments passed on the stack to ensure they
- // are properly loaded by the handler (debug-only).
- STATIC_ASSERT(Descriptor::kPassLastArgsOnStack);
- STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
- __ mov(Descriptor::ValueRegister(), Immediate(kDebugZapValue));
- __ mov(Descriptor::SlotRegister(), Immediate(kDebugZapValue));
- __ mov(Descriptor::VectorRegister(), Immediate(kDebugZapValue));
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-
- __ pop(scratch1()); // remove the return address
- // Discard stack arguments.
- __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
- kPointerSize));
- __ push(receiver());
- __ push(holder_reg);
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ Push(callback);
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ Push(cell);
- }
- __ Push(name);
- __ push(value());
- __ push(Immediate(Smi::FromInt(language_mode)));
- __ push(scratch1()); // restore return address
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(kind(), name);
-}
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
deleted file mode 100644
index 7564c006b8..0000000000
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Isolate* isolate, Address address,
- InlinedSmiCheck check) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- DCHECK(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The byte after the test instruction encodes the delta back to the short
- // jump whose condition code is patched below.
- uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
- if (FLAG_trace_ic) {
- LOG(isolate, PatchIC(address, test_instruction_address, delta));
- }
-
- // Patch with a short conditional jump. Enabling switches a short
- // jump-if-carry/not-carry into jump-if-zero/not-zero; disabling performs
- // the reverse switch.
- Address jmp_address = test_instruction_address - delta;
- DCHECK((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
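- // Pick the replacement condition: carry-based jumps become zero-based
- // when enabling the check, and vice versa when disabling it.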
- Condition cc =
- (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87