Diffstat (limited to 'deps/v8/src/arm/code-stubs-arm.cc')
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 486
1 file changed, 5 insertions(+), 481 deletions(-)
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 59f304d51d..60a8322d36 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -635,8 +624,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
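[Note: the hunk above replaces the Runtime::kStrictEqual/kEqual runtime calls with direct calls to the StrictEqual/Equal builtins; cp is pushed and popped around the call, presumably because a CODE_TARGET call may clobber the context register. A minimal C++ model of the fixup the surrounding comments describe, assuming only what they state (true maps to 0 so that EQUAL == 0 holds):

    // Model of the result mapping after the builtin call: the builtin
    // leaves a boolean in r0, and the stub turns true into 0 (EQUAL)
    // and false into some non-zero value.
    int ComparisonResultFromBoolean(bool is_true) {
      return is_true ? 0 /* EQUAL */ : 1 /* any non-zero value */;
    }
]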
@@ -805,7 +797,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
- isolate->set_fp_stubs_generated(true);
}
@@ -2075,46 +2066,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding) {
- if (FLAG_debug_code) {
- // Check that destination is word aligned.
- __ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, kDestinationOfCopyNotAligned);
- }
-
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
- if (encoding == String::TWO_BYTE_ENCODING) {
- __ add(count, count, Operand(count), SetCC);
- }
-
- Register limit = count; // Read until dest equals this.
- __ add(limit, dest, Operand(count));
-
- Label loop_entry, loop;
- // Copy bytes from src to dest until dest hits limit.
- __ b(&loop_entry);
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- __ bind(&loop_entry);
- __ cmp(dest, Operand(limit));
- __ b(lt, &loop);
-
- __ bind(&done);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
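[Note: for reference, a scalar C++ model of the copy loop the removed StringHelper::GenerateCopyCharacters emitted (a sketch of the semantics only; the stub itself emitted ARM instructions with post-indexed ldrb/strb):

    #include <cstddef>
    #include <cstdint>

    void CopyCharactersModel(uint8_t* dest, const uint8_t* src,
                             size_t count, bool two_byte_encoding) {
      // TWO_BYTE_ENCODING doubled `count` into a byte count
      // (`add count, count, count, SetCC` above).
      size_t bytes = two_byte_encoding ? count * 2 : count;
      uint8_t* limit = dest + bytes;  // `limit` aliased `count` in the stub
      while (dest < limit) {          // cmp dest, limit; b lt, loop
        *dest++ = *src++;             // ldrb/strb with post-increment
      }
    }
]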
@@ -2690,84 +2641,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ b(ne, miss);
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- DCHECK(!elements.is(scratch1));
- DCHECK(!elements.is(scratch2));
- DCHECK(!name.is(scratch1));
- DCHECK(!name.is(scratch2));
-
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ SmiUntag(scratch1);
- __ sub(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ add(scratch2, scratch2, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- __ add(scratch2, elements, Operand(scratch2, LSL, 2));
- __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ cmp(name, Operand(ip));
- __ b(eq, done);
- }
-
- const int spill_mask =
- (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
- r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ stm(db_w, sp, spill_mask);
- if (name.is(r0)) {
- DCHECK(!elements.is(r1));
- __ Move(r1, name);
- __ Move(r0, elements);
- } else {
- __ Move(r0, elements);
- __ Move(r1, name);
- }
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmp(r0, Operand::Zero());
- __ mov(scratch2, Operand(r2));
- __ ldm(ia_w, sp, spill_mask);
-
- __ b(ne, done);
- __ b(eq, miss);
-}
-
-
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
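[Note: the removed GeneratePositiveLookup above inlines the first kInlinedProbes steps of the dictionary's probe sequence. A simplified C++ sketch of that lookup, assuming a power-of-two capacity and taking GetProbeOffset(i) to be i + i*i as the removed comment "(hash + i + i * i) & mask" suggests:

    #include <cstdint>

    // Word index of the probed entry's key slot on probe step i.
    // Each entry spans 3 words (STATIC_ASSERT(kEntrySize == 3) above),
    // which the stub computed as scratch2 += scratch2 << 1.
    uint32_t ProbedKeyIndex(uint32_t hash, uint32_t i, uint32_t capacity) {
      uint32_t mask = capacity - 1;       // capacity is a power of two
      uint32_t probe_offset = i + i * i;  // assumed GetProbeOffset(i)
      uint32_t index = (hash + probe_offset) & mask;
      return index * 3;
    }
]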
@@ -3057,238 +2930,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ ldr(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ b(ne, &start_polymorphic);
- // found, now call handler.
- Register handler = feedback;
- __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-
- Register length = scratch2;
- __ bind(&start_polymorphic);
- __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ cmp(length, Operand(Smi::FromInt(2)));
- __ b(eq, miss);
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len  | wm0  | h0  | wm1 |   hN    |
- // +-----+------+------+-----+-----+ ... ----+
- //                 0      1     2        len-1
- //                              ^            ^
- //                              |            |
- //                         pointer_reg    too_far
- //                         aka feedback   scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
- __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ ldr(cached_map, MemOperand(pointer_reg));
- __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ b(ne, &prepare_next);
- __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
- __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- __ bind(&prepare_next);
- __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
- __ cmp(pointer_reg, too_far);
- __ b(lt, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ cmp(cached_map, receiver_map);
- __ b(ne, try_array);
- Register handler = feedback;
- __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(handler,
- FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
- __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
- __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
- // +-----+------+------+-----+-----+-----+ ... ----+
- //                 0      1     2             len-1
- //                 ^                            ^
- //                 |                            |
- //            pointer_reg                    too_far
- //            aka feedback                   scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
- __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ ldr(cached_map, MemOperand(pointer_reg));
- __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ b(ne, &prepare_next);
- // Is it a transitioning store?
- __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
- __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
- __ b(ne, &transition_call);
- __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- __ bind(&transition_call);
- __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(feedback, too_far);
-
- __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- __ bind(&prepare_next);
- __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
- __ cmp(pointer_reg, too_far);
- __ b(lt, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r1
- Register key = StoreWithVectorDescriptor::NameRegister(); // r2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r4
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0)); // r0
- Register feedback = r5;
- Register receiver_map = r6;
- Register scratch1 = r9;
-
- __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
-
- // We are using register r8, which is used for the embedded constant pool
- // when FLAG_enable_embedded_constant_pool is true.
- DCHECK(!FLAG_enable_embedded_constant_pool);
- Register scratch2 = r8;
-
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ b(ne, &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, feedback);
- __ b(ne, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
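[Note: the removed HandleMonomorphicCase/HandleArrayCases/HandlePolymorphicStoreCase helpers above all walk the feedback array's (weak map, handler) pairs and tail-call the handler for the first map matching the receiver's map. An illustrative C++ model of that scan; the types here are stand-ins, not V8's:

    #include <cstddef>

    struct FeedbackPair {
      const void* cached_map;  // weak cell value for a receiver map
      const void* handler;     // code to dispatch to on a match
    };

    // Returns the matching handler, or nullptr to take the miss path.
    const void* FindHandler(const FeedbackPair* pairs, size_t num_pairs,
                            const void* receiver_map) {
      // pointer_reg walks the pairs until it reaches too_far.
      for (size_t i = 0; i < num_pairs; i++) {
        if (pairs[i].cached_map == receiver_map) return pairs[i].handler;
      }
      return nullptr;  // exhausted the map/handler pairs: jump to miss
    }
]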
@@ -3648,123 +3289,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : target
- // -- r3 : new target
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r1);
- __ AssertReceiver(r3);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &new_object);
- __ CompareObjectType(r2, r0, r0, MAP_TYPE);
- __ b(ne, &new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
- __ cmp(r0, r1);
- __ b(ne, &new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- r0 : result (tagged)
- // -- r1 : result fields (untagged)
- // -- r5 : result end (untagged)
- // -- r2 : initial map
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
- __ tst(r3, Operand(Map::ConstructionCounter::kMask));
- __ b(ne, &slack_tracking);
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(r1, r5, r6);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
- __ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- __ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
- __ InitializeFieldsWithFiller(r1, r4, r6);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(r1, r5, r6);
-
- // Check if we can finalize the instance size.
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ tst(r3, Operand(Map::ConstructionCounter::kMask));
- __ Ret(ne);
-
- // Finalize the instance size.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r0, r2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(r0);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
- __ Push(r2, r4);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(r2);
- }
- __ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ sub(r5, r5, Operand(kHeapObjectTag));
- __ b(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ Push(r1, r3);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
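[Note: for reference, a rough C++ model of the in-object slack-tracking branch the removed FastNewObjectStub encoded; MapModel and its field are stand-ins, not V8's Map layout:

    // Each fast-path allocation decrements the map's construction
    // counter; while it is non-zero the stub pre-fills the unused
    // in-object fields with filler and returns, and once it runs out
    // the stub calls Runtime::kFinalizeInstanceSize to shrink the
    // instance size for future allocations.
    struct MapModel {
      unsigned construction_counter;  // Map::ConstructionCounter bits
    };

    bool DecrementAndCheckFinalize(MapModel* map) {
      map->construction_counter -= 1;          // sub r3, r3, 1 << kShift
      return map->construction_counter == 0;   // finalize when exhausted
    }
]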
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : function