Diffstat (limited to 'deps/v8/src/s390/code-stubs-s390.cc')
-rw-r--r--  deps/v8/src/s390/code-stubs-s390.cc  495
1 file changed, 11 insertions, 484 deletions
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 553d6d8ce4..5fdddc6e32 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -32,17 +32,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
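The two deleted initializers routed FastArrayPushStub and FastFunctionBindStub to their runtime fallbacks (kArrayPush and kFunctionBind). As a hedged sketch of what each Initialize call recorded, with simplified types and field names rather than the real CodeStubDescriptor API:

    struct StubDescriptorInfo {
      const char* arg_register;    // "r2": register carrying the stub argument
      void* deopt_handler;         // runtime entry taken when the fast path bails out
      int stack_parameter_count;   // -1: the count is taken from the call site
      bool js_function_stub_mode;  // JS_FUNCTION_STUB_MODE frame layout
    };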
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -647,8 +636,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
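The rewritten block calls the StrictEqual/Equal builtins directly instead of the runtime, saving and restoring cp because a builtin call may clobber the context register. The code that follows relies on the boolean result mapping onto the generic comparison encoding; a minimal standalone sketch of that contract:

    // true becomes 0 (EQUAL) and false a non-zero value, so one
    // "compare the result against zero" epilogue serves every condition.
    enum ComparisonResult { EQUAL = 0, NOT_EQUAL = 1 };
    int OutcomeFromBoolean(bool equal) { return equal ? EQUAL : NOT_EQUAL; }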
@@ -847,7 +839,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
- isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -2166,44 +2157,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-enum CopyCharactersFlags { COPY_ASCII = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
- Register src, Register count,
- Register scratch,
- String::Encoding encoding) {
- if (FLAG_debug_code) {
- // Check that destination is word aligned.
- __ mov(r0, Operand(kPointerAlignmentMask));
- __ AndP(r0, dest);
- __ Check(eq, kDestinationOfCopyNotAligned, cr0);
- }
-
- // Nothing to do for zero characters.
- Label done;
- if (encoding == String::TWO_BYTE_ENCODING) {
- // double the length
- __ AddP(count, count, count);
- __ beq(&done, Label::kNear);
- } else {
- __ CmpP(count, Operand::Zero());
- __ beq(&done, Label::kNear);
- }
-
- // Copy count bytes from src to dst.
- Label byte_loop;
- // TODO(joransiu): Convert into MVC loop
- __ bind(&byte_loop);
- __ LoadlB(scratch, MemOperand(src));
- __ la(src, MemOperand(src, 1));
- __ stc(scratch, MemOperand(dest));
- __ la(dest, MemOperand(dest, 1));
- __ BranchOnCount(count, &byte_loop);
-
- __ bind(&done);
-}
-
-
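The removed GenerateCopyCharacters emitted a byte-at-a-time copy loop (with a standing TODO to turn it into an MVC loop). Stripped of code generation, it amounts to roughly the following C++, a hedged sketch rather than the generated assembly:

    #include <cstddef>
    #include <cstdint>

    // Copies count characters from src to dest; two-byte strings copy
    // twice as many bytes. Zero characters means nothing to do.
    void CopyCharacters(uint8_t* dest, const uint8_t* src, size_t count,
                        bool two_byte_encoding) {
      if (two_byte_encoding) count *= 2;  // "double the length", as above
      while (count > 0) {
        *dest++ = *src++;
        --count;
      }
    }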
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2792,83 +2745,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
__ bne(miss);
}
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
- MacroAssembler* masm, Label* miss, Label* done, Register elements,
- Register name, Register scratch1, Register scratch2) {
- DCHECK(!elements.is(scratch1));
- DCHECK(!elements.is(scratch2));
- DCHECK(!name.is(scratch1));
- DCHECK(!name.is(scratch2));
-
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ SmiUntag(scratch1); // convert smi to int
- __ SubP(scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ LoadlW(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ AddP(scratch2,
- Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ srl(scratch2, Operand(String::kHashShift));
- __ AndP(scratch2, scratch1);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ ShiftLeftP(ip, scratch2, Operand(1));
- __ AddP(scratch2, ip);
-
- // Check if the key is identical to the name.
- __ ShiftLeftP(ip, scratch2, Operand(kPointerSizeLog2));
- __ AddP(scratch2, elements, ip);
- __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ CmpP(name, ip);
- __ beq(done);
- }
-
- const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
- r4.bit() | r3.bit() | r2.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ LoadRR(r0, r14);
- __ MultiPush(spill_mask);
- if (name.is(r2)) {
- DCHECK(!elements.is(r3));
- __ LoadRR(r3, name);
- __ LoadRR(r2, elements);
- } else {
- __ LoadRR(r2, elements);
- __ LoadRR(r3, name);
- }
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ LoadRR(r1, r2);
- __ LoadRR(scratch2, r4);
- __ MultiPop(spill_mask);
- __ LoadRR(r14, r0);
-
- __ CmpP(r1, Operand::Zero());
- __ bne(done);
- __ beq(miss);
-}
-
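The deleted GeneratePositiveLookup unrolled the first kInlinedProbes steps of the dictionary's quadratic probe sequence before spilling registers and calling the full stub. Per probe it computed, in C++ terms (a minimal sketch):

    #include <cstdint>

    // Probe i of a NameDictionary whose capacity mask is |mask|:
    // index = (hash + i + i*i) & mask, then scale by the entry size.
    uint32_t ProbeWordOffset(uint32_t hash, uint32_t i, uint32_t mask) {
      uint32_t index = (hash + i + i * i) & mask;
      return index * 3;  // kEntrySize == 3: done above as (index << 1) + index
    }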
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3145,240 +3021,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ LoadP(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ CmpP(receiver_map, cached_map);
- __ bne(&start_polymorphic, Label::kNear);
- // found, now call handler.
- Register handler = feedback;
- __ LoadP(handler,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- Register length = scratch2;
- __ bind(&start_polymorphic);
- __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
- __ beq(miss);
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- //  +-----+------+------+-----+-----+ ... ----+
- //  | map | len  | wm0  | h0  | wm1 |   hN    |
- //  +-----+------+------+-----+-----+ ... ----+
- //                  0      1     2       len-1
- //                               ^           ^
- //                               |           |
- //                          pointer_reg   too_far
- //                          aka feedback  scratch2
- //  also need receiver_map
- //  use cached_map (scratch1) to look in the weak map values.
- __ SmiToPtrArrayOffset(r0, length);
- __ AddP(too_far, feedback, r0);
- __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ LoadP(cached_map, MemOperand(pointer_reg));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ CmpP(receiver_map, cached_map);
- __ bne(&prepare_next, Label::kNear);
- __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
- __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&prepare_next);
- __ AddP(pointer_reg, Operand(kPointerSize * 2));
- __ CmpP(pointer_reg, too_far);
- __ blt(&next_loop, Label::kNear);
-
- // We exhausted our array of map handler pairs.
- __ b(miss);
-}
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ CmpP(cached_map, receiver_map);
- __ bne(try_array);
- Register handler = feedback;
- __ SmiToPtrArrayOffset(r1, slot);
- __ LoadP(handler,
- FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
- __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-}
-
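Both deleted helpers searched the feedback vector for a handler keyed by the receiver's map: HandleMonomorphicCase checked a single weak cell, while HandleArrayCases walked the (weak map, handler) pairs drawn in the comment above. Stripped of code generation, the search is roughly this, using hypothetical standalone types rather than V8's heap layout:

    struct WeakCell { const void* value; };  // cleared cell: value == nullptr
    struct MapHandlerPair { WeakCell map; const void* handler; };

    // Returns the matching handler, or nullptr to signal the miss path.
    const void* FindHandler(const void* receiver_map,
                            const MapHandlerPair* pairs, int pair_count) {
      for (int i = 0; i < pair_count; ++i) {
        if (pairs[i].map.value == receiver_map) return pairs[i].handler;
      }
      return nullptr;  // exhausted the map/handler pairs
    }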
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
- __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- //  +-----+------+------+-----+-----+-----+ ... ----+
- //  | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
- //  +-----+------+------+-----+-----+-----+ ... ----+
- //                  0      1     2            len-1
- //                  ^                             ^
- //                  |                             |
- //             pointer_reg                    too_far
- //             aka feedback                   scratch2
- //  also need receiver_map
- //  use cached_map (scratch1) to look in the weak map values.
- __ SmiToPtrArrayOffset(r0, too_far);
- __ AddP(too_far, feedback, r0);
- __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ LoadP(cached_map, MemOperand(pointer_reg));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ CmpP(receiver_map, cached_map);
- __ bne(&prepare_next);
- // Is it a transitioning store?
- __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
- __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
- __ bne(&transition_call);
- __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&transition_call);
- __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadRR(feedback, too_far);
-
- __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&prepare_next);
- __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
- __ CmpLogicalP(pointer_reg, too_far);
- __ blt(&next_loop);
-
- // We exhausted our array of map handler pairs.
- __ b(miss);
-}
-
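The store variant walks triples rather than pairs, which is why this loop advances pointer_reg by kPointerSize * 3: each entry is (weak map, transition map or undefined, handler). A hedged sketch of the per-entry decision, again with hypothetical types:

    enum class StoreKind { kMiss, kPlain, kTransition };

    struct StoreFeedbackEntry {
      const void* cached_map;  // value of the weak map cell
      const void* transition;  // weak transition map, or undefined
      const void* handler;     // code to tail-call on a match
    };

    StoreKind Classify(const void* receiver_map, const StoreFeedbackEntry& e,
                       const void* undefined_value) {
      if (e.cached_map != receiver_map) return StoreKind::kMiss;  // next entry
      return e.transition == undefined_value ? StoreKind::kPlain
                                             : StoreKind::kTransition;
    }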
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r3
- Register key = StoreWithVectorDescriptor::NameRegister(); // r4
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r5
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r6
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2)); // r2
- Register feedback = r7;
- Register receiver_map = r8;
- Register scratch1 = r9;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ AddP(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
-
- Register scratch2 = ip;
-
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ CmpP(key, feedback);
- __ bne(&miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ SmiToPtrArrayOffset(r0, slot);
- __ AddP(feedback, vector, r0);
- __ LoadP(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
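GenerateImpl chained the cases in order: monomorphic weak cell, polymorphic fixed array, megamorphic symbol, then a name key whose map/handler array sits in the next vector slot, and finally the miss. The load_smi_map tail is worth a note: smis carry no map pointer, so they are dispatched as if they had the HeapNumber map, letting one cached handler cover both representations. As a sketch with a hypothetical tagged-value type:

    struct TaggedValue {
      bool is_smi;
      const void* map;  // meaningful only when !is_smi
    };

    const void* DispatchMap(const TaggedValue& v, const void* heap_number_map) {
      return v.is_smi ? heap_number_map : v.map;  // mirrors load_smi_map above
    }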
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
@@ -3754,124 +3396,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : target
- // -- r5 : new target
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r3);
- __ AssertReceiver(r5);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ CompareObjectType(r5, r4, r4, JS_FUNCTION_TYPE);
- __ bne(&new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ LoadP(r4, FieldMemOperand(r5, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r4, &new_object);
- __ CompareObjectType(r4, r2, r2, MAP_TYPE);
- __ bne(&new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ LoadP(r2, FieldMemOperand(r4, Map::kConstructorOrBackPointerOffset));
- __ CmpP(r2, r3);
- __ bne(&new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ LoadlB(r6, FieldMemOperand(r4, Map::kInstanceSizeOffset));
- __ Allocate(r6, r2, r7, r8, &allocate, SIZE_IN_WORDS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ StoreP(r4, FieldMemOperand(r2, JSObject::kMapOffset));
- __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r5, FieldMemOperand(r2, JSObject::kPropertiesOffset));
- __ StoreP(r5, FieldMemOperand(r2, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ AddP(r3, r2, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- r2 : result (tagged)
- // -- r3 : result fields (untagged)
- // -- r7 : result end (untagged)
- // -- r4 : initial map
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ LoadlW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
- __ DecodeField<Map::ConstructionCounter>(r9, r5);
- __ LoadAndTestP(r9, r9);
- __ bne(&slack_tracking);
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(r3, r7, r8);
-
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift)));
- __ StoreW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ LoadlB(r6, FieldMemOperand(r4, Map::kUnusedPropertyFieldsOffset));
- __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
- __ SubP(r6, r7, r6);
- __ InitializeFieldsWithFiller(r3, r6, r8);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(r3, r7, r8);
-
- // Check if we can finalize the instance size.
- __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
- __ Ret(ne);
-
- // Finalize the instance size.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r2, r4);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(r2);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- __ ShiftLeftP(r6, r6,
- Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
- __ Push(r4, r6);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(r4);
- }
- __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
- __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
- __ AddP(r7, r2, r7);
- __ SubP(r7, r7, Operand(kHeapObjectTag));
- __ b(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ Push(r3, r5);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
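Most of the deleted FastNewObjectStub body implements in-object slack tracking: while a map's construction counter runs, fields not yet observed to be used are filled with a one-pointer filler instead of undefined, and once the counter reaches kSlackTrackingCounterEnd the runtime finalizes the instance size. The counter arithmetic in isolation, as a hedged sketch whose shift and width are illustrative rather than V8's exact bit_field3 layout:

    #include <cstdint>

    constexpr int kCounterShift = 24;          // ConstructionCounter position
    constexpr uint32_t kCounterMask = 0xFFu << kCounterShift;
    constexpr uint32_t kNoSlackTracking = 0;   // STATIC_ASSERT'd above

    // Mirrors "__ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift)))".
    uint32_t DecrementConstructionCounter(uint32_t bit_field3) {
      return bit_field3 - (1u << kCounterShift);
    }

    bool SlackTrackingActive(uint32_t bit_field3) {
      return ((bit_field3 & kCounterMask) >> kCounterShift) != kNoSlackTracking;
    }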
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : function
@@ -3902,7 +3426,8 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label no_rest_parameters;
__ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
__ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ CmpP(ip, r0);
__ bne(&no_rest_parameters);
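This hunk and the two analogous ones below replace CmpSmiLiteral with an explicit LoadSmiLiteral into r0 followed by a full-width CmpP, so the frame-type marker is compared as a complete pointer-sized Smi. What is being tested, in C++ terms and with an encoding sketch assumed for a 64-bit build:

    #include <cstdint>

    constexpr int kSmiShift = 32;  // assumed 64-bit Smi encoding

    intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << kSmiShift;
    }

    // True when the caller frame's context-or-frame-type slot holds the
    // ARGUMENTS_ADAPTOR marker (field/slot names hypothetical).
    bool IsArgumentsAdaptorMarker(intptr_t slot_value, int adaptor_marker) {
      return slot_value == SmiFromInt(adaptor_marker);
    }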
// Check if the arguments adaptor frame contains more arguments than
@@ -4076,7 +3601,8 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
Label adaptor_frame, try_allocate, runtime;
__ LoadP(r6, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
__ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ CmpP(r2, r0);
__ beq(&adaptor_frame);
// No adaptor, parameter count = argument count.
@@ -4311,7 +3837,8 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
Label arguments_adaptor, arguments_done;
__ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
__ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ CmpP(ip, r0);
__ beq(&arguments_adaptor);
{
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));