Diffstat (limited to 'deps/v8/src/mips64/code-stubs-mips64.cc')
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.cc  | 1198
1 file changed, 602 insertions(+), 596 deletions(-)
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 191b9607f8..d0c05ad0cc 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -11,6 +11,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/mips64/code-stubs-mips64.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
@@ -181,8 +182,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low, MemOperand(input_reg, double_offset));
- __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+ __ lw(input_low,
+ MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ __ lw(input_high,
+ MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
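The two loads above now go through Register::kMantissaOffset / Register::kExponentOffset instead of hard-coded offsets 0 and kIntSize, so the stub reads the correct 32-bit half of the double on both little- and big-endian MIPS64. A minimal standalone C++ sketch of the idea (plain C++, not V8 code; the constants are defined locally here for illustration):

    #include <cstdint>
    #include <cstring>

    // On little-endian targets the mantissa word of an IEEE-754 double sits at
    // byte offset 0 and the sign/exponent word at offset 4; big-endian targets
    // reverse the two. This mirrors what the V8 constants abstract away.
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    constexpr int kMantissaOffset = 4;
    constexpr int kExponentOffset = 0;
    #else
    constexpr int kMantissaOffset = 0;
    constexpr int kExponentOffset = 4;
    #endif

    // Load the two 32-bit halves of a double by byte offset, as the stub does.
    static void SplitDouble(const double* d, uint32_t* low, uint32_t* high) {
      const char* p = reinterpret_cast<const char*>(d);
      std::memcpy(low, p + kMantissaOffset, sizeof *low);
      std::memcpy(high, p + kExponentOffset, sizeof *high);
    }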
@@ -721,29 +724,25 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// a1 (rhs) second.
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- if (cc == eq && strict()) {
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ if (cc == eq) {
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript native;
- if (cc == eq) {
- native = Builtins::EQUALS;
+ int ncr; // NaN compare result.
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result.
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- DCHECK(cc == gt || cc == ge); // Remaining cases.
- ncr = LESS;
- }
- __ li(a0, Operand(Smi::FromInt(ncr)));
- __ push(a0);
+ DCHECK(cc == gt || cc == ge); // Remaining cases.
+ ncr = LESS;
}
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
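The ncr ("NaN compare result") pushed above is what the runtime falls back to when either operand is NaN: GREATER for < and <=, LESS for > and >=, so that any comparison involving NaN ends up false. A small illustrative C++ sketch of that convention (not the runtime's actual code):

    enum { LESS = -1, EQUAL = 0, GREATER = 1 };

    // Returns the Smi-like value the caller tests against zero. Passing
    // GREATER as nan_result for 'a < b' makes the '< 0' test fail on NaN;
    // passing LESS for 'a > b' makes the '> 0' test fail likewise.
    static int GenericCompare(double a, double b, int nan_result) {
      if (a != a || b != b) return nan_result;        // NaN on either side
      return a < b ? LESS : (a > b ? GREATER : EQUAL);
    }
    // Usage: 'a < b' becomes GenericCompare(a, b, GREATER) < 0
    //        'a > b' becomes GenericCompare(a, b, LESS)    > 0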
@@ -1413,202 +1412,105 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-// Uses registers a0 to a4.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: a0 or at sp + 1 * kPointerSize.
-// * function: a1 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register a4.
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = a0; // Object (lhs).
- Register map = a3; // Map of the object.
- const Register function = a1; // Function (rhs).
- const Register prototype = a4; // Prototype of the function.
- const Register inline_site = t1;
- const Register scratch = a2;
-
- const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize;
-
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ ld(object, MemOperand(sp, 1 * kPointerSize));
- __ ld(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- Label miss;
- __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&miss, ne, function, Operand(at));
- __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&miss, ne, map, Operand(at));
- __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = a1; // Object (lhs).
+ Register const function = a0; // Function (rhs).
+ Register const object_map = a2; // Map of {object}.
+ Register const function_map = a3; // Map of {function}.
+ Register const function_prototype = a4; // Prototype of {function}.
+ Register const scratch = a5;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&fast_case, ne, function, Operand(at));
+ __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&fast_case, ne, object_map, Operand(at));
+ __ Ret(USE_DELAY_SLOT);
+ __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
+
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ GetObjectType(function, function_map, scratch);
+ __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex); // In delay slot.
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ GetObjectType(function, function_map, scratch);
+ __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- DCHECK(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The offset was stored in a4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- __ LoadFromSafepointRegisterSlot(scratch, a4);
- __ Dsubu(inline_site, ra, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
- __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset));
-
- __ mov(t0, map);
- // |scratch| points at the beginning of the cell. Calculate the
- // field containing the map.
- __ Daddu(function, scratch, Operand(Cell::kValueOffset - 1));
- __ RecordWriteField(scratch, Cell::kValueOffset, t0, function,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- }
-
- // Register mapping: a3 is object map and a4 is function prototype.
- // Get prototype of object into a2.
- __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+ // Ensure that {function} has an instance prototype.
+ __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ __ Branch(&slow_case, ne, at, Operand(zero_reg));
- // We don't need map any more. Use it as a scratch register.
- Register scratch2 = map;
- map = no_reg;
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ __ ld(shared_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(scratch,
+ FieldMemOperand(shared_info, SharedFunctionInfo::kBoundByteOffset));
+ __ And(at, scratch, Operand(1 << SharedFunctionInfo::kBoundBitWithinByte));
+ __ Branch(&slow_case, ne, at, Operand(zero_reg));
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+ // Get the "prototype" (or initial map) of the {function}.
+ __ ld(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ __ GetObjectType(function_prototype, scratch, scratch);
+ __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
+ __ ld(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Register const null = scratch;
+ Label done, loop;
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ Branch(&is_instance, eq, scratch, Operand(prototype));
- __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
- __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ Branch(&loop);
-
- __ bind(&is_instance);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- if (!HasCallSiteInlineCheck()) {
- __ mov(v0, zero_reg);
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- }
- } else {
- // Patch the call site to return true.
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- __ mov(v0, zero_reg);
- }
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- }
- } else {
- // Patch the call site to return false.
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- }
-
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ GetObjectType(function, scratch2, scratch);
- __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Null is not instance of anything.
- __ Branch(&object_not_null, ne, object,
- Operand(isolate()->factory()->null_value()));
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+ __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Branch(&done, eq, object_prototype, Operand(function_prototype));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, object_prototype, Operand(null));
+ __ ld(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ Ret(USE_DELAY_SLOT);
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(a0, a1);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- __ mov(a0, v0);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
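The fast path above boils down to an ordinary prototype-chain walk against the function's resolved prototype, with the answer mirrored into the global instanceof cache. A rough C++ model of that walk (hypothetical stand-in type, not the V8 object model):

    // Stand-in for a JSReceiver: only the prototype link matters here.
    struct HeapObj { HeapObj* prototype; };

    // Walk {object}'s prototype chain; true iff {function_prototype} occurs on it.
    static bool OrdinaryHasInstance(HeapObj* object, HeapObj* function_prototype) {
      for (HeapObj* p = object->prototype; p != nullptr; p = p->prototype) {
        if (p == function_prototype) return true;  // found -> instanceof is true
      }
      return false;  // reached null -> not an instance
    }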
@@ -1684,73 +1586,70 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime,
- ne,
- a2,
+ __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Patch the arguments.length and the parameters pointer in the current frame.
- __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sd(a2, MemOperand(sp, 0 * kPointerSize));
+ __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiScale(a7, a2, kPointerSizeLog2);
- __ Daddu(a3, a3, Operand(a7));
- __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
- __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+ __ Daddu(a4, a4, Operand(a7));
+ __ daddiu(a3, a4, StandardFrameConstants::kCallerSPOffset);
__ bind(&runtime);
+ __ Push(a1, a3, a2);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[4] : address of receiver argument
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
// Registers used over whole function:
- // a6 : allocated object (tagged)
- // t1 : mapped parameter count (tagged)
+ // a5 : arguments count (tagged)
+ // a6 : mapped parameter count (tagged)
- __ ld(a1, MemOperand(sp, 0 * kPointerSize));
- // a1 = parameter count (tagged)
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame,
- eq,
- a2,
+ Label adaptor_frame, try_allocate, runtime;
+ __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// No adaptor, parameter count = argument count.
- __ mov(a2, a1);
- __ Branch(&try_allocate);
+ __ mov(a5, a2);
+ __ Branch(USE_DELAY_SLOT, &try_allocate);
+ __ mov(a6, a2); // In delay slot.
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(t2, a2, kPointerSizeLog2);
- __ Daddu(a3, a3, Operand(t2));
- __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sd(a3, MemOperand(sp, 1 * kPointerSize));
-
- // a1 = parameter count (tagged)
- // a2 = argument count (tagged)
- // Compute the mapped parameter count = min(a1, a2) in a1.
- Label skip_min;
- __ Branch(&skip_min, lt, a1, Operand(a2));
- __ mov(a1, a2);
- __ bind(&skip_min);
+ __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(a4, a4, Operand(t2));
+ __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // a5 = argument count (tagged)
+ // a6 = parameter count (tagged)
+ // Compute the mapped parameter count = min(a6, a5) in a6.
+ __ mov(a6, a2);
+ __ Branch(&try_allocate, le, a6, Operand(a5));
+ __ mov(a6, a5);
__ bind(&try_allocate);
@@ -1761,14 +1660,14 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
- __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
- __ SmiScale(t1, a1, kPointerSizeLog2);
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
+ __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a6 == 0.
+ __ SmiScale(t1, a6, kPointerSizeLog2);
__ daddiu(t1, t1, kParameterMapHeaderSize);
__ bind(&param_map_size);
// 2. Backing store.
- __ SmiScale(t2, a2, kPointerSizeLog2);
+ __ SmiScale(t2, a5, kPointerSizeLog2);
__ Daddu(t1, t1, Operand(t2));
__ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
@@ -1776,7 +1675,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT);
+ __ Allocate(t1, v0, a4, t1, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -1789,36 +1688,36 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+ __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
__ ld(a4, MemOperand(a4, kNormalOffset));
__ bind(&skip2_ne);
- __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+ __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
__ ld(a4, MemOperand(a4, kAliasedOffset));
__ bind(&skip2_eq);
// v0 = address of new object (tagged)
- // a1 = mapped parameter count (tagged)
// a2 = argument count (smi-tagged)
// a4 = address of arguments map (tagged)
+ // a6 = mapped parameter count (tagged)
__ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ ld(a3, MemOperand(sp, 2 * kPointerSize));
- __ AssertNotSmi(a3);
+ __ AssertNotSmi(a1);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sd(a3, FieldMemOperand(v0, kCalleeOffset));
+ __ sd(a1, FieldMemOperand(v0, kCalleeOffset));
// Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(a5);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset = JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize;
- __ sd(a2, FieldMemOperand(v0, kLengthOffset));
+ __ sd(a5, FieldMemOperand(v0, kLengthOffset));
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, a4 will point there, otherwise
@@ -1827,29 +1726,29 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
- // a1 = mapped parameter count (tagged)
// a2 = argument count (tagged)
// a4 = address of parameter map or backing store (tagged)
+ // a6 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
Label skip3;
- __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
- // Move backing store address to a3, because it is
+ __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
+ // Move backing store address to a1, because it is
// expected there when filling in the unmapped arguments.
- __ mov(a3, a4);
+ __ mov(a1, a4);
__ bind(&skip3);
- __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+ __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
- __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ Daddu(a6, a1, Operand(Smi::FromInt(2)));
- __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
+ __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
__ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ SmiScale(t2, a1, kPointerSizeLog2);
- __ Daddu(a6, a4, Operand(t2));
- __ Daddu(a6, a6, Operand(kParameterMapHeaderSize));
- __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(a5, a4, Operand(t2));
+ __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
@@ -1860,71 +1759,71 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
// We loop from right to left.
Label parameters_loop, parameters_test;
- __ mov(a6, a1);
- __ ld(t1, MemOperand(sp, 0 * kPointerSize));
- __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Dsubu(t1, t1, Operand(a1));
+ __ mov(a5, a6);
+ __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Dsubu(t1, t1, Operand(a6));
__ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Daddu(a3, a4, Operand(t2));
- __ Daddu(a3, a3, Operand(kParameterMapHeaderSize));
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(a1, a4, Operand(t2));
+ __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
- // a6 = loop variable (tagged)
- // a1 = mapping index (tagged)
- // a3 = address of backing store (tagged)
+ // a1 = address of backing store (tagged)
// a4 = address of parameter map (tagged)
- // a5 = temporary scratch (a.o., for address calculation)
+ // a0 = temporary scratch (a.o., for address calculation)
+ // t1 = loop variable (tagged)
// a7 = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
-
- __ Dsubu(a6, a6, Operand(Smi::FromInt(1)));
- __ SmiScale(a5, a6, kPointerSizeLog2);
- __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Daddu(t2, a4, a5);
+ __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
+ __ SmiScale(a0, a5, kPointerSizeLog2);
+ __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Daddu(t2, a4, a0);
__ sd(t1, MemOperand(t2));
- __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Daddu(t2, a3, a5);
+ __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Daddu(t2, a1, a0);
__ sd(a7, MemOperand(t2));
__ Daddu(t1, t1, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, a6, Operand(Smi::FromInt(0)));
+ __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
+
+ // Restore t1 = argument count (tagged).
+ __ ld(a5, FieldMemOperand(v0, kLengthOffset));
__ bind(&skip_parameter_map);
- // a2 = argument count (tagged)
- // a3 = address of backing store (tagged)
- // a5 = scratch
+ // v0 = address of new object (tagged)
+ // a1 = address of backing store (tagged)
+ // a5 = argument count (tagged)
+ // a6 = mapped parameter count (tagged)
+ // t1 = scratch
// Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
- __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset));
- __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
+ __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
Label arguments_loop, arguments_test;
- __ mov(t1, a1);
- __ ld(a4, MemOperand(sp, 1 * kPointerSize));
- __ SmiScale(t2, t1, kPointerSizeLog2);
- __ Dsubu(a4, a4, Operand(t2));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Dsubu(a3, a3, Operand(t2));
__ jmp(&arguments_test);
__ bind(&arguments_loop);
- __ Dsubu(a4, a4, Operand(kPointerSize));
- __ ld(a6, MemOperand(a4, 0));
- __ SmiScale(t2, t1, kPointerSizeLog2);
- __ Daddu(a5, a3, Operand(t2));
- __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+ __ Dsubu(a3, a3, Operand(kPointerSize));
+ __ ld(a4, MemOperand(a3, 0));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(t1, a1, Operand(t2));
+ __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
__ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, t1, Operand(a2));
+ __ Branch(&arguments_loop, lt, a6, Operand(a5));
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
+ // Return.
+ __ Ret();
// Do the runtime call to allocate the arguments object.
- // a2 = argument count (tagged)
+ // a5 = argument count (tagged)
__ bind(&runtime);
- __ sd(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ Push(a1, a3, a5);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
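For reference, the sizing that feeds the Allocate call above: the mapped parameter count is min(parameter count, argument count), and the block allocated in one go is the optional parameter map plus the backing store plus the fixed-size arguments object. A back-of-envelope C++ sketch (hypothetical parameter names; header sizes are passed in rather than taken from V8's constants):

    // Size, in words, of the single allocation done for a sloppy arguments
    // object: (1) parameter map if any parameters are mapped, (2) backing
    // store for all arguments, (3) the arguments object itself.
    static int SloppyArgumentsSizeInWords(int parameter_count, int argument_count,
                                          int param_map_header_words,
                                          int fixed_array_header_words,
                                          int arguments_object_words) {
      int mapped = parameter_count < argument_count ? parameter_count
                                                    : argument_count;
      int words = 0;
      if (mapped > 0) words += param_map_header_words + mapped;  // 1. parameter map
      words += fixed_array_header_words + argument_count;        // 2. backing store
      return words + arguments_object_words;                     // 3. JSObject
    }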
@@ -1953,46 +1852,40 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
// Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame,
- eq,
- a3,
+ Label try_allocate, runtime;
+ __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ Branch(&try_allocate, ne, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // Get the length from the frame.
- __ ld(a1, MemOperand(sp, 0));
- __ Branch(&try_allocate);
-
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sd(a1, MemOperand(sp, 0));
- __ SmiScale(at, a1, kPointerSizeLog2);
-
- __ Daddu(a3, a2, Operand(at));
-
- __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+ __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(at, a2, kPointerSizeLog2);
+ __ Daddu(a4, a4, Operand(at));
+ __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
// Try the new space allocation. Start out with computing the size
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
- __ SmiUntag(a1);
+ __ SmiUntag(t1, a2);
+ __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
- __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+ __ Daddu(t1, t1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
- __ Allocate(a1, v0, a2, a3, &runtime,
+ __ Allocate(t1, v0, a4, a5, &runtime,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
@@ -2002,88 +1895,55 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Context::STRICT_ARGUMENTS_MAP_INDEX)));
__ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a5, FieldMemOperand(v0, JSObject::kElementsOffset));
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ ld(a1, MemOperand(sp, 0 * kPointerSize));
- __ AssertSmi(a1);
- __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ AssertSmi(a2);
+ __ sd(a2,
+ FieldMemOperand(v0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
Label done;
- __ Branch(&done, eq, a1, Operand(zero_reg));
-
- // Get the parameters pointer from the stack.
- __ ld(a2, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(&done, eq, a2, Operand(zero_reg));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
- __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ SmiUntag(a1);
-
+ __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
+ __ sd(a2, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ SmiUntag(a2);
// Copy the fixed array slots.
Label loop;
// Set up a4 to point to the first array slot.
__ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
- // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement a3 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
- __ Daddu(a2, a2, Operand(-kPointerSize));
- __ ld(a3, MemOperand(a2));
+ __ Daddu(a3, a3, Operand(-kPointerSize));
+ __ ld(a5, MemOperand(a3));
// Post-increment a4 with kPointerSize on each iteration.
- __ sd(a3, MemOperand(a4));
+ __ sd(a5, MemOperand(a4));
__ Daddu(a4, a4, Operand(kPointerSize));
- __ Dsubu(a1, a1, Operand(1));
- __ Branch(&loop, ne, a1, Operand(zero_reg));
+ __ Dsubu(a2, a2, Operand(1));
+ __ Branch(&loop, ne, a2, Operand(zero_reg));
- // Return and remove the on-stack parameters.
+ // Return.
__ bind(&done);
- __ DropAndRet(3);
+ __ Ret();
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ Push(a1, a3, a2);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // sp[0] : language mode
- // sp[4] : index of rest parameter
- // sp[8] : number of parameters
- // sp[12] : receiver displacement
- // Check if the calling frame is an arguments adaptor frame.
-
- Label runtime;
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sd(a1, MemOperand(sp, 2 * kPointerSize));
- __ SmiScale(at, a1, kPointerSizeLog2);
-
- __ Daddu(a3, a2, Operand(at));
-
- __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sd(a3, MemOperand(sp, 3 * kPointerSize));
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2608,27 +2468,24 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ Branch(&done, eq, a5, Operand(at));
__ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
- __ Branch(FLAG_pretenuring_call_new ? &miss : &check_allocation_site, ne,
- feedback_map, Operand(at));
+ __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, feedback_map, Operand(at));
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, feedback_map, Operand(at));
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
- __ Branch(&megamorphic, ne, a1, Operand(a5));
- __ jmp(&done);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ Branch(&megamorphic, ne, a1, Operand(a5));
+ __ jmp(&done);
__ bind(&miss);
@@ -2647,23 +2504,21 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
- __ Branch(&not_array_function, ne, a1, Operand(a5));
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ Branch(&done);
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ Branch(&not_array_function, ne, a1, Operand(a5));
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ __ Branch(&done);
- __ bind(&not_array_function);
- }
+ __ bind(&not_array_function);
- CreateWeakCellStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
__ bind(&done);
}
@@ -2673,7 +2528,7 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
// Do not transform the receiver for strict mode functions.
int32_t strict_mode_function_mask =
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte ;
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
// Do not transform the receiver for native (Compilerhints already in a3).
int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
@@ -2686,30 +2541,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(MacroAssembler* masm,
- int argc,
- Label* non_function) {
- // Check for function proxy.
- __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // put proxy as additional argument
- __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
- __ mov(a2, zero_reg);
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ sd(a1, MemOperand(sp, argc * kPointerSize));
- __ li(a0, Operand(argc)); // Set up the number of arguments.
- __ mov(a2, zero_reg);
- __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+static void EmitSlowCase(MacroAssembler* masm, int argc) {
+ __ li(a0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2731,12 +2565,12 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
int argc, bool needs_checks,
bool call_as_method) {
// a1 : the function to call
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
if (needs_checks) {
// Check that the function is really a JavaScript function.
// a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
+ __ JumpIfSmi(a1, &slow);
// Goto slow case if we do not have a function.
__ GetObjectType(a1, a4, a4);
@@ -2770,7 +2604,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
}
if (call_as_method) {
@@ -2792,33 +2626,27 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
// a4 : original constructor (for IsSuperConstructorCall)
- Label slow, non_function_call;
+
+ Label non_function;
// Check that the function is not a smi.
- __ JumpIfSmi(a1, &non_function_call);
+ __ JumpIfSmi(a1, &non_function);
// Check that the function is a JSFunction.
__ GetObjectType(a1, a5, a5);
- __ Branch(&slow, ne, a5, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ dsrl(at, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, at);
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into a2.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by a3 + 1.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
- } else {
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into a2, or undefined.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(a2, a5);
}
@@ -2830,30 +2658,16 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ mov(a3, a1);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = a4;
- __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(jmp_reg, FieldMemOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
- // a0: number of arguments
- // a1: called object
- // a5: object type
- Label do_call;
- __ bind(&slow);
- __ Branch(&non_function_call, ne, a5, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ bind(&non_function);
+ __ mov(a3, a1);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -2897,33 +2711,15 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
}
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(vector, FieldMemOperand(vector,
- JSFunction::kSharedFunctionInfoOffset));
- __ ld(vector, FieldMemOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
-}
-
-
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a1 - function
// a3 - slot id
// a2 - vector
- Label miss;
-
+ // a4 - allocation site (loaded from vector[slot])
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
- __ Branch(&miss, ne, a1, Operand(at));
+ __ Branch(miss, ne, a1, Operand(at));
__ li(a0, Operand(arg_count()));
- __ dsrl(at, a3, 32 - kPointerSizeLog2);
- __ Daddu(at, a2, Operand(at));
- __ ld(a4, FieldMemOperand(at, FixedArray::kHeaderSize));
-
- // Verify that a4 contains an AllocationSite
- __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, a5, Operand(at));
// Increment the call count for monomorphic function calls.
__ dsrl(t0, a3, 32 - kPointerSizeLog2);
@@ -2936,18 +2732,6 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
- // Unreachable.
- __ stop("Unexpected code address");
}
@@ -2960,7 +2744,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -3014,7 +2798,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -3022,11 +2806,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&slow_start, eq, a4, Operand(at));
+ // Verify that a4 contains an AllocationSite
+ __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&not_allocation_site, ne, a5, Operand(at));
+
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -3101,8 +2894,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
// the slow case
__ bind(&slow_start);
// Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &slow);
// Goto slow case if we do not have a function.
__ GetObjectType(a1, a4, a4);
@@ -3118,10 +2911,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss //
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -3521,7 +3311,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
Label not_string, slow_string;
__ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
// Check if string has a cached array index.
- __ ld(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+ __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
__ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
__ Branch(&slow_string, ne, at, Operand(zero_reg));
__ IndexFromHash(a2, a0);
@@ -3539,7 +3329,39 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in a0.
+ Label is_number;
+ __ JumpIfSmi(a0, &is_number);
+
+ Label not_string;
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_string);
+
+ Label not_heap_number;
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+ __ bind(&not_oddball);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
@@ -3649,35 +3471,34 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ ld(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
- __ ld(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
+ // ----------- S t a t e -------------
+ // -- a1 : left
+ // -- a0 : right
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertString(a1);
+ __ AssertString(a0);
Label not_same;
__ Branch(&not_same, ne, a0, Operand(a1));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
- __ DropAndRet(2);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
+ a2);
+ __ Ret();
__ bind(&not_same);
- // Check that both objects are sequential one_byte strings.
+ // Check that both objects are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
- // Compare flat one_byte strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
- __ Daddu(sp, sp, Operand(2 * kPointerSize));
- StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
+ // Compare flat ASCII strings natively.
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
+ a3);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
+ __ Push(a1, a0);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
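The rewritten StringCompareStub takes its operands in registers (a1/a0), returns EQUAL for identical objects, compares flat one-byte strings inline, and otherwise pushes the operands and falls back to Runtime::kStringCompare. The same shape in ordinary C++, with std::string standing in for flat one-byte strings (illustrative only):

    #include <string>

    // -1 / 0 / 1 correspond to the LESS / EQUAL / GREATER Smis the stub returns.
    static int CompareStringsFastPath(const std::string& left,
                                      const std::string& right) {
      if (&left == &right) return 0;        // same object -> EQUAL
      int c = left.compare(right);          // flat one-byte comparison
      return c < 0 ? -1 : (c > 0 ? 1 : 0);
    }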
@@ -3710,6 +3531,30 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+
+ __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ AssertSmi(a1);
+ __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ AssertSmi(a0);
+ }
+ __ Ret(USE_DELAY_SLOT);
+ __ Dsubu(v0, a1, a0);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
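The boolean CompareIC above handles equality by subtracting the two oddballs directly, while relational operators first load each oddball's cached numeric value (false -> 0, true -> 1) and then subtract; strong mode throws instead. A tiny C++ model of the non-throwing paths (illustrative, hypothetical signature):

    // Result is tested against zero by the caller, as with Dsubu(v0, a1, a0).
    static int CompareBooleans(bool lhs, bool rhs, bool is_equality_op) {
      if (is_equality_op) {
        return lhs == rhs ? 0 : 1;                           // zero iff equal
      }
      return static_cast<int>(lhs) - static_cast<int>(rhs);  // via to_number
    }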
+
+
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -4027,8 +3872,20 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Branch(&miss, ne, a2, Operand(a4));
__ Branch(&miss, ne, a3, Operand(a4));
- __ Ret(USE_DELAY_SLOT);
- __ dsubu(v0, a0, a1);
+ if (Token::IsEqualityOp(op())) {
+ __ Ret(USE_DELAY_SLOT);
+ __ dsubu(v0, a0, a1);
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ li(a2, Operand(Smi::FromInt(GREATER)));
+ } else {
+ __ li(a2, Operand(Smi::FromInt(LESS)));
+ }
+ __ Push(a1, a0, a2);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4627,33 +4484,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, a2);
+ __ EmitLoadTypeFeedbackVector(a2);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, a2);
- CallIC_ArrayStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
@@ -4662,11 +4512,10 @@ void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, Register receiver_map,
- Register scratch1, Register scratch2,
- bool is_polymorphic, Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
@@ -4776,8 +4625,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(&not_array, ne, scratch1, Operand(at));
- HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
- scratch1, a7, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
__ bind(&not_array);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
@@ -4838,8 +4686,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, a7, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
__ bind(&not_array);
// Is it generic?
@@ -4858,8 +4705,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Daddu(feedback, vector, Operand(feedback));
__ ld(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, a7, false, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
@@ -4871,14 +4717,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4895,11 +4741,50 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
+ Register key = VectorStoreICDescriptor::NameRegister(); // a2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // a4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+ Register feedback = a5;
+ Register receiver_map = a6;
+ Register scratch1 = a7;
+
+ __ SmiScale(scratch1, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(scratch1));
+ __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
+
+ Register scratch2 = t0;
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+ &miss);
+
+ __ bind(&not_array);
+ __ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+ scratch1, scratch2);
- // TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ Branch(USE_DELAY_SLOT, &compare_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
}
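
For reference, the slot lookup at the top of this stub (SmiScale, Daddu, then the ld through FieldMemOperand) reduces to plain byte-offset arithmetic. A minimal standalone sketch of that arithmetic, assuming the usual mips64 constants (kSmiShift = 32, kPointerSize = 8, kHeapObjectTag = 1) and a two-word FixedArray header; SmiScale below is an illustrative helper mirroring the macro's effect, not V8's code:

#include <cstdint>
#include <cstdio>

// Assumed mips64 constants (32-bit smi values live in the upper word).
constexpr int kSmiShift = 32;
constexpr int kPointerSizeLog2 = 3;
constexpr int kPointerSize = 1 << kPointerSizeLog2;
constexpr int kHeapObjectTag = 1;
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length words

// SmiScale(dst, slot, kPointerSizeLog2): untag the smi and scale it so it
// becomes a byte offset into the vector's element area.
int64_t SmiScale(int64_t smi, int scale) { return (smi >> kSmiShift) << scale; }

int main() {
  const int64_t slot_index = 4;
  const int64_t slot_smi = slot_index << kSmiShift;  // slot as a tagged smi

  // Daddu(feedback, vector, scratch1) adds the scaled slot to the vector
  // pointer; FieldMemOperand(feedback, FixedArray::kHeaderSize) then adds the
  // header size and subtracts the heap-object tag.
  const int64_t scratch1 = SmiScale(slot_smi, kPointerSizeLog2);
  const int64_t offset_from_vector =
      scratch1 + kFixedArrayHeaderSize - kHeapObjectTag;

  std::printf("slot %lld is loaded from vector + %lld bytes\n",
              static_cast<long long>(slot_index),
              static_cast<long long>(offset_from_vector));
  return 0;
}

For slot 4 this prints an offset of 47 bytes: 4 * 8 past the 16-byte header, minus the tag.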
@@ -4913,12 +4798,129 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+
+ __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+ // +-----+------+------+-----+-----+-----+ ... ----+
+ // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
+ // +-----+------+------+-----+-----+ ----+ ... ----+
+ //  0    1      2              len-1
+ //                 ^              ^
+ //                 |              |
+ //             pointer_reg      too_far
+ //             aka feedback     scratch2
+ // also need receiver_map
+ // use cached_map (scratch1) to look in the weak map values.
+ __ SmiScale(too_far, too_far, kPointerSizeLog2);
+ __ Daddu(too_far, feedback, Operand(too_far));
+ __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Daddu(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ ld(cached_map, MemOperand(pointer_reg));
+ __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
+ // Is it a transitioning store?
+ __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&transition_call, ne, too_far, Operand(at));
+
+ __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ __ bind(&transition_call);
+ __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
+
+ __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ Move(feedback, too_far);
+ __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ __ bind(&prepare_next);
+ __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+ __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
+
+ // We exhausted our array of map handler pairs.
+ __ Branch(miss);
+}
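
HandlePolymorphicStoreCase scans the feedback array in strides of three, matching the (weak map, transition-or-undefined, handler) triples in the diagram above. A rough C++ model of that scan, using hypothetical stand-in types for the map, the weak cells and the code handler (none of these are V8's real classes):

#include <optional>
#include <vector>

// Hypothetical stand-ins for the heap objects involved.
struct Map {};
struct Handler {};                     // models a code handler
struct WeakMap { const Map* value; };  // models WeakCell; nullptr == cleared

// One (wmN, wtN, hN) triple from the feedback array.
struct Entry {
  WeakMap map;                        // wmN
  std::optional<WeakMap> transition;  // wtN; empty models "undefined"
  const Handler* handler;             // hN
};

struct Dispatch {
  const Map* new_map;  // set only for transitioning stores
  const Handler* handler;
};

// Mirrors the loop: find the entry whose weak map matches receiver_map.
// A cleared transition cell (smi value in the asm) falls through to miss.
std::optional<Dispatch> FindStoreHandler(const std::vector<Entry>& feedback,
                                         const Map* receiver_map) {
  for (const Entry& e : feedback) {
    if (e.map.value != receiver_map) continue;                // prepare_next
    if (!e.transition) return Dispatch{nullptr, e.handler};   // plain store
    if (e.transition->value == nullptr) return std::nullopt;  // cleared -> miss
    return Dispatch{e.transition->value, e.handler};          // transition_call
  }
  return std::nullopt;  // exhausted the array -> miss
}

int main() {
  Map m1, m2;
  Handler h1, h2;
  std::vector<Entry> feedback = {
      {{&m1}, std::nullopt, &h1},  // plain store handler for m1
      {{&m2}, WeakMap{&m1}, &h2},  // stores on m2 transition to m1
  };
  auto d = FindStoreHandler(feedback, &m2);
  return d && d->new_map == &m1 ? 0 : 1;  // transition entry found
}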
+
+
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
+ Register key = VectorStoreICDescriptor::NameRegister(); // a2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // a4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+ Register feedback = a5;
+ Register receiver_map = a6;
+ Register scratch1 = a7;
+
+ __ SmiScale(scratch1, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(scratch1));
+ __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
+
+ // We have a polymorphic element handler.
+ Label try_poly_name;
+
+ Register scratch2 = t0;
+
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+ &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Branch(&miss, ne, key, Operand(feedback));
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ SmiScale(scratch1, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(scratch1));
+ __ ld(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+ &miss);
- // TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ Branch(USE_DELAY_SLOT, &compare_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
}
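
The try_poly_name path depends on keyed-store name feedback occupying two adjacent vector slots: the name itself in the slot, and the map/handler FixedArray in the slot after it, which is why the second load uses FixedArray::kHeaderSize + kPointerSize. A small sketch of those two offsets, assuming the same mips64 constants as before; SlotOffset is an illustrative helper, not a V8 function:

#include <cstdio>

// Assumed mips64 constants.
constexpr int kPointerSize = 8;
constexpr int kHeapObjectTag = 1;
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length words

// Byte offset of vector slot i, as addressed through a tagged pointer.
constexpr int SlotOffset(int i) {
  return i * kPointerSize + kFixedArrayHeaderSize - kHeapObjectTag;
}

int main() {
  const int slot = 4;  // example slot index
  // Slot `slot` holds the name; the map/handler FixedArray sits in slot + 1.
  std::printf("name slot offset:    %d\n", SlotOffset(slot));
  std::printf("handler slot offset: %d\n", SlotOffset(slot + 1));
  return 0;
}

For slot 4 this prints 47 and 55; the two loads differ by exactly one pointer.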
@@ -5602,8 +5604,8 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// --
// -- sp[0] : last argument
// -- ...
- // -- sp[(argc - 1)* 4] : first argument
- // -- sp[argc * 4] : receiver
+ // -- sp[(argc - 1)* 8] : first argument
+ // -- sp[argc * 8] : receiver
// -----------------------------------
Register callee = a0;
@@ -5662,10 +5664,12 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
__ sd(at, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
+ // Stored as an int field; under the n64 ABI, 32-bit integers within a
+ // struct on the stack are always left-justified.
__ li(at, Operand(argc.immediate()));
- __ sd(at, MemOperand(a0, 2 * kPointerSize));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
// FunctionCallbackInfo::is_construct_call_ = 0
- __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
+ __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
} else {
// FunctionCallbackInfo::values_
__ dsll(at, argc.reg(), kPointerSizeLog2);
@@ -5673,11 +5677,13 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ Daddu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
__ sd(at, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
- __ sd(argc.reg(), MemOperand(a0, 2 * kPointerSize));
+ // Stored as an int field; under the n64 ABI, 32-bit integers within a
+ // struct on the stack are always left-justified.
+ __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
// FunctionCallbackInfo::is_construct_call_
__ Daddu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
__ dsll(at, argc.reg(), kPointerSizeLog2);
- __ sd(at, MemOperand(a0, 3 * kPointerSize));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize + kIntSize));
}
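
The sd-to-sw changes above follow from length_ and is_construct_call_ being 32-bit int fields packed after two pointer-sized fields. A hedged model of that layout; FakeFunctionCallbackInfo is an illustrative stand-in, not V8's real FunctionCallbackInfo:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for the on-stack FunctionCallbackInfo block the stub
// fills in; only the field order and sizes the stores rely on are modeled.
struct FakeFunctionCallbackInfo {
  void** implicit_args_;       // sd at offset 0 * kPointerSize
  void** values_;              // sd at offset 1 * kPointerSize
  int32_t length_;             // sw at offset 2 * kPointerSize
  int32_t is_construct_call_;  // sw at offset 2 * kPointerSize + kIntSize
};

int main() {
  // With 8-byte pointers (n64 ABI), the two ints pack into the next 8-byte
  // word, which is why the stub switches from sd to sw for them.
  std::printf("length_ offset:            %zu\n",
              offsetof(FakeFunctionCallbackInfo, length_));
  std::printf("is_construct_call_ offset: %zu\n",
              offsetof(FakeFunctionCallbackInfo, is_construct_call_));
  return 0;
}

On an LP64 target this prints 16 and 20, matching the 2 * kPointerSize and 2 * kPointerSize + kIntSize operands used by the stores.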
ExternalReference thunk_ref =