Diffstat (limited to 'deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc')
-rw-r--r--  deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc  280
1 file changed, 50 insertions(+), 230 deletions(-)
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index a615030fae..29d19ee809 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -120,7 +120,7 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
}
@@ -164,7 +164,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(a1);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -173,7 +173,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -192,7 +192,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ ld(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sd(a0, target);
// Update the write barrier. This clobbers a3 and a0.
if (need_write_barrier) {
@@ -830,60 +830,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1810,39 +1756,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(a0));
- DCHECK(result.is(v0));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch, Operand(stamp));
- __ ld(scratch, MemOperand(scratch));
- __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch, Operand(scratch0()));
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ li(a1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2254,7 +2167,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
- ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2624,27 +2537,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
+ __ GetObjectType(input, temp, temp2);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- __ GetObjectType(input, temp, temp2);
- __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ GetObjectType(input, temp, temp2);
- __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
@@ -2715,6 +2612,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = scratch1();
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2730,6 +2628,19 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(object_instance_type, object_instance_type,
+ Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+ Operand(zero_reg));
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
__ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -2771,7 +2682,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(v0);
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
@@ -2849,7 +2760,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ld(result, ContextOperand(context, instr->slot_index()));
+ __ ld(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2869,7 +2780,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3405,14 +3316,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
- Operand(FIRST_SPEC_OBJECT_TYPE));
+ Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
__ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ ld(result,
- ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ ld(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
+ __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3471,7 +3381,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3514,7 +3425,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3533,7 +3444,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize a0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
// Invoke function.
@@ -3910,7 +3822,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3960,11 +3872,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(a0, Operand(instr->arity()));
-
// Change context.
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ li(a0, Operand(instr->arity()));
+
// Load the code entry address
__ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
@@ -4003,19 +3917,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- __ li(a0, Operand(instr->arity()));
- // No cell in a2 for construct type feedback in optimized code
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4038,7 +3939,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4052,17 +3953,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4736,7 +4637,7 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
FPURegister dbl_scratch = double_scratch0();
__ mtc1(ToRegister(input), dbl_scratch);
- __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); // TODO(plind): f22?
+ __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
}
@@ -4793,7 +4694,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
__ cvt_d_w(dbl_scratch, dbl_scratch);
} else {
__ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch);
}
if (FLAG_inline_new) {
@@ -5518,59 +5419,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // a7 = literals array.
- // a1 = regexp literal.
- // a0 = regexp literal clone.
- // a2 and a4-a6 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ li(a7, instr->hydrogen()->literals());
- __ ld(a1, FieldMemOperand(a7, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, a1, Operand(at));
-
- // Create regexp literal using runtime function
- // Result will be in v0.
- __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a5, Operand(instr->hydrogen()->pattern()));
- __ li(a4, Operand(instr->hydrogen()->flags()));
- __ Push(a7, a6, a5, a4);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(a1, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(a1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ld(a3, FieldMemOperand(a1, i));
- __ ld(a2, FieldMemOperand(a1, i + kPointerSize));
- __ sd(a3, FieldMemOperand(v0, i));
- __ sd(a2, FieldMemOperand(v0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ld(a3, FieldMemOperand(a1, size - kPointerSize));
- __ sd(a3, FieldMemOperand(v0, size - kPointerSize));
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5678,9 +5526,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ JumpIfSmi(input, false_label);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(input, scratch, scratch1());
- __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(at, scratch,
@@ -5713,34 +5561,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
-
- EmitBranch(instr, eq, temp1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, temp2,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5872,10 +5692,10 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ And(at, object, kSmiTagMask);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
__ GetObjectType(object, a1, a1);
DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
- Operand(LAST_JS_PROXY_TYPE));
+ Operand(JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
@@ -5889,7 +5709,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
@@ -6011,7 +5831,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ li(at, scope_info);
__ Push(at, ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}