Diffstat (limited to 'deps/v8/src/arm64/lithium-codegen-arm64.cc')
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.cc  333
1 file changed, 177 insertions(+), 156 deletions(-)
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index df9e7b5ae5..ef01c91d47 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -410,8 +410,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).Is(x0));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(x3));
+ DCHECK(vector_register.is(x2));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ Mov(vector_register, vector);
+ __ Mov(slot_register, Operand(Smi::FromInt(index)));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
after_push_argument_ = false;
}
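
The new path above routes calls that carry a feedback vector and slot through the CallIC even in optimized code, so type feedback keeps flowing; calls without feedback metadata stay on the generic CallFunctionStub. A minimal sketch of that dispatch decision, in plain C++ with illustrative names (not V8's API):

    #include <cstdio>

    enum class CallType { kFunction, kMethod };

    struct CallSite {
      bool has_vector_and_slot;  // feedback vector + slot available?
      bool call_as_method;       // CALL_AS_METHOD flag set?
    };

    void EmitCall(const CallSite& site) {
      if (site.has_vector_and_slot) {
        // Mirrors the hunk: x2 <- feedback vector, x3 <- Smi slot index,
        // then call the CallIC so the feedback slot is updated.
        CallType type =
            site.call_as_method ? CallType::kMethod : CallType::kFunction;
        std::printf("CallICInOptimizedCode(%s)\n",
                    type == CallType::kMethod ? "METHOD" : "FUNCTION");
      } else {
        std::printf("CallFunctionStub\n");  // generic path, no feedback
      }
    }

    int main() {
      EmitCall({true, false});   // CallICInOptimizedCode(FUNCTION)
      EmitCall({false, true});   // CallFunctionStub
    }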
@@ -640,8 +662,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() &&
- info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
@@ -835,7 +856,7 @@ bool LCodeGen::GenerateJumpTable() {
__ Bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load the base
@@ -918,7 +939,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
@@ -942,6 +962,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -984,8 +1005,9 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
void LCodeGen::DeoptimizeBranch(
- LInstruction* instr, const char* detail, BranchType branch_type,
- Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
+ LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
+ BranchType branch_type, Register reg, int bit,
+ Deoptimizer::BailoutType* override_bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
@@ -1035,21 +1057,22 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Deoptimizer::JumpTableEntry* table_entry =
- new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
- !frame_is_built_);
+ new (zone()) Deoptimizer::JumpTableEntry(
+ entry, deopt_info, bailout_type, !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry->IsEquivalentTo(*jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -1058,85 +1081,89 @@ void LCodeGen::DeoptimizeBranch(
}
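
Note the widened reuse condition above: when --trace-deopt is active or the CPU profiler is running, every deopt site now gets its own jump-table entry (and its own DeoptInfo comment) instead of being merged with an equivalent neighbour. A rough sketch of that policy, with simplified stand-in types:

    #include <vector>

    struct JumpTableEntry {
      void* entry;       // deopt entry address
      int reason;        // stands in for Deoptimizer::DeoptInfo
      bool needs_frame;
      bool IsEquivalentTo(const JumpTableEntry& other) const {
        return entry == other.entry && reason == other.reason &&
               needs_frame == other.needs_frame;
      }
    };

    void AddDeoptEntry(std::vector<JumpTableEntry>* table,
                       const JumpTableEntry& entry,
                       bool tracing_or_profiling) {
      // Merge with the last entry only when nobody needs per-site comments.
      if (tracing_or_profiling || table->empty() ||
          !entry.IsEquivalentTo(table->back())) {
        table->push_back(entry);
      }
    }

    int main() {
      std::vector<JumpTableEntry> table;
      JumpTableEntry e{nullptr, 1, false};
      AddDeoptEntry(&table, e, false);
      AddDeoptEntry(&table, e, false);  // merged with the previous entry
      AddDeoptEntry(&table, e, true);   // kept: profiler wants every site
      return table.size() == 2 ? 0 : 1;
    }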
-void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
+void LCodeGen::Deoptimize(LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type) {
- DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
+ DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
+ override_bailout_type);
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}
void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, reg_zero, rt);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}
void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, reg_not_zero, rt);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}
void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
+ DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
- const char* detail) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- const char* detail) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* detail) {
+ LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason) {
__ CompareRoot(rt, index);
- DeoptimizeIf(eq, instr, detail);
+ DeoptimizeIf(eq, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* detail) {
+ LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason) {
__ CompareRoot(rt, index);
- DeoptimizeIf(ne, instr, detail);
+ DeoptimizeIf(ne, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
__ TestForMinusZero(input);
- DeoptimizeIf(vs, instr, detail);
+ DeoptimizeIf(vs, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, "not heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);
}
void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}
void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}
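
From here on, nearly every hunk applies the same mechanical change: the free-form const char* reason strings become Deoptimizer::DeoptReason enumerators, which are typo-proof, comparable, and cheap to store in jump-table entries. A toy sketch of the pattern (this is not V8's actual declaration; the members mirror names used below):

    #include <cstdio>

    enum class DeoptReason { kOverflow, kMinusZero, kNotASmi, kWrongMap };

    const char* DeoptReasonToString(DeoptReason reason) {
      switch (reason) {
        case DeoptReason::kOverflow:  return "overflow";
        case DeoptReason::kMinusZero: return "minus zero";
        case DeoptReason::kNotASmi:   return "not a Smi";
        case DeoptReason::kWrongMap:  return "wrong map";
      }
      return "<unknown>";
    }

    int main() {
      std::printf("%s\n", DeoptReasonToString(DeoptReason::kOverflow));
    }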
@@ -1510,7 +1537,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Add(result, left, right);
}
@@ -1524,7 +1551,7 @@ void LCodeGen::DoAddS(LAddS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Add(result, left, right);
}
@@ -1650,7 +1677,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr, "too many arguments");
+ DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -1777,8 +1804,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -1832,7 +1858,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr, "out of bounds");
+ DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
}
}
@@ -1911,7 +1937,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr, "Smi");
+ DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
}
Register map = NoReg;
@@ -1972,7 +1998,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr, "unexpected object");
+ Deoptimize(instr, Deoptimizer::kUnexpectedObject);
}
}
}
@@ -1980,27 +2006,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- Register function_reg) {
+ int formal_parameter_count, int arity,
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
// The function interface relies on the following register assignments.
- DCHECK(function_reg.Is(x1) || function_reg.IsNone());
+ Register function_reg = x1;
Register arity_reg = x0;
LPointerMap* pointers = instr->pointer_map();
- // If necessary, load the function object.
- if (function_reg.IsNone()) {
- function_reg = x1;
- __ LoadObject(function_reg, function);
- }
-
if (FLAG_debug_code) {
Label is_not_smi;
// Try to confirm that function_reg (x1) is a tagged pointer.
@@ -2186,7 +2204,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr, "instance migration failed");
+ DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
}
@@ -2241,7 +2259,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
__ Bind(&success);
@@ -2250,7 +2268,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
}
}
@@ -2258,7 +2276,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr, "not a Smi");
+ DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
}
@@ -2276,15 +2294,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr, "wrong instance type");
+ DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr, "wrong instance type");
+ DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
}
} else {
uint8_t mask;
@@ -2295,10 +2313,10 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
- "wrong instance type");
+ Deoptimizer::kWrongInstanceType);
} else {
DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
- "wrong instance type");
+ Deoptimizer::kWrongInstanceType);
}
} else {
if (tag == 0) {
@@ -2307,7 +2325,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
}
}
}
@@ -2347,7 +2365,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping conversion.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- "not a heap number/undefined");
+ Deoptimizer::kNotAHeapNumberUndefined);
__ Mov(result, 0);
__ B(&done);
@@ -2652,7 +2670,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, "value mismatch");
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}
@@ -2676,9 +2694,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr, "Smi");
+ DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, "not a date object");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2728,21 +2746,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, "division by zero");
+ DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
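
The precision check above relies on a small bit trick: for a power-of-two divisor d, |d| - 1 masks exactly the bits that the corresponding shift would discard, so a non-zero masked dividend means the division can't be exact. The same logic in portable C++:

    #include <cassert>
    #include <cstdint>

    bool DividesExactly(int32_t dividend, int32_t divisor) {
      // Mirrors the hunk: mask = divisor < 0 ? -(divisor + 1) : divisor - 1,
      // i.e. |divisor| - 1 for a power-of-two divisor.
      int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
      return (dividend & mask) == 0;  // Tst(dividend, mask), deopt on ne
    }

    int main() {
      assert(DividesExactly(16, 4));
      assert(!DividesExactly(17, 4));
      assert(DividesExactly(-16, -4));
    }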
@@ -2770,14 +2788,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, "division by zero");
+ Deoptimize(instr, Deoptimizer::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, "minus zero");
+ DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2789,7 +2807,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr, "lost precision");
+ DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
}
}
@@ -2806,13 +2824,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ Sdiv(result, dividend, divisor);
if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DCHECK_EQ(NULL, instr->temp());
+ DCHECK(!instr->temp());
return;
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, "division by zero");
+ DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) as that will produce negative zero.
@@ -2824,7 +2842,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// If the divisor >= 0 (pl, the opposite of mi), set the flags to
// condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
@@ -2836,13 +2854,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr, "lost precision");
+ DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
}
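
The (kMinInt / -1) guard above leans on ARM64 conditional compares: Cmp(dividend, 1) computes dividend - 1, which sets the V flag only when dividend == kMinInt, and Ccmp then compares the divisor against -1 only on that path. Functionally it reduces to this check (sketch):

    #include <cassert>
    #include <cstdint>

    bool DivWouldOverflow(int32_t dividend, int32_t divisor) {
      // kMinInt / -1 is the only int32 division whose quotient overflows.
      return dividend == INT32_MIN && divisor == -1;
    }

    int main() {
      assert(DivWouldOverflow(INT32_MIN, -1));
      assert(!DivWouldOverflow(INT32_MIN, 2));
      assert(!DivWouldOverflow(5, -1));
    }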
@@ -2851,11 +2869,11 @@ void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, "minus zero");
+ DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->tag_result()) {
__ SmiTag(result.X());
@@ -2887,7 +2905,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ Mov(x2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -2916,7 +2934,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr, "no cache");
+ DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
__ Bind(&done);
}
@@ -2929,17 +2947,18 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
- DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
+ Deoptimizer::kUndefined);
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
- DeoptimizeIf(eq, instr, "null");
+ DeoptimizeIf(eq, instr, Deoptimizer::kNull);
- DeoptimizeIfSmi(object, instr, "Smi");
+ DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, "not a JavaScript object");
+ DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
Label use_cache, call_runtime;
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
@@ -2953,7 +2972,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
+ Deoptimizer::kWrongMap);
__ Bind(&use_cache);
}
@@ -3216,9 +3236,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- x1);
+ instr->arity(), instr);
}
after_push_argument_ = false;
}
@@ -3346,7 +3364,8 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ Ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3367,7 +3386,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3387,7 +3407,8 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
}
}
@@ -3523,7 +3544,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr, "negative value");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -3615,12 +3636,9 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
Register scratch = ToRegister(instr->temp());
- // Detect the hole NaN by adding one to the integer representation of the
- // result, and checking for overflow.
- STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
- __ Ldr(scratch, mem_op);
- __ Cmn(scratch, 1);
- DeoptimizeIf(vs, instr, "hole");
+ __ Fmov(scratch, result);
+ __ Eor(scratch, scratch, kHoleNanInt64);
+ DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
}
}
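
The replaced hole check exploited kHoleNanInt64 being 0x7fffffffffffffff (adding 1 overflows into the sign bit); the new code moves the raw double bits into an integer register and XORs them with the exact pattern, deopting when the result is zero. Equivalent logic in portable C++ (a sketch; kHoleNanInt64 is taken from the STATIC_ASSERT in the removed lines):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    constexpr uint64_t kHoleNanInt64 = 0x7fffffffffffffff;

    bool IsHoleNan(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // Fmov(scratch, result)
      return (bits ^ kHoleNanInt64) == 0;   // Eor + DeoptimizeIfZero
    }

    int main() {
      double hole;
      std::memcpy(&hole, &kHoleNanInt64, sizeof hole);
      assert(IsHoleNan(hole));
      assert(!IsHoleNan(1.0));
    }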
@@ -3658,9 +3676,10 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr, "not a Smi");
+ DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
} else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
}
}
}
@@ -3764,7 +3783,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
}
@@ -3916,7 +3935,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, "minus zero");
+ DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
}
__ Fcvtms(result, input);
@@ -3926,7 +3945,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
}
@@ -3952,13 +3971,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
return;
}
@@ -3981,14 +4000,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, "division by zero");
+ Deoptimize(instr, Deoptimizer::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, "minus zero");
+ DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -4031,14 +4050,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr, "division by zero");
+ DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
// Check for (0 / -x) that will produce negative zero.
@@ -4048,7 +4067,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
Label done;
@@ -4207,18 +4226,18 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr, "overflow");
+ DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr, "minus zero");
+ DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr, "NaN");
+ DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
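
The kMinusZero bailout above exists because Math.round maps every input in [-0.5, -0.0] to -0, which has no int32 representation; once the magnitude test passes, the code re-reads the raw sign bit (Fmov + DeoptimizeIfNegative). A simplified model of that semantic, ignoring edge cases the real code handles separately:

    #include <cassert>
    #include <cmath>

    double JsRound(double x) {
      // JS Math.round is floor(x + 0.5), except inputs in [-0.5, -0.0]
      // must yield -0 (hence the sign-bit deopt for int32 results).
      if (x < 0.0 && x >= -0.5) return -0.0;
      return std::floor(x + 0.5);
    }

    int main() {
      assert(std::signbit(JsRound(-0.25)));  // -0: the int32 path deopts
      assert(JsRound(2.5) == 3.0);
      assert(JsRound(-1.5) == -1.0);
    }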
@@ -4296,7 +4315,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ B(&done);
}
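
The kMinusZero deopts in these Mod paths follow from JS semantics: a % b takes the sign of the dividend, so a zero remainder from a negative dividend is -0, which an int32 result can't represent. A quick demonstration via C++'s fmod, which has the same sign rule:

    #include <cassert>
    #include <cmath>

    int main() {
      // fmod, like JS %, gives the result the sign of the dividend.
      assert(std::signbit(std::fmod(-8.0, 4.0)));  // -0: would deopt
      assert(!std::signbit(std::fmod(8.0, 4.0)));  // +0 is representable
    }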
@@ -4315,7 +4334,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr, "division by zero");
+ Deoptimize(instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -4329,7 +4348,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr, "minus zero");
+ DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -4344,12 +4363,12 @@ void LCodeGen::DoModI(LModI* instr) {
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, "division by zero");
+ DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr, "minus zero");
+ DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
}
__ Bind(&done);
}
@@ -4372,10 +4391,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr, "minus zero");
+ DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr, "minus zero");
+ DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
}
}
@@ -4385,7 +4404,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Neg(result, left);
}
@@ -4401,7 +4420,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Add(result, left, left);
}
@@ -4420,7 +4439,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr, "overflow");
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
}
if (right >= 0) {
@@ -4430,7 +4449,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@@ -4488,13 +4507,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
} else {
__ Mul(result, left, right);
}
@@ -4518,7 +4537,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4526,7 +4545,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
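
The smi multiply above works because smis are stored shifted left by 32 (kSmiShift == 32, kSmiTag == 0): the high 64 bits of the 128-bit product (a << 32) * (b << 32) are exactly a * b, which Smulh extracts and SmiTag re-shifts. A sketch using the GCC/Clang __int128 extension:

    #include <cassert>
    #include <cstdint>

    // Assumes __int128 support; a tagged smi is value << 32.
    int64_t SmiMul(int64_t left_tagged, int64_t right_tagged) {
      __int128 product = (__int128)left_tagged * right_tagged;
      int64_t high = (int64_t)(product >> 64);  // Smulh
      assert(high == (int64_t)(int32_t)high);   // Cmp against SXTW, else deopt
      return high << 32;                        // SmiTag
    }

    int main() {
      int64_t six = (int64_t)6 << 32, seven = (int64_t)7 << 32;
      assert(SmiMul(six, seven) == ((int64_t)42 << 32));
    }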
@@ -4702,14 +4721,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr, "minus zero");
+ DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- "not a heap number/undefined");
+ Deoptimizer::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4903,7 +4922,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr, "overflow");
+ DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
}
__ SmiTag(output, input);
}
@@ -4915,7 +4934,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr, "not a Smi");
+ DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
}
__ Bind(&untag);
@@ -4940,7 +4959,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, "negative value");
+ DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
}
break;
default: UNREACHABLE();
@@ -4950,7 +4969,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, "negative value");
+ DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
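
The kNegativeValue deopts in the shift hunks cover JS's unsigned shift: left >>> right yields a uint32, and any result with the top bit set has no int32 representation, so optimized code must deopt rather than materialize a negative number. The check reduces to (sketch):

    #include <cassert>
    #include <cstdint>

    bool ShrResultNeedsDeopt(uint32_t result) {
      return static_cast<int32_t>(result) < 0;  // i.e. result >= 0x80000000
    }

    int main() {
      assert(ShrResultNeedsDeopt(0x80000000u));
      assert(!ShrResultNeedsDeopt(0x7fffffffu));
    }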
@@ -5003,7 +5022,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, "negative value");
+ DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
}
break;
default: UNREACHABLE();
@@ -5013,7 +5032,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, "negative value");
+ DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
}
__ Mov(result, left);
} else {
@@ -5141,7 +5160,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@@ -5179,7 +5199,8 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
}
// Store the value.
@@ -5361,7 +5382,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5471,7 +5492,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5601,7 +5622,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Sub(result, left, right);
}
@@ -5615,7 +5636,7 @@ void LCodeGen::DoSubS(LSubS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Sub(result, left, right);
}
@@ -5656,7 +5677,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// Output contains zero, undefined is converted to zero for truncating
// conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- "not a heap number/undefined/true/false");
+ Deoptimizer::kNotAHeapNumberUndefinedBoolean);
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5667,13 +5688,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr, "minus zero");
+ DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
}
}
__ Bind(&done);
@@ -5814,7 +5835,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, "memento found");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
__ Bind(&no_memento_found);
}
@@ -5939,7 +5960,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
@@ -5973,10 +5994,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr, "Smi");
+ DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, &copy_receiver);
- Deoptimize(instr, "not a JavaScript object");
+ Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));