summaryrefslogtreecommitdiff
path: root/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc')
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc470
1 files changed, 219 insertions, 251 deletions
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 9bbc8b87e8..b5e1245f3a 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -12,7 +12,6 @@
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -365,15 +364,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->constructor()).is(x1));
__ Mov(x0, Operand(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ Mov(x2, instr->hydrogen()->site());
- } else {
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- }
-
+ __ Mov(x2, instr->hydrogen()->site());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
@@ -406,7 +397,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
@@ -447,20 +438,13 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
- LoadContextFromDeferred(context);
+ if (context != nullptr) LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -599,7 +583,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Allocate a local context if needed.
- if (info()->num_heap_slots() > 0) {
+ if (info()->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in x1.
@@ -610,14 +594,12 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
// Context is returned in x0. It replaces the context passed to us. It's
@@ -625,10 +607,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Mov(cp, x0);
__ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
Register value = x0;
Register scratch = x3;
@@ -775,8 +758,6 @@ bool LCodeGen::GenerateJumpTable() {
// table.
__ Bl(&call_deopt_entry);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
masm()->CheckConstPool(false, false);
}
@@ -839,11 +820,9 @@ void LCodeGen::FinishCode(Handle<Code> code) {
PopulateDeoptimizationData(code);
}
-
void LCodeGen::DeoptimizeBranch(
- LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
- BranchType branch_type, Register reg, int bit,
- Deoptimizer::BailoutType* override_bailout_type) {
+ LInstruction* instr, DeoptimizeReason deopt_reason, BranchType branch_type,
+ Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
@@ -892,7 +871,7 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
@@ -900,14 +879,13 @@ void LCodeGen::DeoptimizeBranch(
frame_is_built_ && !info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry* table_entry =
new (zone()) Deoptimizer::JumpTableEntry(
entry, deopt_info, bailout_type, !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry->IsEquivalentTo(*jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -916,70 +894,59 @@ void LCodeGen::DeoptimizeBranch(
}
}
-
-void LCodeGen::Deoptimize(LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+void LCodeGen::Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type) {
DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
override_bailout_type);
}
-
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}
-
void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}
-
void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}
-
void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
__ CompareRoot(rt, index);
DeoptimizeIf(eq, instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
__ CompareRoot(rt, index);
DeoptimizeIf(ne, instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
__ TestForMinusZero(input);
DeoptimizeIf(vs, instr, deopt_reason);
}
@@ -987,18 +954,16 @@ void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
-
void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}
-
void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}
@@ -1377,7 +1342,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, right);
}
@@ -1391,7 +1356,7 @@ void LCodeGen::DoAddS(LAddS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, right);
}
@@ -1416,7 +1381,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register temp2 = ToRegister(instr->temp2());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -1426,6 +1391,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -1466,6 +1436,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Mov(ToRegister(instr->result()), Smi::FromInt(0));
PushSafepointRegistersScope scope(this);
+ LoadContextFromDeferred(instr->context());
// We're in a SafepointRegistersScope so we can use any scratch registers.
Register size = x0;
if (instr->size()->IsConstantOperand()) {
@@ -1484,9 +1455,51 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Mov(x10, Smi::FromInt(flags));
__ Push(size, x10);
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, nullptr);
__ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ // If the allocation folding dominator allocate triggered a GC, allocation
+ // happened in the runtime. We have to reset the top pointer to virtually
+ // undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ Register top_address = x10;
+ __ Sub(x0, x0, Operand(kHeapObjectTag));
+ __ Mov(top_address, Operand(allocation_top));
+ __ Str(x0, MemOperand(top_address));
+ __ Add(x0, x0, Operand(kHeapObjectTag));
+ }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+ Register result = ToRegister(instr->result());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ }
}
@@ -1506,7 +1519,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -1698,7 +1711,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cond, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -1778,7 +1791,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
}
Register map = NoReg;
@@ -1845,7 +1858,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr, Deoptimizer::kUnexpectedObject);
+ Deoptimize(instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -1981,7 +1994,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIfSmi(temp, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@@ -2036,7 +2049,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ Bind(&success);
@@ -2045,7 +2058,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, DeoptimizeReason::kSmi);
}
}
@@ -2053,7 +2066,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(value, instr, DeoptimizeReason::kNotASmi);
}
@@ -2066,7 +2079,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
}
@@ -2084,15 +2097,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
}
} else {
uint8_t mask;
@@ -2103,10 +2116,10 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeReason::kWrongInstanceType);
}
} else {
if (tag == 0) {
@@ -2115,7 +2128,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@@ -2155,7 +2168,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is coverted to zero for clamping conversion.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeReason::kNotAHeapNumberUndefined);
__ Mov(result, 0);
__ B(&done);
@@ -2170,30 +2183,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ Fmov(result_reg, value_reg);
- __ Lsr(result_reg, result_reg, 32);
- } else {
- __ Fmov(result_reg.W(), value_reg.S());
- }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
-
- // Insert the least significant 32 bits of hi_reg into the most significant
- // 32 bits of lo_reg, and move to a floating point register.
- __ Bfi(lo_reg, hi_reg, 32, 32);
- __ Fmov(result_reg, lo_reg);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Handle<String> class_name = instr->hydrogen()->class_name();
Label* true_label = instr->TrueLabel(chunk_);
@@ -2432,7 +2421,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
@@ -2469,21 +2458,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -2511,14 +2500,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2530,7 +2519,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIfNotZero(temp, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -2553,7 +2542,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) as that will produce negative zero.
@@ -2565,7 +2554,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
@@ -2577,13 +2566,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIfNotZero(remainder, instr, DeoptimizeReason::kLostPrecision);
}
@@ -2592,11 +2581,11 @@ void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->tag_result()) {
__ SmiTag(result.X());
@@ -2636,7 +2625,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
+ DeoptimizeIfZero(result, instr, DeoptimizeReason::kNoCache);
__ Bind(&done);
}
@@ -2758,16 +2747,6 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(x0));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2792,16 +2771,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ Ldrb(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Cmp(object_prototype, prototype);
- __ B(eq, instr->TrueLabel(chunk_));
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
__ B(eq, instr->FalseLabel(chunk_));
+ __ Cmp(object_prototype, prototype);
+ __ B(eq, instr->TrueLabel(chunk_));
__ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ B(&loop);
}
@@ -2830,7 +2809,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -2981,7 +2962,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeReason::kHole);
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3003,7 +2984,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3051,14 +3032,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).Is(x0));
- __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3152,7 +3131,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -3248,7 +3227,7 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
Register scratch = ToRegister(instr->temp());
__ Fmov(scratch, result);
__ Eor(scratch, scratch, kHoleNanInt64);
- DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
+ DeoptimizeIfZero(scratch, instr, DeoptimizeReason::kHole);
}
}
@@ -3286,10 +3265,10 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(result, instr, DeoptimizeReason::kNotASmi);
} else {
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -3303,7 +3282,7 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
__ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
- DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ Bind(&done);
@@ -3316,13 +3295,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3376,10 +3351,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@@ -3404,7 +3376,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@@ -3527,19 +3499,25 @@ void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
__ Bind(&done);
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+ DCHECK(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+ DCHECK(ToDoubleRegister(instr->result()).Is(d0));
+}
void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
- DoubleRegister double_temp2 = double_scratch();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
- Register temp3 = ToRegister(instr->temp3());
-
- MathExpGenerator::EmitMathExp(masm(), input, result,
- double_temp1, double_temp2,
- temp1, temp2, temp3);
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+ DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}
@@ -3556,7 +3534,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
}
__ Fcvtms(result, input);
@@ -3566,7 +3544,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
}
@@ -3592,13 +3570,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
return;
}
@@ -3621,14 +3599,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -3671,14 +3649,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Check for (0 / -x) that will produce negative zero.
@@ -3688,7 +3666,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
Label done;
@@ -3708,8 +3686,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToDoubleRegister(instr->value()).is(d0));
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
- 0, 1);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}
@@ -3847,18 +3824,18 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kOverflow);
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(result, instr, DeoptimizeReason::kMinusZero);
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kNaN);
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -3936,7 +3913,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ B(&done);
}
@@ -3955,7 +3932,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -3969,7 +3946,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -3984,12 +3961,12 @@ void LCodeGen::DoModI(LModI* instr) {
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
}
__ Bind(&done);
}
@@ -4012,10 +3989,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(left, instr, DeoptimizeReason::kMinusZero);
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(left, instr, DeoptimizeReason::kMinusZero);
}
}
@@ -4025,7 +4002,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Neg(result, left);
}
@@ -4041,7 +4018,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, left);
}
@@ -4060,7 +4037,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow);
}
if (right >= 0) {
@@ -4070,7 +4047,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@@ -4128,13 +4105,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
__ Mul(result, left, right);
}
@@ -4158,7 +4135,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4166,7 +4143,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
@@ -4199,12 +4176,10 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ Mov(result, 0);
PushSafepointRegistersScope scope(this);
- // NumberTagU and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!result.is(cp)) {
+ __ Mov(cp, 0);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4264,13 +4239,10 @@ void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
{
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagU and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!dst.is(cp)) {
+ __ Mov(cp, 0);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4342,14 +4314,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(result, instr, DeoptimizeReason::kMinusZero);
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4537,7 +4509,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
+ DeoptimizeIfNegative(input.W(), instr, DeoptimizeReason::kOverflow);
}
__ SmiTag(output, input);
}
@@ -4549,7 +4521,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(input, instr, DeoptimizeReason::kNotASmi);
}
__ Bind(&untag);
@@ -4574,7 +4546,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
}
break;
default: UNREACHABLE();
@@ -4584,7 +4556,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
@@ -4637,7 +4609,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
}
break;
default: UNREACHABLE();
@@ -4647,7 +4619,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
}
__ Mov(result, left);
} else {
@@ -4691,6 +4663,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
__ Push(scratch1, scratch2);
+ __ LoadHeapObject(scratch1, instr->hydrogen()->feedback_vector());
+ __ Push(scratch1);
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -4776,7 +4750,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeReason::kHole);
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@@ -4960,13 +4934,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5043,8 +5015,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
__ SmiTag(x3);
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5052,7 +5023,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
}
// Deopt on smi, which means the elements array changed to dictionary mode.
- DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(result, instr, DeoptimizeReason::kSmi);
}
@@ -5160,14 +5131,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5296,7 +5265,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Sub(result, left, right);
}
@@ -5310,7 +5279,7 @@ void LCodeGen::DoSubS(LSubS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Sub(result, left, right);
}
@@ -5351,7 +5320,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// Output contains zero, undefined is converted to zero for truncating
// conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5362,13 +5331,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(scratch1, instr, DeoptimizeReason::kMinusZero);
}
}
__ Bind(&done);
@@ -5442,8 +5411,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(this);
__ Mov(x1, Operand(to_map));
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
@@ -5459,7 +5427,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ Bind(&no_memento_found);
}
@@ -5483,8 +5451,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ Mov(x0, Immediate(isolate()->factory()->number_string()));
__ B(&end);
__ Bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ Bind(&end);
}
@@ -5605,7 +5573,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
@@ -5639,10 +5607,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(receiver, instr, DeoptimizeReason::kSmi);
__ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
__ B(ge, &copy_receiver);
- Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
+ Deoptimize(instr, DeoptimizeReason::kNotAJavaScriptObject);
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));