Diffstat (limited to 'deps/v8/src/arm/macro-assembler-arm.cc')
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 433
1 file changed, 251 insertions(+), 182 deletions(-)
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 6af3d6c20c..a08673d462 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -89,17 +89,6 @@ int MacroAssembler::CallStubSize(
}
-int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
- Address target,
- RelocInfo::Mode rmode,
- Condition cond) {
- Instr mov_instr = cond | MOV | LeaveCC;
- Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
- return kInstrSize +
- mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
-}
-
-
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond,
@@ -131,12 +120,6 @@ void MacroAssembler::Call(Address target,
// blx ip
// @ return address
- // Statement positions are expected to be recorded when the target
- // address is loaded. The mov method will automatically record
- // positions when pc is the target, since this is not the case here
- // we have to do it explicitly.
- positions_recorder()->WriteRecordedPositions();
-
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
@@ -173,6 +156,40 @@ void MacroAssembler::Call(Handle<Code> code,
Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
+void MacroAssembler::CallDeoptimizer(Address target) {
+ BlockConstPoolScope block_const_pool(this);
+
+ uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
+
+ // We use blx, like a call, but it does not return here. The link register is
+ // used by the deoptimizer to work out what called it.
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(this, ARMv7);
+ movw(ip, target_raw & 0xffff);
+ movt(ip, (target_raw >> 16) & 0xffff);
+ blx(ip);
+ } else {
+ // We need to load a literal, but we can't use the usual constant pool
+ // because we call this from a patcher, and cannot afford the guard
+ // instruction and other administrative overhead.
+ ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
+ blx(ip);
+ dd(target_raw);
+ }
+}
+
+int MacroAssembler::CallDeoptimizerSize() {
+ // ARMv7+:
+ // movw ip, ...
+ // movt ip, ...
+ // blx ip @ This never returns.
+ //
+ // ARMv6:
+ // ldr ip, =address
+ // blx ip @ This never returns.
+ // .word address
+ return 3 * kInstrSize;
+}
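
Note on the ARMv6 fallback above: when an A32 ldr reads pc, it observes the address of the ldr itself plus kPcLoadDelta (8 bytes), and the word emitted by dd() sits two instructions after the ldr, which is why the offset is written as (2 * kInstrSize) - kPcLoadDelta. A minimal standalone sketch of that arithmetic, assuming kInstrSize == 4 and kPcLoadDelta == 8 as on 32-bit ARM; the addresses are made up for illustration:

// Hypothetical check of the pc-relative offset used by CallDeoptimizer's
// non-ARMv7 path. Both variants occupy 3 * kInstrSize bytes, matching
// CallDeoptimizerSize(): movw+movt+blx, or ldr+blx plus one literal word.
#include <cassert>
#include <cstdint>

int main() {
  const int kInstrSize = 4;    // one A32 instruction
  const int kPcLoadDelta = 8;  // pc reads as "current instruction + 8"

  uintptr_t ldr_address = 0x1000;                            // address of "ldr ip, ..."
  uintptr_t literal_address = ldr_address + 2 * kInstrSize;  // after ldr and blx
  uintptr_t pc_as_seen_by_ldr = ldr_address + kPcLoadDelta;

  // The MemOperand offset that makes the ldr hit the literal word.
  int offset = (2 * kInstrSize) - kPcLoadDelta;
  assert(pc_as_seen_by_ldr + offset == literal_address);
  return 0;
}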
void MacroAssembler::Ret(Condition cond) {
bx(lr, cond);
@@ -223,19 +240,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- mov(dst, Operand(value));
- } else {
- DCHECK(value->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*value)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(value);
- mov(dst, Operand(cell));
- ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
- } else {
- mov(dst, Operand(value));
- }
- }
+ mov(dst, Operand(value));
}
@@ -245,6 +250,11 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
}
}
+void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
+ if (!dst.is(src)) {
+ vmov(dst, src);
+ }
+}
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
if (!dst.is(src)) {
@@ -252,11 +262,10 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
}
}
-
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
Register srcA, Condition cond) {
- if (CpuFeatures::IsSupported(MLS)) {
- CpuFeatureScope scope(this, MLS);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(this, ARMv7);
mls(dst, src1, src2, srcA, cond);
} else {
DCHECK(!srcA.is(ip));
@@ -355,37 +364,6 @@ void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
}
-void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
- Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- DCHECK(!dst.is(pc) && !src.rm().is(pc));
- DCHECK((satpos >= 0) && (satpos <= 31));
-
- // These asserts are required to ensure compatibility with the ARMv7
- // implementation.
- DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
- DCHECK(src.rs().is(no_reg));
-
- Label done;
- int satval = (1 << satpos) - 1;
-
- if (cond != al) {
- b(NegateCondition(cond), &done); // Skip saturate if !condition.
- }
- if (!(src.is_reg() && dst.is(src.rm()))) {
- mov(dst, src);
- }
- tst(dst, Operand(~satval));
- b(eq, &done);
- mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
- mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
- bind(&done);
- } else {
- usat(dst, satpos, src, cond);
- }
-}
-
-
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
@@ -872,8 +850,7 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// Number of d-regs not known at snapshot time.
DCHECK(!serializer_enabled());
// General purpose registers are pushed last on the stack.
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
@@ -889,10 +866,8 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
// below doesn't support it yet.
DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
- // Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
- (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
- CpuFeatureScope scope(this, ARMv7);
+ // Generate two ldr instructions if ldrd is not applicable.
+ if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
ldrd(dst1, dst2, src, cond);
} else {
if ((src.am() == Offset) || (src.am() == NegOffset)) {
@@ -930,10 +905,8 @@ void MacroAssembler::Strd(Register src1, Register src2,
// below doesn't support it yet.
DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
- // Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
- (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
- CpuFeatureScope scope(this, ARMv7);
+ // Generate two str instructions if strd is not applicable.
+ if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
@@ -950,30 +923,12 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
}
-
-void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
- // If needed, restore wanted bits of FPSCR.
- Label fpscr_done;
- vmrs(scratch);
- if (emit_debug_code()) {
- Label rounding_mode_correct;
- tst(scratch, Operand(kVFPRoundingModeMask));
- b(eq, &rounding_mode_correct);
- // Don't call Assert here, since Runtime_Abort could re-enter here.
- stop("Default rounding mode not set");
- bind(&rounding_mode_correct);
- }
- tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
- b(ne, &fpscr_done);
- orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
- vmsr(scratch);
- bind(&fpscr_done);
-}
-
-
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+ // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
+ // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
vsub(dst, src, kDoubleRegZero, cond);
}
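
The sign-of-zero half of the comment above can be checked with ordinary IEEE-754 double arithmetic; a small sketch, not tied to the ARM backend (the signalling-NaN quieting is a property of the vsub hardware instruction and is not modelled here):

// Why subtraction of 0.0 is used for NaN canonicalization: adding 0.0 would
// turn -0.0 into +0.0, while subtracting 0.0 preserves the sign of -0.0.
#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double minus_zero = -0.0;
  assert(std::signbit(minus_zero - 0.0));   // -0.0 - 0.0 == -0.0
  assert(!std::signbit(minus_zero + 0.0));  // -0.0 + 0.0 == +0.0

  double nan = std::numeric_limits<double>::quiet_NaN();
  assert(std::isnan(nan - 0.0));            // NaN inputs stay NaN
  return 0;
}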
@@ -1049,13 +1004,11 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Register scratch) {
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value_rep(imm);
+ int64_t imm_bits = bit_cast<int64_t>(imm);
// Handle special values first.
- if (value_rep == zero) {
+ if (imm_bits == bit_cast<int64_t>(0.0)) {
vmov(dst, kDoubleRegZero);
- } else if (value_rep == minus_zero) {
+ } else if (imm_bits == bit_cast<int64_t>(-0.0)) {
vneg(dst, kDoubleRegZero);
} else {
vmov(dst, imm, scratch);
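
The switch to bit_cast above matters because +0.0 and -0.0 compare equal as doubles, so only a bit-level comparison can route -0.0 to the vneg(kDoubleRegZero) path. A minimal illustration, independent of the assembler (memcpy stands in for bit_cast):

// +0.0 and -0.0 are equal under operator== but differ in their bit patterns,
// which is why Vmov now compares bit_cast<int64_t> values instead of doubles.
#include <cassert>
#include <cstdint>
#include <cstring>

static int64_t Bits(double d) {
  int64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // stand-in for bit_cast<int64_t>
  return bits;
}

int main() {
  assert(0.0 == -0.0);                                            // value comparison cannot tell them apart
  assert(Bits(0.0) != Bits(-0.0));                                // only the sign bit differs
  assert(static_cast<uint64_t>(Bits(-0.0)) == 0x8000000000000000ULL);
  return 0;
}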
@@ -1290,9 +1243,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- ldr(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -1332,13 +1284,29 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {
return frame_ends;
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(lr, fp, context, target);
+ add(fp, sp, Operand(2 * kPointerSize));
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ Pop(lr, fp, context, target);
+}
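
For orientation, the frame built by EnterBuiltinFrame should look roughly as sketched below, assuming the usual V8 ARM convention that a multi-register Push stores its first-listed register at the highest address; this layout is inferred from the sequence above, not copied from the source:

// Hypothetical slot layout (in pointers, relative to the new fp) after
// Push(lr, fp, context, target); add(fp, sp, 2 * kPointerSize); Push(argc);
//
//   fp + 1 : saved lr
//   fp + 0 : caller fp        <- fp
//   fp - 1 : context
//   fp - 2 : target
//   fp - 3 : argc             <- sp on return from EnterBuiltinFrame
//
// LeaveBuiltinFrame unwinds the same slots in reverse order, so sp must
// already point at the argc slot when it is called.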
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- mov(ip, Operand(Smi::FromInt(StackFrame::EXIT)));
+ mov(ip, Operand(Smi::FromInt(frame_type)));
PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1606,12 +1574,13 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- mov(r4, Operand(step_in_enabled));
- ldrb(r4, MemOperand(r4));
- cmp(r4, Operand(0));
- b(eq, &skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ mov(r4, Operand(last_step_action));
+ ldrsb(r4, MemOperand(r4));
+ cmp(r4, Operand(StepIn));
+ b(lt, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
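
The replacement above changes the guard from a boolean "step-in enabled" flag to a threshold test on the last recorded step action, read as a signed byte via ldrsb. A rough scalar model of the new check; the numeric enum values are illustrative, not copied from V8:

// The code relies only on the ordering StepFrame > StepIn enforced by the
// STATIC_ASSERT, so one signed ">= StepIn" comparison floods for both
// StepIn and StepFrame.
#include <cstdint>

enum StepAction : int8_t { StepNone = -1, StepOut = 0, StepNext = 1, StepIn = 2, StepFrame = 3 };

bool ShouldFloodWithOneShot(const int8_t* last_step_action_address) {
  int8_t last_step_action = *last_step_action_address;  // ldrsb r4, [r4]
  return last_step_action >= StepIn;                    // cmp r4, #StepIn; skip if lt
}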
@@ -2003,6 +1972,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2090,26 +2060,29 @@ void MacroAssembler::Allocate(int object_size,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
- add(result_end, source, bits_operand, SetCC, cond);
+ add(result_end, source, bits_operand, LeaveCC, cond);
source = result_end;
cond = cc;
}
}
- b(cs, gc_required);
+
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
- str(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2185,7 +2158,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
add(result_end, result, Operand(object_size), SetCC);
}
- b(cs, gc_required);
+
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
@@ -2194,14 +2167,122 @@ void MacroAssembler::Allocate(Register object_size, Register result,
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
- str(result_end, MemOperand(top_address));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
+ }
+
+ // Tag object.
+ add(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
+ Register top_address = scratch;
+ mov(top_address, Operand(allocation_top));
+ ldr(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
}
+
+ // Calculate new top using result. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
+ } else {
+ add(result_end, result, Operand(object_size), SetCC);
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ tst(result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace);
+ }
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
+
+ add(result, result, Operand(kHeapObjectTag));
}
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address register.
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+ ldr(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
+ // Calculate new top using result. Object size may be in words so a shift is
+ // required to get the number of bytes. We must preserve the ip register at
+ // this point, so we cannot just use add().
+ DCHECK(object_size > 0);
+ Register source = result;
+ Condition cond = al;
+ int shift = 0;
+ while (object_size != 0) {
+ if (((object_size >> shift) & 0x03) == 0) {
+ shift += 2;
+ } else {
+ int bits = object_size & (0xff << shift);
+ object_size -= bits;
+ shift += 8;
+ Operand bits_operand(bits);
+ DCHECK(bits_operand.instructions_required(this) == 1);
+ add(result_end, source, bits_operand, LeaveCC, cond);
+ source = result_end;
+ cond = cc;
+ }
+ }
+
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
+
+ add(result, result, Operand(kHeapObjectTag));
+}
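
The while loop above splits object_size into chunks that each encode as a single ARM data-processing immediate (an 8-bit value rotated by an even amount), so the size can be added without touching ip. A small sketch of the same decomposition in plain C++; the printed text is only illustrative:

// Mirrors the FastAllocate loop: skip zero two-bit groups, then peel off an
// 8-bit-wide chunk at the current shift, emitting one add per chunk.
#include <cstdio>

void PrintAddChunks(int object_size) {
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;
    } else {
      int bits = object_size & (0xff << shift);
      object_size -= bits;
      shift += 8;
      std::printf("add result_end, ..., #0x%x\n", bits);  // one single-instruction add
    }
  }
}

int main() {
  PrintAddChunks(0x10004);  // prints #0x4 then #0x10000: two encodable immediates
  return 0;
}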
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
@@ -2218,12 +2299,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -2247,12 +2324,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -2266,7 +2339,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -2280,12 +2353,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2298,7 +2367,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -2314,7 +2383,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2414,12 +2483,6 @@ void MacroAssembler::StoreNumberToDoubleElements(
DONT_DO_SMI_CHECK);
vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- // Force a canonical NaN.
- if (emit_debug_code()) {
- vmrs(ip);
- tst(ip, Operand(kVFPDefaultNaNModeControlBit));
- Assert(ne, kDefaultNaNModeNotSet);
- }
VFPCanonicalizeNaN(double_scratch);
b(&store);
@@ -2803,18 +2866,18 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
#if defined(__thumb__)
// Thumb mode builtin.
DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
mov(r1, Operand(builtin));
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2902,17 +2965,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- mov(r0, Operand(Smi::FromInt(reason)));
- push(r0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(r1, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// will not return here
if (is_const_pool_blocked()) {
@@ -3129,6 +3194,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject);
+ push(object);
+ CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAGeneratorObject);
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -3225,12 +3301,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -3238,11 +3313,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -3267,7 +3338,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -3306,17 +3378,7 @@ void MacroAssembler::CopyBytes(Register src,
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
- if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
- } else {
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- }
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
sub(length, length, Operand(kPointerSize));
b(&word_loop);
@@ -3662,7 +3724,7 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- Usat(output_reg, 8, Operand(input_reg));
+ usat(output_reg, 8, Operand(input_reg));
}
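
ClampUint8 now uses the usat instruction directly, since the ARMv6 software fallback removed earlier in this diff is no longer needed. For reference, usat with a saturation position of 8 clamps a signed value to [0, 255]; a scalar sketch of that behaviour:

// Scalar model of "usat dst, 8, src": negative inputs saturate to 0 and
// inputs above 2^8 - 1 saturate to 255, matching the removed Usat fallback.
#include <cassert>

int UnsignedSaturate(int value, int satpos) {
  int satval = (1 << satpos) - 1;
  if (value < 0) return 0;
  if (value > satval) return satval;
  return value;
}

int main() {
  assert(UnsignedSaturate(-5, 8) == 0);
  assert(UnsignedSaturate(300, 8) == 255);
  assert(UnsignedSaturate(42, 8) == 42);
  return 0;
}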
@@ -3770,7 +3832,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -3780,7 +3842,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ ldr(ip, MemOperand(ip));
+ eor(scratch_reg, scratch_reg, Operand(ip));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
b(eq, &top_check);
// The object is on a different page than allocation top. Bail out if the
@@ -3796,7 +3860,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// we are below top.
bind(&top_check);
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- cmp(scratch_reg, Operand(new_space_allocation_top));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ ldr(ip, MemOperand(ip));
+ cmp(scratch_reg, ip);
b(gt, no_memento_found);
// Memento map check.
bind(&map_check);
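
Two notes on the memento checks above: the updated code first loads the current value stored at new_space_allocation_top_adr through ip, so the comparisons are made against the allocation top itself rather than the address of the variable that holds it; and the eor/tst pair is a same-page test, since two addresses lie on the same page exactly when they agree in all bits above the page-offset bits. A tiny sketch of that predicate; the page size used here is an assumption for illustration only:

// Same-page check as performed by eor + tst(~Page::kPageAlignmentMask).
#include <cassert>
#include <cstdint>

bool OnSamePage(uintptr_t a, uintptr_t b, uintptr_t page_alignment_mask) {
  return ((a ^ b) & ~page_alignment_mask) == 0;
}

int main() {
  const uintptr_t kPageAlignmentMask = (1u << 19) - 1;  // assumed 512 KB pages
  assert(OnSamePage(0x08000010, 0x0807fff0, kPageAlignmentMask));
  assert(!OnSamePage(0x08000010, 0x08080010, kPageAlignmentMask));
  return 0;
}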
@@ -3818,8 +3884,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
@@ -3917,6 +3982,10 @@ CodePatcher::~CodePatcher() {
Assembler::FlushICache(masm_.isolate(), address_, size_);
}
+ // Check that we don't have any pending constant pools.
+ DCHECK(masm_.pending_32_bit_constants_.empty());
+ DCHECK(masm_.pending_64_bit_constants_.empty());
+
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);