summaryrefslogtreecommitdiff
path: root/deps/v8/src/arm64/deoptimizer-arm64.cc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/arm64/deoptimizer-arm64.cc')
-rw-r--r-- deps/v8/src/arm64/deoptimizer-arm64.cc | 252
1 files changed, 154 insertions, 98 deletions
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 0180797215..5f372eadd2 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -5,7 +5,6 @@
#include "src/api.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
@@ -17,6 +16,77 @@ namespace internal {
#define __ masm()->
+namespace {
+
+void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
+ int dst_offset, const CPURegList& reg_list,
+ const Register& temp0, const Register& temp1,
+ int src_offset = 0) {
+ DCHECK_EQ(reg_list.Count() % 2, 0);
+ UseScratchRegisterScope temps(masm);
+ CPURegList copy_to_input = reg_list;
+ int reg_size = reg_list.RegisterSizeInBytes();
+ DCHECK_EQ(temp0.SizeInBytes(), reg_size);
+ DCHECK_EQ(temp1.SizeInBytes(), reg_size);
+
+ // Compute some temporary addresses to avoid having the macro assembler set
+ // up a temp with an offset for accesses out of the range of the addressing
+ // mode.
+ Register src = temps.AcquireX();
+ masm->Add(src, masm->StackPointer(), src_offset);
+ masm->Add(dst, dst, dst_offset);
+
+ // Write reg_list into the frame pointed to by dst.
+ for (int i = 0; i < reg_list.Count(); i += 2) {
+ masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
+
+ CPURegister reg0 = copy_to_input.PopLowestIndex();
+ CPURegister reg1 = copy_to_input.PopLowestIndex();
+ int offset0 = reg0.code() * reg_size;
+ int offset1 = reg1.code() * reg_size;
+
+ // Pair up adjacent stores, otherwise write them separately.
+ if (offset1 == offset0 + reg_size) {
+ masm->Stp(temp0, temp1, MemOperand(dst, offset0));
+ } else {
+ masm->Str(temp0, MemOperand(dst, offset0));
+ masm->Str(temp1, MemOperand(dst, offset1));
+ }
+ }
+ masm->Sub(dst, dst, dst_offset);
+}
+
+void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
+ const Register& src_base, int src_offset) {
+ DCHECK_EQ(reg_list.Count() % 2, 0);
+ UseScratchRegisterScope temps(masm);
+ CPURegList restore_list = reg_list;
+ int reg_size = restore_list.RegisterSizeInBytes();
+
+ // Compute a temporary address to avoid having the macro assembler set
+ // up a temp with an offset for accesses out of the range of the addressing
+ // mode.
+ Register src = temps.AcquireX();
+ masm->Add(src, src_base, src_offset);
+
+ // Restore every register in restore_list from src.
+ while (!restore_list.IsEmpty()) {
+ CPURegister reg0 = restore_list.PopLowestIndex();
+ CPURegister reg1 = restore_list.PopLowestIndex();
+ int offset0 = reg0.code() * reg_size;
+ int offset1 = reg1.code() * reg_size;
+
+ // Pair up adjacent loads, otherwise read them separately.
+ if (offset1 == offset0 + reg_size) {
+ masm->Ldp(reg0, reg1, MemOperand(src, offset0));
+ } else {
+ masm->Ldr(reg0, MemOperand(src, offset0));
+ masm->Ldr(reg1, MemOperand(src, offset1));
+ }
+ }
+}
+} // namespace
+
void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
@@ -28,17 +98,23 @@ void Deoptimizer::TableEntryGenerator::Generate() {
CPURegList saved_double_registers(
CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Default()->allocatable_double_codes_mask());
+ DCHECK_EQ(saved_double_registers.Count() % 2, 0);
__ PushCPURegList(saved_double_registers);
- // Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kVRegister, kSRegSizeInBits,
RegisterConfiguration::Default()->allocatable_float_codes_mask());
+ DCHECK_EQ(saved_float_registers.Count() % 4, 0);
__ PushCPURegList(saved_float_registers);
- // We save all the registers expcept jssp, sp and lr.
+ // We save all the registers except sp, lr and the masm scratches.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
+ saved_registers.Remove(ip0);
+ saved_registers.Remove(ip1);
+ // TODO(arm): padding here can be replaced with jssp/x28 when allocatable.
+ saved_registers.Combine(padreg);
saved_registers.Combine(fp);
+ DCHECK_EQ(saved_registers.Count() % 2, 0);
__ PushCPURegList(saved_registers);
__ Mov(x3, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
@@ -64,18 +140,24 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
- // Compute the fp-to-sp delta, and correct one word for bailout id.
+ // Compute the fp-to-sp delta, adding two words for alignment padding and
+ // bailout id.
__ Add(fp_to_sp, __ StackPointer(),
- kSavedRegistersAreaSize + (1 * kPointerSize));
+ kSavedRegistersAreaSize + (2 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
- __ Mov(x0, 0);
- Label context_check;
__ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(x1, &context_check);
+
+ // Ensure we can safely load from below fp.
+ DCHECK_GT(kSavedRegistersAreaSize,
+ -JavaScriptFrameConstants::kFunctionOffset);
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ bind(&context_check);
+
+ // If x1 is a smi, zero x0.
+ __ Tst(x1, kSmiTagMask);
+ __ CzeroX(x0, eq);
+
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
@@ -96,70 +178,47 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
- CPURegList copy_to_input = saved_registers;
- for (int i = 0; i < saved_registers.Count(); i++) {
- __ Peek(x2, i * kPointerSize);
- CPURegister current_reg = copy_to_input.PopLowestIndex();
- int offset = (current_reg.code() * kPointerSize) +
- FrameDescription::registers_offset();
- __ Str(x2, MemOperand(x1, offset));
- }
+ CopyRegListToFrame(masm(), x1, FrameDescription::registers_offset(),
+ saved_registers, x2, x3);
// Copy double registers to the input frame.
- CPURegList copy_double_to_input = saved_double_registers;
- for (int i = 0; i < saved_double_registers.Count(); i++) {
- int src_offset = kDoubleRegistersOffset + (i * kDoubleSize);
- __ Peek(x2, src_offset);
- CPURegister reg = copy_double_to_input.PopLowestIndex();
- int dst_offset = FrameDescription::double_registers_offset() +
- (reg.code() * kDoubleSize);
- __ Str(x2, MemOperand(x1, dst_offset));
- }
+ CopyRegListToFrame(masm(), x1, FrameDescription::double_registers_offset(),
+ saved_double_registers, x2, x3, kDoubleRegistersOffset);
// Copy float registers to the input frame.
- CPURegList copy_float_to_input = saved_float_registers;
- for (int i = 0; i < saved_float_registers.Count(); i++) {
- int src_offset = kFloatRegistersOffset + (i * kFloatSize);
- __ Peek(w2, src_offset);
- CPURegister reg = copy_float_to_input.PopLowestIndex();
- int dst_offset =
- FrameDescription::float_registers_offset() + (reg.code() * kFloatSize);
- __ Str(w2, MemOperand(x1, dst_offset));
- }
+ // TODO(arm): these are the lower 32-bits of the double registers stored
+ // above, so we shouldn't need to store them again.
+ CopyRegListToFrame(masm(), x1, FrameDescription::float_registers_offset(),
+ saved_float_registers, w2, w3, kFloatRegistersOffset);
- // Remove the bailout id and the saved registers from the stack.
- __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
+ // Remove the padding, bailout id and the saved registers from the stack.
+ DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
+ __ Drop(2 + (kSavedRegistersAreaSize / kXRegSize));
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
Register unwind_limit = x2;
__ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
- __ Add(unwind_limit, unwind_limit, __ StackPointer());
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
- Label pop_loop;
- Label pop_loop_header;
- __ B(&pop_loop_header);
- __ Bind(&pop_loop);
- __ Pop(x4);
- __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
- __ Bind(&pop_loop_header);
- __ Cmp(unwind_limit, __ StackPointer());
- __ B(ne, &pop_loop);
+ __ SlotAddress(x1, 0);
+ __ Lsr(unwind_limit, unwind_limit, kPointerSizeLog2);
+ __ Mov(x5, unwind_limit);
+ __ CopyDoubleWords(x3, x1, x5);
+ __ Drop(unwind_limit);
// Compute the output frame in the deoptimizer.
- __ Push(x0); // Preserve deoptimizer object across call.
-
+ __ Push(padreg, x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
- __ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
+ __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
__ Ldr(__ StackPointer(),
MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
@@ -174,43 +233,29 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Bind(&outer_push_loop);
Register current_frame = x2;
- __ Ldr(current_frame, MemOperand(x0, 0));
+ Register frame_size = x3;
+ __ Ldr(current_frame, MemOperand(x0, kPointerSize, PostIndex));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
- __ B(&inner_loop_header);
+ __ Lsr(frame_size, x3, kPointerSizeLog2);
+ __ Claim(frame_size);
- __ Bind(&inner_push_loop);
- __ Sub(x3, x3, kPointerSize);
- __ Add(x6, current_frame, x3);
- __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
- __ Push(x7);
- __ Bind(&inner_loop_header);
- __ Cbnz(x3, &inner_push_loop);
+ __ Add(x7, current_frame, FrameDescription::frame_content_offset());
+ __ SlotAddress(x6, 0);
+ __ CopyDoubleWords(x6, x7, frame_size);
- __ Add(x0, x0, kPointerSize);
__ Bind(&outer_loop_header);
__ Cmp(x0, x1);
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
- DCHECK(!saved_double_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
- !saved_double_registers.IncludesAliasOf(fp_zero) &&
- !saved_double_registers.IncludesAliasOf(fp_scratch));
- while (!saved_double_registers.IsEmpty()) {
- const CPURegister reg = saved_double_registers.PopLowestIndex();
- int src_offset = FrameDescription::double_registers_offset() +
- (reg.code() * kDoubleSize);
- __ Ldr(reg, MemOperand(x1, src_offset));
- }
+ RestoreRegList(masm(), saved_double_registers, x1,
+ FrameDescription::double_registers_offset());
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
// ARM code.
- // TODO(all): This code needs to be revisited, We probably don't need to
- // restore all the registers as fullcodegen does not keep live values in
- // registers (note that at least fp must be restored though).
-
// Restore registers from the last output frame.
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
@@ -219,19 +264,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
- // We don't need to restore x7 as it will be clobbered later to hold the
- // continuation address.
- Register continuation = x7;
- saved_registers.Remove(continuation);
-
- while (!saved_registers.IsEmpty()) {
- // TODO(all): Look for opportunities to optimize this by using ldp.
- CPURegister current_reg = saved_registers.PopLowestIndex();
- int offset = (current_reg.code() * kPointerSize) +
- FrameDescription::registers_offset();
- __ Ldr(current_reg, MemOperand(last_output_frame, offset));
- }
+ RestoreRegList(masm(), saved_registers, last_output_frame,
+ FrameDescription::registers_offset());
+ Register continuation = x7;
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
@@ -239,37 +275,57 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Br(continuation);
}
-
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
-const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
-
+const int Deoptimizer::table_entry_size_ = kInstructionSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());
+ // The address at which the deopt table is entered should be in x16, the first
+ // temp register allocated. We can't assert that the address is in there, but
+ // we can check that it's the first allocated temp. Later, we'll also check
+ // the computed entry_id is in the expected range.
+ Register entry_addr = temps.AcquireX();
Register entry_id = temps.AcquireX();
+ DCHECK(entry_addr.Is(x16));
+ DCHECK(entry_id.Is(x17));
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label done;
{
InstructionAccurateScope scope(masm());
- // The number of entry will never exceed kMaxNumberOfEntries.
- // As long as kMaxNumberOfEntries is a valid 16 bits immediate you can use
- // a movz instruction to load the entry id.
- DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));
-
+ Label start_of_table, end_of_table;
+ __ bind(&start_of_table);
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ movz(entry_id, i);
- __ b(&done);
+ __ b(&end_of_table);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
+ __ bind(&end_of_table);
+
+ // Get the address of the start of the table.
+ DCHECK(is_int21(table_entry_size_ * count()));
+ __ adr(entry_id, &start_of_table);
+
+ // Compute the gap in bytes between the entry address, which should have
+ // been left in entry_addr (x16) by CallForDeoptimization, and the start of
+ // the table.
+ __ sub(entry_id, entry_addr, entry_id);
+
+ // Shift down to obtain the entry_id.
+ DCHECK_EQ(table_entry_size_, kInstructionSize);
+ __ lsr(entry_id, entry_id, kInstructionSizeLog2);
+ }
+
+ __ Push(padreg, entry_id);
+
+ if (__ emit_debug_code()) {
+ // Ensure the entry_id looks sensible, ie. 0 <= entry_id < count().
+ __ Cmp(entry_id, count());
+ __ Check(lo, kOffsetOutOfRange);
}
- __ Bind(&done);
- __ Push(entry_id);
}
bool Deoptimizer::PadTopOfStackRegister() { return true; }