diff options
author | Michaël Zasso <targos@protonmail.com> | 2019-08-01 08:38:30 +0200 |
---|---|---|
committer | Michaël Zasso <targos@protonmail.com> | 2019-08-01 12:53:56 +0200 |
commit | 2dcc3665abf57c3607cebffdeeca062f5894885d (patch) | |
tree | 4f560748132edcfb4c22d6f967a7e80d23d7ea2c /deps/v8/src/deoptimizer | |
parent | 1ee47d550c6de132f06110aa13eceb7551d643b3 (diff) | |
download | android-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.tar.gz android-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.tar.bz2 android-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.zip |
deps: update V8 to 7.6.303.28
PR-URL: https://github.com/nodejs/node/pull/28016
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Refael Ackermann (רפאל פלחי) <refack@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Diffstat (limited to 'deps/v8/src/deoptimizer')
-rw-r--r-- | deps/v8/src/deoptimizer/OWNERS | 5 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc | 255 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc | 306 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/deoptimize-reason.cc | 37 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/deoptimize-reason.h | 77 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/deoptimizer.cc | 4071 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/deoptimizer.h | 1046 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc | 226 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc | 256 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc | 257 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc | 246 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc | 252 | ||||
-rw-r--r-- | deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc | 253 |
13 files changed, 7287 insertions, 0 deletions
diff --git a/deps/v8/src/deoptimizer/OWNERS b/deps/v8/src/deoptimizer/OWNERS new file mode 100644 index 0000000000..97a194d7cf --- /dev/null +++ b/deps/v8/src/deoptimizer/OWNERS @@ -0,0 +1,5 @@ +bmeurer@chromium.org +jarin@chromium.org +mstarzinger@chromium.org +sigurds@chromium.org +tebbi@chromium.org diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc new file mode 100644 index 0000000000..4004dfd90f --- /dev/null +++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc @@ -0,0 +1,255 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/assembler-inl.h" +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/objects/objects-inl.h" + +namespace v8 { +namespace internal { + +#define __ masm-> + +// This code tries to be close to ia32 code so that any changes can be +// easily ported. +void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, + Isolate* isolate, + DeoptimizeKind deopt_kind) { + NoRootArrayScope no_root_array(masm); + + // Save all general purpose registers before messing with them. + const int kNumberOfRegisters = Register::kNumRegisters; + + // Everything but pc, lr and ip which will be saved but not restored. + RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit(); + + const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumRegisters; + const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters; + + // Save all allocatable VFP registers before messing with them. + { + // We use a run-time check for VFP32DREGS. 
+ CpuFeatureScope scope(masm, VFP32DREGS, + CpuFeatureScope::kDontCheckSupported); + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(scratch); + + // Push registers d0-d15, and possibly d16-d31, on the stack. + // If d16-d31 are not pushed, decrease the stack pointer instead. + __ vstm(db_w, sp, d16, d31, ne); + // Okay to not call AllocateStackSpace here because the size is a known + // small number and we need to use condition codes. + __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); + __ vstm(db_w, sp, d0, d15); + + // Push registers s0-s31 on the stack. + __ vstm(db_w, sp, s0, s31); + } + + // Push all 16 registers (needed to populate FrameDescription::registers_). + // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps + // handle this a bit differently. + __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit()); + + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ mov(scratch, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate))); + __ str(fp, MemOperand(scratch)); + } + + const int kSavedRegistersAreaSize = + (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize; + + // Get the bailout id is passed as r10 by the caller. + __ mov(r2, r10); + + // Get the address of the location in the code object (r3) (return + // address for lazy deoptimization) and compute the fp-to-sp delta in + // register r4. + __ mov(r3, lr); + __ add(r4, sp, Operand(kSavedRegistersAreaSize)); + __ sub(r4, fp, r4); + + // Allocate a new deoptimizer object. + // Pass four arguments in r0 to r3 and fifth argument on stack. 
+ __ PrepareCallCFunction(6); + __ mov(r0, Operand(0)); + Label context_check; + __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(r1, &context_check); + __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ mov(r1, Operand(static_cast<int>(deopt_kind))); + // r2: bailout id already loaded. + // r3: code address or 0 already loaded. + __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta. + __ mov(r5, Operand(ExternalReference::isolate_address(isolate))); + __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. + // Call Deoptimizer::New(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); + } + + // Preserve "deoptimizer" object in register r0 and get the input + // frame descriptor pointer to r1 (deoptimizer->input_); + __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); + + // Copy core registers into FrameDescription::registers_[kNumRegisters]. 
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + __ ldr(r2, MemOperand(sp, i * kPointerSize)); + __ str(r2, MemOperand(r1, offset)); + } + + // Copy VFP registers to + // double_registers_[DoubleRegister::kNumAllocatableRegisters] + int double_regs_offset = FrameDescription::double_registers_offset(); + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + int dst_offset = code * kDoubleSize + double_regs_offset; + int src_offset = + code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize; + __ vldr(d0, sp, src_offset); + __ vstr(d0, r1, dst_offset); + } + + // Copy VFP registers to + // float_registers_[FloatRegister::kNumAllocatableRegisters] + int float_regs_offset = FrameDescription::float_registers_offset(); + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + int dst_offset = code * kFloatSize + float_regs_offset; + int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize; + __ ldr(r2, MemOperand(sp, src_offset)); + __ str(r2, MemOperand(r1, dst_offset)); + } + + // Remove the saved registers from the stack. + __ add(sp, sp, Operand(kSavedRegistersAreaSize)); + + // Compute a pointer to the unwinding limit in register r2; that is + // the first stack slot not part of the input frame. + __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset())); + __ add(r2, r2, sp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. 
+ __ add(r3, r1, Operand(FrameDescription::frame_content_offset())); + Label pop_loop; + Label pop_loop_header; + __ b(&pop_loop_header); + __ bind(&pop_loop); + __ pop(r4); + __ str(r4, MemOperand(r3, 0)); + __ add(r3, r3, Operand(sizeof(uint32_t))); + __ bind(&pop_loop_header); + __ cmp(r2, sp); + __ b(ne, &pop_loop); + + // Compute the output frame in the deoptimizer. + __ push(r0); // Preserve deoptimizer object across call. + // r0: deoptimizer object; r1: scratch. + __ PrepareCallCFunction(1); + // Call Deoptimizer::ComputeOutputFrames(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ pop(r0); // Restore deoptimizer object (class Deoptimizer). + + __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: r4 = current "FrameDescription** output_", + // r1 = one past the last FrameDescription**. + __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset())); + __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_. + __ add(r1, r4, Operand(r1, LSL, 2)); + __ jmp(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: r2 = current FrameDescription*, r3 = loop index. + __ ldr(r2, MemOperand(r4, 0)); // output_[ix] + __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); + __ bind(&inner_push_loop); + __ sub(r3, r3, Operand(sizeof(uint32_t))); + __ add(r6, r2, Operand(r3)); + __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset())); + __ push(r6); + __ bind(&inner_loop_header); + __ cmp(r3, Operand::Zero()); + __ b(ne, &inner_push_loop); // test for gt? 
+ __ add(r4, r4, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ cmp(r4, r1); + __ b(lt, &outer_push_loop); + + __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + DwVfpRegister reg = DwVfpRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ vldr(reg, r1, src_offset); + } + + // Push pc and continuation from the last output frame. + __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset())); + __ push(r6); + __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset())); + __ push(r6); + + // Push the registers from the last output frame. + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + __ ldr(r6, MemOperand(r2, offset)); + __ push(r6); + } + + // Restore the registers from the stack. + __ ldm(ia_w, sp, restored_regs); // all but pc registers. + + // Remove sp, lr and pc. + __ Drop(3); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ pop(scratch); // get continuation, leave pc on stack + __ pop(lr); + __ Jump(scratch); + } + __ stop("Unreachable."); +} + +bool Deoptimizer::PadTopOfStackRegister() { return false; } + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No embedded constant pool support. 
+ UNREACHABLE(); +} + +#undef __ + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc new file mode 100644 index 0000000000..a96b1263ab --- /dev/null +++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc @@ -0,0 +1,306 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/api/api.h" +#include "src/codegen/arm64/assembler-arm64-inl.h" +#include "src/codegen/arm64/macro-assembler-arm64-inl.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/execution/frame-constants.h" + +namespace v8 { +namespace internal { + +#define __ masm-> + +namespace { + +void CopyRegListToFrame(MacroAssembler* masm, const Register& dst, + int dst_offset, const CPURegList& reg_list, + const Register& temp0, const Register& temp1, + int src_offset = 0) { + DCHECK_EQ(reg_list.Count() % 2, 0); + UseScratchRegisterScope temps(masm); + CPURegList copy_to_input = reg_list; + int reg_size = reg_list.RegisterSizeInBytes(); + DCHECK_EQ(temp0.SizeInBytes(), reg_size); + DCHECK_EQ(temp1.SizeInBytes(), reg_size); + + // Compute some temporary addresses to avoid having the macro assembler set + // up a temp with an offset for accesses out of the range of the addressing + // mode. + Register src = temps.AcquireX(); + masm->Add(src, sp, src_offset); + masm->Add(dst, dst, dst_offset); + + // Write reg_list into the frame pointed to by dst. 
+ for (int i = 0; i < reg_list.Count(); i += 2) { + masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size)); + + CPURegister reg0 = copy_to_input.PopLowestIndex(); + CPURegister reg1 = copy_to_input.PopLowestIndex(); + int offset0 = reg0.code() * reg_size; + int offset1 = reg1.code() * reg_size; + + // Pair up adjacent stores, otherwise write them separately. + if (offset1 == offset0 + reg_size) { + masm->Stp(temp0, temp1, MemOperand(dst, offset0)); + } else { + masm->Str(temp0, MemOperand(dst, offset0)); + masm->Str(temp1, MemOperand(dst, offset1)); + } + } + masm->Sub(dst, dst, dst_offset); +} + +void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list, + const Register& src_base, int src_offset) { + DCHECK_EQ(reg_list.Count() % 2, 0); + UseScratchRegisterScope temps(masm); + CPURegList restore_list = reg_list; + int reg_size = restore_list.RegisterSizeInBytes(); + + // Compute a temporary addresses to avoid having the macro assembler set + // up a temp with an offset for accesses out of the range of the addressing + // mode. + Register src = temps.AcquireX(); + masm->Add(src, src_base, src_offset); + + // No need to restore padreg. + restore_list.Remove(padreg); + + // Restore every register in restore_list from src. + while (!restore_list.IsEmpty()) { + CPURegister reg0 = restore_list.PopLowestIndex(); + CPURegister reg1 = restore_list.PopLowestIndex(); + int offset0 = reg0.code() * reg_size; + + if (reg1 == NoCPUReg) { + masm->Ldr(reg0, MemOperand(src, offset0)); + break; + } + + int offset1 = reg1.code() * reg_size; + + // Pair up adjacent loads, otherwise read them separately. 
+ if (offset1 == offset0 + reg_size) { + masm->Ldp(reg0, reg1, MemOperand(src, offset0)); + } else { + masm->Ldr(reg0, MemOperand(src, offset0)); + masm->Ldr(reg1, MemOperand(src, offset1)); + } + } +} +} // namespace + +void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, + Isolate* isolate, + DeoptimizeKind deopt_kind) { + NoRootArrayScope no_root_array(masm); + + // TODO(all): This code needs to be revisited. We probably only need to save + // caller-saved registers here. Callee-saved registers can be stored directly + // in the input frame. + + // Save all allocatable double registers. + CPURegList saved_double_registers( + CPURegister::kVRegister, kDRegSizeInBits, + RegisterConfiguration::Default()->allocatable_double_codes_mask()); + DCHECK_EQ(saved_double_registers.Count() % 2, 0); + __ PushCPURegList(saved_double_registers); + + CPURegList saved_float_registers( + CPURegister::kVRegister, kSRegSizeInBits, + RegisterConfiguration::Default()->allocatable_float_codes_mask()); + DCHECK_EQ(saved_float_registers.Count() % 4, 0); + __ PushCPURegList(saved_float_registers); + + // We save all the registers except sp, lr, platform register (x18) and the + // masm scratches. + CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28); + saved_registers.Remove(ip0); + saved_registers.Remove(ip1); + saved_registers.Remove(x18); + saved_registers.Combine(fp); + saved_registers.Align(); + DCHECK_EQ(saved_registers.Count() % 2, 0); + __ PushCPURegList(saved_registers); + + __ Mov(x3, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate))); + __ Str(fp, MemOperand(x3)); + + const int kSavedRegistersAreaSize = + (saved_registers.Count() * kXRegSize) + + (saved_double_registers.Count() * kDRegSize) + + (saved_float_registers.Count() * kSRegSize); + + // Floating point registers are saved on the stack above core registers. 
+ const int kFloatRegistersOffset = saved_registers.Count() * kXRegSize; + const int kDoubleRegistersOffset = + kFloatRegistersOffset + saved_float_registers.Count() * kSRegSize; + + // The bailout id was passed by the caller in x26. + Register bailout_id = x2; + __ Mov(bailout_id, x26); + + Register code_object = x3; + Register fp_to_sp = x4; + // Get the address of the location in the code object. This is the return + // address for lazy deoptimization. + __ Mov(code_object, lr); + // Compute the fp-to-sp delta. + __ Add(fp_to_sp, sp, kSavedRegistersAreaSize); + __ Sub(fp_to_sp, fp, fp_to_sp); + + // Allocate a new deoptimizer object. + __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + + // Ensure we can safely load from below fp. + DCHECK_GT(kSavedRegistersAreaSize, + -JavaScriptFrameConstants::kFunctionOffset); + __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + + // If x1 is a smi, zero x0. + __ Tst(x1, kSmiTagMask); + __ CzeroX(x0, eq); + + __ Mov(x1, static_cast<int>(deopt_kind)); + // Following arguments are already loaded: + // - x2: bailout id + // - x3: code object address + // - x4: fp-to-sp delta + __ Mov(x5, ExternalReference::isolate_address(isolate)); + + { + // Call Deoptimizer::New(). + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); + } + + // Preserve "deoptimizer" object in register x0. + Register deoptimizer = x0; + + // Get the input frame descriptor pointer. + __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset())); + + // Copy core registers into the input frame. + CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(), + saved_registers, x2, x3); + + // Copy double registers to the input frame. + CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(), + saved_double_registers, x2, x3, kDoubleRegistersOffset); + + // Copy float registers to the input frame. 
+ // TODO(arm): these are the lower 32-bits of the double registers stored + // above, so we shouldn't need to store them again. + CopyRegListToFrame(masm, x1, FrameDescription::float_registers_offset(), + saved_float_registers, w2, w3, kFloatRegistersOffset); + + // Remove the saved registers from the stack. + DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0); + __ Drop(kSavedRegistersAreaSize / kXRegSize); + + // Compute a pointer to the unwinding limit in register x2; that is + // the first stack slot not part of the input frame. + Register unwind_limit = x2; + __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset())); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. + __ Add(x3, x1, FrameDescription::frame_content_offset()); + __ SlotAddress(x1, 0); + __ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2); + __ Mov(x5, unwind_limit); + __ CopyDoubleWords(x3, x1, x5); + __ Drop(unwind_limit); + + // Compute the output frame in the deoptimizer. + __ Push(padreg, x0); // Preserve deoptimizer object across call. + { + // Call Deoptimizer::ComputeOutputFrames(). + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer). + + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset())); + __ Mov(sp, scratch); + } + + // Replace the current (input) frame with the output frames. 
+ Label outer_push_loop, outer_loop_header; + __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset())); + __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset())); + __ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2)); + __ B(&outer_loop_header); + + __ Bind(&outer_push_loop); + Register current_frame = x2; + Register frame_size = x3; + __ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex)); + __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset())); + __ Lsr(frame_size, x3, kSystemPointerSizeLog2); + __ Claim(frame_size); + + __ Add(x7, current_frame, FrameDescription::frame_content_offset()); + __ SlotAddress(x6, 0); + __ CopyDoubleWords(x6, x7, frame_size); + + __ Bind(&outer_loop_header); + __ Cmp(x0, x1); + __ B(lt, &outer_push_loop); + + __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset())); + RestoreRegList(masm, saved_double_registers, x1, + FrameDescription::double_registers_offset()); + + // TODO(all): ARM copies a lot (if not all) of the last output frame onto the + // stack, then pops it all into registers. Here, we try to load it directly + // into the relevant registers. Is this correct? If so, we should improve the + // ARM code. + + // Restore registers from the last output frame. + // Note that lr is not in the list of saved_registers and will be restored + // later. We can use it to hold the address of last output frame while + // reloading the other registers. 
+ DCHECK(!saved_registers.IncludesAliasOf(lr)); + Register last_output_frame = lr; + __ Mov(last_output_frame, current_frame); + + RestoreRegList(masm, saved_registers, last_output_frame, + FrameDescription::registers_offset()); + + Register continuation = x7; + __ Ldr(continuation, MemOperand(last_output_frame, + FrameDescription::continuation_offset())); + __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset())); + __ Br(continuation); +} + +bool Deoptimizer::PadTopOfStackRegister() { return true; } + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No embedded constant pool support. + UNREACHABLE(); +} + +#undef __ + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.cc b/deps/v8/src/deoptimizer/deoptimize-reason.cc new file mode 100644 index 0000000000..ed5954bf9c --- /dev/null +++ b/deps/v8/src/deoptimizer/deoptimize-reason.cc @@ -0,0 +1,37 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/deoptimizer/deoptimize-reason.h" + +namespace v8 { +namespace internal { + +std::ostream& operator<<(std::ostream& os, DeoptimizeReason reason) { + switch (reason) { +#define DEOPTIMIZE_REASON(Name, message) \ + case DeoptimizeReason::k##Name: \ + return os << #Name; + DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON) +#undef DEOPTIMIZE_REASON + } + UNREACHABLE(); +} + +size_t hash_value(DeoptimizeReason reason) { + return static_cast<uint8_t>(reason); +} + +char const* DeoptimizeReasonToString(DeoptimizeReason reason) { + static char const* kDeoptimizeReasonStrings[] = { +#define DEOPTIMIZE_REASON(Name, message) message, + DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON) +#undef DEOPTIMIZE_REASON + }; + size_t const index = static_cast<size_t>(reason); + DCHECK_LT(index, arraysize(kDeoptimizeReasonStrings)); + return kDeoptimizeReasonStrings[index]; +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h new file mode 100644 index 0000000000..d556e89927 --- /dev/null +++ b/deps/v8/src/deoptimizer/deoptimize-reason.h @@ -0,0 +1,77 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_DEOPTIMIZER_DEOPTIMIZE_REASON_H_ +#define V8_DEOPTIMIZER_DEOPTIMIZE_REASON_H_ + +#include "src/common/globals.h" + +namespace v8 { +namespace internal { + +#define DEOPTIMIZE_REASON_LIST(V) \ + V(ArrayBufferWasDetached, "array buffer was detached") \ + V(CowArrayElementsChanged, "copy-on-write array's elements changed") \ + V(CouldNotGrowElements, "failed to grow elements store") \ + V(DeoptimizeNow, "%_DeoptimizeNow") \ + V(DivisionByZero, "division by zero") \ + V(Hole, "hole") \ + V(InstanceMigrationFailed, "instance migration failed") \ + V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \ + V(InsufficientTypeFeedbackForConstruct, \ + "Insufficient type feedback for construct") \ + V(InsufficientTypeFeedbackForForIn, "Insufficient type feedback for for-in") \ + V(InsufficientTypeFeedbackForBinaryOperation, \ + "Insufficient type feedback for binary operation") \ + V(InsufficientTypeFeedbackForCompareOperation, \ + "Insufficient type feedback for compare operation") \ + V(InsufficientTypeFeedbackForGenericNamedAccess, \ + "Insufficient type feedback for generic named access") \ + V(InsufficientTypeFeedbackForGenericKeyedAccess, \ + "Insufficient type feedback for generic keyed access") \ + V(InsufficientTypeFeedbackForUnaryOperation, \ + "Insufficient type feedback for unary operation") \ + V(LostPrecision, "lost precision") \ + V(LostPrecisionOrNaN, "lost precision or NaN") \ + V(MinusZero, "minus zero") \ + V(NaN, "NaN") \ + V(NoCache, "no cache") \ + V(NotAHeapNumber, "not a heap number") \ + V(NotAJavaScriptObject, "not a JavaScript object") \ + V(NotAJavaScriptObjectOrNullOrUndefined, \ + "not a JavaScript object, Null or Undefined") \ + V(NotANumberOrOddball, "not a Number or Oddball") \ + V(NotASmi, "not a Smi") \ + V(NotAString, "not a String") \ + V(NotASymbol, "not a Symbol") \ + V(OutOfBounds, "out of bounds") \ + V(Overflow, "overflow") \ + V(ReceiverNotAGlobalProxy, "receiver was not a global proxy") \ + V(Smi, 
"Smi") \ + V(Unknown, "(unknown)") \ + V(ValueMismatch, "value mismatch") \ + V(WrongCallTarget, "wrong call target") \ + V(WrongEnumIndices, "wrong enum indices") \ + V(WrongInstanceType, "wrong instance type") \ + V(WrongMap, "wrong map") \ + V(WrongName, "wrong name") \ + V(WrongValue, "wrong value") \ + V(NoInitialElement, "no initial element") + +enum class DeoptimizeReason : uint8_t { +#define DEOPTIMIZE_REASON(Name, message) k##Name, + DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON) +#undef DEOPTIMIZE_REASON +}; + +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, DeoptimizeReason); + +size_t hash_value(DeoptimizeReason reason); + +V8_EXPORT_PRIVATE char const* DeoptimizeReasonToString(DeoptimizeReason reason); + +} // namespace internal +} // namespace v8 + +#endif // V8_DEOPTIMIZER_DEOPTIMIZE_REASON_H_ diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc new file mode 100644 index 0000000000..91556cfbdc --- /dev/null +++ b/deps/v8/src/deoptimizer/deoptimizer.cc @@ -0,0 +1,4071 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/deoptimizer/deoptimizer.h" + +#include <memory> + +#include "src/ast/prettyprinter.h" +#include "src/builtins/accessors.h" +#include "src/codegen/assembler-inl.h" +#include "src/codegen/callable.h" +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/diagnostics/disasm.h" +#include "src/execution/frames-inl.h" +#include "src/execution/v8threads.h" +#include "src/handles/global-handles.h" +#include "src/heap/heap-inl.h" +#include "src/init/v8.h" +#include "src/interpreter/interpreter.h" +#include "src/logging/counters.h" +#include "src/logging/log.h" +#include "src/objects/debug-objects-inl.h" +#include "src/objects/heap-number-inl.h" +#include "src/objects/smi.h" +#include "src/tracing/trace-event.h" + +// Has to be the last include (doesn't have include guards) +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +// {FrameWriter} offers a stack writer abstraction for writing +// FrameDescriptions. The main service the class provides is managing +// {top_offset_}, i.e. the offset of the next slot to write to. 
+class FrameWriter { + public: + static const int NO_INPUT_INDEX = -1; + FrameWriter(Deoptimizer* deoptimizer, FrameDescription* frame, + CodeTracer::Scope* trace_scope) + : deoptimizer_(deoptimizer), + frame_(frame), + trace_scope_(trace_scope), + top_offset_(frame->GetFrameSize()) {} + + void PushRawValue(intptr_t value, const char* debug_hint) { + PushValue(value); + + if (trace_scope_ != nullptr) { + DebugPrintOutputValue(value, debug_hint); + } + } + + void PushRawObject(Object obj, const char* debug_hint) { + intptr_t value = obj.ptr(); + PushValue(value); + if (trace_scope_ != nullptr) { + DebugPrintOutputObject(obj, top_offset_, debug_hint); + } + } + + void PushCallerPc(intptr_t pc) { + top_offset_ -= kPCOnStackSize; + frame_->SetCallerPc(top_offset_, pc); + DebugPrintOutputValue(pc, "caller's pc\n"); + } + + void PushCallerFp(intptr_t fp) { + top_offset_ -= kFPOnStackSize; + frame_->SetCallerFp(top_offset_, fp); + DebugPrintOutputValue(fp, "caller's fp\n"); + } + + void PushCallerConstantPool(intptr_t cp) { + top_offset_ -= kSystemPointerSize; + frame_->SetCallerConstantPool(top_offset_, cp); + DebugPrintOutputValue(cp, "caller's constant_pool\n"); + } + + void PushTranslatedValue(const TranslatedFrame::iterator& iterator, + const char* debug_hint = "") { + Object obj = iterator->GetRawValue(); + + PushRawObject(obj, debug_hint); + + if (trace_scope_) { + PrintF(trace_scope_->file(), " (input #%d)\n", iterator.input_index()); + } + + deoptimizer_->QueueValueForMaterialization(output_address(top_offset_), obj, + iterator); + } + + unsigned top_offset() const { return top_offset_; } + + private: + void PushValue(intptr_t value) { + CHECK_GE(top_offset_, 0); + top_offset_ -= kSystemPointerSize; + frame_->SetFrameSlot(top_offset_, value); + } + + Address output_address(unsigned output_offset) { + Address output_address = + static_cast<Address>(frame_->GetTop()) + output_offset; + return output_address; + } + + void DebugPrintOutputValue(intptr_t value, const 
char* debug_hint = "") { + if (trace_scope_ != nullptr) { + PrintF(trace_scope_->file(), + " " V8PRIxPTR_FMT ": [top + %3d] <- " V8PRIxPTR_FMT " ; %s", + output_address(top_offset_), top_offset_, value, debug_hint); + } + } + + void DebugPrintOutputObject(Object obj, unsigned output_offset, + const char* debug_hint = "") { + if (trace_scope_ != nullptr) { + PrintF(trace_scope_->file(), " " V8PRIxPTR_FMT ": [top + %3d] <- ", + output_address(output_offset), output_offset); + if (obj.IsSmi()) { + PrintF(V8PRIxPTR_FMT " <Smi %d>", obj.ptr(), Smi::cast(obj).value()); + } else { + obj.ShortPrint(trace_scope_->file()); + } + PrintF(trace_scope_->file(), " ; %s", debug_hint); + } + } + + Deoptimizer* deoptimizer_; + FrameDescription* frame_; + CodeTracer::Scope* trace_scope_; + unsigned top_offset_; +}; + +DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) { + Code* start = &deopt_entry_code_[0]; + Code* end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1]; + heap_->RegisterStrongRoots(FullObjectSlot(start), FullObjectSlot(end)); +} + +DeoptimizerData::~DeoptimizerData() { + Code* start = &deopt_entry_code_[0]; + heap_->UnregisterStrongRoots(FullObjectSlot(start)); +} + +Code DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) { + return deopt_entry_code_[static_cast<int>(kind)]; +} + +void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code code) { + deopt_entry_code_[static_cast<int>(kind)] = code; +} + +Code Deoptimizer::FindDeoptimizingCode(Address addr) { + if (function_.IsHeapObject()) { + // Search all deoptimizing code in the native context of the function. 
+    Isolate* isolate = isolate_;
+    Context native_context = function_.context().native_context();
+    Object element = native_context.DeoptimizedCodeListHead();
+    while (!element.IsUndefined(isolate)) {
+      Code code = Code::cast(element);
+      CHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
+      if (code.contains(addr)) return code;
+      element = code.next_code_link();
+    }
+  }
+  return Code();
+}
+
+// We rely on this function not causing a GC.  It is called from generated code
+// without having a real stack frame in place.
+Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
+                              unsigned bailout_id, Address from,
+                              int fp_to_sp_delta, Isolate* isolate) {
+  JSFunction function = JSFunction::cast(Object(raw_function));
+  Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind,
+                                             bailout_id, from, fp_to_sp_delta);
+  // Only one Deoptimizer may be active per isolate at a time.
+  CHECK_NULL(isolate->deoptimizer_data()->current_);
+  isolate->deoptimizer_data()->current_ = deoptimizer;
+  return deoptimizer;
+}
+
+// Detaches the currently active Deoptimizer from the isolate, freeing its
+// frame descriptions first, and hands it back to the caller.
+Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
+  Deoptimizer* result = isolate->deoptimizer_data()->current_;
+  CHECK_NOT_NULL(result);
+  result->DeleteFrameDescriptions();
+  isolate->deoptimizer_data()->current_ = nullptr;
+  return result;
+}
+
+// Builds a DeoptimizedFrameInfo for the |jsframe_index|-th JavaScript frame
+// (interpreted or JS builtin continuation) inside the given optimized frame,
+// for use by the debugger.
+DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
+    JavaScriptFrame* frame, int jsframe_index, Isolate* isolate) {
+  CHECK(frame->is_optimized());
+
+  TranslatedState translated_values(frame);
+  translated_values.Prepare(frame->fp());
+
+  TranslatedState::iterator frame_it = translated_values.end();
+  int counter = jsframe_index;
+  for (auto it = translated_values.begin(); it != translated_values.end();
+       it++) {
+    if (it->kind() == TranslatedFrame::kInterpretedFunction ||
+        it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
+        it->kind() ==
+            TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
+      if (counter == 0) {
+        frame_it = it;
+        break;
+      }
+      counter--;
+    }
+  }
+  CHECK(frame_it != translated_values.end());
+  // We only include kJavaScriptBuiltinContinuation frames above to get the
+  // counting right.
+  CHECK_EQ(frame_it->kind(), TranslatedFrame::kInterpretedFunction);
+
+  DeoptimizedFrameInfo* info =
+      new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);
+
+  return info;
+}
+
+namespace {
+// Thread visitor that patches the pc of every stack frame whose Code is
+// marked for deoptimization, redirecting it to the deopt trampoline recorded
+// in the safepoint table, and removes such codes from |codes|.
+class ActivationsFinder : public ThreadVisitor {
+ public:
+  explicit ActivationsFinder(std::set<Code>* codes, Code topmost_optimized_code,
+                             bool safe_to_deopt_topmost_optimized_code)
+      : codes_(codes) {
+#ifdef DEBUG
+    topmost_ = topmost_optimized_code;
+    safe_to_deopt_ = safe_to_deopt_topmost_optimized_code;
+#endif
+  }
+
+  // Find the frames with activations of codes marked for deoptimization, search
+  // for the trampoline to the deoptimizer call respective to each code, and use
+  // it to replace the current pc on the stack.
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
+    for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+      if (it.frame()->type() == StackFrame::OPTIMIZED) {
+        Code code = it.frame()->LookupCode();
+        if (code.kind() == Code::OPTIMIZED_FUNCTION &&
+            code.marked_for_deoptimization()) {
+          codes_->erase(code);
+          // Obtain the trampoline to the deoptimizer call.
+          SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
+          int trampoline_pc = safepoint.trampoline_pc();
+          DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
+          // Replace the current pc on the stack with the trampoline.
+          it.frame()->set_pc(code.raw_instruction_start() + trampoline_pc);
+        }
+      }
+    }
+  }
+
+ private:
+  std::set<Code>* codes_;
+
+#ifdef DEBUG
+  Code topmost_;
+  bool safe_to_deopt_;
+#endif
+};
+}  // namespace
+
+// Move marked code from the optimized code list to the deoptimized code list,
+// and replace pc on the stack for codes marked for deoptimization.
+void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
+  DisallowHeapAllocation no_allocation;
+
+  Isolate* isolate = context.GetIsolate();
+  Code topmost_optimized_code;
+  bool safe_to_deopt_topmost_optimized_code = false;
+#ifdef DEBUG
+  // Make sure all activations of optimized code can deopt at their current PC.
+  // The topmost optimized code has special handling because it cannot be
+  // deoptimized due to weak object dependency.
+  for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
+       it.Advance()) {
+    StackFrame::Type type = it.frame()->type();
+    if (type == StackFrame::OPTIMIZED) {
+      Code code = it.frame()->LookupCode();
+      JSFunction function =
+          static_cast<OptimizedFrame*>(it.frame())->function();
+      if (FLAG_trace_deopt) {
+        CodeTracer::Scope scope(isolate->GetCodeTracer());
+        PrintF(scope.file(), "[deoptimizer found activation of function: ");
+        function.PrintName(scope.file());
+        PrintF(scope.file(), " / %" V8PRIxPTR "]\n", function.ptr());
+      }
+      SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
+
+      // Turbofan deopt is checked when we are patching addresses on stack.
+      bool safe_if_deopt_triggered = safepoint.has_deoptimization_index();
+      bool is_builtin_code = code.kind() == Code::BUILTIN;
+      DCHECK(topmost_optimized_code.is_null() || safe_if_deopt_triggered ||
+             is_builtin_code);
+      if (topmost_optimized_code.is_null()) {
+        topmost_optimized_code = code;
+        safe_to_deopt_topmost_optimized_code = safe_if_deopt_triggered;
+      }
+    }
+  }
+#endif
+
+  // We will use this set to mark those Code objects that are marked for
+  // deoptimization and have not been found in stack frames.
+  std::set<Code> codes;
+
+  // Move marked code from the optimized code list to the deoptimized code list.
+  // Walk over all optimized code objects in this native context.
+  // |prev| tracks the last node known to remain on the optimized code list.
+  Code prev;
+  Object element = context.OptimizedCodeListHead();
+  while (!element.IsUndefined(isolate)) {
+    Code code = Code::cast(element);
+    CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
+    Object next = code.next_code_link();
+
+    if (code.marked_for_deoptimization()) {
+      codes.insert(code);
+
+      if (!prev.is_null()) {
+        // Skip this code in the optimized code list.
+        prev.set_next_code_link(next);
+      } else {
+        // There was no previous node, the next node is the new head.
+        context.SetOptimizedCodeListHead(next);
+      }
+
+      // Move the code to the _deoptimized_ code list.
+      code.set_next_code_link(context.DeoptimizedCodeListHead());
+      context.SetDeoptimizedCodeListHead(code);
+    } else {
+      // Not marked; preserve this element.
+      prev = code;
+    }
+    element = next;
+  }
+
+  ActivationsFinder visitor(&codes, topmost_optimized_code,
+                            safe_to_deopt_topmost_optimized_code);
+  // Iterate over the stack of this thread.
+  visitor.VisitThread(isolate, isolate->thread_local_top());
+  // In addition to iterate over the stack of this thread, we also
+  // need to consider all the other threads as they may also use
+  // the code currently beings deoptimized.
+  isolate->thread_manager()->IterateArchivedThreads(&visitor);
+
+  // If there's no activation of a code in any stack then we can remove its
+  // deoptimization data. We do this to ensure that code objects that are
+  // unlinked don't transitively keep objects alive unnecessarily.
+  // |codes| now holds only marked codes with no live activation on any stack.
+  for (Code code : codes) {
+    isolate->heap()->InvalidateCodeDeoptimizationData(code);
+  }
+}
+
+// Marks all optimized code in every native context and then deoptimizes it.
+void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     RuntimeCallCounterId::kDeoptimizeCode);
+  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
+  if (FLAG_trace_deopt) {
+    CodeTracer::Scope scope(isolate->GetCodeTracer());
+    PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
+  }
+  isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);
+  DisallowHeapAllocation no_allocation;
+  // For all contexts, mark all code, then deoptimize.
+  Object context = isolate->heap()->native_contexts_list();
+  while (!context.IsUndefined(isolate)) {
+    Context native_context = Context::cast(context);
+    MarkAllCodeForContext(native_context);
+    DeoptimizeMarkedCodeForContext(native_context);
+    context = native_context.next_context_link();
+  }
+}
+
+// Deoptimizes, in every native context, only the code that has already been
+// marked for deoptimization.
+void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     RuntimeCallCounterId::kDeoptimizeCode);
+  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
+  if (FLAG_trace_deopt) {
+    CodeTracer::Scope scope(isolate->GetCodeTracer());
+    PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
+  }
+  DisallowHeapAllocation no_allocation;
+  // For all contexts, deoptimize code already marked.
+  Object context = isolate->heap()->native_contexts_list();
+  while (!context.IsUndefined(isolate)) {
+    Context native_context = Context::cast(context);
+    DeoptimizeMarkedCodeForContext(native_context);
+    context = native_context.next_context_link();
+  }
+}
+
+// Sets the marked_for_deoptimization bit on every Code object on the given
+// context's optimized code list.
+void Deoptimizer::MarkAllCodeForContext(Context context) {
+  Object element = context.OptimizedCodeListHead();
+  Isolate* isolate = context.GetIsolate();
+  while (!element.IsUndefined(isolate)) {
+    Code code = Code::cast(element);
+    CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
+    code.set_marked_for_deoptimization(true);
+    element = code.next_code_link();
+  }
+}
+
+// Deoptimizes |code| (or the function's current code if |code| is null) when
+// it is optimized code, evicting it from the feedback vector as well.
+void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
+  Isolate* isolate = function.GetIsolate();
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     RuntimeCallCounterId::kDeoptimizeCode);
+  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
+  function.ResetIfBytecodeFlushed();
+  if (code.is_null()) code = function.code();
+
+  if (code.kind() == Code::OPTIMIZED_FUNCTION) {
+    // Mark the code for deoptimization and unlink any functions that also
+    // refer to that code. The code cannot be shared across native contexts,
+    // so we only need to search one.
+    code.set_marked_for_deoptimization(true);
+    // The code in the function's optimized code feedback vector slot might
+    // be different from the code on the function - evict it if necessary.
+    function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
+        function.shared(), "unlinking code marked for deopt");
+    if (!code.deopt_already_counted()) {
+      code.set_deopt_already_counted(true);
+    }
+    DeoptimizeMarkedCodeForContext(function.context().native_context());
+  }
+}
+
+// Static trampoline used by generated code to build the output frames.
+void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
+  deoptimizer->DoComputeOutputFrames();
+}
+
+// Human-readable name for a deopt kind, used in --trace-deopt output.
+const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
+  switch (kind) {
+    case DeoptimizeKind::kEager:
+      return "eager";
+    case DeoptimizeKind::kSoft:
+      return "soft";
+    case DeoptimizeKind::kLazy:
+      return "lazy";
+  }
+  FATAL("Unsupported deopt kind");
+  return nullptr;
+}
+
+Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
+                         DeoptimizeKind kind, unsigned bailout_id, Address from,
+                         int fp_to_sp_delta)
+    : isolate_(isolate),
+      function_(function),
+      bailout_id_(bailout_id),
+      deopt_kind_(kind),
+      from_(from),
+      fp_to_sp_delta_(fp_to_sp_delta),
+      deoptimizing_throw_(false),
+      catch_handler_data_(-1),
+      catch_handler_pc_offset_(-1),
+      input_(nullptr),
+      output_count_(0),
+      jsframe_count_(0),
+      output_(nullptr),
+      caller_frame_top_(0),
+      caller_fp_(0),
+      caller_pc_(0),
+      caller_constant_pool_(0),
+      input_frame_context_(0),
+      stack_fp_(0),
+      trace_scope_(nullptr) {
+  // A pending lazy-throw flag on the isolate turns this deopt into a
+  // deopt-to-catch-handler; the flag is consumed here.
+  if (isolate->deoptimizer_lazy_throw()) {
+    isolate->set_deoptimizer_lazy_throw(false);
+    deoptimizing_throw_ = true;
+  }
+
+  DCHECK_NE(from, kNullAddress);
+  compiled_code_ = FindOptimizedCode();
+  DCHECK(!compiled_code_.is_null());
+
+  DCHECK(function.IsJSFunction());
+  trace_scope_ = FLAG_trace_deopt
+                     ? new CodeTracer::Scope(isolate->GetCodeTracer())
+                     : nullptr;
+#ifdef DEBUG
+  DCHECK(AllowHeapAllocation::IsAllowed());
+  disallow_heap_allocation_ = new DisallowHeapAllocation();
+#endif  // DEBUG
+  // Count each soft deopt only once per code object (deopt_already_counted).
+  if ((compiled_code_.kind() != Code::OPTIMIZED_FUNCTION ||
+       !compiled_code_.deopt_already_counted()) &&
+      deopt_kind_ == DeoptimizeKind::kSoft) {
+    isolate->counters()->soft_deopts_executed()->Increment();
+  }
+  if (compiled_code_.kind() == Code::OPTIMIZED_FUNCTION) {
+    compiled_code_.set_deopt_already_counted(true);
+    PROFILE(isolate_,
+            CodeDeoptEvent(compiled_code_, kind, from_, fp_to_sp_delta_));
+  }
+  unsigned size = ComputeInputFrameSize();
+  // +1 accounts for the implicit receiver.
+  int parameter_count = function.shared().internal_formal_parameter_count() + 1;
+  input_ = new (size) FrameDescription(size, parameter_count);
+}
+
+// Prefers the code found on the deoptimized-code list for |from_|; falls back
+// to a code-object lookup in the isolate.
+Code Deoptimizer::FindOptimizedCode() {
+  Code compiled_code = FindDeoptimizingCode(from_);
+  return !compiled_code.is_null() ? compiled_code
+                                  : isolate_->FindCodeObject(from_);
+}
+
+void Deoptimizer::PrintFunctionName() {
+  if (function_.IsHeapObject() && function_.IsJSFunction()) {
+    function_.ShortPrint(trace_scope_->file());
+  } else {
+    PrintF(trace_scope_->file(), "%s",
+           Code::Kind2String(compiled_code_.kind()));
+  }
+}
+
+Handle<JSFunction> Deoptimizer::function() const {
+  return Handle<JSFunction>(function_, isolate());
+}
+Handle<Code> Deoptimizer::compiled_code() const {
+  return Handle<Code>(compiled_code_, isolate());
+}
+
+Deoptimizer::~Deoptimizer() {
+  // Frame descriptions must already have been released via
+  // DeleteFrameDescriptions.
+  DCHECK(input_ == nullptr && output_ == nullptr);
+  DCHECK_NULL(disallow_heap_allocation_);
+  delete trace_scope_;
+}
+
+// Frees the input and all output FrameDescriptions and, in debug builds,
+// lifts the no-allocation scope installed by the constructor.
+void Deoptimizer::DeleteFrameDescriptions() {
+  delete input_;
+  for (int i = 0; i < output_count_; ++i) {
+    if (output_[i] != input_) delete output_[i];
+  }
+  delete[] output_;
+  input_ = nullptr;
+  output_ = nullptr;
+#ifdef DEBUG
+  DCHECK(!AllowHeapAllocation::IsAllowed());
+  DCHECK_NOT_NULL(disallow_heap_allocation_);
+  delete disallow_heap_allocation_;
+  disallow_heap_allocation_ = nullptr;
+#endif  // DEBUG
+}
+
+// Returns the start address of the pre-generated deopt entry code for |kind|.
+Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
+                                            DeoptimizeKind kind) {
+  DeoptimizerData* data = isolate->deoptimizer_data();
+  CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
+  CHECK(!data->deopt_entry_code(kind).is_null());
+  return data->deopt_entry_code(kind).raw_instruction_start();
+}
+
+// Returns true if |addr| is the start of the deopt entry code for |type|.
+bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
+                                        DeoptimizeKind type) {
+  DeoptimizerData* data = isolate->deoptimizer_data();
+  CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
+  Code code = data->deopt_entry_code(type);
+  if (code.is_null()) return false;
+  return addr == code.raw_instruction_start();
+}
+
+// As above, but additionally reports which deopt kind |addr| belongs to.
+bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
+                                        DeoptimizeKind* type) {
+  if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kEager)) {
+    *type = DeoptimizeKind::kEager;
+    return true;
+  }
+  if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kSoft)) {
+    *type = DeoptimizeKind::kSoft;
+    return true;
+  }
+  if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kLazy)) {
+    *type = DeoptimizeKind::kLazy;
+    return true;
+  }
+  return false;
+}
+
+int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
+  int length = 0;
+  // Count all entries in the deoptimizing code list of every context.
+  Object context = isolate->heap()->native_contexts_list();
+  while (!context.IsUndefined(isolate)) {
+    Context native_context = Context::cast(context);
+    Object element = native_context.DeoptimizedCodeListHead();
+    while (!element.IsUndefined(isolate)) {
+      Code code = Code::cast(element);
+      DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
+      if (!code.marked_for_deoptimization()) {
+        length++;
+      }
+      element = code.next_code_link();
+    }
+    context = Context::cast(context).next_context_link();
+  }
+  return length;
+}
+
+namespace {
+
+// Looks up the catch handler for the given translated frame. Returns the
+// handler's pc offset (filling |data_out| from the handler table) for
+// interpreted frames, 0 for JS builtin continuations with catch, and -1 if
+// the frame kind has no handler.
+int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
+  switch (translated_frame->kind()) {
+    case TranslatedFrame::kInterpretedFunction: {
+      int bytecode_offset = translated_frame->node_id().ToInt();
+      HandlerTable table(
+          translated_frame->raw_shared_info().GetBytecodeArray());
+      return table.LookupRange(bytecode_offset, data_out, nullptr);
+    }
+    case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch: {
+      return 0;
+    }
+    default:
+      break;
+  }
+  return -1;
+}
+
+// On targets with kPadArguments, odd argument counts get one padding slot.
+bool ShouldPadArguments(int arg_count) {
+  return kPadArguments && (arg_count % 2 != 0);
+}
+
+}  // namespace
+
+// We rely on this function not causing a GC.  It is called from generated code
+// without having a real stack frame in place.
+void Deoptimizer::DoComputeOutputFrames() {
+  base::ElapsedTimer timer;
+
+  // Determine basic deoptimization information.  The optimized frame is
+  // described by the input data.
+  DeoptimizationData input_data =
+      DeoptimizationData::cast(compiled_code_.deoptimization_data());
+
+  {
+    // Read caller's PC, caller's FP and caller's constant pool values
+    // from input frame. Compute caller's frame top address.
+
+    Register fp_reg = JavaScriptFrame::fp_register();
+    stack_fp_ = input_->GetRegister(fp_reg.code());
+
+    caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize();
+
+    Address fp_address = input_->GetFramePointerAddress();
+    caller_fp_ = Memory<intptr_t>(fp_address);
+    caller_pc_ =
+        Memory<intptr_t>(fp_address + CommonFrameConstants::kCallerPCOffset);
+    input_frame_context_ = Memory<intptr_t>(
+        fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);
+
+    if (FLAG_enable_embedded_constant_pool) {
+      caller_constant_pool_ = Memory<intptr_t>(
+          fp_address + CommonFrameConstants::kConstantPoolOffset);
+    }
+  }
+
+  // Sanity check: the caller's frame top must lie above the real JS limit.
+  StackGuard* const stack_guard = isolate()->stack_guard();
+  CHECK_GT(static_cast<uintptr_t>(caller_frame_top_),
+           stack_guard->real_jslimit());
+
+  if (trace_scope_ != nullptr) {
+    timer.Start();
+    PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
+           MessageFor(deopt_kind_));
+    PrintFunctionName();
+    PrintF(trace_scope_->file(),
+           " (opt #%d) @%d, FP to SP delta: %d, caller sp: " V8PRIxPTR_FMT
+           "]\n",
+           input_data.OptimizationId().value(), bailout_id_, fp_to_sp_delta_,
+           caller_frame_top_);
+    if (deopt_kind_ == DeoptimizeKind::kEager ||
+        deopt_kind_ == DeoptimizeKind::kSoft) {
+      compiled_code_.PrintDeoptLocation(
+          trace_scope_->file(), "            ;;; deoptimize at ", from_);
+    }
+  }
+
+  BailoutId node_id = input_data.BytecodeOffset(bailout_id_);
+  ByteArray translations = input_data.TranslationByteArray();
+  unsigned translation_index = input_data.TranslationIndex(bailout_id_).value();
+
+  TranslationIterator state_iterator(translations, translation_index);
+  translated_state_.Init(
+      isolate_, input_->GetFramePointerAddress(), &state_iterator,
+      input_data.LiteralArray(), input_->GetRegisterValues(),
+      trace_scope_ == nullptr ? nullptr : trace_scope_->file(),
+      function_.IsHeapObject()
+          ? function_.shared().internal_formal_parameter_count()
+          : 0);
+
+  // Do the input frame to output frame(s) translation.
+  size_t count = translated_state_.frames().size();
+  // If we are supposed to go to the catch handler, find the catching frame
+  // for the catch and make sure we only deoptimize upto that frame.
+  if (deoptimizing_throw_) {
+    size_t catch_handler_frame_index = count;
+    for (size_t i = count; i-- > 0;) {
+      catch_handler_pc_offset_ = LookupCatchHandler(
+          &(translated_state_.frames()[i]), &catch_handler_data_);
+      if (catch_handler_pc_offset_ >= 0) {
+        catch_handler_frame_index = i;
+        break;
+      }
+    }
+    CHECK_LT(catch_handler_frame_index, count);
+    count = catch_handler_frame_index + 1;
+  }
+
+  DCHECK_NULL(output_);
+  output_ = new FrameDescription*[count];
+  for (size_t i = 0; i < count; ++i) {
+    output_[i] = nullptr;
+  }
+  output_count_ = static_cast<int>(count);
+
+  // Translate each output frame.
+  int frame_index = 0;  // output_frame_index
+  size_t total_output_frame_size = 0;
+  for (size_t i = 0; i < count; ++i, ++frame_index) {
+    // Read the ast node id, function, and frame height for this output frame.
+    TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
+    // Only the last frame handles the exception when deoptimizing a throw.
+    bool handle_exception = deoptimizing_throw_ && i == count - 1;
+    switch (translated_frame->kind()) {
+      case TranslatedFrame::kInterpretedFunction:
+        DoComputeInterpretedFrame(translated_frame, frame_index,
+                                  handle_exception);
+        jsframe_count_++;
+        break;
+      case TranslatedFrame::kArgumentsAdaptor:
+        DoComputeArgumentsAdaptorFrame(translated_frame, frame_index);
+        break;
+      case TranslatedFrame::kConstructStub:
+        DoComputeConstructStubFrame(translated_frame, frame_index);
+        break;
+      case TranslatedFrame::kBuiltinContinuation:
+        DoComputeBuiltinContinuation(translated_frame, frame_index,
+                                     BuiltinContinuationMode::STUB);
+        break;
+      case TranslatedFrame::kJavaScriptBuiltinContinuation:
+        DoComputeBuiltinContinuation(translated_frame, frame_index,
+                                     BuiltinContinuationMode::JAVASCRIPT);
+        break;
+      case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch:
+        DoComputeBuiltinContinuation(
+            translated_frame, frame_index,
+            handle_exception
+                ? BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION
+                : BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH);
+        break;
+      case TranslatedFrame::kInvalid:
+        FATAL("invalid frame");
+        break;
+    }
+    total_output_frame_size += output_[frame_index]->GetFrameSize();
+  }
+
+  FrameDescription* topmost = output_[count - 1];
+  topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
+                                            isolate()->isolate_root());
+
+  // Print some helpful diagnostic information.
+  if (trace_scope_ != nullptr) {
+    double ms = timer.Elapsed().InMillisecondsF();
+    int index = output_count_ - 1;  // Index of the topmost frame.
+    PrintF(trace_scope_->file(), "[deoptimizing (%s): end ",
+           MessageFor(deopt_kind_));
+    PrintFunctionName();
+    PrintF(trace_scope_->file(),
+           " @%d => node=%d, pc=" V8PRIxPTR_FMT ", caller sp=" V8PRIxPTR_FMT
+           ", took %0.3f ms]\n",
+           bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
+           caller_frame_top_, ms);
+  }
+
+  // TODO(jgruber,neis):
+  // The situation that the output frames do not fit into the stack space should
+  // be prevented by an optimized function's initial stack check: That check
+  // must fail if the (interpreter) frames generated upon deoptimization of the
+  // function would overflow the stack.
+  CHECK_GT(static_cast<uintptr_t>(caller_frame_top_) - total_output_frame_size,
+           stack_guard->real_jslimit());
+}
+
+// Builds the FrameDescription for one interpreted (bytecode) frame of the
+// deoptimized function: incoming parameters, the fixed interpreter frame
+// part, interpreter registers and (for the topmost frame) the accumulator
+// are written via a FrameWriter.
+void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
+                                            int frame_index,
+                                            bool goto_catch_handler) {
+  SharedFunctionInfo shared = translated_frame->raw_shared_info();
+
+  TranslatedFrame::iterator value_iterator = translated_frame->begin();
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+
+  int bytecode_offset = translated_frame->node_id().ToInt();
+  int height = translated_frame->height();
+  int register_count = height - 1;  // Exclude accumulator.
+  int register_stack_slot_count =
+      InterpreterFrameConstants::RegisterStackSlotCount(register_count);
+  int height_in_bytes = register_stack_slot_count * kSystemPointerSize;
+
+  // The topmost frame will contain the accumulator.
+  if (is_topmost) {
+    height_in_bytes += kSystemPointerSize;
+    if (PadTopOfStackRegister()) height_in_bytes += kSystemPointerSize;
+  }
+
+  TranslatedFrame::iterator function_iterator = value_iterator++;
+  if (trace_scope_ != nullptr) {
+    PrintF(trace_scope_->file(), "  translating interpreted frame ");
+    std::unique_ptr<char[]> name = shared.DebugName().ToCString();
+    PrintF(trace_scope_->file(), "%s", name.get());
+    PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
+           bytecode_offset, height_in_bytes,
+           goto_catch_handler ? " (throw)" : "");
+  }
+  if (goto_catch_handler) {
+    // Resume at the catch handler instead of the deopt bytecode offset.
+    bytecode_offset = catch_handler_pc_offset_;
+  }
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by InterpreterFrameConstants. This will include
+  // argument padding, when needed.
+  unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  int parameter_count = shared.internal_formal_parameter_count() + 1;
+  FrameDescription* output_frame = new (output_frame_size)
+      FrameDescription(output_frame_size, parameter_count);
+  FrameWriter frame_writer(this, output_frame, trace_scope_);
+
+  CHECK(frame_index >= 0 && frame_index < output_count_);
+  CHECK_NULL(output_[frame_index]);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous frame's top and
+  // this frame's size.
+  intptr_t top_address;
+  if (is_bottommost) {
+    top_address = caller_frame_top_ - output_frame_size;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+
+  ReadOnlyRoots roots(isolate());
+  if (ShouldPadArguments(parameter_count)) {
+    frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
+  }
+
+  for (int i = 0; i < parameter_count; ++i, ++value_iterator) {
+    frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
+  }
+
+  DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
+            frame_writer.top_offset());
+  if (trace_scope_ != nullptr) {
+    PrintF(trace_scope_->file(), "    -------------------------\n");
+  }
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, the function and the bytecode offset.  Synthesize
+  // their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  const intptr_t caller_pc =
+      is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc();
+  frame_writer.PushCallerPc(caller_pc);
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  const intptr_t caller_fp =
+      is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp();
+  frame_writer.PushCallerFp(caller_fp);
+
+  intptr_t fp_value = top_address + frame_writer.top_offset();
+  output_frame->SetFp(fp_value);
+  if (is_topmost) {
+    Register fp_reg = InterpretedFrame::fp_register();
+    output_frame->SetRegister(fp_reg.code(), fp_value);
+  }
+
+  if (FLAG_enable_embedded_constant_pool) {
+    // For the bottommost output frame the constant pool pointer can be gotten
+    // from the input frame. For subsequent output frames, it can be read from
+    // the previous frame.
+    const intptr_t caller_cp =
+        is_bottommost ? caller_constant_pool_
+                      : output_[frame_index - 1]->GetConstantPool();
+    frame_writer.PushCallerConstantPool(caller_cp);
+  }
+
+  // For the bottommost output frame the context can be gotten from the input
+  // frame. For all subsequent output frames it can be gotten from the function
+  // so long as we don't inline functions that need local contexts.
+
+  // When deoptimizing into a catch block, we need to take the context
+  // from a register that was specified in the handler table.
+  TranslatedFrame::iterator context_pos = value_iterator++;
+  if (goto_catch_handler) {
+    // Skip to the translated value of the register specified
+    // in the handler table.
+    for (int i = 0; i < catch_handler_data_ + 1; ++i) {
+      context_pos++;
+    }
+  }
+  // Read the context from the translations.
+  Object context = context_pos->GetRawValue();
+  output_frame->SetContext(static_cast<intptr_t>(context.ptr()));
+  frame_writer.PushTranslatedValue(context_pos, "context");
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  frame_writer.PushTranslatedValue(function_iterator, "function");
+
+  // Set the bytecode array pointer.
+  Object bytecode_array = shared.HasBreakInfo()
+                              ? shared.GetDebugInfo().DebugBytecodeArray()
+                              : shared.GetBytecodeArray();
+  frame_writer.PushRawObject(bytecode_array, "bytecode array\n");
+
+  // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
+  int raw_bytecode_offset =
+      BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
+  Smi smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
+  frame_writer.PushRawObject(smi_bytecode_offset, "bytecode offset\n");
+
+  if (trace_scope_ != nullptr) {
+    PrintF(trace_scope_->file(), "    -------------------------\n");
+  }
+
+  // Translate the rest of the interpreter registers in the frame.
+  // The return_value_offset is counted from the top. Here, we compute the
+  // register index (counted from the start).
+  // Registers in [return_value_first_reg, return_value_first_reg + count)
+  // receive the lazy-deopt return value(s) from the return registers rather
+  // than from the translation.
+  int return_value_first_reg =
+      register_count - translated_frame->return_value_offset();
+  int return_value_count = translated_frame->return_value_count();
+  for (int i = 0; i < register_count; ++i, ++value_iterator) {
+    // Ensure we write the return value if we have one and we are returning
+    // normally to a lazy deopt point.
+    if (is_topmost && !goto_catch_handler &&
+        deopt_kind_ == DeoptimizeKind::kLazy && i >= return_value_first_reg &&
+        i < return_value_first_reg + return_value_count) {
+      int return_index = i - return_value_first_reg;
+      if (return_index == 0) {
+        frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
+                                  "return value 0\n");
+        // We do not handle the situation when one return value should go into
+        // the accumulator and another one into an ordinary register. Since
+        // the interpreter should never create such situation, just assert
+        // this does not happen.
+        CHECK_LE(return_value_first_reg + return_value_count, register_count);
+      } else {
+        CHECK_EQ(return_index, 1);
+        frame_writer.PushRawValue(input_->GetRegister(kReturnRegister1.code()),
+                                  "return value 1\n");
+      }
+    } else {
+      // This is not return value, just write the value from the translations.
+      frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
+    }
+  }
+
+  int register_slots_written = register_count;
+  DCHECK_LE(register_slots_written, register_stack_slot_count);
+  // Some architectures must pad the stack frame with extra stack slots
+  // to ensure the stack frame is aligned. Do this now.
+  while (register_slots_written < register_stack_slot_count) {
+    register_slots_written++;
+    frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
+  }
+
+  // Translate the accumulator register (depending on frame position).
+  if (is_topmost) {
+    if (PadTopOfStackRegister()) {
+      frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
+    }
+    // For topmost frame, put the accumulator on the stack. The
+    // {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
+    // after materialization).
+    if (goto_catch_handler) {
+      // If we are lazy deopting to a catch handler, we set the accumulator to
+      // the exception (which lives in the result register).
+      intptr_t accumulator_value =
+          input_->GetRegister(kInterpreterAccumulatorRegister.code());
+      frame_writer.PushRawObject(Object(accumulator_value), "accumulator\n");
+    } else {
+      // If we are lazily deoptimizing make sure we store the deopt
+      // return value into the appropriate slot.
+      if (deopt_kind_ == DeoptimizeKind::kLazy &&
+          translated_frame->return_value_offset() == 0 &&
+          translated_frame->return_value_count() > 0) {
+        CHECK_EQ(translated_frame->return_value_count(), 1);
+        frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
+                                  "return value 0\n");
+      } else {
+        frame_writer.PushTranslatedValue(value_iterator, "accumulator");
+      }
+    }
+    ++value_iterator;  // Move over the accumulator.
+  } else {
+    // For non-topmost frames, skip the accumulator translation. For those
+    // frames, the return value from the callee will become the accumulator.
+    ++value_iterator;
+  }
+  // All translated values consumed and the frame completely filled.
+  CHECK_EQ(translated_frame->end(), value_iterator);
+  CHECK_EQ(0u, frame_writer.top_offset());
+
+  // Compute this frame's PC and state. The PC will be a special builtin that
+  // continues the bytecode dispatch. Note that non-topmost and lazy-style
+  // bailout handlers also advance the bytecode offset before dispatch, hence
+  // simulating what normal handlers do upon completion of the operation.
+  Builtins* builtins = isolate_->builtins();
+  Code dispatch_builtin =
+      (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
+              !goto_catch_handler
+          ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
+          : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+  output_frame->SetPc(
+      static_cast<intptr_t>(dispatch_builtin.InstructionStart()));
+
+  // Update constant pool.
+ if (FLAG_enable_embedded_constant_pool) { + intptr_t constant_pool_value = + static_cast<intptr_t>(dispatch_builtin.constant_pool()); + output_frame->SetConstantPool(constant_pool_value); + if (is_topmost) { + Register constant_pool_reg = + InterpretedFrame::constant_pool_pointer_register(); + output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value); + } + } + + // Clear the context register. The context might be a de-materialized object + // and will be materialized by {Runtime_NotifyDeoptimized}. For additional + // safety we use Smi(0) instead of the potential {arguments_marker} here. + if (is_topmost) { + intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr()); + Register context_reg = JavaScriptFrame::context_register(); + output_frame->SetRegister(context_reg.code(), context_value); + // Set the continuation for the topmost frame. + Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized); + output_frame->SetContinuation( + static_cast<intptr_t>(continuation.InstructionStart())); + } +} + +void Deoptimizer::DoComputeArgumentsAdaptorFrame( + TranslatedFrame* translated_frame, int frame_index) { + TranslatedFrame::iterator value_iterator = translated_frame->begin(); + bool is_bottommost = (0 == frame_index); + + unsigned height = translated_frame->height(); + unsigned height_in_bytes = height * kSystemPointerSize; + int parameter_count = height; + if (ShouldPadArguments(parameter_count)) + height_in_bytes += kSystemPointerSize; + + TranslatedFrame::iterator function_iterator = value_iterator++; + if (trace_scope_ != nullptr) { + PrintF(trace_scope_->file(), + " translating arguments adaptor => height=%d\n", height_in_bytes); + } + + unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFixedFrameSize; + unsigned output_frame_size = height_in_bytes + fixed_frame_size; + + // Allocate and store the output frame description. 
+ FrameDescription* output_frame = new (output_frame_size) + FrameDescription(output_frame_size, parameter_count); + FrameWriter frame_writer(this, output_frame, trace_scope_); + + // Arguments adaptor can not be topmost. + CHECK(frame_index < output_count_ - 1); + CHECK_NULL(output_[frame_index]); + output_[frame_index] = output_frame; + + // The top address of the frame is computed from the previous frame's top and + // this frame's size. + intptr_t top_address; + if (is_bottommost) { + top_address = caller_frame_top_ - output_frame_size; + } else { + top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + } + output_frame->SetTop(top_address); + + ReadOnlyRoots roots(isolate()); + if (ShouldPadArguments(parameter_count)) { + frame_writer.PushRawObject(roots.the_hole_value(), "padding\n"); + } + + // Compute the incoming parameter translation. + for (int i = 0; i < parameter_count; ++i, ++value_iterator) { + frame_writer.PushTranslatedValue(value_iterator, "stack parameter"); + } + + DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(), + frame_writer.top_offset()); + + // Read caller's PC from the previous frame. + const intptr_t caller_pc = + is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc(); + frame_writer.PushCallerPc(caller_pc); + + // Read caller's FP from the previous frame, and set this frame's FP. + const intptr_t caller_fp = + is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp(); + frame_writer.PushCallerFp(caller_fp); + + intptr_t fp_value = top_address + frame_writer.top_offset(); + output_frame->SetFp(fp_value); + + if (FLAG_enable_embedded_constant_pool) { + // Read the caller's constant pool from the previous frame. + const intptr_t caller_cp = + is_bottommost ? caller_constant_pool_ + : output_[frame_index - 1]->GetConstantPool(); + frame_writer.PushCallerConstantPool(caller_cp); + } + + // A marker value is used in place of the context. 
+ intptr_t marker = StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR); + frame_writer.PushRawValue(marker, "context (adaptor sentinel)\n"); + + // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. + frame_writer.PushTranslatedValue(function_iterator, "function\n"); + + // Number of incoming arguments. + frame_writer.PushRawObject(Smi::FromInt(height - 1), "argc\n"); + + frame_writer.PushRawObject(roots.the_hole_value(), "padding\n"); + + CHECK_EQ(translated_frame->end(), value_iterator); + DCHECK_EQ(0, frame_writer.top_offset()); + + Builtins* builtins = isolate_->builtins(); + Code adaptor_trampoline = + builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); + intptr_t pc_value = static_cast<intptr_t>( + adaptor_trampoline.InstructionStart() + + isolate_->heap()->arguments_adaptor_deopt_pc_offset().value()); + output_frame->SetPc(pc_value); + if (FLAG_enable_embedded_constant_pool) { + intptr_t constant_pool_value = + static_cast<intptr_t>(adaptor_trampoline.constant_pool()); + output_frame->SetConstantPool(constant_pool_value); + } +} + +void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame, + int frame_index) { + TranslatedFrame::iterator value_iterator = translated_frame->begin(); + bool is_topmost = (output_count_ - 1 == frame_index); + // The construct frame could become topmost only if we inlined a constructor + // call which does a tail call (otherwise the tail callee's frame would be + // the topmost one). So it could only be the DeoptimizeKind::kLazy case. + CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy); + + Builtins* builtins = isolate_->builtins(); + Code construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric); + BailoutId bailout_id = translated_frame->node_id(); + unsigned height = translated_frame->height(); + unsigned parameter_count = height - 1; // Exclude the context. 
+ unsigned height_in_bytes = parameter_count * kSystemPointerSize; + + // If the construct frame appears to be topmost we should ensure that the + // value of result register is preserved during continuation execution. + // We do this here by "pushing" the result of the constructor function to the + // top of the reconstructed stack and popping it in + // {Builtins::kNotifyDeoptimized}. + if (is_topmost) { + height_in_bytes += kSystemPointerSize; + if (PadTopOfStackRegister()) height_in_bytes += kSystemPointerSize; + } + + if (ShouldPadArguments(parameter_count)) + height_in_bytes += kSystemPointerSize; + + TranslatedFrame::iterator function_iterator = value_iterator++; + if (trace_scope_ != nullptr) { + PrintF(trace_scope_->file(), + " translating construct stub => bailout_id=%d (%s), height=%d\n", + bailout_id.ToInt(), + bailout_id == BailoutId::ConstructStubCreate() ? "create" : "invoke", + height_in_bytes); + } + + unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize; + unsigned output_frame_size = height_in_bytes + fixed_frame_size; + + // Allocate and store the output frame description. + FrameDescription* output_frame = new (output_frame_size) + FrameDescription(output_frame_size, parameter_count); + FrameWriter frame_writer(this, output_frame, trace_scope_); + + // Construct stub can not be topmost. + DCHECK(frame_index > 0 && frame_index < output_count_); + DCHECK_NULL(output_[frame_index]); + output_[frame_index] = output_frame; + + // The top address of the frame is computed from the previous frame's top and + // this frame's size. + intptr_t top_address; + top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + output_frame->SetTop(top_address); + + ReadOnlyRoots roots(isolate()); + if (ShouldPadArguments(parameter_count)) { + frame_writer.PushRawObject(roots.the_hole_value(), "padding\n"); + } + + // The allocated receiver of a construct stub frame is passed as the + // receiver parameter through the translation. 
It might be encoding + // a captured object, so we need save it for later. + TranslatedFrame::iterator receiver_iterator = value_iterator; + + // Compute the incoming parameter translation. + for (unsigned i = 0; i < parameter_count; ++i, ++value_iterator) { + frame_writer.PushTranslatedValue(value_iterator, "stack parameter"); + } + + DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(), + frame_writer.top_offset()); + + // Read caller's PC from the previous frame. + const intptr_t caller_pc = output_[frame_index - 1]->GetPc(); + frame_writer.PushCallerPc(caller_pc); + + // Read caller's FP from the previous frame, and set this frame's FP. + const intptr_t caller_fp = output_[frame_index - 1]->GetFp(); + frame_writer.PushCallerFp(caller_fp); + + intptr_t fp_value = top_address + frame_writer.top_offset(); + output_frame->SetFp(fp_value); + if (is_topmost) { + Register fp_reg = JavaScriptFrame::fp_register(); + output_frame->SetRegister(fp_reg.code(), fp_value); + } + + if (FLAG_enable_embedded_constant_pool) { + // Read the caller's constant pool from the previous frame. + const intptr_t caller_cp = output_[frame_index - 1]->GetConstantPool(); + frame_writer.PushCallerConstantPool(caller_cp); + } + + // A marker value is used to mark the frame. + intptr_t marker = StackFrame::TypeToMarker(StackFrame::CONSTRUCT); + frame_writer.PushRawValue(marker, "context (construct stub sentinel)\n"); + + frame_writer.PushTranslatedValue(value_iterator++, "context"); + + // Number of incoming arguments. + frame_writer.PushRawObject(Smi::FromInt(parameter_count - 1), "argc\n"); + + // The constructor function was mentioned explicitly in the + // CONSTRUCT_STUB_FRAME. + frame_writer.PushTranslatedValue(function_iterator, "constructor function\n"); + + // The deopt info contains the implicit receiver or the new target at the + // position of the receiver. Copy it to the top of stack, with the hole value + // as padding to maintain alignment. 
+ + frame_writer.PushRawObject(roots.the_hole_value(), "padding\n"); + + CHECK(bailout_id == BailoutId::ConstructStubCreate() || + bailout_id == BailoutId::ConstructStubInvoke()); + const char* debug_hint = bailout_id == BailoutId::ConstructStubCreate() + ? "new target\n" + : "allocated receiver\n"; + frame_writer.PushTranslatedValue(receiver_iterator, debug_hint); + + if (is_topmost) { + if (PadTopOfStackRegister()) { + frame_writer.PushRawObject(roots.the_hole_value(), "padding\n"); + } + // Ensure the result is restored back when we return to the stub. + Register result_reg = kReturnRegister0; + intptr_t result = input_->GetRegister(result_reg.code()); + frame_writer.PushRawValue(result, "subcall result\n"); + } + + CHECK_EQ(translated_frame->end(), value_iterator); + CHECK_EQ(0u, frame_writer.top_offset()); + + // Compute this frame's PC. + DCHECK(bailout_id.IsValidForConstructStub()); + Address start = construct_stub.InstructionStart(); + int pc_offset = + bailout_id == BailoutId::ConstructStubCreate() + ? isolate_->heap()->construct_stub_create_deopt_pc_offset().value() + : isolate_->heap()->construct_stub_invoke_deopt_pc_offset().value(); + intptr_t pc_value = static_cast<intptr_t>(start + pc_offset); + output_frame->SetPc(pc_value); + + // Update constant pool. + if (FLAG_enable_embedded_constant_pool) { + intptr_t constant_pool_value = + static_cast<intptr_t>(construct_stub.constant_pool()); + output_frame->SetConstantPool(constant_pool_value); + if (is_topmost) { + Register constant_pool_reg = + JavaScriptFrame::constant_pool_pointer_register(); + output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value); + } + } + + // Clear the context register. The context might be a de-materialized object + // and will be materialized by {Runtime_NotifyDeoptimized}. For additional + // safety we use Smi(0) instead of the potential {arguments_marker} here. 
+ if (is_topmost) { + intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr()); + Register context_reg = JavaScriptFrame::context_register(); + output_frame->SetRegister(context_reg.code(), context_value); + } + + // Set the continuation for the topmost frame. + if (is_topmost) { + Builtins* builtins = isolate_->builtins(); + DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_); + Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized); + output_frame->SetContinuation( + static_cast<intptr_t>(continuation.InstructionStart())); + } +} + +bool Deoptimizer::BuiltinContinuationModeIsJavaScript( + BuiltinContinuationMode mode) { + switch (mode) { + case BuiltinContinuationMode::STUB: + return false; + case BuiltinContinuationMode::JAVASCRIPT: + case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: + case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: + return true; + } + UNREACHABLE(); +} + +bool Deoptimizer::BuiltinContinuationModeIsWithCatch( + BuiltinContinuationMode mode) { + switch (mode) { + case BuiltinContinuationMode::STUB: + case BuiltinContinuationMode::JAVASCRIPT: + return false; + case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: + case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: + return true; + } + UNREACHABLE(); +} + +StackFrame::Type Deoptimizer::BuiltinContinuationModeToFrameType( + BuiltinContinuationMode mode) { + switch (mode) { + case BuiltinContinuationMode::STUB: + return StackFrame::BUILTIN_CONTINUATION; + case BuiltinContinuationMode::JAVASCRIPT: + return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION; + case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: + return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH; + case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: + return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH; + } + UNREACHABLE(); +} + +Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation( + BuiltinContinuationMode mode, bool must_handle_result) { + switch (mode) { + 
case BuiltinContinuationMode::STUB: + return must_handle_result ? Builtins::kContinueToCodeStubBuiltinWithResult + : Builtins::kContinueToCodeStubBuiltin; + case BuiltinContinuationMode::JAVASCRIPT: + case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: + case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: + return must_handle_result + ? Builtins::kContinueToJavaScriptBuiltinWithResult + : Builtins::kContinueToJavaScriptBuiltin; + } + UNREACHABLE(); +} + +// BuiltinContinuationFrames capture the machine state that is expected as input +// to a builtin, including both input register values and stack parameters. When +// the frame is reactivated (i.e. the frame below it returns), a +// ContinueToBuiltin stub restores the register state from the frame and tail +// calls to the actual target builtin, making it appear that the stub had been +// directly called by the frame above it. The input values to populate the frame +// are taken from the deopt's FrameState. +// +// Frame translation happens in two modes, EAGER and LAZY. In EAGER mode, all of +// the parameters to the Builtin are explicitly specified in the TurboFan +// FrameState node. In LAZY mode, there is always one fewer parameters specified +// in the FrameState than expected by the Builtin. In that case, construction of +// BuiltinContinuationFrame adds the final missing parameter during +// deoptimization, and that parameter is always on the stack and contains the +// value returned from the callee of the call site triggering the LAZY deopt +// (e.g. rax on x64). This requires that continuation Builtins for LAZY deopts +// must have at least one stack parameter. +// +// TO +// | .... | +// +-------------------------+ +// | arg padding (arch dept) |<- at most 1*kSystemPointerSize +// +-------------------------+ +// | builtin param 0 |<- FrameState input value n becomes +// +-------------------------+ +// | ... 
| +// +-------------------------+ +// | builtin param m |<- FrameState input value n+m-1, or in +// +-----needs-alignment-----+ the LAZY case, return LAZY result value +// | ContinueToBuiltin entry | +// +-------------------------+ +// | | saved frame (FP) | +// | +=====needs=alignment=====+<- fpreg +// | |constant pool (if ool_cp)| +// v +-------------------------+ +// |BUILTIN_CONTINUATION mark| +// +-------------------------+ +// | JSFunction (or zero) |<- only if JavaScript builtin +// +-------------------------+ +// | frame height above FP | +// +-------------------------+ +// | context |<- this non-standard context slot contains +// +-------------------------+ the context, even for non-JS builtins. +// | builtin address | +// +-------------------------+ +// | builtin input GPR reg0 |<- populated from deopt FrameState using +// +-------------------------+ the builtin's CallInterfaceDescriptor +// | ... | to map a FrameState's 0..n-1 inputs to +// +-------------------------+ the builtin's n input register params. +// | builtin input GPR regn | +// +-------------------------+ +// | reg padding (arch dept) | +// +-----needs--alignment----+ +// | res padding (arch dept) |<- only if {is_topmost}; result is pop'd by +// +-------------------------+<- kNotifyDeopt ASM stub and moved to acc +// | result value |<- reg, as ContinueToBuiltin stub expects. +// +-----needs-alignment-----+<- spreg +// +void Deoptimizer::DoComputeBuiltinContinuation( + TranslatedFrame* translated_frame, int frame_index, + BuiltinContinuationMode mode) { + TranslatedFrame::iterator value_iterator = translated_frame->begin(); + + // The output frame must have room for all of the parameters that need to be + // passed to the builtin continuation. 
+ const int height_in_words = translated_frame->height(); + + BailoutId bailout_id = translated_frame->node_id(); + Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id); + Code builtin = isolate()->builtins()->builtin(builtin_name); + Callable continuation_callable = + Builtins::CallableFor(isolate(), builtin_name); + CallInterfaceDescriptor continuation_descriptor = + continuation_callable.descriptor(); + + const bool is_bottommost = (0 == frame_index); + const bool is_topmost = (output_count_ - 1 == frame_index); + const bool must_handle_result = + !is_topmost || deopt_kind_ == DeoptimizeKind::kLazy; + + const RegisterConfiguration* config(RegisterConfiguration::Default()); + const int allocatable_register_count = + config->num_allocatable_general_registers(); + const int padding_slot_count = + BuiltinContinuationFrameConstants::PaddingSlotCount( + allocatable_register_count); + + const int register_parameter_count = + continuation_descriptor.GetRegisterParameterCount(); + // Make sure to account for the context by removing it from the register + // parameter count. + const int translated_stack_parameters = + height_in_words - register_parameter_count - 1; + const int stack_param_count = + translated_stack_parameters + (must_handle_result ? 1 : 0) + + (BuiltinContinuationModeIsWithCatch(mode) ? 1 : 0); + const int stack_param_pad_count = + ShouldPadArguments(stack_param_count) ? 1 : 0; + + // If the builtins frame appears to be topmost we should ensure that the + // value of result register is preserved during continuation execution. + // We do this here by "pushing" the result of callback function to the + // top of the reconstructed stack and popping it in + // {Builtins::kNotifyDeoptimized}. + const int push_result_count = + is_topmost ? (PadTopOfStackRegister() ? 
2 : 1) : 0; + + const unsigned output_frame_size = + kSystemPointerSize * (stack_param_count + stack_param_pad_count + + allocatable_register_count + padding_slot_count + + push_result_count) + + BuiltinContinuationFrameConstants::kFixedFrameSize; + + const unsigned output_frame_size_above_fp = + kSystemPointerSize * (allocatable_register_count + padding_slot_count + + push_result_count) + + (BuiltinContinuationFrameConstants::kFixedFrameSize - + BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp); + + // Validate types of parameters. They must all be tagged except for argc for + // JS builtins. + bool has_argc = false; + for (int i = 0; i < register_parameter_count; ++i) { + MachineType type = continuation_descriptor.GetParameterType(i); + int code = continuation_descriptor.GetRegisterParameter(i).code(); + // Only tagged and int32 arguments are supported, and int32 only for the + // arguments count on JavaScript builtins. + if (type == MachineType::Int32()) { + CHECK_EQ(code, kJavaScriptCallArgCountRegister.code()); + has_argc = true; + } else { + // Any other argument must be a tagged value. + CHECK(IsAnyTagged(type.representation())); + } + } + CHECK_EQ(BuiltinContinuationModeIsJavaScript(mode), has_argc); + + if (trace_scope_ != nullptr) { + PrintF(trace_scope_->file(), + " translating BuiltinContinuation to %s," + " register param count %d," + " stack param count %d\n", + Builtins::name(builtin_name), register_parameter_count, + stack_param_count); + } + + FrameDescription* output_frame = new (output_frame_size) + FrameDescription(output_frame_size, stack_param_count); + output_[frame_index] = output_frame; + FrameWriter frame_writer(this, output_frame, trace_scope_); + + // The top address of the frame is computed from the previous frame's top and + // this frame's size. 
+ intptr_t top_address; + if (is_bottommost) { + top_address = caller_frame_top_ - output_frame_size; + } else { + top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + } + output_frame->SetTop(top_address); + + // Get the possible JSFunction for the case that this is a + // JavaScriptBuiltinContinuationFrame, which needs the JSFunction pointer + // like a normal JavaScriptFrame. + const intptr_t maybe_function = value_iterator->GetRawValue().ptr(); + ++value_iterator; + + ReadOnlyRoots roots(isolate()); + if (ShouldPadArguments(stack_param_count)) { + frame_writer.PushRawObject(roots.the_hole_value(), "padding\n"); + } + + for (int i = 0; i < translated_stack_parameters; ++i, ++value_iterator) { + frame_writer.PushTranslatedValue(value_iterator, "stack parameter"); + } + + switch (mode) { + case BuiltinContinuationMode::STUB: + break; + case BuiltinContinuationMode::JAVASCRIPT: + break; + case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: { + frame_writer.PushRawObject(roots.the_hole_value(), + "placeholder for exception on lazy deopt\n"); + } break; + case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: { + intptr_t accumulator_value = + input_->GetRegister(kInterpreterAccumulatorRegister.code()); + frame_writer.PushRawObject(Object(accumulator_value), + "exception (from accumulator)\n"); + } break; + } + + if (must_handle_result) { + frame_writer.PushRawObject(roots.the_hole_value(), + "placeholder for return result on lazy deopt\n"); + } + + DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(), + frame_writer.top_offset()); + + std::vector<TranslatedFrame::iterator> register_values; + int total_registers = config->num_general_registers(); + register_values.resize(total_registers, {value_iterator}); + + for (int i = 0; i < register_parameter_count; ++i, ++value_iterator) { + int code = continuation_descriptor.GetRegisterParameter(i).code(); + register_values[code] = value_iterator; + } + + // The context register is always implicit in 
the CallInterfaceDescriptor but + // its register must be explicitly set when continuing to the builtin. Make + // sure that it's harvested from the translation and copied into the register + // set (it was automatically added at the end of the FrameState by the + // instruction selector). + Object context = value_iterator->GetRawValue(); + const intptr_t value = context.ptr(); + TranslatedFrame::iterator context_register_value = value_iterator++; + register_values[kContextRegister.code()] = context_register_value; + output_frame->SetContext(value); + output_frame->SetRegister(kContextRegister.code(), value); + + // Set caller's PC (JSFunction continuation). + const intptr_t caller_pc = + is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc(); + frame_writer.PushCallerPc(caller_pc); + + // Read caller's FP from the previous frame, and set this frame's FP. + const intptr_t caller_fp = + is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp(); + frame_writer.PushCallerFp(caller_fp); + + const intptr_t fp_value = top_address + frame_writer.top_offset(); + output_frame->SetFp(fp_value); + + DCHECK_EQ(output_frame_size_above_fp, frame_writer.top_offset()); + + if (FLAG_enable_embedded_constant_pool) { + // Read the caller's constant pool from the previous frame. + const intptr_t caller_cp = + is_bottommost ? caller_constant_pool_ + : output_[frame_index - 1]->GetConstantPool(); + frame_writer.PushCallerConstantPool(caller_cp); + } + + // A marker value is used in place of the context. + const intptr_t marker = + StackFrame::TypeToMarker(BuiltinContinuationModeToFrameType(mode)); + frame_writer.PushRawValue(marker, + "context (builtin continuation sentinel)\n"); + + if (BuiltinContinuationModeIsJavaScript(mode)) { + frame_writer.PushRawValue(maybe_function, "JSFunction\n"); + } else { + frame_writer.PushRawValue(0, "unused\n"); + } + + // The delta from the SP to the FP; used to reconstruct SP in + // Isolate::UnwindAndFindHandler. 
+ frame_writer.PushRawObject(Smi::FromInt(output_frame_size_above_fp), + "frame height at deoptimization\n"); + + // The context even if this is a stub contininuation frame. We can't use the + // usual context slot, because we must store the frame marker there. + frame_writer.PushTranslatedValue(context_register_value, + "builtin JavaScript context\n"); + + // The builtin to continue to. + frame_writer.PushRawObject(builtin, "builtin address\n"); + + for (int i = 0; i < allocatable_register_count; ++i) { + int code = config->GetAllocatableGeneralCode(i); + ScopedVector<char> str(128); + if (trace_scope_ != nullptr) { + if (BuiltinContinuationModeIsJavaScript(mode) && + code == kJavaScriptCallArgCountRegister.code()) { + SNPrintF( + str, + "tagged argument count %s (will be untagged by continuation)\n", + RegisterName(Register::from_code(code))); + } else { + SNPrintF(str, "builtin register argument %s\n", + RegisterName(Register::from_code(code))); + } + } + frame_writer.PushTranslatedValue( + register_values[code], trace_scope_ != nullptr ? str.begin() : ""); + } + + // Some architectures must pad the stack frame with extra stack slots + // to ensure the stack frame is aligned. + for (int i = 0; i < padding_slot_count; ++i) { + frame_writer.PushRawObject(roots.the_hole_value(), "padding\n"); + } + + if (is_topmost) { + if (PadTopOfStackRegister()) { + frame_writer.PushRawObject(roots.the_hole_value(), "padding\n"); + } + // Ensure the result is restored back when we return to the stub. + + if (must_handle_result) { + Register result_reg = kReturnRegister0; + frame_writer.PushRawValue(input_->GetRegister(result_reg.code()), + "callback result\n"); + } else { + frame_writer.PushRawObject(roots.undefined_value(), "callback result\n"); + } + } + + CHECK_EQ(translated_frame->end(), value_iterator); + CHECK_EQ(0u, frame_writer.top_offset()); + + // Clear the context register. 
The context might be a de-materialized object + // and will be materialized by {Runtime_NotifyDeoptimized}. For additional + // safety we use Smi(0) instead of the potential {arguments_marker} here. + if (is_topmost) { + intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr()); + Register context_reg = JavaScriptFrame::context_register(); + output_frame->SetRegister(context_reg.code(), context_value); + } + + // Ensure the frame pointer register points to the callee's frame. The builtin + // will build its own frame once we continue to it. + Register fp_reg = JavaScriptFrame::fp_register(); + output_frame->SetRegister(fp_reg.code(), fp_value); + + Code continue_to_builtin = isolate()->builtins()->builtin( + TrampolineForBuiltinContinuation(mode, must_handle_result)); + output_frame->SetPc( + static_cast<intptr_t>(continue_to_builtin.InstructionStart())); + + Code continuation = + isolate()->builtins()->builtin(Builtins::kNotifyDeoptimized); + output_frame->SetContinuation( + static_cast<intptr_t>(continuation.InstructionStart())); +} + +void Deoptimizer::MaterializeHeapObjects() { + translated_state_.Prepare(static_cast<Address>(stack_fp_)); + if (FLAG_deopt_every_n_times > 0) { + // Doing a GC here will find problems with the deoptimized frames. 
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags, + GarbageCollectionReason::kTesting); + } + + for (auto& materialization : values_to_materialize_) { + Handle<Object> value = materialization.value_->GetValue(); + + if (trace_scope_ != nullptr) { + PrintF("Materialization [" V8PRIxPTR_FMT "] <- " V8PRIxPTR_FMT " ; ", + static_cast<intptr_t>(materialization.output_slot_address_), + value->ptr()); + value->ShortPrint(trace_scope_->file()); + PrintF(trace_scope_->file(), "\n"); + } + + *(reinterpret_cast<Address*>(materialization.output_slot_address_)) = + value->ptr(); + } + + translated_state_.VerifyMaterializedObjects(); + + bool feedback_updated = translated_state_.DoUpdateFeedback(); + if (trace_scope_ != nullptr && feedback_updated) { + PrintF(trace_scope_->file(), "Feedback updated"); + compiled_code_.PrintDeoptLocation(trace_scope_->file(), + " from deoptimization at ", from_); + } + + isolate_->materialized_object_store()->Remove( + static_cast<Address>(stack_fp_)); +} + +void Deoptimizer::QueueValueForMaterialization( + Address output_address, Object obj, + const TranslatedFrame::iterator& iterator) { + if (obj == ReadOnlyRoots(isolate_).arguments_marker()) { + values_to_materialize_.push_back({output_address, iterator}); + } +} + +unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const { + unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp; + // TODO(jkummerow): If {function_->IsSmi()} can indeed be true, then + // {function_} should not have type {JSFunction}. + if (!function_.IsSmi()) { + fixed_size += ComputeIncomingArgumentSize(function_.shared()); + } + return fixed_size; +} + +unsigned Deoptimizer::ComputeInputFrameSize() const { + // The fp-to-sp delta already takes the context, constant pool pointer and the + // function into account so we have to avoid double counting them. 
+ unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize(); + unsigned result = fixed_size_above_fp + fp_to_sp_delta_; + if (compiled_code_.kind() == Code::OPTIMIZED_FUNCTION) { + unsigned stack_slots = compiled_code_.stack_slots(); + unsigned outgoing_size = 0; + // ComputeOutgoingArgumentSize(compiled_code_, bailout_id_); + CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) - + CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size, + result); + } + return result; +} + +// static +unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo shared) { + // The fixed part of the frame consists of the return address, frame + // pointer, function, context, bytecode offset and all the incoming arguments. + return ComputeIncomingArgumentSize(shared) + + InterpreterFrameConstants::kFixedFrameSize; +} + +// static +unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) { + int parameter_slots = shared.internal_formal_parameter_count() + 1; + if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2); + return parameter_slots * kSystemPointerSize; +} + +void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate, + DeoptimizeKind kind) { + CHECK(kind == DeoptimizeKind::kEager || kind == DeoptimizeKind::kSoft || + kind == DeoptimizeKind::kLazy); + DeoptimizerData* data = isolate->deoptimizer_data(); + if (!data->deopt_entry_code(kind).is_null()) return; + + MacroAssembler masm(isolate, CodeObjectRequired::kYes, + NewAssemblerBuffer(16 * KB)); + masm.set_emit_debug_code(false); + GenerateDeoptimizationEntries(&masm, masm.isolate(), kind); + CodeDesc desc; + masm.GetCode(isolate, &desc); + DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc)); + + // Allocate the code as immovable since the entry addresses will be used + // directly and there is no support for relocating them. 
+ Handle<Code> code = + Factory::CodeBuilder(isolate, desc, Code::STUB).set_immovable().Build(); + CHECK(isolate->heap()->IsImmovable(*code)); + + CHECK(data->deopt_entry_code(kind).is_null()); + data->set_deopt_entry_code(kind, *code); +} + +void Deoptimizer::EnsureCodeForDeoptimizationEntries(Isolate* isolate) { + EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager); + EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy); + EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft); +} + +FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count) + : frame_size_(frame_size), + parameter_count_(parameter_count), + top_(kZapUint32), + pc_(kZapUint32), + fp_(kZapUint32), + context_(kZapUint32), + constant_pool_(kZapUint32) { + // Zap all the registers. + for (int r = 0; r < Register::kNumRegisters; r++) { + // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register + // isn't used before the next safepoint, the GC will try to scan it as a + // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't. +#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64) + // x18 is reserved as platform register on Windows arm64 platform + const int kPlatformRegister = 18; + if (r != kPlatformRegister) { + SetRegister(r, kZapUint32); + } +#else + SetRegister(r, kZapUint32); +#endif + } + + // Zap all the slots. + for (unsigned o = 0; o < frame_size; o += kSystemPointerSize) { + SetFrameSlot(o, kZapUint32); + } +} + +void TranslationBuffer::Add(int32_t value) { + // This wouldn't handle kMinInt correctly if it ever encountered it. + DCHECK_NE(value, kMinInt); + // Encode the sign bit in the least significant bit. + bool is_negative = (value < 0); + uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) | + static_cast<uint32_t>(is_negative); + // Encode the individual bytes using the least significant bit of + // each byte to indicate whether or not more bytes follow. 
+ do { + uint32_t next = bits >> 7; + contents_.push_back(((bits << 1) & 0xFF) | (next != 0)); + bits = next; + } while (bits != 0); +} + +TranslationIterator::TranslationIterator(ByteArray buffer, int index) + : buffer_(buffer), index_(index) { + DCHECK(index >= 0 && index < buffer.length()); +} + +int32_t TranslationIterator::Next() { + // Run through the bytes until we reach one with a least significant + // bit of zero (marks the end). + uint32_t bits = 0; + for (int i = 0; true; i += 7) { + DCHECK(HasNext()); + uint8_t next = buffer_.get(index_++); + bits |= (next >> 1) << i; + if ((next & 1) == 0) break; + } + // The bits encode the sign in the least significant bit. + bool is_negative = (bits & 1) == 1; + int32_t result = bits >> 1; + return is_negative ? -result : result; +} + +bool TranslationIterator::HasNext() const { return index_ < buffer_.length(); } + +Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) { + Handle<ByteArray> result = + factory->NewByteArray(CurrentIndex(), AllocationType::kOld); + contents_.CopyTo(result->GetDataStartAddress()); + return result; +} + +void Translation::BeginBuiltinContinuationFrame(BailoutId bailout_id, + int literal_id, + unsigned height) { + buffer_->Add(BUILTIN_CONTINUATION_FRAME); + buffer_->Add(bailout_id.ToInt()); + buffer_->Add(literal_id); + buffer_->Add(height); +} + +void Translation::BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id, + int literal_id, + unsigned height) { + buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME); + buffer_->Add(bailout_id.ToInt()); + buffer_->Add(literal_id); + buffer_->Add(height); +} + +void Translation::BeginJavaScriptBuiltinContinuationWithCatchFrame( + BailoutId bailout_id, int literal_id, unsigned height) { + buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME); + buffer_->Add(bailout_id.ToInt()); + buffer_->Add(literal_id); + buffer_->Add(height); +} + +void Translation::BeginConstructStubFrame(BailoutId bailout_id, int 
literal_id, + unsigned height) { + buffer_->Add(CONSTRUCT_STUB_FRAME); + buffer_->Add(bailout_id.ToInt()); + buffer_->Add(literal_id); + buffer_->Add(height); +} + +void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) { + buffer_->Add(ARGUMENTS_ADAPTOR_FRAME); + buffer_->Add(literal_id); + buffer_->Add(height); +} + +void Translation::BeginInterpretedFrame(BailoutId bytecode_offset, + int literal_id, unsigned height, + int return_value_offset, + int return_value_count) { + buffer_->Add(INTERPRETED_FRAME); + buffer_->Add(bytecode_offset.ToInt()); + buffer_->Add(literal_id); + buffer_->Add(height); + buffer_->Add(return_value_offset); + buffer_->Add(return_value_count); +} + +void Translation::ArgumentsElements(CreateArgumentsType type) { + buffer_->Add(ARGUMENTS_ELEMENTS); + buffer_->Add(static_cast<uint8_t>(type)); +} + +void Translation::ArgumentsLength(CreateArgumentsType type) { + buffer_->Add(ARGUMENTS_LENGTH); + buffer_->Add(static_cast<uint8_t>(type)); +} + +void Translation::BeginCapturedObject(int length) { + buffer_->Add(CAPTURED_OBJECT); + buffer_->Add(length); +} + +void Translation::DuplicateObject(int object_index) { + buffer_->Add(DUPLICATED_OBJECT); + buffer_->Add(object_index); +} + +void Translation::StoreRegister(Register reg) { + buffer_->Add(REGISTER); + buffer_->Add(reg.code()); +} + +void Translation::StoreInt32Register(Register reg) { + buffer_->Add(INT32_REGISTER); + buffer_->Add(reg.code()); +} + +void Translation::StoreInt64Register(Register reg) { + buffer_->Add(INT64_REGISTER); + buffer_->Add(reg.code()); +} + +void Translation::StoreUint32Register(Register reg) { + buffer_->Add(UINT32_REGISTER); + buffer_->Add(reg.code()); +} + +void Translation::StoreBoolRegister(Register reg) { + buffer_->Add(BOOL_REGISTER); + buffer_->Add(reg.code()); +} + +void Translation::StoreFloatRegister(FloatRegister reg) { + buffer_->Add(FLOAT_REGISTER); + buffer_->Add(reg.code()); +} + +void 
Translation::StoreDoubleRegister(DoubleRegister reg) { + buffer_->Add(DOUBLE_REGISTER); + buffer_->Add(reg.code()); +} + +void Translation::StoreStackSlot(int index) { + buffer_->Add(STACK_SLOT); + buffer_->Add(index); +} + +void Translation::StoreInt32StackSlot(int index) { + buffer_->Add(INT32_STACK_SLOT); + buffer_->Add(index); +} + +void Translation::StoreInt64StackSlot(int index) { + buffer_->Add(INT64_STACK_SLOT); + buffer_->Add(index); +} + +void Translation::StoreUint32StackSlot(int index) { + buffer_->Add(UINT32_STACK_SLOT); + buffer_->Add(index); +} + +void Translation::StoreBoolStackSlot(int index) { + buffer_->Add(BOOL_STACK_SLOT); + buffer_->Add(index); +} + +void Translation::StoreFloatStackSlot(int index) { + buffer_->Add(FLOAT_STACK_SLOT); + buffer_->Add(index); +} + +void Translation::StoreDoubleStackSlot(int index) { + buffer_->Add(DOUBLE_STACK_SLOT); + buffer_->Add(index); +} + +void Translation::StoreLiteral(int literal_id) { + buffer_->Add(LITERAL); + buffer_->Add(literal_id); +} + +void Translation::AddUpdateFeedback(int vector_literal, int slot) { + buffer_->Add(UPDATE_FEEDBACK); + buffer_->Add(vector_literal); + buffer_->Add(slot); +} + +void Translation::StoreJSFrameFunction() { + StoreStackSlot((StandardFrameConstants::kCallerPCOffset - + StandardFrameConstants::kFunctionOffset) / + kSystemPointerSize); +} + +int Translation::NumberOfOperandsFor(Opcode opcode) { + switch (opcode) { + case DUPLICATED_OBJECT: + case ARGUMENTS_ELEMENTS: + case ARGUMENTS_LENGTH: + case CAPTURED_OBJECT: + case REGISTER: + case INT32_REGISTER: + case INT64_REGISTER: + case UINT32_REGISTER: + case BOOL_REGISTER: + case FLOAT_REGISTER: + case DOUBLE_REGISTER: + case STACK_SLOT: + case INT32_STACK_SLOT: + case INT64_STACK_SLOT: + case UINT32_STACK_SLOT: + case BOOL_STACK_SLOT: + case FLOAT_STACK_SLOT: + case DOUBLE_STACK_SLOT: + case LITERAL: + return 1; + case ARGUMENTS_ADAPTOR_FRAME: + case UPDATE_FEEDBACK: + return 2; + case BEGIN: + case CONSTRUCT_STUB_FRAME: + 
case BUILTIN_CONTINUATION_FRAME: + case JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: + case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: + return 3; + case INTERPRETED_FRAME: + return 5; + } + FATAL("Unexpected translation type"); + return -1; +} + +#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER) + +const char* Translation::StringFor(Opcode opcode) { +#define TRANSLATION_OPCODE_CASE(item) \ + case item: \ + return #item; + switch (opcode) { TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE) } +#undef TRANSLATION_OPCODE_CASE + UNREACHABLE(); +} + +#endif + +Handle<FixedArray> MaterializedObjectStore::Get(Address fp) { + int index = StackIdToIndex(fp); + if (index == -1) { + return Handle<FixedArray>::null(); + } + Handle<FixedArray> array = GetStackEntries(); + CHECK_GT(array->length(), index); + return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate())); +} + +void MaterializedObjectStore::Set(Address fp, + Handle<FixedArray> materialized_objects) { + int index = StackIdToIndex(fp); + if (index == -1) { + index = static_cast<int>(frame_fps_.size()); + frame_fps_.push_back(fp); + } + + Handle<FixedArray> array = EnsureStackEntries(index + 1); + array->set(index, *materialized_objects); +} + +bool MaterializedObjectStore::Remove(Address fp) { + auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp); + if (it == frame_fps_.end()) return false; + int index = static_cast<int>(std::distance(frame_fps_.begin(), it)); + + frame_fps_.erase(it); + FixedArray array = isolate()->heap()->materialized_objects(); + + CHECK_LT(index, array.length()); + int fps_size = static_cast<int>(frame_fps_.size()); + for (int i = index; i < fps_size; i++) { + array.set(i, array.get(i + 1)); + } + array.set(fps_size, ReadOnlyRoots(isolate()).undefined_value()); + return true; +} + +int MaterializedObjectStore::StackIdToIndex(Address fp) { + auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp); + return it == frame_fps_.end() + ? 
-1 + : static_cast<int>(std::distance(frame_fps_.begin(), it)); +} + +Handle<FixedArray> MaterializedObjectStore::GetStackEntries() { + return Handle<FixedArray>(isolate()->heap()->materialized_objects(), + isolate()); +} + +Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) { + Handle<FixedArray> array = GetStackEntries(); + if (array->length() >= length) { + return array; + } + + int new_length = length > 10 ? length : 10; + if (new_length < 2 * array->length()) { + new_length = 2 * array->length(); + } + + Handle<FixedArray> new_array = + isolate()->factory()->NewFixedArray(new_length, AllocationType::kOld); + for (int i = 0; i < array->length(); i++) { + new_array->set(i, array->get(i)); + } + HeapObject undefined_value = ReadOnlyRoots(isolate()).undefined_value(); + for (int i = array->length(); i < length; i++) { + new_array->set(i, undefined_value); + } + isolate()->heap()->SetRootMaterializedObjects(*new_array); + return new_array; +} + +namespace { + +Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it, + Isolate* isolate) { + if (it->GetRawValue() == ReadOnlyRoots(isolate).arguments_marker()) { + if (!it->IsMaterializableByDebugger()) { + return isolate->factory()->optimized_out(); + } + } + return it->GetValue(); +} + +} // namespace + +DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state, + TranslatedState::iterator frame_it, + Isolate* isolate) { + int parameter_count = + frame_it->shared_info()->internal_formal_parameter_count(); + TranslatedFrame::iterator stack_it = frame_it->begin(); + + // Get the function. Note that this might materialize the function. + // In case the debugger mutates this value, we should deoptimize + // the function and remember the value in the materialized value store. + function_ = Handle<JSFunction>::cast(stack_it->GetValue()); + stack_it++; // Skip the function. + stack_it++; // Skip the receiver. 
+ + DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind()); + source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray( + *frame_it->shared_info(), frame_it->node_id()); + + DCHECK_EQ(parameter_count, + function_->shared().internal_formal_parameter_count()); + + parameters_.resize(static_cast<size_t>(parameter_count)); + for (int i = 0; i < parameter_count; i++) { + Handle<Object> parameter = GetValueForDebugger(stack_it, isolate); + SetParameter(i, parameter); + stack_it++; + } + + // Get the context. + context_ = GetValueForDebugger(stack_it, isolate); + stack_it++; + + // Get the expression stack. + int stack_height = frame_it->height(); + if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) { + // For interpreter frames, we should not count the accumulator. + // TODO(jarin): Clean up the indexing in translated frames. + stack_height--; + } + expression_stack_.resize(static_cast<size_t>(stack_height)); + for (int i = 0; i < stack_height; i++) { + Handle<Object> expression = GetValueForDebugger(stack_it, isolate); + SetExpression(i, expression); + stack_it++; + } + + // For interpreter frame, skip the accumulator. 
+ if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) { + stack_it++; + } + CHECK(stack_it == frame_it->end()); +} + +Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) { + CHECK(code.InstructionStart() <= pc && pc <= code.InstructionEnd()); + SourcePosition last_position = SourcePosition::Unknown(); + DeoptimizeReason last_reason = DeoptimizeReason::kUnknown; + int last_deopt_id = kNoDeoptimizationId; + int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) | + RelocInfo::ModeMask(RelocInfo::DEOPT_ID) | + RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) | + RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID); + for (RelocIterator it(code, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + if (info->pc() >= pc) break; + if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) { + int script_offset = static_cast<int>(info->data()); + it.next(); + DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID); + int inlining_id = static_cast<int>(it.rinfo()->data()); + last_position = SourcePosition(script_offset, inlining_id); + } else if (info->rmode() == RelocInfo::DEOPT_ID) { + last_deopt_id = static_cast<int>(info->data()); + } else if (info->rmode() == RelocInfo::DEOPT_REASON) { + last_reason = static_cast<DeoptimizeReason>(info->data()); + } + } + return DeoptInfo(last_position, last_reason, last_deopt_id); +} + +// static +int Deoptimizer::ComputeSourcePositionFromBytecodeArray( + SharedFunctionInfo shared, BailoutId node_id) { + DCHECK(shared.HasBytecodeArray()); + return AbstractCode::cast(shared.GetBytecodeArray()) + .SourcePosition(node_id.ToInt()); +} + +// static +TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container, + int length, + int object_index) { + TranslatedValue slot(container, kCapturedObject); + slot.materialization_info_ = {object_index, length}; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container, + int id) { + 
TranslatedValue slot(container, kDuplicatedObject); + slot.materialization_info_ = {id, -1}; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewFloat(TranslatedState* container, + Float32 value) { + TranslatedValue slot(container, kFloat); + slot.float_value_ = value; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewDouble(TranslatedState* container, + Float64 value) { + TranslatedValue slot(container, kDouble); + slot.double_value_ = value; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewInt32(TranslatedState* container, + int32_t value) { + TranslatedValue slot(container, kInt32); + slot.int32_value_ = value; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewInt64(TranslatedState* container, + int64_t value) { + TranslatedValue slot(container, kInt64); + slot.int64_value_ = value; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container, + uint32_t value) { + TranslatedValue slot(container, kUInt32); + slot.uint32_value_ = value; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewBool(TranslatedState* container, + uint32_t value) { + TranslatedValue slot(container, kBoolBit); + slot.uint32_value_ = value; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewTagged(TranslatedState* container, + Object literal) { + TranslatedValue slot(container, kTagged); + slot.raw_literal_ = literal; + return slot; +} + +// static +TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) { + return TranslatedValue(container, kInvalid); +} + +Isolate* TranslatedValue::isolate() const { return container_->isolate(); } + +Object TranslatedValue::raw_literal() const { + DCHECK_EQ(kTagged, kind()); + return raw_literal_; +} + +int32_t TranslatedValue::int32_value() const { + DCHECK_EQ(kInt32, kind()); + return int32_value_; +} + +int64_t TranslatedValue::int64_value() const { + DCHECK_EQ(kInt64, kind()); + return 
int64_value_; +} + +uint32_t TranslatedValue::uint32_value() const { + DCHECK(kind() == kUInt32 || kind() == kBoolBit); + return uint32_value_; +} + +Float32 TranslatedValue::float_value() const { + DCHECK_EQ(kFloat, kind()); + return float_value_; +} + +Float64 TranslatedValue::double_value() const { + DCHECK_EQ(kDouble, kind()); + return double_value_; +} + +int TranslatedValue::object_length() const { + DCHECK_EQ(kind(), kCapturedObject); + return materialization_info_.length_; +} + +int TranslatedValue::object_index() const { + DCHECK(kind() == kCapturedObject || kind() == kDuplicatedObject); + return materialization_info_.id_; +} + +Object TranslatedValue::GetRawValue() const { + // If we have a value, return it. + if (materialization_state() == kFinished) { + return *storage_; + } + + // Otherwise, do a best effort to get the value without allocation. + switch (kind()) { + case kTagged: + return raw_literal(); + + case kInt32: { + bool is_smi = Smi::IsValid(int32_value()); + if (is_smi) { + return Smi::FromInt(int32_value()); + } + break; + } + + case kInt64: { + bool is_smi = (int64_value() >= static_cast<int64_t>(Smi::kMinValue) && + int64_value() <= static_cast<int64_t>(Smi::kMaxValue)); + if (is_smi) { + return Smi::FromIntptr(static_cast<intptr_t>(int64_value())); + } + break; + } + + case kUInt32: { + bool is_smi = (uint32_value() <= static_cast<uintptr_t>(Smi::kMaxValue)); + if (is_smi) { + return Smi::FromInt(static_cast<int32_t>(uint32_value())); + } + break; + } + + case kBoolBit: { + if (uint32_value() == 0) { + return ReadOnlyRoots(isolate()).false_value(); + } else { + CHECK_EQ(1U, uint32_value()); + return ReadOnlyRoots(isolate()).true_value(); + } + } + + default: + break; + } + + // If we could not get the value without allocation, return the arguments + // marker. 
+ return ReadOnlyRoots(isolate()).arguments_marker(); +} + +void TranslatedValue::set_initialized_storage(Handle<Object> storage) { + DCHECK_EQ(kUninitialized, materialization_state()); + storage_ = storage; + materialization_state_ = kFinished; +} + +Handle<Object> TranslatedValue::GetValue() { + // If we already have a value, then get it. + if (materialization_state() == kFinished) return storage_; + + // Otherwise we have to materialize. + switch (kind()) { + case TranslatedValue::kTagged: + case TranslatedValue::kInt32: + case TranslatedValue::kInt64: + case TranslatedValue::kUInt32: + case TranslatedValue::kBoolBit: + case TranslatedValue::kFloat: + case TranslatedValue::kDouble: { + MaterializeSimple(); + return storage_; + } + + case TranslatedValue::kCapturedObject: + case TranslatedValue::kDuplicatedObject: { + // We need to materialize the object (or possibly even object graphs). + // To make the object verifier happy, we materialize in two steps. + + // 1. Allocate storage for reachable objects. This makes sure that for + // each object we have allocated space on heap. The space will be + // a byte array that will be later initialized, or a fully + // initialized object if it is safe to allocate one that will + // pass the verifier. + container_->EnsureObjectAllocatedAt(this); + + // 2. Initialize the objects. If we have allocated only byte arrays + // for some objects, we now overwrite the byte arrays with the + // correct object fields. Note that this phase does not allocate + // any new objects, so it does not trigger the object verifier. + return container_->InitializeObjectAt(this); + } + + case TranslatedValue::kInvalid: + FATAL("unexpected case"); + return Handle<Object>::null(); + } + + FATAL("internal error: value missing"); + return Handle<Object>::null(); +} + +void TranslatedValue::MaterializeSimple() { + // If we already have materialized, return. 
+ if (materialization_state() == kFinished) return; + + Object raw_value = GetRawValue(); + if (raw_value != ReadOnlyRoots(isolate()).arguments_marker()) { + // We can get the value without allocation, just return it here. + set_initialized_storage(Handle<Object>(raw_value, isolate())); + return; + } + + switch (kind()) { + case kInt32: + set_initialized_storage( + Handle<Object>(isolate()->factory()->NewNumber(int32_value()))); + return; + + case kInt64: + set_initialized_storage(Handle<Object>( + isolate()->factory()->NewNumber(static_cast<double>(int64_value())))); + return; + + case kUInt32: + set_initialized_storage( + Handle<Object>(isolate()->factory()->NewNumber(uint32_value()))); + return; + + case kFloat: { + double scalar_value = float_value().get_scalar(); + set_initialized_storage( + Handle<Object>(isolate()->factory()->NewNumber(scalar_value))); + return; + } + + case kDouble: { + double scalar_value = double_value().get_scalar(); + set_initialized_storage( + Handle<Object>(isolate()->factory()->NewNumber(scalar_value))); + return; + } + + case kCapturedObject: + case kDuplicatedObject: + case kInvalid: + case kTagged: + case kBoolBit: + FATAL("internal error: unexpected materialization."); + break; + } +} + +bool TranslatedValue::IsMaterializedObject() const { + switch (kind()) { + case kCapturedObject: + case kDuplicatedObject: + return true; + default: + return false; + } +} + +bool TranslatedValue::IsMaterializableByDebugger() const { + // At the moment, we only allow materialization of doubles. 
+ return (kind() == kDouble); +} + +int TranslatedValue::GetChildrenCount() const { + if (kind() == kCapturedObject) { + return object_length(); + } else { + return 0; + } +} + +uint64_t TranslatedState::GetUInt64Slot(Address fp, int slot_offset) { +#if V8_TARGET_ARCH_32_BIT + return ReadUnalignedValue<uint64_t>(fp + slot_offset); +#else + return Memory<uint64_t>(fp + slot_offset); +#endif +} + +uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) { + Address address = fp + slot_offset; +#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT + return Memory<uint32_t>(address + kIntSize); +#else + return Memory<uint32_t>(address); +#endif +} + +Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) { +#if !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 + return Float32::FromBits(GetUInt32Slot(fp, slot_offset)); +#else + return Float32::FromBits(Memory<uint32_t>(fp + slot_offset)); +#endif +} + +Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) { + return Float64::FromBits(GetUInt64Slot(fp, slot_offset)); +} + +void TranslatedValue::Handlify() { + if (kind() == kTagged) { + set_initialized_storage(Handle<Object>(raw_literal(), isolate())); + raw_literal_ = Object(); + } +} + +TranslatedFrame TranslatedFrame::InterpretedFrame( + BailoutId bytecode_offset, SharedFunctionInfo shared_info, int height, + int return_value_offset, int return_value_count) { + TranslatedFrame frame(kInterpretedFunction, shared_info, height, + return_value_offset, return_value_count); + frame.node_id_ = bytecode_offset; + return frame; +} + +TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame( + SharedFunctionInfo shared_info, int height) { + return TranslatedFrame(kArgumentsAdaptor, shared_info, height); +} + +TranslatedFrame TranslatedFrame::ConstructStubFrame( + BailoutId bailout_id, SharedFunctionInfo shared_info, int height) { + TranslatedFrame frame(kConstructStub, shared_info, height); + frame.node_id_ = bailout_id; + return frame; +} + 
// Creates a TranslatedFrame describing a (non-JavaScript) builtin
// continuation; node_id_ records the bailout id at which execution resumes.
TranslatedFrame TranslatedFrame::BuiltinContinuationFrame(
    BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
  TranslatedFrame frame(kBuiltinContinuation, shared_info, height);
  frame.node_id_ = bailout_id;
  return frame;
}

// Creates a TranslatedFrame describing a JavaScript builtin continuation.
TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame(
    BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
  TranslatedFrame frame(kJavaScriptBuiltinContinuation, shared_info, height);
  frame.node_id_ = bailout_id;
  return frame;
}

// Creates a TranslatedFrame describing a JavaScript builtin continuation that
// also carries an exception handler (catch) entry.
TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
    BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
  TranslatedFrame frame(kJavaScriptBuiltinContinuationWithCatch, shared_info,
                        height);
  frame.node_id_ = bailout_id;
  return frame;
}

// Returns the number of TranslatedValue slots this frame holds, derived from
// the frame kind and the recorded height.
int TranslatedFrame::GetValueCount() {
  switch (kind()) {
    case kInterpretedFunction: {
      // +1 — presumably the implicit receiver; matches the arg_count
      // computation in CreateNextTranslatedFrame's tracing. TODO(review):
      // confirm.
      int parameter_count =
          raw_shared_info_.internal_formal_parameter_count() + 1;
      // + 2 for function and context.
      return height_ + parameter_count + 2;
    }

    case kArgumentsAdaptor:
    case kConstructStub:
    case kBuiltinContinuation:
    case kJavaScriptBuiltinContinuation:
    case kJavaScriptBuiltinContinuationWithCatch:
      // One slot for the function, plus the recorded height.
      return 1 + height_;

    case kInvalid:
      UNREACHABLE();
  }
  UNREACHABLE();
}

// Converts the raw (unhandlified) SharedFunctionInfo and all contained values
// into GC-safe handles, clearing the raw field afterwards.
void TranslatedFrame::Handlify() {
  if (!raw_shared_info_.is_null()) {
    shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_,
                                              raw_shared_info_.GetIsolate());
    raw_shared_info_ = SharedFunctionInfo();
  }
  for (auto& value : values_) {
    value.Handlify();
  }
}

// Reads the next frame-describing opcode (and its operands) from the
// translation stream and returns the corresponding TranslatedFrame. Value
// opcodes are decoded elsewhere (CreateNextTranslatedValue); encountering one
// here means the deopt info is malformed and is fatal. Optionally traces the
// decoded frame to {trace_file}.
TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
    TranslationIterator* iterator, FixedArray literal_array, Address fp,
    FILE* trace_file) {
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  switch (opcode) {
    case Translation::INTERPRETED_FRAME: {
      // Operand order must match Translation::BeginInterpretedFrame.
      BailoutId bytecode_offset = BailoutId(iterator->Next());
      SharedFunctionInfo shared_info =
          SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
      int height = iterator->Next();
      int return_value_offset = iterator->Next();
      int return_value_count = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
        PrintF(trace_file, " reading input frame %s", name.get());
        int arg_count = shared_info.internal_formal_parameter_count() + 1;
        PrintF(trace_file,
               " => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
               "inputs:\n",
               bytecode_offset.ToInt(), arg_count, height, return_value_offset,
               return_value_count);
      }
      return TranslatedFrame::InterpretedFrame(bytecode_offset, shared_info,
                                               height, return_value_offset,
                                               return_value_count);
    }

    case Translation::ARGUMENTS_ADAPTOR_FRAME: {
      SharedFunctionInfo shared_info =
          SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
        PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
        PrintF(trace_file, " => height=%d; inputs:\n", height);
      }
      return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height);
    }

    case Translation::CONSTRUCT_STUB_FRAME: {
      BailoutId bailout_id = BailoutId(iterator->Next());
      SharedFunctionInfo shared_info =
          SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
        PrintF(trace_file, " reading construct stub frame %s", name.get());
        PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
               bailout_id.ToInt(), height);
      }
      return TranslatedFrame::ConstructStubFrame(bailout_id, shared_info,
                                                 height);
    }

    case Translation::BUILTIN_CONTINUATION_FRAME: {
      BailoutId bailout_id = BailoutId(iterator->Next());
      SharedFunctionInfo shared_info =
          SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
        PrintF(trace_file, " reading builtin continuation frame %s",
               name.get());
        PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
               bailout_id.ToInt(), height);
      }
      // Add one to the height to account for the context which was implicitly
      // added to the translation during code generation.
      int height_with_context = height + 1;
      return TranslatedFrame::BuiltinContinuationFrame(bailout_id, shared_info,
                                                       height_with_context);
    }

    case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
      BailoutId bailout_id = BailoutId(iterator->Next());
      SharedFunctionInfo shared_info =
          SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
        PrintF(trace_file, " reading JavaScript builtin continuation frame %s",
               name.get());
        PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
               bailout_id.ToInt(), height);
      }
      // Add one to the height to account for the context which was implicitly
      // added to the translation during code generation.
      int height_with_context = height + 1;
      return TranslatedFrame::JavaScriptBuiltinContinuationFrame(
          bailout_id, shared_info, height_with_context);
    }
    case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
      BailoutId bailout_id = BailoutId(iterator->Next());
      SharedFunctionInfo shared_info =
          SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
        PrintF(trace_file,
               " reading JavaScript builtin continuation frame with catch %s",
               name.get());
        PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
               bailout_id.ToInt(), height);
      }
      // Add one to the height to account for the context which was implicitly
      // added to the translation during code generation.
      int height_with_context = height + 1;
      return TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
          bailout_id, shared_info, height_with_context);
    }
    // Value opcodes are handled by CreateNextTranslatedValue; seeing one here
    // means the translation stream is corrupt.
    case Translation::UPDATE_FEEDBACK:
    case Translation::BEGIN:
    case Translation::DUPLICATED_OBJECT:
    case Translation::ARGUMENTS_ELEMENTS:
    case Translation::ARGUMENTS_LENGTH:
    case Translation::CAPTURED_OBJECT:
    case Translation::REGISTER:
    case Translation::INT32_REGISTER:
    case Translation::INT64_REGISTER:
    case Translation::UINT32_REGISTER:
    case Translation::BOOL_REGISTER:
    case Translation::FLOAT_REGISTER:
    case Translation::DOUBLE_REGISTER:
    case Translation::STACK_SLOT:
    case Translation::INT32_STACK_SLOT:
    case Translation::INT64_STACK_SLOT:
    case Translation::UINT32_STACK_SLOT:
    case Translation::BOOL_STACK_SLOT:
    case Translation::FLOAT_STACK_SLOT:
    case Translation::DOUBLE_STACK_SLOT:
    case Translation::LITERAL:
      break;
  }
  FATAL("We should never get here - unexpected deopt info.");
  return TranslatedFrame::InvalidFrame();
}

// static
// Advances {iter} past the current value and all of its transitive children
// (captured objects report their field count via GetChildrenCount; scalar
// values report zero).
void TranslatedFrame::AdvanceIterator(
    std::deque<TranslatedValue>::iterator* iter) {
  int values_to_skip = 1;
  while (values_to_skip > 0) {
    // Consume the current element.
    values_to_skip--;
    // Add all the children.
    values_to_skip += (*iter)->GetChildrenCount();

    (*iter)++;
  }
}

// Determines where the arguments for the current frame live and, if {length}
// is non-null, how many there are. If the caller frame is an arguments
// adaptor, the (dynamic) argument count and slots come from the adaptor
// frame; otherwise the formal parameter count and the input frame are used.
// For rest parameters the length is reduced to only the extra arguments
// beyond the formal parameters.
Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
                                                  CreateArgumentsType type,
                                                  int* length) {
  Address parent_frame_pointer = *reinterpret_cast<Address*>(
      input_frame_pointer + StandardFrameConstants::kCallerFPOffset);
  intptr_t parent_frame_type = Memory<intptr_t>(
      parent_frame_pointer + CommonFrameConstants::kContextOrFrameTypeOffset);

  Address arguments_frame;
  if (parent_frame_type ==
      StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)) {
    if (length)
      *length = Smi::cast(*FullObjectSlot(
                              parent_frame_pointer +
                              ArgumentsAdaptorFrameConstants::kLengthOffset))
                    .value();
    arguments_frame = parent_frame_pointer;
  } else {
    if (length) *length = formal_parameter_count_;
    arguments_frame = input_frame_pointer;
  }

  if (type == CreateArgumentsType::kRestParameter) {
    // If the actual number of arguments is less than the number of formal
    // parameters, we have zero rest parameters.
    if (length) *length = std::max(0, *length - formal_parameter_count_);
  }

  return arguments_frame;
}

// Creates translated values for an arguments backing store, or the backing
// store for rest parameters depending on the given {type}. The TranslatedValue
// objects for the fields are not read from the TranslationIterator, but instead
// created on-the-fly based on dynamic information in the optimized frame.
void TranslatedState::CreateArgumentsElementsTranslatedValues(
    int frame_index, Address input_frame_pointer, CreateArgumentsType type,
    FILE* trace_file) {
  TranslatedFrame& frame = frames_[frame_index];

  // Locate the argument slots and their count for this frame.
  int length;
  Address arguments_frame =
      ComputeArgumentsPosition(input_frame_pointer, type, &length);

  int object_index = static_cast<int>(object_positions_.size());
  int value_index = static_cast<int>(frame.values_.size());
  if (trace_file != nullptr) {
    PrintF(trace_file, "arguments elements object #%d (type = %d, length = %d)",
           object_index, static_cast<uint8_t>(type), length);
  }

  // Register a deferred object shaped like a FixedArray: header slots
  // (map, length) plus {length} element slots. NOTE: the order of the
  // frame.Add calls below defines the materialized object's field layout
  // and must not change.
  object_positions_.push_back({frame_index, value_index});
  frame.Add(TranslatedValue::NewDeferredObject(
      this, length + FixedArray::kHeaderSize / kTaggedSize, object_index));

  ReadOnlyRoots roots(isolate_);
  frame.Add(TranslatedValue::NewTagged(this, roots.fixed_array_map()));
  frame.Add(TranslatedValue::NewInt32(this, length));

  // For mapped arguments, the leading (mapped) entries are holes; the
  // actual values live in the context-mapped parameter slots.
  int number_of_holes = 0;
  if (type == CreateArgumentsType::kMappedArguments) {
    // If the actual number of arguments is less than the number of formal
    // parameters, we have fewer holes to fill to not overshoot the length.
    number_of_holes = Min(formal_parameter_count_, length);
  }
  for (int i = 0; i < number_of_holes; ++i) {
    frame.Add(TranslatedValue::NewTagged(this, roots.the_hole_value()));
  }
  // Remaining elements are read directly from the stack slots above the
  // frame pointer; iterate in reverse so element 0 ends up last-read.
  for (int i = length - number_of_holes - 1; i >= 0; --i) {
    Address argument_slot = arguments_frame +
                            CommonFrameConstants::kFixedFrameSizeAboveFp +
                            i * kSystemPointerSize;
    frame.Add(TranslatedValue::NewTagged(this, *FullObjectSlot(argument_slot)));
  }
}

// We can't intermix stack decoding and allocations because the deoptimization
// infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
+// The TranslatedValue objects created correspond to the static translation +// instructions from the TranslationIterator, except for +// Translation::ARGUMENTS_ELEMENTS, where the number and values of the +// FixedArray elements depend on dynamic information from the optimized frame. +// Returns the number of expected nested translations from the +// TranslationIterator. +int TranslatedState::CreateNextTranslatedValue( + int frame_index, TranslationIterator* iterator, FixedArray literal_array, + Address fp, RegisterValues* registers, FILE* trace_file) { + disasm::NameConverter converter; + + TranslatedFrame& frame = frames_[frame_index]; + int value_index = static_cast<int>(frame.values_.size()); + + Translation::Opcode opcode = + static_cast<Translation::Opcode>(iterator->Next()); + switch (opcode) { + case Translation::BEGIN: + case Translation::INTERPRETED_FRAME: + case Translation::ARGUMENTS_ADAPTOR_FRAME: + case Translation::CONSTRUCT_STUB_FRAME: + case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: + case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: + case Translation::BUILTIN_CONTINUATION_FRAME: + case Translation::UPDATE_FEEDBACK: + // Peeled off before getting here. 
+ break; + + case Translation::DUPLICATED_OBJECT: { + int object_id = iterator->Next(); + if (trace_file != nullptr) { + PrintF(trace_file, "duplicated object #%d", object_id); + } + object_positions_.push_back(object_positions_[object_id]); + TranslatedValue translated_value = + TranslatedValue::NewDuplicateObject(this, object_id); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::ARGUMENTS_ELEMENTS: { + CreateArgumentsType arguments_type = + static_cast<CreateArgumentsType>(iterator->Next()); + CreateArgumentsElementsTranslatedValues(frame_index, fp, arguments_type, + trace_file); + return 0; + } + + case Translation::ARGUMENTS_LENGTH: { + CreateArgumentsType arguments_type = + static_cast<CreateArgumentsType>(iterator->Next()); + int length; + ComputeArgumentsPosition(fp, arguments_type, &length); + if (trace_file != nullptr) { + PrintF(trace_file, "arguments length field (type = %d, length = %d)", + static_cast<uint8_t>(arguments_type), length); + } + frame.Add(TranslatedValue::NewInt32(this, length)); + return 0; + } + + case Translation::CAPTURED_OBJECT: { + int field_count = iterator->Next(); + int object_index = static_cast<int>(object_positions_.size()); + if (trace_file != nullptr) { + PrintF(trace_file, "captured object #%d (length = %d)", object_index, + field_count); + } + object_positions_.push_back({frame_index, value_index}); + TranslatedValue translated_value = + TranslatedValue::NewDeferredObject(this, field_count, object_index); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::REGISTER: { + int input_reg = iterator->Next(); + if (registers == nullptr) { + TranslatedValue translated_value = TranslatedValue::NewInvalid(this); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + intptr_t value = registers->GetRegister(input_reg); +#if defined(V8_COMPRESS_POINTERS) + Address uncompressed_value = DecompressTaggedAny( 
+ isolate()->isolate_root(), static_cast<uint32_t>(value)); +#else + Address uncompressed_value = value; +#endif + if (trace_file != nullptr) { + PrintF(trace_file, V8PRIxPTR_FMT " ; %s ", uncompressed_value, + converter.NameOfCPURegister(input_reg)); + Object(uncompressed_value).ShortPrint(trace_file); + } + TranslatedValue translated_value = + TranslatedValue::NewTagged(this, Object(uncompressed_value)); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::INT32_REGISTER: { + int input_reg = iterator->Next(); + if (registers == nullptr) { + TranslatedValue translated_value = TranslatedValue::NewInvalid(this); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + intptr_t value = registers->GetRegister(input_reg); + if (trace_file != nullptr) { + PrintF(trace_file, "%" V8PRIdPTR " ; %s (int32)", value, + converter.NameOfCPURegister(input_reg)); + } + TranslatedValue translated_value = + TranslatedValue::NewInt32(this, static_cast<int32_t>(value)); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::INT64_REGISTER: { + int input_reg = iterator->Next(); + if (registers == nullptr) { + TranslatedValue translated_value = TranslatedValue::NewInvalid(this); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + intptr_t value = registers->GetRegister(input_reg); + if (trace_file != nullptr) { + PrintF(trace_file, "%" V8PRIdPTR " ; %s (int64)", value, + converter.NameOfCPURegister(input_reg)); + } + TranslatedValue translated_value = + TranslatedValue::NewInt64(this, static_cast<int64_t>(value)); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::UINT32_REGISTER: { + int input_reg = iterator->Next(); + if (registers == nullptr) { + TranslatedValue translated_value = TranslatedValue::NewInvalid(this); + frame.Add(translated_value); + return 
translated_value.GetChildrenCount(); + } + intptr_t value = registers->GetRegister(input_reg); + if (trace_file != nullptr) { + PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint32)", value, + converter.NameOfCPURegister(input_reg)); + } + TranslatedValue translated_value = + TranslatedValue::NewUInt32(this, static_cast<uint32_t>(value)); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::BOOL_REGISTER: { + int input_reg = iterator->Next(); + if (registers == nullptr) { + TranslatedValue translated_value = TranslatedValue::NewInvalid(this); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + intptr_t value = registers->GetRegister(input_reg); + if (trace_file != nullptr) { + PrintF(trace_file, "%" V8PRIdPTR " ; %s (bool)", value, + converter.NameOfCPURegister(input_reg)); + } + TranslatedValue translated_value = + TranslatedValue::NewBool(this, static_cast<uint32_t>(value)); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::FLOAT_REGISTER: { + int input_reg = iterator->Next(); + if (registers == nullptr) { + TranslatedValue translated_value = TranslatedValue::NewInvalid(this); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + Float32 value = registers->GetFloatRegister(input_reg); + if (trace_file != nullptr) { + PrintF(trace_file, "%e ; %s (float)", value.get_scalar(), + RegisterName(FloatRegister::from_code(input_reg))); + } + TranslatedValue translated_value = TranslatedValue::NewFloat(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::DOUBLE_REGISTER: { + int input_reg = iterator->Next(); + if (registers == nullptr) { + TranslatedValue translated_value = TranslatedValue::NewInvalid(this); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + Float64 value = registers->GetDoubleRegister(input_reg); + if 
(trace_file != nullptr) { + PrintF(trace_file, "%e ; %s (double)", value.get_scalar(), + RegisterName(DoubleRegister::from_code(input_reg))); + } + TranslatedValue translated_value = + TranslatedValue::NewDouble(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::STACK_SLOT: { + int slot_offset = + OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); + intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset)); +#if defined(V8_COMPRESS_POINTERS) + Address uncompressed_value = DecompressTaggedAny( + isolate()->isolate_root(), static_cast<uint32_t>(value)); +#else + Address uncompressed_value = value; +#endif + if (trace_file != nullptr) { + PrintF(trace_file, V8PRIxPTR_FMT " ; [fp %c %3d] ", + uncompressed_value, slot_offset < 0 ? '-' : '+', + std::abs(slot_offset)); + Object(uncompressed_value).ShortPrint(trace_file); + } + TranslatedValue translated_value = + TranslatedValue::NewTagged(this, Object(uncompressed_value)); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::INT32_STACK_SLOT: { + int slot_offset = + OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); + uint32_t value = GetUInt32Slot(fp, slot_offset); + if (trace_file != nullptr) { + PrintF(trace_file, "%d ; (int32) [fp %c %3d] ", + static_cast<int32_t>(value), slot_offset < 0 ? '-' : '+', + std::abs(slot_offset)); + } + TranslatedValue translated_value = TranslatedValue::NewInt32(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::INT64_STACK_SLOT: { + int slot_offset = + OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); + uint64_t value = GetUInt64Slot(fp, slot_offset); + if (trace_file != nullptr) { + PrintF(trace_file, "%" V8PRIdPTR " ; (int64) [fp %c %3d] ", + static_cast<intptr_t>(value), slot_offset < 0 ? 
'-' : '+', + std::abs(slot_offset)); + } + TranslatedValue translated_value = TranslatedValue::NewInt64(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::UINT32_STACK_SLOT: { + int slot_offset = + OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); + uint32_t value = GetUInt32Slot(fp, slot_offset); + if (trace_file != nullptr) { + PrintF(trace_file, "%u ; (uint32) [fp %c %3d] ", value, + slot_offset < 0 ? '-' : '+', std::abs(slot_offset)); + } + TranslatedValue translated_value = + TranslatedValue::NewUInt32(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::BOOL_STACK_SLOT: { + int slot_offset = + OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); + uint32_t value = GetUInt32Slot(fp, slot_offset); + if (trace_file != nullptr) { + PrintF(trace_file, "%u ; (bool) [fp %c %3d] ", value, + slot_offset < 0 ? '-' : '+', std::abs(slot_offset)); + } + TranslatedValue translated_value = TranslatedValue::NewBool(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::FLOAT_STACK_SLOT: { + int slot_offset = + OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); + Float32 value = GetFloatSlot(fp, slot_offset); + if (trace_file != nullptr) { + PrintF(trace_file, "%e ; (float) [fp %c %3d] ", value.get_scalar(), + slot_offset < 0 ? '-' : '+', std::abs(slot_offset)); + } + TranslatedValue translated_value = TranslatedValue::NewFloat(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::DOUBLE_STACK_SLOT: { + int slot_offset = + OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next()); + Float64 value = GetDoubleSlot(fp, slot_offset); + if (trace_file != nullptr) { + PrintF(trace_file, "%e ; (double) [fp %c %d] ", value.get_scalar(), + slot_offset < 0 ? 
'-' : '+', std::abs(slot_offset)); + } + TranslatedValue translated_value = + TranslatedValue::NewDouble(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + + case Translation::LITERAL: { + int literal_index = iterator->Next(); + Object value = literal_array.get(literal_index); + if (trace_file != nullptr) { + PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ", value.ptr(), + literal_index); + value.ShortPrint(trace_file); + } + + TranslatedValue translated_value = + TranslatedValue::NewTagged(this, value); + frame.Add(translated_value); + return translated_value.GetChildrenCount(); + } + } + + FATAL("We should never get here - unexpected deopt info."); +} + +TranslatedState::TranslatedState(const JavaScriptFrame* frame) { + int deopt_index = Safepoint::kNoDeoptimizationIndex; + DeoptimizationData data = + static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData( + &deopt_index); + DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex); + TranslationIterator it(data.TranslationByteArray(), + data.TranslationIndex(deopt_index).value()); + Init(frame->isolate(), frame->fp(), &it, data.LiteralArray(), + nullptr /* registers */, nullptr /* trace file */, + frame->function().shared().internal_formal_parameter_count()); +} + +void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer, + TranslationIterator* iterator, + FixedArray literal_array, RegisterValues* registers, + FILE* trace_file, int formal_parameter_count) { + DCHECK(frames_.empty()); + + formal_parameter_count_ = formal_parameter_count; + isolate_ = isolate; + + // Read out the 'header' translation. + Translation::Opcode opcode = + static_cast<Translation::Opcode>(iterator->Next()); + CHECK(opcode == Translation::BEGIN); + + int count = iterator->Next(); + frames_.reserve(count); + iterator->Next(); // Drop JS frames count. 
+ int update_feedback_count = iterator->Next(); + CHECK_GE(update_feedback_count, 0); + CHECK_LE(update_feedback_count, 1); + + if (update_feedback_count == 1) { + ReadUpdateFeedback(iterator, literal_array, trace_file); + } + + std::stack<int> nested_counts; + + // Read the frames + for (int frame_index = 0; frame_index < count; frame_index++) { + // Read the frame descriptor. + frames_.push_back(CreateNextTranslatedFrame( + iterator, literal_array, input_frame_pointer, trace_file)); + TranslatedFrame& frame = frames_.back(); + + // Read the values. + int values_to_process = frame.GetValueCount(); + while (values_to_process > 0 || !nested_counts.empty()) { + if (trace_file != nullptr) { + if (nested_counts.empty()) { + // For top level values, print the value number. + PrintF(trace_file, + " %3i: ", frame.GetValueCount() - values_to_process); + } else { + // Take care of indenting for nested values. + PrintF(trace_file, " "); + for (size_t j = 0; j < nested_counts.size(); j++) { + PrintF(trace_file, " "); + } + } + } + + int nested_count = + CreateNextTranslatedValue(frame_index, iterator, literal_array, + input_frame_pointer, registers, trace_file); + + if (trace_file != nullptr) { + PrintF(trace_file, "\n"); + } + + // Update the value count and resolve the nesting. 
+ values_to_process--; + if (nested_count > 0) { + nested_counts.push(values_to_process); + values_to_process = nested_count; + } else { + while (values_to_process == 0 && !nested_counts.empty()) { + values_to_process = nested_counts.top(); + nested_counts.pop(); + } + } + } + } + + CHECK(!iterator->HasNext() || static_cast<Translation::Opcode>( + iterator->Next()) == Translation::BEGIN); +} + +void TranslatedState::Prepare(Address stack_frame_pointer) { + for (auto& frame : frames_) frame.Handlify(); + + if (!feedback_vector_.is_null()) { + feedback_vector_handle_ = + Handle<FeedbackVector>(feedback_vector_, isolate()); + feedback_vector_ = FeedbackVector(); + } + stack_frame_pointer_ = stack_frame_pointer; + + UpdateFromPreviouslyMaterializedObjects(); +} + +TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) { + CHECK_LT(static_cast<size_t>(object_index), object_positions_.size()); + TranslatedState::ObjectPosition pos = object_positions_[object_index]; + return &(frames_[pos.frame_index_].values_[pos.value_index_]); +} + +Handle<Object> TranslatedState::InitializeObjectAt(TranslatedValue* slot) { + slot = ResolveCapturedObject(slot); + + DisallowHeapAllocation no_allocation; + if (slot->materialization_state() != TranslatedValue::kFinished) { + std::stack<int> worklist; + worklist.push(slot->object_index()); + slot->mark_finished(); + + while (!worklist.empty()) { + int index = worklist.top(); + worklist.pop(); + InitializeCapturedObjectAt(index, &worklist, no_allocation); + } + } + return slot->GetStorage(); +} + +void TranslatedState::InitializeCapturedObjectAt( + int object_index, std::stack<int>* worklist, + const DisallowHeapAllocation& no_allocation) { + CHECK_LT(static_cast<size_t>(object_index), object_positions_.size()); + TranslatedState::ObjectPosition pos = object_positions_[object_index]; + int value_index = pos.value_index_; + + TranslatedFrame* frame = &(frames_[pos.frame_index_]); + TranslatedValue* slot = 
&(frame->values_[value_index]); + value_index++; + + CHECK_EQ(TranslatedValue::kFinished, slot->materialization_state()); + CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind()); + + // Ensure all fields are initialized. + int children_init_index = value_index; + for (int i = 0; i < slot->GetChildrenCount(); i++) { + // If the field is an object that has not been initialized yet, queue it + // for initialization (and mark it as such). + TranslatedValue* child_slot = frame->ValueAt(children_init_index); + if (child_slot->kind() == TranslatedValue::kCapturedObject || + child_slot->kind() == TranslatedValue::kDuplicatedObject) { + child_slot = ResolveCapturedObject(child_slot); + if (child_slot->materialization_state() != TranslatedValue::kFinished) { + DCHECK_EQ(TranslatedValue::kAllocated, + child_slot->materialization_state()); + worklist->push(child_slot->object_index()); + child_slot->mark_finished(); + } + } + SkipSlots(1, frame, &children_init_index); + } + + // Read the map. + // The map should never be materialized, so let us check we already have + // an existing object here. + CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged); + Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue()); + CHECK(map->IsMap()); + value_index++; + + // Handle the special cases. 
+ switch (map->instance_type()) { + case MUTABLE_HEAP_NUMBER_TYPE: + case FIXED_DOUBLE_ARRAY_TYPE: + return; + + case FIXED_ARRAY_TYPE: + case AWAIT_CONTEXT_TYPE: + case BLOCK_CONTEXT_TYPE: + case CATCH_CONTEXT_TYPE: + case DEBUG_EVALUATE_CONTEXT_TYPE: + case EVAL_CONTEXT_TYPE: + case FUNCTION_CONTEXT_TYPE: + case MODULE_CONTEXT_TYPE: + case NATIVE_CONTEXT_TYPE: + case SCRIPT_CONTEXT_TYPE: + case WITH_CONTEXT_TYPE: + case OBJECT_BOILERPLATE_DESCRIPTION_TYPE: + case HASH_TABLE_TYPE: + case ORDERED_HASH_MAP_TYPE: + case ORDERED_HASH_SET_TYPE: + case NAME_DICTIONARY_TYPE: + case GLOBAL_DICTIONARY_TYPE: + case NUMBER_DICTIONARY_TYPE: + case SIMPLE_NUMBER_DICTIONARY_TYPE: + case STRING_TABLE_TYPE: + case PROPERTY_ARRAY_TYPE: + case SCRIPT_CONTEXT_TABLE_TYPE: + InitializeObjectWithTaggedFieldsAt(frame, &value_index, slot, map, + no_allocation); + break; + + default: + CHECK(map->IsJSObjectMap()); + InitializeJSObjectAt(frame, &value_index, slot, map, no_allocation); + break; + } + CHECK_EQ(value_index, children_init_index); +} + +void TranslatedState::EnsureObjectAllocatedAt(TranslatedValue* slot) { + slot = ResolveCapturedObject(slot); + + if (slot->materialization_state() == TranslatedValue::kUninitialized) { + std::stack<int> worklist; + worklist.push(slot->object_index()); + slot->mark_allocated(); + + while (!worklist.empty()) { + int index = worklist.top(); + worklist.pop(); + EnsureCapturedObjectAllocatedAt(index, &worklist); + } + } +} + +void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame, + int* value_index, + TranslatedValue* slot, + Handle<Map> map) { + int length = Smi::cast(frame->values_[*value_index].GetRawValue()).value(); + (*value_index)++; + Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast( + isolate()->factory()->NewFixedDoubleArray(length)); + CHECK_GT(length, 0); + for (int i = 0; i < length; i++) { + CHECK_NE(TranslatedValue::kCapturedObject, + frame->values_[*value_index].kind()); + Handle<Object> value = 
frame->values_[*value_index].GetValue(); + if (value->IsNumber()) { + array->set(i, value->Number()); + } else { + CHECK(value.is_identical_to(isolate()->factory()->the_hole_value())); + array->set_the_hole(isolate(), i); + } + (*value_index)++; + } + slot->set_storage(array); +} + +void TranslatedState::MaterializeMutableHeapNumber(TranslatedFrame* frame, + int* value_index, + TranslatedValue* slot) { + CHECK_NE(TranslatedValue::kCapturedObject, + frame->values_[*value_index].kind()); + Handle<Object> value = frame->values_[*value_index].GetValue(); + CHECK(value->IsNumber()); + Handle<MutableHeapNumber> box = + isolate()->factory()->NewMutableHeapNumber(value->Number()); + (*value_index)++; + slot->set_storage(box); +} + +namespace { + +enum DoubleStorageKind : uint8_t { + kStoreTagged, + kStoreUnboxedDouble, + kStoreMutableHeapNumber, +}; + +} // namespace + +void TranslatedState::SkipSlots(int slots_to_skip, TranslatedFrame* frame, + int* value_index) { + while (slots_to_skip > 0) { + TranslatedValue* slot = &(frame->values_[*value_index]); + (*value_index)++; + slots_to_skip--; + + if (slot->kind() == TranslatedValue::kCapturedObject) { + slots_to_skip += slot->GetChildrenCount(); + } + } +} + +void TranslatedState::EnsureCapturedObjectAllocatedAt( + int object_index, std::stack<int>* worklist) { + CHECK_LT(static_cast<size_t>(object_index), object_positions_.size()); + TranslatedState::ObjectPosition pos = object_positions_[object_index]; + int value_index = pos.value_index_; + + TranslatedFrame* frame = &(frames_[pos.frame_index_]); + TranslatedValue* slot = &(frame->values_[value_index]); + value_index++; + + CHECK_EQ(TranslatedValue::kAllocated, slot->materialization_state()); + CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind()); + + // Read the map. + // The map should never be materialized, so let us check we already have + // an existing object here. 
+ CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged); + Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue()); + CHECK(map->IsMap()); + value_index++; + + // Handle the special cases. + switch (map->instance_type()) { + case FIXED_DOUBLE_ARRAY_TYPE: + // Materialize (i.e. allocate&initialize) the array and return since + // there is no need to process the children. + return MaterializeFixedDoubleArray(frame, &value_index, slot, map); + + case MUTABLE_HEAP_NUMBER_TYPE: + // Materialize (i.e. allocate&initialize) the heap number and return. + // There is no need to process the children. + return MaterializeMutableHeapNumber(frame, &value_index, slot); + + case FIXED_ARRAY_TYPE: + case SCRIPT_CONTEXT_TABLE_TYPE: + case AWAIT_CONTEXT_TYPE: + case BLOCK_CONTEXT_TYPE: + case CATCH_CONTEXT_TYPE: + case DEBUG_EVALUATE_CONTEXT_TYPE: + case EVAL_CONTEXT_TYPE: + case FUNCTION_CONTEXT_TYPE: + case MODULE_CONTEXT_TYPE: + case NATIVE_CONTEXT_TYPE: + case SCRIPT_CONTEXT_TYPE: + case WITH_CONTEXT_TYPE: + case HASH_TABLE_TYPE: + case ORDERED_HASH_MAP_TYPE: + case ORDERED_HASH_SET_TYPE: + case NAME_DICTIONARY_TYPE: + case GLOBAL_DICTIONARY_TYPE: + case NUMBER_DICTIONARY_TYPE: + case SIMPLE_NUMBER_DICTIONARY_TYPE: + case STRING_TABLE_TYPE: { + // Check we have the right size. + int array_length = + Smi::cast(frame->values_[value_index].GetRawValue()).value(); + + int instance_size = FixedArray::SizeFor(array_length); + CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize); + + // Canonicalize empty fixed array. + if (*map == ReadOnlyRoots(isolate()).empty_fixed_array().map() && + array_length == 0) { + slot->set_storage(isolate()->factory()->empty_fixed_array()); + } else { + slot->set_storage(AllocateStorageFor(slot)); + } + + // Make sure all the remaining children (after the map) are allocated. 
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame, + &value_index, worklist); + } + + case PROPERTY_ARRAY_TYPE: { + // Check we have the right size. + int length_or_hash = + Smi::cast(frame->values_[value_index].GetRawValue()).value(); + int array_length = PropertyArray::LengthField::decode(length_or_hash); + int instance_size = PropertyArray::SizeFor(array_length); + CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize); + + slot->set_storage(AllocateStorageFor(slot)); + // Make sure all the remaining children (after the map) are allocated. + return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame, + &value_index, worklist); + } + + default: + CHECK(map->IsJSObjectMap()); + EnsureJSObjectAllocated(slot, map); + TranslatedValue* properties_slot = &(frame->values_[value_index]); + value_index++; + if (properties_slot->kind() == TranslatedValue::kCapturedObject) { + // If we are materializing the property array, make sure we put + // the mutable heap numbers at the right places. + EnsurePropertiesAllocatedAndMarked(properties_slot, map); + EnsureChildrenAllocated(properties_slot->GetChildrenCount(), frame, + &value_index, worklist); + } + // Make sure all the remaining children (after the map and properties) are + // allocated. + return EnsureChildrenAllocated(slot->GetChildrenCount() - 2, frame, + &value_index, worklist); + } + UNREACHABLE(); +} + +void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame, + int* value_index, + std::stack<int>* worklist) { + // Ensure all children are allocated. + for (int i = 0; i < count; i++) { + // If the field is an object that has not been allocated yet, queue it + // for initialization (and mark it as such). 
+ TranslatedValue* child_slot = frame->ValueAt(*value_index); + if (child_slot->kind() == TranslatedValue::kCapturedObject || + child_slot->kind() == TranslatedValue::kDuplicatedObject) { + child_slot = ResolveCapturedObject(child_slot); + if (child_slot->materialization_state() == + TranslatedValue::kUninitialized) { + worklist->push(child_slot->object_index()); + child_slot->mark_allocated(); + } + } else { + // Make sure the simple values (heap numbers, etc.) are properly + // initialized. + child_slot->MaterializeSimple(); + } + SkipSlots(1, frame, value_index); + } +} + +void TranslatedState::EnsurePropertiesAllocatedAndMarked( + TranslatedValue* properties_slot, Handle<Map> map) { + CHECK_EQ(TranslatedValue::kUninitialized, + properties_slot->materialization_state()); + + Handle<ByteArray> object_storage = AllocateStorageFor(properties_slot); + properties_slot->mark_allocated(); + properties_slot->set_storage(object_storage); + + // Set markers for the double properties. + Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); + int field_count = map->NumberOfOwnDescriptors(); + for (int i = 0; i < field_count; i++) { + FieldIndex index = FieldIndex::ForDescriptor(*map, i); + if (descriptors->GetDetails(i).representation().IsDouble() && + !index.is_inobject()) { + CHECK(!map->IsUnboxedDoubleField(index)); + int outobject_index = index.outobject_array_index(); + int array_index = outobject_index * kTaggedSize; + object_storage->set(array_index, kStoreMutableHeapNumber); + } + } +} + +Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) { + int allocate_size = + ByteArray::LengthFor(slot->GetChildrenCount() * kTaggedSize); + // It is important to allocate all the objects tenured so that the marker + // does not visit them. 
+ Handle<ByteArray> object_storage = + isolate()->factory()->NewByteArray(allocate_size, AllocationType::kOld); + for (int i = 0; i < object_storage->length(); i++) { + object_storage->set(i, kStoreTagged); + } + return object_storage; +} + +void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot, + Handle<Map> map) { + CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kTaggedSize); + + Handle<ByteArray> object_storage = AllocateStorageFor(slot); + // Now we handle the interesting (JSObject) case. + Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); + int field_count = map->NumberOfOwnDescriptors(); + + // Set markers for the double properties. + for (int i = 0; i < field_count; i++) { + FieldIndex index = FieldIndex::ForDescriptor(*map, i); + if (descriptors->GetDetails(i).representation().IsDouble() && + index.is_inobject()) { + CHECK_GE(index.index(), FixedArray::kHeaderSize / kTaggedSize); + int array_index = index.index() * kTaggedSize - FixedArray::kHeaderSize; + uint8_t marker = map->IsUnboxedDoubleField(index) + ? 
kStoreUnboxedDouble + : kStoreMutableHeapNumber; + object_storage->set(array_index, marker); + } + } + slot->set_storage(object_storage); +} + +Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame, + int* value_index) { + TranslatedValue* slot = frame->ValueAt(*value_index); + SkipSlots(1, frame, value_index); + if (slot->kind() == TranslatedValue::kDuplicatedObject) { + slot = ResolveCapturedObject(slot); + } + CHECK_NE(TranslatedValue::kUninitialized, slot->materialization_state()); + return slot->GetStorage(); +} + +void TranslatedState::InitializeJSObjectAt( + TranslatedFrame* frame, int* value_index, TranslatedValue* slot, + Handle<Map> map, const DisallowHeapAllocation& no_allocation) { + Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_); + DCHECK_EQ(TranslatedValue::kCapturedObject, slot->kind()); + + // The object should have at least a map and some payload. + CHECK_GE(slot->GetChildrenCount(), 2); + + // Notify the concurrent marker about the layout change. + isolate()->heap()->NotifyObjectLayoutChange( + *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation); + + // Fill the property array field. + { + Handle<Object> properties = GetValueAndAdvance(frame, value_index); + WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset, + *properties); + WRITE_BARRIER(*object_storage, JSObject::kPropertiesOrHashOffset, + *properties); + } + + // For all the other fields we first look at the fixed array and check the + // marker to see if we store an unboxed double. + DCHECK_EQ(kTaggedSize, JSObject::kPropertiesOrHashOffset); + for (int i = 2; i < slot->GetChildrenCount(); i++) { + // Initialize and extract the value from its slot. + Handle<Object> field_value = GetValueAndAdvance(frame, value_index); + + // Read out the marker and ensure the field is consistent with + // what the markers in the storage say (note that all heap numbers + // should be fully initialized by now). 
    int offset = i * kTaggedSize;
    uint8_t marker = object_storage->ReadField<uint8_t>(offset);
    if (marker == kStoreUnboxedDouble) {
      double double_field_value;
      if (field_value->IsSmi()) {
        double_field_value = Smi::cast(*field_value).value();
      } else {
        CHECK(field_value->IsHeapNumber());
        double_field_value = HeapNumber::cast(*field_value).value();
      }
      object_storage->WriteField<double>(offset, double_field_value);
    } else if (marker == kStoreMutableHeapNumber) {
      CHECK(field_value->IsMutableHeapNumber());
      WRITE_FIELD(*object_storage, offset, *field_value);
      WRITE_BARRIER(*object_storage, offset, *field_value);
    } else {
      CHECK_EQ(kStoreTagged, marker);
      WRITE_FIELD(*object_storage, offset, *field_value);
      WRITE_BARRIER(*object_storage, offset, *field_value);
    }
  }
  object_storage->synchronized_set_map(*map);
}

// Initializes a materialized object whose fields are all tagged (e.g. a
// FixedArray), reading the field values from the translated frame and
// advancing {value_index} past them.
void TranslatedState::InitializeObjectWithTaggedFieldsAt(
    TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
    Handle<Map> map, const DisallowHeapAllocation& no_allocation) {
  Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);

  // Skip the writes if we already have the canonical empty fixed array.
  if (*object_storage == ReadOnlyRoots(isolate()).empty_fixed_array()) {
    CHECK_EQ(2, slot->GetChildrenCount());
    Handle<Object> length_value = GetValueAndAdvance(frame, value_index);
    CHECK_EQ(*length_value, Smi::FromInt(0));
    return;
  }

  // Notify the concurrent marker about the layout change.
  isolate()->heap()->NotifyObjectLayoutChange(
      *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);

  // Write the fields to the object.
  // Child 0 is the map (already consumed by the caller); fields start at
  // offset kTaggedSize, hence i starts at 1.
  for (int i = 1; i < slot->GetChildrenCount(); i++) {
    Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
    int offset = i * kTaggedSize;
    uint8_t marker = object_storage->ReadField<uint8_t>(offset);
    if (i > 1 && marker == kStoreMutableHeapNumber) {
      CHECK(field_value->IsMutableHeapNumber());
    } else {
      CHECK(marker == kStoreTagged || i == 1);
      CHECK(!field_value->IsMutableHeapNumber());
    }

    WRITE_FIELD(*object_storage, offset, *field_value);
    WRITE_BARRIER(*object_storage, offset, *field_value);
  }

  object_storage->synchronized_set_map(*map);
}

// Follows kDuplicatedObject indirections until the underlying captured
// object slot is reached.
TranslatedValue* TranslatedState::ResolveCapturedObject(TranslatedValue* slot) {
  while (slot->kind() == TranslatedValue::kDuplicatedObject) {
    slot = GetValueByObjectIndex(slot->object_index());
  }
  CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
  return slot;
}

// Returns the {jsframe_index}-th frame of a JavaScript kind (interpreted
// function or JavaScript builtin continuation), or nullptr if there are
// fewer such frames.
TranslatedFrame* TranslatedState::GetFrameFromJSFrameIndex(int jsframe_index) {
  for (size_t i = 0; i < frames_.size(); i++) {
    if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
        frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
        frames_[i].kind() ==
            TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
      if (jsframe_index > 0) {
        jsframe_index--;
      } else {
        return &(frames_[i]);
      }
    }
  }
  return nullptr;
}

// Like GetFrameFromJSFrameIndex, but also computes the argument count
// (including the receiver) for the frame, preferring a preceding arguments
// adaptor frame when present.
TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
    int jsframe_index, int* args_count) {
  for (size_t i = 0; i < frames_.size(); i++) {
    if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
        frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
        frames_[i].kind() ==
            TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
      if (jsframe_index > 0) {
        jsframe_index--;
      } else {
        // We have the JS function frame, now check if it has arguments
        // adaptor.
        if (i > 0 &&
            frames_[i - 1].kind() == TranslatedFrame::kArgumentsAdaptor) {
          *args_count = frames_[i - 1].height();
          return &(frames_[i - 1]);
        }

        // JavaScriptBuiltinContinuation frames that are not preceded by
        // an arguments adaptor frame are currently only used by C++ API calls
        // from TurboFan. Calls to C++ API functions from TurboFan need
        // a special marker frame state, otherwise the API call wouldn't
        // be shown in a stack trace.
        if (frames_[i].kind() ==
                TranslatedFrame::kJavaScriptBuiltinContinuation &&
            frames_[i].shared_info()->internal_formal_parameter_count() ==
                SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
          DCHECK(frames_[i].shared_info()->IsApiFunction());

          // The argument count for this special case is always the second
          // to last value in the TranslatedFrame. It should also always be
          // {1}, as the GenericLazyDeoptContinuation builtin only has one
          // argument (the receiver).
          const int height = frames_[i].height();
          Object argc_object = frames_[i].ValueAt(height - 1)->GetRawValue();
          CHECK(argc_object.IsSmi());
          *args_count = Smi::ToInt(argc_object);

          DCHECK_EQ(*args_count, 1);
        } else {
          *args_count =
              frames_[i].shared_info()->internal_formal_parameter_count() + 1;
        }
        return &(frames_[i]);
      }
    }
  }
  return nullptr;
}

// Publishes the values materialized for this frame into the isolate's
// MaterializedObjectStore (keyed by the stack frame pointer) and, when a new
// store entry was created with changed values, forces deoptimization of the
// function so the stored values are picked up.
void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
  MaterializedObjectStore* materialized_store =
      isolate_->materialized_object_store();
  Handle<FixedArray> previously_materialized_objects =
      materialized_store->Get(stack_frame_pointer_);

  Handle<Object> marker = isolate_->factory()->arguments_marker();

  int length = static_cast<int>(object_positions_.size());
  bool new_store = false;
  if (previously_materialized_objects.is_null()) {
    previously_materialized_objects =
        isolate_->factory()->NewFixedArray(length, AllocationType::kOld);
    for (int i = 0; i < length; i++) {
      previously_materialized_objects->set(i, *marker);
    }
    new_store = true;
  }

  CHECK_EQ(length, previously_materialized_objects->length());

  bool value_changed = false;
  for (int i = 0; i < length; i++) {
    TranslatedState::ObjectPosition pos = object_positions_[i];
    TranslatedValue* value_info =
        &(frames_[pos.frame_index_].values_[pos.value_index_]);

    CHECK(value_info->IsMaterializedObject());

    // Skip duplicate objects (i.e., those that point to some
    // other object id).
    if (value_info->object_index() != i) continue;

    Handle<Object> value(value_info->GetRawValue(), isolate_);

    if (!value.is_identical_to(marker)) {
      if (previously_materialized_objects->get(i) == *marker) {
        previously_materialized_objects->set(i, *value);
        value_changed = true;
      } else {
        CHECK(previously_materialized_objects->get(i) == *value);
      }
    }
  }
  if (new_store && value_changed) {
    materialized_store->Set(stack_frame_pointer_,
                            previously_materialized_objects);
    CHECK_EQ(frames_[0].kind(), TranslatedFrame::kInterpretedFunction);
    CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
    Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
  }
}

// Injects objects that were already materialized for this frame (stored in
// the MaterializedObjectStore) back into the corresponding translated value
// slots, so they are reused instead of materialized a second time.
void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
  MaterializedObjectStore* materialized_store =
      isolate_->materialized_object_store();
  Handle<FixedArray> previously_materialized_objects =
      materialized_store->Get(stack_frame_pointer_);

  // If we have no previously materialized objects, there is nothing to do.
  if (previously_materialized_objects.is_null()) return;

  Handle<Object> marker = isolate_->factory()->arguments_marker();

  int length = static_cast<int>(object_positions_.size());
  CHECK_EQ(length, previously_materialized_objects->length());

  for (int i = 0; i < length; i++) {
    // For each previously materialized object, inject its value into the
    // translated values.
    if (previously_materialized_objects->get(i) != *marker) {
      TranslatedState::ObjectPosition pos = object_positions_[i];
      TranslatedValue* value_info =
          &(frames_[pos.frame_index_].values_[pos.value_index_]);
      CHECK(value_info->IsMaterializedObject());

      if (value_info->kind() == TranslatedValue::kCapturedObject) {
        value_info->set_initialized_storage(
            Handle<Object>(previously_materialized_objects->get(i), isolate_));
      }
    }
  }
}

// Heap-verifies every fully materialized captured object. A no-op unless
// the build enables VERIFY_HEAP.
void TranslatedState::VerifyMaterializedObjects() {
#if VERIFY_HEAP
  int length = static_cast<int>(object_positions_.size());
  for (int i = 0; i < length; i++) {
    TranslatedValue* slot = GetValueByObjectIndex(i);
    if (slot->kind() == TranslatedValue::kCapturedObject) {
      CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
      if (slot->materialization_state() == TranslatedValue::kFinished) {
        slot->GetStorage()->ObjectVerify(isolate());
      } else {
        CHECK_EQ(slot->materialization_state(),
                 TranslatedValue::kUninitialized);
      }
    }
  }
#endif
}

// If an UPDATE_FEEDBACK translation was read, disables speculation on the
// recorded feedback slot and returns true; otherwise returns false.
bool TranslatedState::DoUpdateFeedback() {
  if (!feedback_vector_handle_.is_null()) {
    CHECK(!feedback_slot_.IsInvalid());
    isolate()->CountUsage(v8::Isolate::kDeoptimizerDisableSpeculation);
    FeedbackNexus nexus(feedback_vector_handle_, feedback_slot_);
    nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
    return true;
  }
  return false;
}

// Reads an UPDATE_FEEDBACK entry from the translation stream, recording the
// feedback vector (a literal) and slot for DoUpdateFeedback.
void TranslatedState::ReadUpdateFeedback(TranslationIterator* iterator,
                                         FixedArray literal_array,
                                         FILE* trace_file) {
  CHECK_EQ(Translation::UPDATE_FEEDBACK, iterator->Next());
  feedback_vector_ = FeedbackVector::cast(literal_array.get(iterator->Next()));
  feedback_slot_ = FeedbackSlot(iterator->Next());
  if (trace_file != nullptr) {
    PrintF(trace_file, "  reading FeedbackVector (slot %d)\n",
           feedback_slot_.ToInt());
  }
}

}  // namespace internal
}  // namespace v8

// Undefine the heap manipulation macros.
#include "src/objects/object-macros-undef.h"

// ---------------------------------------------------------------------------
// deps/v8/src/deoptimizer/deoptimizer.h (new file added by this commit)
// ---------------------------------------------------------------------------

// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_DEOPTIMIZER_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_DEOPTIMIZER_H_

#include <stack>
#include <vector>

#include "src/base/macros.h"
#include "src/codegen/label.h"
#include "src/codegen/register-arch.h"
#include "src/codegen/source-position.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/diagnostics/code-tracer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/shared-function-info.h"
#include "src/utils/allocation.h"
#include "src/utils/boxed-float.h"
#include "src/zone/zone-chunk-list.h"

namespace v8 {
namespace internal {

class FrameDescription;
class TranslationIterator;
class DeoptimizedFrameInfo;
class TranslatedState;
class RegisterValues;
class MacroAssembler;

// One value (register, stack slot, literal, or escape-analyzed object)
// decoded from a deoptimization translation. Owned by a TranslatedFrame.
class TranslatedValue {
 public:
  // Allocation-less getter of the value.
  // Returns ReadOnlyRoots::arguments_marker() if allocation would be necessary
  // to get the value.
  Object GetRawValue() const;

  // Getter for the value, takes care of materializing the subgraph
  // reachable from this value.
  Handle<Object> GetValue();

  bool IsMaterializedObject() const;
  bool IsMaterializableByDebugger() const;

 private:
  friend class TranslatedState;
  friend class TranslatedFrame;

  enum Kind : uint8_t {
    kInvalid,
    kTagged,
    kInt32,
    kInt64,
    kUInt32,
    kBoolBit,
    kFloat,
    kDouble,
    kCapturedObject,   // Object captured by the escape analysis.
                       // The number of nested objects can be obtained
                       // with the DeferredObjectLength() method
                       // (the values of the nested objects follow
                       // this value in the depth-first order.)
    kDuplicatedObject  // Duplicated object of a deferred object.
  };

  enum MaterializationState : uint8_t {
    kUninitialized,
    kAllocated,  // Storage for the object has been allocated (or
                 // enqueued for allocation).
    kFinished,   // The object has been initialized (or enqueued for
                 // initialization).
  };

  TranslatedValue(TranslatedState* container, Kind kind)
      : kind_(kind), container_(container) {}
  Kind kind() const { return kind_; }
  MaterializationState materialization_state() const {
    return materialization_state_;
  }
  void Handlify();
  int GetChildrenCount() const;

  // Factory methods for the individual value kinds.
  static TranslatedValue NewDeferredObject(TranslatedState* container,
                                           int length, int object_index);
  static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
  static TranslatedValue NewFloat(TranslatedState* container, Float32 value);
  static TranslatedValue NewDouble(TranslatedState* container, Float64 value);
  static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
  static TranslatedValue NewInt64(TranslatedState* container, int64_t value);
  static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
  static TranslatedValue NewBool(TranslatedState* container, uint32_t value);
  static TranslatedValue NewTagged(TranslatedState* container, Object literal);
  static TranslatedValue NewInvalid(TranslatedState* container);

  Isolate* isolate() const;
  void MaterializeSimple();

  void set_storage(Handle<HeapObject> storage) { storage_ = storage; }
  void set_initialized_storage(Handle<Object> storage);
  void mark_finished() { materialization_state_ = kFinished; }
  void mark_allocated() { materialization_state_ = kAllocated; }

  Handle<Object> GetStorage() {
    DCHECK_NE(kUninitialized, materialization_state());
    return storage_;
  }

  Kind kind_;
  MaterializationState materialization_state_ = kUninitialized;
  TranslatedState* container_;  // This is only needed for materialization of
                                // objects and constructing handles (to get
                                // to the isolate).

  Handle<Object> storage_;  // Contains the materialized value or the
                            // byte-array that will be later morphed into
                            // the materialized object.

  struct MaterializedObjectInfo {
    int id_;
    int length_;  // Applies only to kCapturedObject kinds.
  };

  union {
    // kind kTagged. After handlification it is always nullptr.
    Object raw_literal_;
    // kind is kUInt32 or kBoolBit.
    uint32_t uint32_value_;
    // kind is kInt32.
    int32_t int32_value_;
    // kind is kInt64.
    int64_t int64_value_;
    // kind is kFloat
    Float32 float_value_;
    // kind is kDouble
    Float64 double_value_;
    // kind is kDuplicatedObject or kCapturedObject.
    MaterializedObjectInfo materialization_info_;
  };

  // Checked accessors for the union members.
  Object raw_literal() const;
  int32_t int32_value() const;
  int64_t int64_value() const;
  uint32_t uint32_value() const;
  Float32 float_value() const;
  Float64 double_value() const;
  int object_length() const;
  int object_index() const;
};

// One logical frame reconstructed from a deoptimization translation,
// holding its kind, source info and the sequence of TranslatedValues.
class TranslatedFrame {
 public:
  enum Kind {
    kInterpretedFunction,
    kArgumentsAdaptor,
    kConstructStub,
    kBuiltinContinuation,
    kJavaScriptBuiltinContinuation,
    kJavaScriptBuiltinContinuationWithCatch,
    kInvalid
  };

  int GetValueCount();

  Kind kind() const { return kind_; }
  BailoutId node_id() const { return node_id_; }
  Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
  int height() const { return height_; }
  int return_value_offset() const { return return_value_offset_; }
  int return_value_count() const { return return_value_count_; }

  SharedFunctionInfo raw_shared_info() const {
    CHECK(!raw_shared_info_.is_null());
    return raw_shared_info_;
  }

  // Iterator over the frame's values that also tracks the flat input index.
  class iterator {
   public:
    iterator& operator++() {
      ++input_index_;
      AdvanceIterator(&position_);
      return *this;
    }

    iterator operator++(int) {
      iterator original(position_, input_index_);
      ++input_index_;
      AdvanceIterator(&position_);
      return original;
    }

    bool operator==(const iterator& other) const {
      // Ignore {input_index_} for equality.
      return position_ == other.position_;
    }
    bool operator!=(const iterator& other) const { return !(*this == other); }

    TranslatedValue& operator*() { return (*position_); }
    TranslatedValue* operator->() { return &(*position_); }
    const TranslatedValue& operator*() const { return (*position_); }
    const TranslatedValue* operator->() const { return &(*position_); }

    int input_index() const { return input_index_; }

   private:
    friend TranslatedFrame;

    explicit iterator(std::deque<TranslatedValue>::iterator position,
                      int input_index = 0)
        : position_(position), input_index_(input_index) {}

    std::deque<TranslatedValue>::iterator position_;
    int input_index_;
  };

  using reference = TranslatedValue&;
  using const_reference = TranslatedValue const&;

  iterator begin() { return iterator(values_.begin()); }
  iterator end() { return iterator(values_.end()); }

  reference front() { return values_.front(); }
  const_reference front() const { return values_.front(); }

 private:
  friend class TranslatedState;

  // Constructor static methods.
  static TranslatedFrame InterpretedFrame(BailoutId bytecode_offset,
                                          SharedFunctionInfo shared_info,
                                          int height, int return_value_offset,
                                          int return_value_count);
  static TranslatedFrame AccessorFrame(Kind kind,
                                       SharedFunctionInfo shared_info);
  static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo shared_info,
                                               int height);
  static TranslatedFrame ConstructStubFrame(BailoutId bailout_id,
                                            SharedFunctionInfo shared_info,
                                            int height);
  static TranslatedFrame BuiltinContinuationFrame(
      BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
  static TranslatedFrame JavaScriptBuiltinContinuationFrame(
      BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
  static TranslatedFrame JavaScriptBuiltinContinuationWithCatchFrame(
      BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
  static TranslatedFrame InvalidFrame() {
    return TranslatedFrame(kInvalid, SharedFunctionInfo());
  }

  static void AdvanceIterator(std::deque<TranslatedValue>::iterator* iter);

  TranslatedFrame(Kind kind,
                  SharedFunctionInfo shared_info = SharedFunctionInfo(),
                  int height = 0, int return_value_offset = 0,
                  int return_value_count = 0)
      : kind_(kind),
        node_id_(BailoutId::None()),
        raw_shared_info_(shared_info),
        height_(height),
        return_value_offset_(return_value_offset),
        return_value_count_(return_value_count) {}

  void Add(const TranslatedValue& value) { values_.push_back(value); }
  TranslatedValue* ValueAt(int index) { return &(values_[index]); }
  void Handlify();

  Kind kind_;
  BailoutId node_id_;
  SharedFunctionInfo raw_shared_info_;
  Handle<SharedFunctionInfo> shared_info_;
  int height_;
  int return_value_offset_;
  int return_value_count_;

  using ValuesContainer = std::deque<TranslatedValue>;

  ValuesContainer values_;
};

// Auxiliary class for translating deoptimization values.
// Typical usage sequence:
//
// 1. Construct the instance. This will involve reading out the translations
//    and resolving them to values using the supplied frame pointer and
//    machine state (registers). This phase is guaranteed not to allocate
//    and not to use any HandleScope. Any object pointers will be stored raw.
//
// 2. Handlify pointers. This will convert all the raw pointers to handles.
//
// 3. Reading out the frame values.
//
// Note: After the instance is constructed, it is possible to iterate over
// the values eagerly.

class TranslatedState {
 public:
  TranslatedState() = default;
  explicit TranslatedState(const JavaScriptFrame* frame);

  void Prepare(Address stack_frame_pointer);

  // Store newly materialized values into the isolate.
  void StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame);

  using iterator = std::vector<TranslatedFrame>::iterator;
  iterator begin() { return frames_.begin(); }
  iterator end() { return frames_.end(); }

  using const_iterator = std::vector<TranslatedFrame>::const_iterator;
  const_iterator begin() const { return frames_.begin(); }
  const_iterator end() const { return frames_.end(); }

  std::vector<TranslatedFrame>& frames() { return frames_; }

  TranslatedFrame* GetFrameFromJSFrameIndex(int jsframe_index);
  TranslatedFrame* GetArgumentsInfoFromJSFrameIndex(int jsframe_index,
                                                    int* arguments_count);

  Isolate* isolate() { return isolate_; }

  void Init(Isolate* isolate, Address input_frame_pointer,
            TranslationIterator* iterator, FixedArray literal_array,
            RegisterValues* registers, FILE* trace_file, int parameter_count);

  void VerifyMaterializedObjects();
  bool DoUpdateFeedback();

 private:
  friend TranslatedValue;

  TranslatedFrame CreateNextTranslatedFrame(TranslationIterator* iterator,
                                            FixedArray literal_array,
                                            Address fp, FILE* trace_file);
  int CreateNextTranslatedValue(int frame_index, TranslationIterator* iterator,
                                FixedArray literal_array, Address fp,
                                RegisterValues* registers, FILE* trace_file);
  Address ComputeArgumentsPosition(Address input_frame_pointer,
                                   CreateArgumentsType type, int* length);
  void CreateArgumentsElementsTranslatedValues(int frame_index,
                                               Address input_frame_pointer,
                                               CreateArgumentsType type,
                                               FILE* trace_file);

  void UpdateFromPreviouslyMaterializedObjects();
  void MaterializeFixedDoubleArray(TranslatedFrame* frame, int* value_index,
                                   TranslatedValue* slot, Handle<Map> map);
  void MaterializeMutableHeapNumber(TranslatedFrame* frame, int* value_index,
                                    TranslatedValue* slot);

  void EnsureObjectAllocatedAt(TranslatedValue* slot);

  void SkipSlots(int slots_to_skip, TranslatedFrame* frame, int* value_index);

  Handle<ByteArray> AllocateStorageFor(TranslatedValue* slot);
  void EnsureJSObjectAllocated(TranslatedValue* slot, Handle<Map> map);
  void EnsurePropertiesAllocatedAndMarked(TranslatedValue* properties_slot,
                                          Handle<Map> map);
  void EnsureChildrenAllocated(int count, TranslatedFrame* frame,
                               int* value_index, std::stack<int>* worklist);
  void EnsureCapturedObjectAllocatedAt(int object_index,
                                       std::stack<int>* worklist);
  Handle<Object> InitializeObjectAt(TranslatedValue* slot);
  void InitializeCapturedObjectAt(int object_index, std::stack<int>* worklist,
                                  const DisallowHeapAllocation& no_allocation);
  void InitializeJSObjectAt(TranslatedFrame* frame, int* value_index,
                            TranslatedValue* slot, Handle<Map> map,
                            const DisallowHeapAllocation& no_allocation);
  void InitializeObjectWithTaggedFieldsAt(
      TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
      Handle<Map> map, const DisallowHeapAllocation& no_allocation);

  void ReadUpdateFeedback(TranslationIterator* iterator,
                          FixedArray literal_array, FILE* trace_file);

  TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
  TranslatedValue* GetValueByObjectIndex(int object_index);
  Handle<Object> GetValueAndAdvance(TranslatedFrame* frame, int* value_index);

  // Raw readers for values spilled to the stack, relative to frame pointer
  // {fp}.
  static uint32_t GetUInt32Slot(Address fp, int slot_index);
  static uint64_t GetUInt64Slot(Address fp, int slot_index);
  static Float32 GetFloatSlot(Address fp, int slot_index);
  static Float64 GetDoubleSlot(Address fp, int slot_index);

  std::vector<TranslatedFrame> frames_;
  Isolate* isolate_ = nullptr;
  Address stack_frame_pointer_ = kNullAddress;
  int formal_parameter_count_;

  // Position (frame and value index) of each escape-analyzed object.
  struct ObjectPosition {
    int frame_index_;
    int value_index_;
  };
  std::deque<ObjectPosition> object_positions_;
  Handle<FeedbackVector> feedback_vector_handle_;
  FeedbackVector feedback_vector_;
  FeedbackSlot feedback_slot_;
};

class OptimizedFunctionVisitor {
 public:
  virtual ~OptimizedFunctionVisitor() = default;
  virtual void VisitFunction(JSFunction function) = 0;
};

class Deoptimizer : public Malloced {
 public:
  struct DeoptInfo {
    DeoptInfo(SourcePosition position, DeoptimizeReason deopt_reason,
              int deopt_id)
        : position(position), deopt_reason(deopt_reason), deopt_id(deopt_id) {}

    SourcePosition position;
    DeoptimizeReason deopt_reason;
    int deopt_id;

    static const int kNoDeoptId = -1;
  };

  static DeoptInfo GetDeoptInfo(Code code, Address from);

  static int ComputeSourcePositionFromBytecodeArray(SharedFunctionInfo shared,
                                                    BailoutId node_id);

  static const char* MessageFor(DeoptimizeKind kind);

  int output_count() const { return output_count_; }

  Handle<JSFunction> function() const;
  Handle<Code> compiled_code() const;
  DeoptimizeKind deopt_kind() const { return deopt_kind_; }

  // Number of created JS frames. Not all created frames are necessarily JS.
  int jsframe_count() const { return jsframe_count_; }

  static Deoptimizer* New(Address raw_function, DeoptimizeKind kind,
                          unsigned bailout_id, Address from, int fp_to_sp_delta,
                          Isolate* isolate);
  static Deoptimizer* Grab(Isolate* isolate);

  // The returned object with information on the optimized frame needs to be
  // freed before another one can be generated.
  static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
                                                        int jsframe_index,
                                                        Isolate* isolate);

  // Deoptimize the function now. Its current optimized code will never be run
  // again and any activations of the optimized code will get deoptimized when
  // execution returns. If {code} is specified then the given code is targeted
  // instead of the function code (e.g. OSR code not installed on function).
  static void DeoptimizeFunction(JSFunction function, Code code = Code());

  // Deoptimize all code in the given isolate.
  V8_EXPORT_PRIVATE static void DeoptimizeAll(Isolate* isolate);

  // Deoptimizes all optimized code that has been previously marked
  // (via code->set_marked_for_deoptimization) and unlinks all functions that
  // refer to that code.
  static void DeoptimizeMarkedCode(Isolate* isolate);

  ~Deoptimizer();

  void MaterializeHeapObjects();

  static void ComputeOutputFrames(Deoptimizer* deoptimizer);

  static Address GetDeoptimizationEntry(Isolate* isolate, DeoptimizeKind kind);

  // Returns true if {addr} is a deoptimization entry and stores its type in
  // {type}. Returns false if {addr} is not a deoptimization entry.
  static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
                                    DeoptimizeKind* type);

  // Code generation support. These offsets are consumed by generated code
  // (see the per-architecture GenerateDeoptimizationEntries).
  static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
  static int output_count_offset() {
    return OFFSET_OF(Deoptimizer, output_count_);
  }
  static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }

  static int caller_frame_top_offset() {
    return OFFSET_OF(Deoptimizer, caller_frame_top_);
  }

  V8_EXPORT_PRIVATE static int GetDeoptimizedCodeCount(Isolate* isolate);

  static const int kNotDeoptimizationEntry = -1;

  static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
                                               DeoptimizeKind kind);
  static void EnsureCodeForDeoptimizationEntries(Isolate* isolate);

  Isolate* isolate() const { return isolate_; }

  static const int kMaxNumberOfEntries = 16384;

 private:
  friend class FrameWriter;
  void QueueValueForMaterialization(Address output_address, Object obj,
                                    const TranslatedFrame::iterator& iterator);

  Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
              unsigned bailout_id, Address from, int fp_to_sp_delta);
  Code FindOptimizedCode();
  void PrintFunctionName();
  void DeleteFrameDescriptions();

  static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
                                    DeoptimizeKind type);

  void DoComputeOutputFrames();
  void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
                                 int frame_index, bool goto_catch_handler);
  void DoComputeArgumentsAdaptorFrame(TranslatedFrame* translated_frame,
                                      int frame_index);
  void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
                                   int frame_index);

  enum class BuiltinContinuationMode {
    STUB,
    JAVASCRIPT,
    JAVASCRIPT_WITH_CATCH,
    JAVASCRIPT_HANDLE_EXCEPTION
  };
  static bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode);
  static bool BuiltinContinuationModeIsJavaScript(BuiltinContinuationMode mode);
  static StackFrame::Type BuiltinContinuationModeToFrameType(
      BuiltinContinuationMode mode);
  static Builtins::Name TrampolineForBuiltinContinuation(
      BuiltinContinuationMode mode, bool must_handle_result);

  void DoComputeBuiltinContinuation(TranslatedFrame* translated_frame,
                                    int frame_index,
                                    BuiltinContinuationMode mode);

  unsigned ComputeInputFrameAboveFpFixedSize() const;
  unsigned ComputeInputFrameSize() const;
  static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo shared);

  static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo shared);
  static unsigned ComputeOutgoingArgumentSize(Code code, unsigned bailout_id);

  static void GenerateDeoptimizationEntries(MacroAssembler* masm,
                                            Isolate* isolate,
                                            DeoptimizeKind kind);

  // Marks all the code in the given context for deoptimization.
  static void MarkAllCodeForContext(Context native_context);

  // Deoptimizes all code marked in the given context.
  static void DeoptimizeMarkedCodeForContext(Context native_context);

  // Some architectures need to push padding together with the TOS register
  // in order to maintain stack alignment.
  static bool PadTopOfStackRegister();

  // Searches the list of known deoptimizing code for a Code object
  // containing the given address (which is supposedly faster than
  // searching all code objects).
  Code FindDeoptimizingCode(Address addr);

  Isolate* isolate_;
  JSFunction function_;
  Code compiled_code_;
  unsigned bailout_id_;
  DeoptimizeKind deopt_kind_;
  Address from_;
  int fp_to_sp_delta_;
  bool deoptimizing_throw_;
  int catch_handler_data_;
  int catch_handler_pc_offset_;

  // Input frame description.
  FrameDescription* input_;
  // Number of output frames.
  int output_count_;
  // Number of output js frames.
  int jsframe_count_;
  // Array of output frame descriptions.
  FrameDescription** output_;

  // Caller frame details computed from input frame.
  intptr_t caller_frame_top_;
  intptr_t caller_fp_;
  intptr_t caller_pc_;
  intptr_t caller_constant_pool_;
  intptr_t input_frame_context_;

  // Key for lookup of previously materialized objects.
  intptr_t stack_fp_;

  TranslatedState translated_state_;
  struct ValueToMaterialize {
    Address output_slot_address_;
    TranslatedFrame::iterator value_;
  };
  std::vector<ValueToMaterialize> values_to_materialize_;

#ifdef DEBUG
  DisallowHeapAllocation* disallow_heap_allocation_;
#endif  // DEBUG

  CodeTracer::Scope* trace_scope_;

  static const int table_entry_size_;

  friend class FrameDescription;
  friend class DeoptimizedFrameInfo;
};

// Register state captured at deoptimization time. Generated code writes
// directly into the arrays below (see the static_asserts on element sizes).
class RegisterValues {
 public:
  intptr_t GetRegister(unsigned n) const {
#if DEBUG
    // This convoluted DCHECK is needed to work around a gcc problem that
    // improperly detects an array bounds overflow in optimized debug builds
    // when using a plain DCHECK.
    if (n >= arraysize(registers_)) {
      DCHECK(false);
      return 0;
    }
#endif
    return registers_[n];
  }

  Float32 GetFloatRegister(unsigned n) const {
    DCHECK(n < arraysize(float_registers_));
    return float_registers_[n];
  }

  Float64 GetDoubleRegister(unsigned n) const {
    DCHECK(n < arraysize(double_registers_));
    return double_registers_[n];
  }

  void SetRegister(unsigned n, intptr_t value) {
    DCHECK(n < arraysize(registers_));
    registers_[n] = value;
  }

  void SetFloatRegister(unsigned n, Float32 value) {
    DCHECK(n < arraysize(float_registers_));
    float_registers_[n] = value;
  }

  void SetDoubleRegister(unsigned n, Float64 value) {
    DCHECK(n < arraysize(double_registers_));
    double_registers_[n] = value;
  }

  // Generated code is writing directly into the below arrays, make sure their
  // element sizes fit what the machine instructions expect.
  static_assert(sizeof(Float32) == kFloatSize, "size mismatch");
  static_assert(sizeof(Float64) == kDoubleSize, "size mismatch");

  intptr_t registers_[Register::kNumRegisters];
  Float32 float_registers_[FloatRegister::kNumRegisters];
  Float64 double_registers_[DoubleRegister::kNumRegisters];
};

// Description of one (input or output) frame: its slots, register values,
// pc/fp/context, etc. Allocated over-sized so that frame_content_ extends
// to hold the whole frame (see operator new below).
class FrameDescription {
 public:
  explicit FrameDescription(uint32_t frame_size, int parameter_count = 0);

  void* operator new(size_t size, uint32_t frame_size) {
    // Subtracts kSystemPointerSize, as the member frame_content_ already
    // supplies the first element of the area to store the frame.
    return malloc(size + frame_size - kSystemPointerSize);
  }

  void operator delete(void* pointer, uint32_t frame_size) { free(pointer); }

  void operator delete(void* description) { free(description); }

  uint32_t GetFrameSize() const {
    USE(frame_content_);
    DCHECK(static_cast<uint32_t>(frame_size_) == frame_size_);
    return static_cast<uint32_t>(frame_size_);
  }

  intptr_t GetFrameSlot(unsigned offset) {
    return *GetFrameSlotPointer(offset);
  }

  unsigned GetLastArgumentSlotOffset() {
    int parameter_slots = parameter_count();
    if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
    return GetFrameSize() - parameter_slots * kSystemPointerSize;
  }

  Address GetFramePointerAddress() {
    int fp_offset =
        GetLastArgumentSlotOffset() - StandardFrameConstants::kCallerSPOffset;
    return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
  }

  RegisterValues* GetRegisterValues() { return &register_values_; }

  void SetFrameSlot(unsigned offset, intptr_t value) {
    *GetFrameSlotPointer(offset) = value;
  }

  void SetCallerPc(unsigned offset, intptr_t value);

  void SetCallerFp(unsigned offset, intptr_t value);

  void SetCallerConstantPool(unsigned offset, intptr_t value);

  intptr_t GetRegister(unsigned n) const {
    return register_values_.GetRegister(n);
  }

  Float64 GetDoubleRegister(unsigned n) const {
    return register_values_.GetDoubleRegister(n);
  }

  void SetRegister(unsigned n, intptr_t value) {
    register_values_.SetRegister(n, value);
  }

  void SetDoubleRegister(unsigned n, Float64 value) {
    register_values_.SetDoubleRegister(n, value);
  }

  intptr_t GetTop() const { return top_; }
  void SetTop(intptr_t top) { top_ = top; }

  intptr_t GetPc() const { return pc_; }
  void SetPc(intptr_t pc) { pc_ = pc; }

  intptr_t GetFp() const { return fp_; }
  void SetFp(intptr_t fp) { fp_ = fp; }

  intptr_t GetContext() const { return context_; }
  void SetContext(intptr_t context) { context_ = context; }

  intptr_t GetConstantPool() const { return constant_pool_; }
  void SetConstantPool(intptr_t constant_pool) {
    constant_pool_ = constant_pool;
  }

  void SetContinuation(intptr_t pc) { continuation_ = pc; }

  // Argument count, including receiver.
  int parameter_count() { return parameter_count_; }

  static int registers_offset() {
    return OFFSET_OF(FrameDescription, register_values_.registers_);
  }

  static int double_registers_offset() {
    return OFFSET_OF(FrameDescription, register_values_.double_registers_);
  }

  static int float_registers_offset() {
    return OFFSET_OF(FrameDescription, register_values_.float_registers_);
  }

  static int frame_size_offset() {
    return offsetof(FrameDescription, frame_size_);
  }

  static int pc_offset() { return offsetof(FrameDescription, pc_); }

  static int continuation_offset() {
    return offsetof(FrameDescription, continuation_);
  }

  static int frame_content_offset() {
    return offsetof(FrameDescription, frame_content_);
  }

 private:
  static const uint32_t kZapUint32 = 0xbeeddead;

  // Frame_size_ must hold a uint32_t value. It is only a uintptr_t to
  // keep the variable-size array frame_content_ of type intptr_t at
  // the end of the structure aligned.
  uintptr_t frame_size_;  // Number of bytes.
  int parameter_count_;
  RegisterValues register_values_;
  intptr_t top_;
  intptr_t pc_;
  intptr_t fp_;
  intptr_t context_;
  intptr_t constant_pool_;

  // Continuation is the PC where the execution continues after
  // deoptimizing.
  intptr_t continuation_;

  // This must be at the end of the object as the object is allocated larger
  // than its definition indicates to extend this array.
  intptr_t frame_content_[1];

  intptr_t* GetFrameSlotPointer(unsigned offset) {
    DCHECK(offset < frame_size_);
    return reinterpret_cast<intptr_t*>(reinterpret_cast<Address>(this) +
                                       frame_content_offset() + offset);
  }
};

// Per-heap storage for the generated deoptimization entry code, one Code
// object per DeoptimizeKind.
class DeoptimizerData {
 public:
  explicit DeoptimizerData(Heap* heap);
  ~DeoptimizerData();

#ifdef DEBUG
  bool IsDeoptEntryCode(Code code) const {
    for (int i = 0; i < kLastDeoptimizeKind + 1; i++) {
      if (code == deopt_entry_code_[i]) return true;
    }
    return false;
  }
#endif  // DEBUG

 private:
  Heap* heap_;
  static const int kLastDeoptimizeKind =
      static_cast<int>(DeoptimizeKind::kLastDeoptimizeKind);
  Code deopt_entry_code_[kLastDeoptimizeKind + 1];
  Code deopt_entry_code(DeoptimizeKind kind);
  void set_deopt_entry_code(DeoptimizeKind kind, Code code);

  Deoptimizer* current_;

  friend class Deoptimizer;

  DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
};

// Growable byte buffer the Translation writer appends encoded values to;
// convertible to an on-heap ByteArray.
class TranslationBuffer {
 public:
  explicit TranslationBuffer(Zone* zone) : contents_(zone) {}

  int CurrentIndex() const { return static_cast<int>(contents_.size()); }
  void Add(int32_t value);

  Handle<ByteArray> CreateByteArray(Factory* factory);

 private:
  ZoneChunkList<uint8_t> contents_;
};

// Sequential reader for the encoded translation data in a ByteArray.
class TranslationIterator {
 public:
  TranslationIterator(ByteArray buffer, int index);

  int32_t Next();

  bool HasNext() const;

  void Skip(int n) {
    for (int i = 0; i < n; i++) Next();
  }

 private:
  ByteArray buffer_;
  int index_;
};

#define TRANSLATION_OPCODE_LIST(V)                     \
  V(BEGIN)                                             \
  V(INTERPRETED_FRAME)                                 \
  V(BUILTIN_CONTINUATION_FRAME)                        \
  V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME)            \
  V(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME) \
  V(CONSTRUCT_STUB_FRAME)                              \
  V(ARGUMENTS_ADAPTOR_FRAME)                           \
  V(DUPLICATED_OBJECT)                                 \
  V(ARGUMENTS_ELEMENTS)                                \
  V(ARGUMENTS_LENGTH)                                  \
  V(CAPTURED_OBJECT)                                   \
  V(REGISTER)                                          \
  V(INT32_REGISTER)                                    \
  V(INT64_REGISTER)                                    \
  V(UINT32_REGISTER)                                   \
  V(BOOL_REGISTER)                                     \
  V(FLOAT_REGISTER)                                    \
  V(DOUBLE_REGISTER)                                   \
  V(STACK_SLOT)                                        \
  V(INT32_STACK_SLOT)                                  \
  V(INT64_STACK_SLOT)                                  \
  V(UINT32_STACK_SLOT)                                 \
  V(BOOL_STACK_SLOT)                                   \
  V(FLOAT_STACK_SLOT)                                  \
  V(DOUBLE_STACK_SLOT)                                 \
  V(LITERAL)                                           \
  V(UPDATE_FEEDBACK)

// Writer for deoptimization translations; emits opcodes and operands into a
// TranslationBuffer.
class Translation {
 public:
#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
  enum Opcode {
    TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM) LAST = LITERAL
  };
#undef DECLARE_TRANSLATION_OPCODE_ENUM

  Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
              int update_feedback_count, Zone* zone)
      : buffer_(buffer), index_(buffer->CurrentIndex()), zone_(zone) {
    buffer_->Add(BEGIN);
    buffer_->Add(frame_count);
    buffer_->Add(jsframe_count);
    buffer_->Add(update_feedback_count);
  }

  int index() const { return index_; }

  // Commands.
  void BeginInterpretedFrame(BailoutId bytecode_offset, int literal_id,
                             unsigned height, int return_value_offset,
                             int return_value_count);
  void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
  void BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
                               unsigned height);
  void BeginBuiltinContinuationFrame(BailoutId bailout_id, int literal_id,
                                     unsigned height);
  void BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id,
                                               int literal_id, unsigned height);
  void BeginJavaScriptBuiltinContinuationWithCatchFrame(BailoutId bailout_id,
                                                        int literal_id,
                                                        unsigned height);
  void ArgumentsElements(CreateArgumentsType type);
  void ArgumentsLength(CreateArgumentsType type);
  void BeginCapturedObject(int length);
  void AddUpdateFeedback(int vector_literal, int slot);
  void DuplicateObject(int object_index);
  void StoreRegister(Register reg);
  void StoreInt32Register(Register reg);
  void StoreInt64Register(Register reg);
  void StoreUint32Register(Register reg);
  void StoreBoolRegister(Register reg);
  void StoreFloatRegister(FloatRegister reg);
  void StoreDoubleRegister(DoubleRegister reg);
  void StoreStackSlot(int index);
  void StoreInt32StackSlot(int index);
  void StoreInt64StackSlot(int index);
  void StoreUint32StackSlot(int index);
  void StoreBoolStackSlot(int index);
  void StoreFloatStackSlot(int index);
  void StoreDoubleStackSlot(int index);
  void StoreLiteral(int literal_id);
  void StoreJSFrameFunction();

  Zone* zone() const { return zone_; }

  static int NumberOfOperandsFor(Opcode opcode);

#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
  static const char* StringFor(Opcode opcode);
#endif

 private:
  TranslationBuffer* buffer_;
  int index_;
  Zone* zone_;
};

class MaterializedObjectStore {
 public:
  explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {}

  Handle<FixedArray> Get(Address fp);
  void Set(Address fp, Handle<FixedArray> materialized_objects);
bool Remove(Address fp); + + private: + Isolate* isolate() const { return isolate_; } + Handle<FixedArray> GetStackEntries(); + Handle<FixedArray> EnsureStackEntries(int size); + + int StackIdToIndex(Address fp); + + Isolate* isolate_; + std::vector<Address> frame_fps_; +}; + +// Class used to represent an unoptimized frame when the debugger +// needs to inspect a frame that is part of an optimized frame. The +// internally used FrameDescription objects are not GC safe so for use +// by the debugger frame information is copied to an object of this type. +// Represents parameters in unadapted form so their number might mismatch +// formal parameter count. +class DeoptimizedFrameInfo : public Malloced { + public: + DeoptimizedFrameInfo(TranslatedState* state, + TranslatedState::iterator frame_it, Isolate* isolate); + + // Return the number of incoming arguments. + int parameters_count() { return static_cast<int>(parameters_.size()); } + + // Return the height of the expression stack. + int expression_count() { return static_cast<int>(expression_stack_.size()); } + + // Get the frame function. + Handle<JSFunction> GetFunction() { return function_; } + + // Get the frame context. + Handle<Object> GetContext() { return context_; } + + // Get an incoming argument. + Handle<Object> GetParameter(int index) { + DCHECK(0 <= index && index < parameters_count()); + return parameters_[index]; + } + + // Get an expression from the expression stack. + Handle<Object> GetExpression(int index) { + DCHECK(0 <= index && index < expression_count()); + return expression_stack_[index]; + } + + int GetSourcePosition() { return source_position_; } + + private: + // Set an incoming argument. + void SetParameter(int index, Handle<Object> obj) { + DCHECK(0 <= index && index < parameters_count()); + parameters_[index] = obj; + } + + // Set an expression on the expression stack. 
+ void SetExpression(int index, Handle<Object> obj) { + DCHECK(0 <= index && index < expression_count()); + expression_stack_[index] = obj; + } + + Handle<JSFunction> function_; + Handle<Object> context_; + std::vector<Handle<Object> > parameters_; + std::vector<Handle<Object> > expression_stack_; + int source_position_; + + friend class Deoptimizer; +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_DEOPTIMIZER_DEOPTIMIZER_H_ diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc new file mode 100644 index 0000000000..6b01449ba7 --- /dev/null +++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc @@ -0,0 +1,226 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#if V8_TARGET_ARCH_IA32 + +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/execution/frame-constants.h" + +namespace v8 { +namespace internal { + +#define __ masm-> + +void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, + Isolate* isolate, + DeoptimizeKind deopt_kind) { + NoRootArrayScope no_root_array(masm); + + // Save all general purpose registers before messing with them. 
+ const int kNumberOfRegisters = Register::kNumRegisters; + + const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters; + __ AllocateStackSpace(kDoubleRegsSize); + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + XMMRegister xmm_reg = XMMRegister::from_code(code); + int offset = code * kDoubleSize; + __ movsd(Operand(esp, offset), xmm_reg); + } + + STATIC_ASSERT(kFloatSize == kSystemPointerSize); + const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters; + __ AllocateStackSpace(kFloatRegsSize); + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + XMMRegister xmm_reg = XMMRegister::from_code(code); + int offset = code * kFloatSize; + __ movss(Operand(esp, offset), xmm_reg); + } + + __ pushad(); + + ExternalReference c_entry_fp_address = + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate); + __ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp); + + const int kSavedRegistersAreaSize = kNumberOfRegisters * kSystemPointerSize + + kDoubleRegsSize + kFloatRegsSize; + + // The bailout id is passed in ebx by the caller. + + // Get the address of the location in the code object + // and compute the fp-to-sp delta in register edx. + __ mov(ecx, Operand(esp, kSavedRegistersAreaSize)); + __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize)); + + __ sub(edx, ebp); + __ neg(edx); + + // Allocate a new deoptimizer object. + __ PrepareCallCFunction(6, eax); + __ mov(eax, Immediate(0)); + Label context_check; + __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(edi, &context_check); + __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function. 
+ __ mov(Operand(esp, 1 * kSystemPointerSize), + Immediate(static_cast<int>(deopt_kind))); + __ mov(Operand(esp, 2 * kSystemPointerSize), ebx); // Bailout id. + __ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0. + __ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta. + __ mov(Operand(esp, 5 * kSystemPointerSize), + Immediate(ExternalReference::isolate_address(isolate))); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); + } + + // Preserve deoptimizer object in register eax and get the input + // frame descriptor pointer. + __ mov(esi, Operand(eax, Deoptimizer::input_offset())); + + // Fill in the input registers. + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = + (i * kSystemPointerSize) + FrameDescription::registers_offset(); + __ pop(Operand(esi, offset)); + } + + int float_regs_offset = FrameDescription::float_registers_offset(); + // Fill in the float input registers. + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + int dst_offset = i * kFloatSize + float_regs_offset; + __ pop(Operand(esi, dst_offset)); + } + + int double_regs_offset = FrameDescription::double_registers_offset(); + // Fill in the double input registers. + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + int dst_offset = code * kDoubleSize + double_regs_offset; + int src_offset = code * kDoubleSize; + __ movsd(xmm0, Operand(esp, src_offset)); + __ movsd(Operand(esi, dst_offset), xmm0); + } + + // Clear FPU all exceptions. + // TODO(ulan): Find out why the TOP register is not zero here in some cases, + // and check that the generated code never deoptimizes with unbalanced stack. + __ fnclex(); + + // Remove the return address and the double registers. 
+ __ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize)); + + // Compute a pointer to the unwinding limit in register ecx; that is + // the first stack slot not part of the input frame. + __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset())); + __ add(ecx, esp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. + __ lea(edx, Operand(esi, FrameDescription::frame_content_offset())); + Label pop_loop_header; + __ jmp(&pop_loop_header); + Label pop_loop; + __ bind(&pop_loop); + __ pop(Operand(edx, 0)); + __ add(edx, Immediate(sizeof(uint32_t))); + __ bind(&pop_loop_header); + __ cmp(ecx, esp); + __ j(not_equal, &pop_loop); + + // Compute the output frame in the deoptimizer. + __ push(eax); + __ PrepareCallCFunction(1, esi); + __ mov(Operand(esp, 0 * kSystemPointerSize), eax); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ pop(eax); + + __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: eax = current FrameDescription**, edx = one + // past the last FrameDescription**. + __ mov(edx, Operand(eax, Deoptimizer::output_count_offset())); + __ mov(eax, Operand(eax, Deoptimizer::output_offset())); + __ lea(edx, Operand(eax, edx, times_system_pointer_size, 0)); + __ jmp(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: esi = current FrameDescription*, ecx = loop + // index. 
+ __ mov(esi, Operand(eax, 0)); + __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); + __ bind(&inner_push_loop); + __ sub(ecx, Immediate(sizeof(uint32_t))); + __ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset())); + __ bind(&inner_loop_header); + __ test(ecx, ecx); + __ j(not_zero, &inner_push_loop); + __ add(eax, Immediate(kSystemPointerSize)); + __ bind(&outer_loop_header); + __ cmp(eax, edx); + __ j(below, &outer_push_loop); + + // In case of a failed STUB, we have to restore the XMM registers. + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + XMMRegister xmm_reg = XMMRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ movsd(xmm_reg, Operand(esi, src_offset)); + } + + // Push pc and continuation from the last output frame. + __ push(Operand(esi, FrameDescription::pc_offset())); + __ push(Operand(esi, FrameDescription::continuation_offset())); + + // Push the registers from the last output frame. + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = + (i * kSystemPointerSize) + FrameDescription::registers_offset(); + __ push(Operand(esi, offset)); + } + + // Restore the registers from the stack. + __ popad(); + + __ InitializeRootRegister(); + + // Return to the continuation point. + __ ret(0); +} + +bool Deoptimizer::PadTopOfStackRegister() { return false; } + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No embedded constant pool support. 
+ UNREACHABLE(); +} + +#undef __ + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc new file mode 100644 index 0000000000..a56501660b --- /dev/null +++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc @@ -0,0 +1,256 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" + +namespace v8 { +namespace internal { + +#define __ masm-> + +// This code tries to be close to ia32 code so that any changes can be +// easily ported. +void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, + Isolate* isolate, + DeoptimizeKind deopt_kind) { + NoRootArrayScope no_root_array(masm); + + // Unlike on ARM we don't save all the registers, just the useful ones. + // For the rest, there are gaps on the stack, so the offsets remain the same. + const int kNumberOfRegisters = Register::kNumRegisters; + + RegList restored_regs = kJSCallerSaved | kCalleeSaved; + RegList saved_regs = restored_regs | sp.bit() | ra.bit(); + + const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; + const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters; + + // Save all FPU registers before messing with them. 
+ __ Subu(sp, sp, Operand(kDoubleRegsSize)); + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister fpu_reg = DoubleRegister::from_code(code); + int offset = code * kDoubleSize; + __ Sdc1(fpu_reg, MemOperand(sp, offset)); + } + + __ Subu(sp, sp, Operand(kFloatRegsSize)); + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + const FloatRegister fpu_reg = FloatRegister::from_code(code); + int offset = code * kFloatSize; + __ swc1(fpu_reg, MemOperand(sp, offset)); + } + + // Push saved_regs (needed to populate FrameDescription::registers_). + // Leave gaps for other registers. + __ Subu(sp, sp, kNumberOfRegisters * kPointerSize); + for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { + if ((saved_regs & (1 << i)) != 0) { + __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i)); + } + } + + __ li(a2, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate))); + __ sw(fp, MemOperand(a2)); + + const int kSavedRegistersAreaSize = + (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize; + + // Get the bailout id is passed as kRootRegister by the caller. + __ mov(a2, kRootRegister); + + // Get the address of the location in the code object (a3) (return + // address for lazy deoptimization) and compute the fp-to-sp delta in + // register t0. + __ mov(a3, ra); + __ Addu(t0, sp, Operand(kSavedRegistersAreaSize)); + __ Subu(t0, fp, t0); + + // Allocate a new deoptimizer object. + __ PrepareCallCFunction(6, t1); + // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack. 
+ __ mov(a0, zero_reg); + Label context_check; + __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(a1, &context_check); + __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ li(a1, Operand(static_cast<int>(deopt_kind))); + // a2: bailout id already loaded. + // a3: code address or 0 already loaded. + __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta. + __ li(t1, Operand(ExternalReference::isolate_address(isolate))); + __ sw(t1, CFunctionArgumentOperand(6)); // Isolate. + // Call Deoptimizer::New(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); + } + + // Preserve "deoptimizer" object in register v0 and get the input + // frame descriptor pointer to a1 (deoptimizer->input_); + // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below. + __ mov(a0, v0); + __ lw(a1, MemOperand(v0, Deoptimizer::input_offset())); + + // Copy core registers into FrameDescription::registers_[kNumRegisters]. 
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((saved_regs & (1 << i)) != 0) { + __ lw(a2, MemOperand(sp, i * kPointerSize)); + __ sw(a2, MemOperand(a1, offset)); + } else if (FLAG_debug_code) { + __ li(a2, kDebugZapValue); + __ sw(a2, MemOperand(a1, offset)); + } + } + + int double_regs_offset = FrameDescription::double_registers_offset(); + // Copy FPU registers to + // double_registers_[DoubleRegister::kNumAllocatableRegisters] + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + int dst_offset = code * kDoubleSize + double_regs_offset; + int src_offset = + code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize; + __ Ldc1(f0, MemOperand(sp, src_offset)); + __ Sdc1(f0, MemOperand(a1, dst_offset)); + } + + // Copy FPU registers to + // float_registers_[FloatRegister::kNumAllocatableRegisters] + int float_regs_offset = FrameDescription::float_registers_offset(); + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + int dst_offset = code * kFloatSize + float_regs_offset; + int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize; + __ lwc1(f0, MemOperand(sp, src_offset)); + __ swc1(f0, MemOperand(a1, dst_offset)); + } + + // Remove the saved registers from the stack. + __ Addu(sp, sp, Operand(kSavedRegistersAreaSize)); + + // Compute a pointer to the unwinding limit in register a2; that is + // the first stack slot not part of the input frame. + __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset())); + __ Addu(a2, a2, sp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. 
+ __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset())); + Label pop_loop; + Label pop_loop_header; + __ BranchShort(&pop_loop_header); + __ bind(&pop_loop); + __ pop(t0); + __ sw(t0, MemOperand(a3, 0)); + __ addiu(a3, a3, sizeof(uint32_t)); + __ bind(&pop_loop_header); + __ BranchShort(&pop_loop, ne, a2, Operand(sp)); + + // Compute the output frame in the deoptimizer. + __ push(a0); // Preserve deoptimizer object across call. + // a0: deoptimizer object; a1: scratch. + __ PrepareCallCFunction(1, a1); + // Call Deoptimizer::ComputeOutputFrames(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ pop(a0); // Restore deoptimizer object (class Deoptimizer). + + __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: t0 = current "FrameDescription** output_", + // a1 = one past the last FrameDescription**. + __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset())); + __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_. + __ Lsa(a1, t0, a1, kPointerSizeLog2); + __ BranchShort(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
+ __ lw(a2, MemOperand(t0, 0)); // output_[ix] + __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset())); + __ BranchShort(&inner_loop_header); + __ bind(&inner_push_loop); + __ Subu(a3, a3, Operand(sizeof(uint32_t))); + __ Addu(t2, a2, Operand(a3)); + __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset())); + __ push(t3); + __ bind(&inner_loop_header); + __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg)); + + __ Addu(t0, t0, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ BranchShort(&outer_push_loop, lt, t0, Operand(a1)); + + __ lw(a1, MemOperand(a0, Deoptimizer::input_offset())); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister fpu_reg = DoubleRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ Ldc1(fpu_reg, MemOperand(a1, src_offset)); + } + + // Push pc and continuation from the last output frame. + __ lw(t2, MemOperand(a2, FrameDescription::pc_offset())); + __ push(t2); + __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset())); + __ push(t2); + + // Technically restoring 'at' should work unless zero_reg is also restored + // but it's safer to check for this. + DCHECK(!(at.bit() & restored_regs)); + // Restore the registers from the last output frame. + __ mov(at, a2); + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((restored_regs & (1 << i)) != 0) { + __ lw(ToRegister(i), MemOperand(at, offset)); + } + } + + __ pop(at); // Get continuation, leave pc on stack. + __ pop(ra); + __ Jump(at); + __ stop("Unreachable."); +} + +// Maximum size of a table entry generated below. 
+#ifdef _MIPS_ARCH_MIPS32R6 +const int Deoptimizer::table_entry_size_ = 2 * kInstrSize; +#else +const int Deoptimizer::table_entry_size_ = 3 * kInstrSize; +#endif + +bool Deoptimizer::PadTopOfStackRegister() { return false; } + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No embedded constant pool support. + UNREACHABLE(); +} + +#undef __ + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc new file mode 100644 index 0000000000..6869199f1b --- /dev/null +++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc @@ -0,0 +1,257 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" + +namespace v8 { +namespace internal { + +#define __ masm-> + +// This code tries to be close to ia32 code so that any changes can be +// easily ported. +void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, + Isolate* isolate, + DeoptimizeKind deopt_kind) { + NoRootArrayScope no_root_array(masm); + + // Unlike on ARM we don't save all the registers, just the useful ones. + // For the rest, there are gaps on the stack, so the offsets remain the same. 
+ const int kNumberOfRegisters = Register::kNumRegisters; + + RegList restored_regs = kJSCallerSaved | kCalleeSaved; + RegList saved_regs = restored_regs | sp.bit() | ra.bit(); + + const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; + const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters; + + // Save all double FPU registers before messing with them. + __ Dsubu(sp, sp, Operand(kDoubleRegsSize)); + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister fpu_reg = DoubleRegister::from_code(code); + int offset = code * kDoubleSize; + __ Sdc1(fpu_reg, MemOperand(sp, offset)); + } + + // Save all float FPU registers before messing with them. + __ Dsubu(sp, sp, Operand(kFloatRegsSize)); + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + const FloatRegister fpu_reg = FloatRegister::from_code(code); + int offset = code * kFloatSize; + __ Swc1(fpu_reg, MemOperand(sp, offset)); + } + + // Push saved_regs (needed to populate FrameDescription::registers_). + // Leave gaps for other registers. + __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize); + for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { + if ((saved_regs & (1 << i)) != 0) { + __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i)); + } + } + + __ li(a2, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate))); + __ Sd(fp, MemOperand(a2)); + + const int kSavedRegistersAreaSize = + (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize; + + // Get the bailout is passed as kRootRegister by the caller. + __ mov(a2, kRootRegister); + + // Get the address of the location in the code object (a3) (return + // address for lazy deoptimization) and compute the fp-to-sp delta in + // register a4. 
+ __ mov(a3, ra); + __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize)); + + __ Dsubu(a4, fp, a4); + + // Allocate a new deoptimizer object. + __ PrepareCallCFunction(6, a5); + // Pass six arguments, according to n64 ABI. + __ mov(a0, zero_reg); + Label context_check; + __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(a1, &context_check); + __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ li(a1, Operand(static_cast<int>(deopt_kind))); + // a2: bailout id already loaded. + // a3: code address or 0 already loaded. + // a4: already has fp-to-sp delta. + __ li(a5, Operand(ExternalReference::isolate_address(isolate))); + + // Call Deoptimizer::New(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); + } + + // Preserve "deoptimizer" object in register v0 and get the input + // frame descriptor pointer to a1 (deoptimizer->input_); + // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below. + __ mov(a0, v0); + __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset())); + + // Copy core registers into FrameDescription::registers_[kNumRegisters]. 
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((saved_regs & (1 << i)) != 0) { + __ Ld(a2, MemOperand(sp, i * kPointerSize)); + __ Sd(a2, MemOperand(a1, offset)); + } else if (FLAG_debug_code) { + __ li(a2, kDebugZapValue); + __ Sd(a2, MemOperand(a1, offset)); + } + } + + int double_regs_offset = FrameDescription::double_registers_offset(); + // Copy FPU registers to + // double_registers_[DoubleRegister::kNumAllocatableRegisters] + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + int dst_offset = code * kDoubleSize + double_regs_offset; + int src_offset = + code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize; + __ Ldc1(f0, MemOperand(sp, src_offset)); + __ Sdc1(f0, MemOperand(a1, dst_offset)); + } + + int float_regs_offset = FrameDescription::float_registers_offset(); + // Copy FPU registers to + // float_registers_[FloatRegister::kNumAllocatableRegisters] + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + int dst_offset = code * kFloatSize + float_regs_offset; + int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize; + __ Lwc1(f0, MemOperand(sp, src_offset)); + __ Swc1(f0, MemOperand(a1, dst_offset)); + } + + // Remove the saved registers from the stack. + __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize)); + + // Compute a pointer to the unwinding limit in register a2; that is + // the first stack slot not part of the input frame. + __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset())); + __ Daddu(a2, a2, sp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. 
+ __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset())); + Label pop_loop; + Label pop_loop_header; + __ BranchShort(&pop_loop_header); + __ bind(&pop_loop); + __ pop(a4); + __ Sd(a4, MemOperand(a3, 0)); + __ daddiu(a3, a3, sizeof(uint64_t)); + __ bind(&pop_loop_header); + __ BranchShort(&pop_loop, ne, a2, Operand(sp)); + // Compute the output frame in the deoptimizer. + __ push(a0); // Preserve deoptimizer object across call. + // a0: deoptimizer object; a1: scratch. + __ PrepareCallCFunction(1, a1); + // Call Deoptimizer::ComputeOutputFrames(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ pop(a0); // Restore deoptimizer object (class Deoptimizer). + + __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: a4 = current "FrameDescription** output_", + // a1 = one past the last FrameDescription**. + __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset())); + __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_. + __ Dlsa(a1, a4, a1, kPointerSizeLog2); + __ BranchShort(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
+ __ Ld(a2, MemOperand(a4, 0)); // output_[ix] + __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset())); + __ BranchShort(&inner_loop_header); + __ bind(&inner_push_loop); + __ Dsubu(a3, a3, Operand(sizeof(uint64_t))); + __ Daddu(a6, a2, Operand(a3)); + __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset())); + __ push(a7); + __ bind(&inner_loop_header); + __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg)); + + __ Daddu(a4, a4, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ BranchShort(&outer_push_loop, lt, a4, Operand(a1)); + + __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset())); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister fpu_reg = DoubleRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ Ldc1(fpu_reg, MemOperand(a1, src_offset)); + } + + // Push pc and continuation from the last output frame. + __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset())); + __ push(a6); + __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset())); + __ push(a6); + + // Technically restoring 'at' should work unless zero_reg is also restored + // but it's safer to check for this. + DCHECK(!(at.bit() & restored_regs)); + // Restore the registers from the last output frame. + __ mov(at, a2); + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((restored_regs & (1 << i)) != 0) { + __ Ld(ToRegister(i), MemOperand(at, offset)); + } + } + + __ pop(at); // Get continuation, leave pc on stack. + __ pop(ra); + __ Jump(at); + __ stop("Unreachable."); +} + +// Maximum size of a table entry generated below. 
+#ifdef _MIPS_ARCH_MIPS64R6 +const int Deoptimizer::table_entry_size_ = 2 * kInstrSize; +#else +const int Deoptimizer::table_entry_size_ = 3 * kInstrSize; +#endif + +bool Deoptimizer::PadTopOfStackRegister() { return false; } + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No embedded constant pool support. + UNREACHABLE(); +} + +#undef __ + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc new file mode 100644 index 0000000000..268660c2ef --- /dev/null +++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc @@ -0,0 +1,246 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/assembler-inl.h" +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" + +namespace v8 { +namespace internal { + +#define __ masm-> + +// This code tries to be close to ia32 code so that any changes can be +// easily ported. +void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, + Isolate* isolate, + DeoptimizeKind deopt_kind) { + NoRootArrayScope no_root_array(masm); + + // Unlike on ARM we don't save all the registers, just the useful ones. + // For the rest, there are gaps on the stack, so the offsets remain the same. 
+ const int kNumberOfRegisters = Register::kNumRegisters; + + RegList restored_regs = kJSCallerSaved | kCalleeSaved; + RegList saved_regs = restored_regs | sp.bit(); + + const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; + const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters; + + // Save all double registers before messing with them. + __ subi(sp, sp, Operand(kDoubleRegsSize)); + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister dreg = DoubleRegister::from_code(code); + int offset = code * kDoubleSize; + __ stfd(dreg, MemOperand(sp, offset)); + } + // Save all float registers before messing with them. + __ subi(sp, sp, Operand(kFloatRegsSize)); + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + const FloatRegister freg = FloatRegister::from_code(code); + int offset = code * kFloatSize; + __ stfs(freg, MemOperand(sp, offset)); + } + + // Push saved_regs (needed to populate FrameDescription::registers_). + // Leave gaps for other registers. + __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize)); + for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { + if ((saved_regs & (1 << i)) != 0) { + __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i)); + } + } + + __ mov(ip, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate))); + __ StoreP(fp, MemOperand(ip)); + + const int kSavedRegistersAreaSize = + (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize; + + // Get the bailout id is passed as r29 by the caller. + __ mr(r5, r29); + + // Get the address of the location in the code object (r6) (return + // address for lazy deoptimization) and compute the fp-to-sp delta in + // register r7. 
+ __ mflr(r6); + __ addi(r7, sp, Operand(kSavedRegistersAreaSize)); + __ sub(r7, fp, r7); + + // Allocate a new deoptimizer object. + // Pass six arguments in r3 to r8. + __ PrepareCallCFunction(6, r8); + __ li(r3, Operand::Zero()); + Label context_check; + __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(r4, &context_check); + __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ li(r4, Operand(static_cast<int>(deopt_kind))); + // r5: bailout id already loaded. + // r6: code address or 0 already loaded. + // r7: Fp-to-sp delta. + __ mov(r8, Operand(ExternalReference::isolate_address(isolate))); + // Call Deoptimizer::New(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); + } + + // Preserve "deoptimizer" object in register r3 and get the input + // frame descriptor pointer to r4 (deoptimizer->input_); + __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); + + // Copy core registers into FrameDescription::registers_[kNumRegisters]. 
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + __ LoadP(r5, MemOperand(sp, i * kPointerSize)); + __ StoreP(r5, MemOperand(r4, offset)); + } + + int double_regs_offset = FrameDescription::double_registers_offset(); + // Copy double registers to + // double_registers_[DoubleRegister::kNumRegisters] + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + int dst_offset = code * kDoubleSize + double_regs_offset; + int src_offset = + code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize; + __ lfd(d0, MemOperand(sp, src_offset)); + __ stfd(d0, MemOperand(r4, dst_offset)); + } + int float_regs_offset = FrameDescription::float_registers_offset(); + // Copy float registers to + // float_registers_[FloatRegister::kNumRegisters] + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + int dst_offset = code * kFloatSize + float_regs_offset; + int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize; + __ lfs(d0, MemOperand(sp, src_offset)); + __ stfs(d0, MemOperand(r4, dst_offset)); + } + + // Remove the saved registers from the stack. + __ addi(sp, sp, Operand(kSavedRegistersAreaSize)); + + // Compute a pointer to the unwinding limit in register r5; that is + // the first stack slot not part of the input frame. + __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset())); + __ add(r5, r5, sp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. 
+ __ addi(r6, r4, Operand(FrameDescription::frame_content_offset())); + Label pop_loop; + Label pop_loop_header; + __ b(&pop_loop_header); + __ bind(&pop_loop); + __ pop(r7); + __ StoreP(r7, MemOperand(r6, 0)); + __ addi(r6, r6, Operand(kPointerSize)); + __ bind(&pop_loop_header); + __ cmp(r5, sp); + __ bne(&pop_loop); + + // Compute the output frame in the deoptimizer. + __ push(r3); // Preserve deoptimizer object across call. + // r3: deoptimizer object; r4: scratch. + __ PrepareCallCFunction(1, r4); + // Call Deoptimizer::ComputeOutputFrames(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ pop(r3); // Restore deoptimizer object (class Deoptimizer). + + __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: r7 = current "FrameDescription** output_", + // r4 = one past the last FrameDescription**. + __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset())); + __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_. + __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2)); + __ add(r4, r7, r4); + __ b(&outer_loop_header); + + __ bind(&outer_push_loop); + // Inner loop state: r5 = current FrameDescription*, r6 = loop index. + __ LoadP(r5, MemOperand(r7, 0)); // output_[ix] + __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset())); + __ b(&inner_loop_header); + + __ bind(&inner_push_loop); + __ addi(r6, r6, Operand(-sizeof(intptr_t))); + __ add(r9, r5, r6); + __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset())); + __ push(r9); + + __ bind(&inner_loop_header); + __ cmpi(r6, Operand::Zero()); + __ bne(&inner_push_loop); // test for gt? 
+ + __ addi(r7, r7, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ cmp(r7, r4); + __ blt(&outer_push_loop); + + __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister dreg = DoubleRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ lfd(dreg, MemOperand(r4, src_offset)); + } + + // Push pc, and continuation from the last output frame. + __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset())); + __ push(r9); + __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset())); + __ push(r9); + + // Restore the registers from the last output frame. + DCHECK(!(ip.bit() & restored_regs)); + __ mr(ip, r5); + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((restored_regs & (1 << i)) != 0) { + __ LoadP(ToRegister(i), MemOperand(ip, offset)); + } + } + + __ pop(ip); // get continuation, leave pc on stack + __ pop(r0); + __ mtlr(r0); + __ Jump(ip); + __ stop("Unreachable."); +} + +bool Deoptimizer::PadTopOfStackRegister() { return false; } + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + DCHECK(FLAG_enable_embedded_constant_pool); + SetFrameSlot(offset, value); +} + +#undef __ +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc new file mode 100644 index 0000000000..db2330a8e8 --- /dev/null +++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc @@ -0,0 +1,252 @@ +// Copyright 2014 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" + +namespace v8 { +namespace internal { + +#define __ masm-> + +// This code tries to be close to ia32 code so that any changes can be +// easily ported. +void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, + Isolate* isolate, + DeoptimizeKind deopt_kind) { + NoRootArrayScope no_root_array(masm); + + // Save all the registers onto the stack + const int kNumberOfRegisters = Register::kNumRegisters; + + RegList restored_regs = kJSCallerSaved | kCalleeSaved; + + const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; + const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters; + + // Save all double registers before messing with them. + __ lay(sp, MemOperand(sp, -kDoubleRegsSize)); + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister dreg = DoubleRegister::from_code(code); + int offset = code * kDoubleSize; + __ StoreDouble(dreg, MemOperand(sp, offset)); + } + // Save all float registers before messing with them. 
+ __ lay(sp, MemOperand(sp, -kFloatRegsSize)); + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + const FloatRegister dreg = FloatRegister::from_code(code); + int offset = code * kFloatSize; + __ StoreFloat32(dreg, MemOperand(sp, offset)); + } + + // Push all GPRs onto the stack + __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize)); + __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers + + __ mov(r1, Operand(ExternalReference::Create( + IsolateAddressId::kCEntryFPAddress, isolate))); + __ StoreP(fp, MemOperand(r1)); + + const int kSavedRegistersAreaSize = + (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize; + + // The bailout id is passed using r10 + __ LoadRR(r4, r10); + + // Cleanse the Return address for 31-bit + __ CleanseP(r14); + + // Get the address of the location in the code object (r5)(return + // address for lazy deoptimization) and compute the fp-to-sp delta in + // register r6. + __ LoadRR(r5, r14); + + __ la(r6, MemOperand(sp, kSavedRegistersAreaSize)); + __ SubP(r6, fp, r6); + + // Allocate a new deoptimizer object. + // Pass six arguments in r2 to r7. + __ PrepareCallCFunction(6, r7); + __ LoadImmP(r2, Operand::Zero()); + Label context_check; + __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(r3, &context_check); + __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind))); + // r4: bailout id already loaded. + // r5: code address or 0 already loaded. + // r6: Fp-to-sp delta. + // Parm6: isolate is passed on the stack. + __ mov(r7, Operand(ExternalReference::isolate_address(isolate))); + __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); + + // Call Deoptimizer::New(). 
+ { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); + } + + // Preserve "deoptimizer" object in register r2 and get the input + // frame descriptor pointer to r3 (deoptimizer->input_); + __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset())); + + // Copy core registers into FrameDescription::registers_[kNumRegisters]. + // DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); + // __ mvc(MemOperand(r3, FrameDescription::registers_offset()), + // MemOperand(sp), kNumberOfRegisters * kPointerSize); + // Copy core registers into FrameDescription::registers_[kNumRegisters]. + // TODO(john.yan): optimize the following code by using mvc instruction + DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + __ LoadP(r4, MemOperand(sp, i * kPointerSize)); + __ StoreP(r4, MemOperand(r3, offset)); + } + + int double_regs_offset = FrameDescription::double_registers_offset(); + // Copy double registers to + // double_registers_[DoubleRegister::kNumRegisters] + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + int dst_offset = code * kDoubleSize + double_regs_offset; + int src_offset = + code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize; + // TODO(joransiu): MVC opportunity + __ LoadDouble(d0, MemOperand(sp, src_offset)); + __ StoreDouble(d0, MemOperand(r3, dst_offset)); + } + + int float_regs_offset = FrameDescription::float_registers_offset(); + // Copy float registers to + // float_registers_[FloatRegister::kNumRegisters] + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + int dst_offset = code * kFloatSize + float_regs_offset; + int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize; + // 
TODO(joransiu): MVC opportunity + __ LoadFloat32(d0, MemOperand(sp, src_offset)); + __ StoreFloat32(d0, MemOperand(r3, dst_offset)); + } + + // Remove the saved registers from the stack. + __ la(sp, MemOperand(sp, kSavedRegistersAreaSize)); + + // Compute a pointer to the unwinding limit in register r4; that is + // the first stack slot not part of the input frame. + __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset())); + __ AddP(r4, sp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. + __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset())); + Label pop_loop; + Label pop_loop_header; + __ b(&pop_loop_header, Label::kNear); + __ bind(&pop_loop); + __ pop(r6); + __ StoreP(r6, MemOperand(r5, 0)); + __ la(r5, MemOperand(r5, kPointerSize)); + __ bind(&pop_loop_header); + __ CmpP(r4, sp); + __ bne(&pop_loop); + + // Compute the output frame in the deoptimizer. + __ push(r2); // Preserve deoptimizer object across call. + // r2: deoptimizer object; r3: scratch. + __ PrepareCallCFunction(1, r3); + // Call Deoptimizer::ComputeOutputFrames(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ pop(r2); // Restore deoptimizer object (class Deoptimizer). + + __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: r6 = current "FrameDescription** output_", + // r3 = one past the last FrameDescription**. + __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset())); + __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_. 
+ __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2)); + __ AddP(r3, r6, r3); + __ b(&outer_loop_header, Label::kNear); + + __ bind(&outer_push_loop); + // Inner loop state: r4 = current FrameDescription*, r5 = loop index. + __ LoadP(r4, MemOperand(r6, 0)); // output_[ix] + __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset())); + __ b(&inner_loop_header, Label::kNear); + + __ bind(&inner_push_loop); + __ SubP(r5, Operand(sizeof(intptr_t))); + __ AddP(r8, r4, r5); + __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset())); + __ push(r8); + + __ bind(&inner_loop_header); + __ CmpP(r5, Operand::Zero()); + __ bne(&inner_push_loop); // test for gt? + + __ AddP(r6, r6, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ CmpP(r6, r3); + __ blt(&outer_push_loop); + + __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset())); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister dreg = DoubleRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ ld(dreg, MemOperand(r3, src_offset)); + } + + // Push pc and continuation from the last output frame. + __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset())); + __ push(r8); + __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset())); + __ push(r8); + + // Restore the registers from the last output frame. 
+ __ LoadRR(r1, r4); + for (int i = kNumberOfRegisters - 1; i > 0; i--) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((restored_regs & (1 << i)) != 0) { + __ LoadP(ToRegister(i), MemOperand(r1, offset)); + } + } + + __ pop(ip); // get continuation, leave pc on stack + __ pop(r14); + __ Jump(ip); + __ stop("Unreachable."); +} + +bool Deoptimizer::PadTopOfStackRegister() { return false; } + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No out-of-line constant pool support. + UNREACHABLE(); +} + +#undef __ + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc new file mode 100644 index 0000000000..7654dc965f --- /dev/null +++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc @@ -0,0 +1,253 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#if V8_TARGET_ARCH_X64 + +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/objects/objects-inl.h" + +namespace v8 { +namespace internal { + +#define __ masm-> + +void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, + Isolate* isolate, + DeoptimizeKind deopt_kind) { + NoRootArrayScope no_root_array(masm); + + // Save all general purpose registers before messing with them. 
+ const int kNumberOfRegisters = Register::kNumRegisters; + + const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters; + __ AllocateStackSpace(kDoubleRegsSize); + + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + XMMRegister xmm_reg = XMMRegister::from_code(code); + int offset = code * kDoubleSize; + __ Movsd(Operand(rsp, offset), xmm_reg); + } + + const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters; + __ AllocateStackSpace(kFloatRegsSize); + + for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { + int code = config->GetAllocatableFloatCode(i); + XMMRegister xmm_reg = XMMRegister::from_code(code); + int offset = code * kFloatSize; + __ Movss(Operand(rsp, offset), xmm_reg); + } + + // We push all registers onto the stack, even though we do not need + // to restore all later. + for (int i = 0; i < kNumberOfRegisters; i++) { + Register r = Register::from_code(i); + __ pushq(r); + } + + const int kSavedRegistersAreaSize = kNumberOfRegisters * kSystemPointerSize + + kDoubleRegsSize + kFloatRegsSize; + + __ Store( + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate), + rbp); + + // We use this to keep the value of the fifth argument temporarily. + // Unfortunately we can't store it directly in r8 (used for passing + // this on linux), since it is another parameter passing register on windows. + Register arg5 = r11; + + // The bailout id is passed using r13 on the stack. + __ movq(arg_reg_3, r13); + + // Get the address of the location in the code object + // and compute the fp-to-sp delta in register arg5. + __ movq(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize)); + __ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize)); + + __ subq(arg5, rbp); + __ negq(arg5); + + // Allocate a new deoptimizer object. 
+ __ PrepareCallCFunction(6); + __ movq(rax, Immediate(0)); + Label context_check; + __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(rdi, &context_check); + __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ movq(arg_reg_1, rax); + __ Set(arg_reg_2, static_cast<int>(deopt_kind)); + // Args 3 and 4 are already in the right registers. + + // On windows put the arguments on the stack (PrepareCallCFunction + // has created space for this). On linux pass the arguments in r8 and r9. +#ifdef _WIN64 + __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5); + __ LoadAddress(arg5, ExternalReference::isolate_address(isolate)); + __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5); +#else + __ movq(r8, arg5); + __ LoadAddress(r9, ExternalReference::isolate_address(isolate)); +#endif + + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); + } + // Preserve deoptimizer object in register rax and get the input + // frame descriptor pointer. + __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); + + // Fill in the input registers. + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = + (i * kSystemPointerSize) + FrameDescription::registers_offset(); + __ PopQuad(Operand(rbx, offset)); + } + + // Fill in the float input registers. + int float_regs_offset = FrameDescription::float_registers_offset(); + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + int src_offset = i * kFloatSize; + int dst_offset = i * kFloatSize + float_regs_offset; + __ movl(rcx, Operand(rsp, src_offset)); + __ movl(Operand(rbx, dst_offset), rcx); + } + __ addq(rsp, Immediate(kFloatRegsSize)); + + // Fill in the double input registers. 
+ int double_regs_offset = FrameDescription::double_registers_offset(); + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + int dst_offset = i * kDoubleSize + double_regs_offset; + __ popq(Operand(rbx, dst_offset)); + } + + // Remove the return address from the stack. + __ addq(rsp, Immediate(kPCOnStackSize)); + + // Compute a pointer to the unwinding limit in register rcx; that is + // the first stack slot not part of the input frame. + __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); + __ addq(rcx, rsp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. + __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset())); + Label pop_loop_header; + __ jmp(&pop_loop_header); + Label pop_loop; + __ bind(&pop_loop); + __ Pop(Operand(rdx, 0)); + __ addq(rdx, Immediate(sizeof(intptr_t))); + __ bind(&pop_loop_header); + __ cmpq(rcx, rsp); + __ j(not_equal, &pop_loop); + + // Compute the output frame in the deoptimizer. + __ pushq(rax); + __ PrepareCallCFunction(2); + __ movq(arg_reg_1, rax); + __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate)); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 2); + } + __ popq(rax); + + __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: rax = current FrameDescription**, rdx = one past the + // last FrameDescription**. 
+ __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); + __ movq(rax, Operand(rax, Deoptimizer::output_offset())); + __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0)); + __ jmp(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: rbx = current FrameDescription*, rcx = loop index. + __ movq(rbx, Operand(rax, 0)); + __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); + __ bind(&inner_push_loop); + __ subq(rcx, Immediate(sizeof(intptr_t))); + __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset())); + __ bind(&inner_loop_header); + __ testq(rcx, rcx); + __ j(not_zero, &inner_push_loop); + __ addq(rax, Immediate(kSystemPointerSize)); + __ bind(&outer_loop_header); + __ cmpq(rax, rdx); + __ j(below, &outer_push_loop); + + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + XMMRegister xmm_reg = XMMRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ Movsd(xmm_reg, Operand(rbx, src_offset)); + } + + // Push pc and continuation from the last output frame. + __ PushQuad(Operand(rbx, FrameDescription::pc_offset())); + __ PushQuad(Operand(rbx, FrameDescription::continuation_offset())); + + // Push the registers from the last output frame. + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = + (i * kSystemPointerSize) + FrameDescription::registers_offset(); + __ PushQuad(Operand(rbx, offset)); + } + + // Restore the registers from the stack. + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + Register r = Register::from_code(i); + // Do not restore rsp, simply pop the value into the next register + // and overwrite this afterwards. + if (r == rsp) { + DCHECK_GT(i, 0); + r = Register::from_code(i - 1); + } + __ popq(r); + } + + // Return to the continuation point. 
+ __ ret(0); +} + +bool Deoptimizer::PadTopOfStackRegister() { return false; } + +void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { + if (kPCOnStackSize == 2 * kSystemPointerSize) { + // Zero out the high-32 bit of PC for x32 port. + SetFrameSlot(offset + kSystemPointerSize, 0); + } + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { + if (kFPOnStackSize == 2 * kSystemPointerSize) { + // Zero out the high-32 bit of FP for x32 port. + SetFrameSlot(offset + kSystemPointerSize, 0); + } + SetFrameSlot(offset, value); +} + +void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { + // No embedded constant pool support. + UNREACHABLE(); +} + +#undef __ + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_X64 |