Diffstat (limited to 'deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h')
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 566
1 file changed, 436 insertions(+), 130 deletions(-)
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index c1f316072d..f071a003ea 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -14,10 +14,14 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
+#define REQUIRE_CPU_FEATURE(name)                                    \
+  if (!CpuFeatures::IsSupported(name)) return bailout("no " #name);  \
+  CpuFeatureScope feature(this, name);
+
 namespace liftoff {
 
-// rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
-// is located at rbp-24.
+// rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack
+// slot is located at rbp-24.
 constexpr int32_t kConstantStackSpace = 16;
 constexpr int32_t kFirstStackSlotOffset =
     kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
@@ -27,17 +31,13 @@ inline Operand GetStackSlot(uint32_t index) {
   return Operand(rbp, -kFirstStackSlotOffset - offset);
 }
 
-inline Operand GetHalfStackSlot(uint32_t half_index) {
-  int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
-  return Operand(rbp, -kFirstStackSlotOffset - offset);
-}
-
 // TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
-inline Operand GetContextOperand() { return Operand(rbp, -16); }
+inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
 
 // Use this register to store the address of the last argument pushed on the
-// stack for a call to C.
-static constexpr Register kCCallLastArgAddrReg = rax;
+// stack for a call to C. This register must be callee saved according to the c
+// calling convention.
+static constexpr Register kCCallLastArgAddrReg = rbx;
 
 inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
                         uint32_t offset_imm, LiftoffRegList pinned) {
@@ -49,6 +49,45 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
   return Operand(addr, offset, times_1, offset_imm);
 }
 
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
+                 ValueType type) {
+  switch (type) {
+    case kWasmI32:
+      assm->movl(dst.gp(), src);
+      break;
+    case kWasmI64:
+      assm->movq(dst.gp(), src);
+      break;
+    case kWasmF32:
+      assm->Movss(dst.fp(), src);
+      break;
+    case kWasmF64:
+      assm->Movsd(dst.fp(), src);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
+  switch (type) {
+    case kWasmI32:
+    case kWasmI64:
+      assm->pushq(reg.gp());
+      break;
+    case kWasmF32:
+      assm->subp(rsp, Immediate(kPointerSize));
+      assm->Movss(Operand(rsp, 0), reg.fp());
+      break;
+    case kWasmF64:
+      assm->subp(rsp, Immediate(kPointerSize));
+      assm->Movsd(Operand(rsp, 0), reg.fp());
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
 }  // namespace liftoff
 
 uint32_t LiftoffAssembler::PrepareStackFrame() {
@@ -96,10 +135,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
   }
 }
 
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
-                                       int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+                                        int size) {
   DCHECK_LE(offset, kMaxInt);
-  movp(dst, liftoff::GetContextOperand());
+  movp(dst, liftoff::GetInstanceOperand());
   DCHECK(size == 4 || size == 8);
   if (size == 4) {
     movl(dst, Operand(dst, offset));
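A note on the new frame layout: with the instance parameter pinned at rbp-16, the first Liftoff stack slot lands at rbp-24 and GetStackSlot only has to scale the slot index. A minimal standalone sketch of that offset arithmetic, assuming kStackSlotSize is 8 as on x64 (SlotOffset is a hypothetical stand-in for the displacement GetStackSlot wraps in an Operand):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the constants above: rbp-8 holds the stack marker and rbp-16
    // the instance parameter, so slot 0 starts at rbp-24.
    constexpr int32_t kStackSlotSize = 8;  // assumption: 8-byte slots on x64
    constexpr int32_t kConstantStackSpace = 16;
    constexpr int32_t kFirstStackSlotOffset =
        kConstantStackSpace + kStackSlotSize;

    // Byte displacement from rbp for stack slot `index`.
    constexpr int32_t SlotOffset(uint32_t index) {
      return -kFirstStackSlotOffset -
             static_cast<int32_t>(index) * kStackSlotSize;
    }

    int main() {
      for (uint32_t i = 0; i < 3; ++i) {
        printf("slot %u -> rbp%d\n", i, SlotOffset(i));  // -24, -32, -40
      }
    }

The removal of GetHalfStackSlot fits the same picture: on x64 an i64 value occupies a single 8-byte slot, so 32-bit half-slot addressing is no longer needed.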
@@ -108,18 +147,21 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
   }
 }
 
-void LiftoffAssembler::SpillContext(Register context) {
-  movp(liftoff::GetContextOperand(), context);
+void LiftoffAssembler::SpillInstance(Register instance) {
+  movp(liftoff::GetInstanceOperand(), instance);
 }
 
-void LiftoffAssembler::FillContextInto(Register dst) {
-  movp(dst, liftoff::GetContextOperand());
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+  movp(dst, liftoff::GetInstanceOperand());
 }
 
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
                             uint32_t* protected_load_pc) {
+  if (emit_debug_code() && offset_reg != no_reg) {
+    AssertZeroExtended(offset_reg);
+  }
   Operand src_op =
       liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, pinned);
   if (protected_load_pc) *protected_load_pc = pc_offset();
@@ -169,6 +211,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              uint32_t offset_imm, LiftoffRegister src,
                              StoreType type, LiftoffRegList pinned,
                              uint32_t* protected_store_pc) {
+  if (emit_debug_code() && offset_reg != no_reg) {
+    AssertZeroExtended(offset_reg);
+  }
   Operand dst_op =
       liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, pinned);
   if (protected_store_pc) *protected_store_pc = pc_offset();
@@ -203,22 +248,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
   Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
-  switch (type) {
-    case kWasmI32:
-      movl(dst.gp(), src);
-      break;
-    case kWasmI64:
-      movq(dst.gp(), src);
-      break;
-    case kWasmF32:
-      Movss(dst.fp(), src);
-      break;
-    case kWasmF64:
-      Movsd(dst.fp(), src);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  liftoff::Load(this, dst, src, type);
 }
 
 void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
@@ -294,19 +324,21 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
       movl(dst, Immediate(value.to_i32()));
       break;
    case kWasmI64: {
-      // We could use movq, but this would require a temporary register. For
-      // simplicity (and to avoid potentially having to spill another
-      // register), we use two movl instructions.
-      int32_t low_word = static_cast<int32_t>(value.to_i64());
-      int32_t high_word = static_cast<int32_t>(value.to_i64() >> 32);
-      movl(dst, Immediate(low_word));
-      movl(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+      if (is_int32(value.to_i64())) {
+        // Sign extend low word.
+        movq(dst, Immediate(static_cast<int32_t>(value.to_i64())));
+      } else if (is_uint32(value.to_i64())) {
+        // Zero extend low word.
+        movl(kScratchRegister, Immediate(static_cast<int32_t>(value.to_i64())));
+        movq(dst, kScratchRegister);
+      } else {
+        movq(kScratchRegister, value.to_i64());
+        movq(dst, kScratchRegister);
+      }
       break;
     }
-    case kWasmF32:
-      movl(dst, Immediate(value.to_f32_boxed().get_bits()));
-      break;
     default:
+      // We do not track f32 and f64 constants, hence they are unreachable.
       UNREACHABLE();
   }
 }
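The rewritten i64 spill picks the cheapest way to materialize the constant: movq with a 32-bit immediate sign-extends, a movl write to a 32-bit register zero-extends into the full quadword, and only the general case needs a 64-bit immediate through the scratch register. A hedged sketch of the selection logic, with is_int32/is_uint32 re-declared locally so it compiles standalone:

    #include <cstdint>
    #include <cstdio>

    // Local stand-ins for V8's is_int32/is_uint32 predicates.
    bool is_int32(int64_t v) { return v == static_cast<int32_t>(v); }
    bool is_uint32(int64_t v) { return static_cast<uint64_t>(v) <= UINT32_MAX; }

    // Which instruction sequence Spill would pick for an i64 constant.
    const char* SpillEncoding(int64_t v) {
      if (is_int32(v)) return "movq [slot], imm32   (CPU sign-extends)";
      if (is_uint32(v)) return "movl scratch, imm32  (write zero-extends)";
      return "movq scratch, imm64; movq [slot], scratch";
    }

    int main() {
      printf("%s\n", SpillEncoding(-1));                // fits as signed imm32
      printf("%s\n", SpillEncoding(0x80000000LL));      // needs zero extension
      printf("%s\n", SpillEncoding(int64_t{1} << 40));  // full 64-bit move
    }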
@@ -354,54 +386,69 @@ void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
   }
 }
 
-#define COMMUTATIVE_I32_BINOP(name, instruction)                     \
-  void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
-                                         Register rhs) {             \
-    if (dst == rhs) {                                                \
-      instruction##l(dst, lhs);                                      \
-    } else {                                                         \
-      if (dst != lhs) movl(dst, lhs);                                \
-      instruction##l(dst, rhs);                                      \
-    }                                                                \
+namespace liftoff {
+template <void (Assembler::*op)(Register, Register),
+          void (Assembler::*mov)(Register, Register)>
+void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
+                          Register rhs) {
+  if (dst == rhs) {
+    (assm->*op)(dst, lhs);
+  } else {
+    if (dst != lhs) (assm->*mov)(dst, lhs);
+    (assm->*op)(dst, rhs);
   }
+}
+}  // namespace liftoff
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+  liftoff::EmitCommutativeBinOp<&Assembler::imull, &Assembler::movl>(this, dst,
+                                                                     lhs, rhs);
+}
 
-// clang-format off
-COMMUTATIVE_I32_BINOP(mul, imul)
-COMMUTATIVE_I32_BINOP(and, and)
-COMMUTATIVE_I32_BINOP(or, or)
-COMMUTATIVE_I32_BINOP(xor, xor)
-// clang-format on
+void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
+  liftoff::EmitCommutativeBinOp<&Assembler::andl, &Assembler::movl>(this, dst,
+                                                                    lhs, rhs);
+}
 
-#undef COMMUTATIVE_I32_BINOP
+void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
+  liftoff::EmitCommutativeBinOp<&Assembler::orl, &Assembler::movl>(this, dst,
+                                                                   lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
+  liftoff::EmitCommutativeBinOp<&Assembler::xorl, &Assembler::movl>(this, dst,
+                                                                    lhs, rhs);
+}
 
 namespace liftoff {
+template <ValueType type>
 inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
-                               Register lhs, Register rhs,
+                               Register src, Register amount,
                                void (Assembler::*emit_shift)(Register),
                                LiftoffRegList pinned) {
   // If dst is rcx, compute into the scratch register first, then move to rcx.
   if (dst == rcx) {
-    assm->movl(kScratchRegister, lhs);
-    if (rhs != rcx) assm->movl(rcx, rhs);
+    assm->Move(kScratchRegister, src, type);
+    if (amount != rcx) assm->Move(rcx, amount, type);
     (assm->*emit_shift)(kScratchRegister);
-    assm->movl(rcx, kScratchRegister);
+    assm->Move(rcx, kScratchRegister, type);
     return;
   }
 
-  // Move rhs into rcx. If rcx is in use, move its content into the scratch
-  // register. If lhs is rcx, lhs is now the scratch register.
+  // Move amount into rcx. If rcx is in use, move its content into the scratch
+  // register. If src is rcx, src is now the scratch register.
   bool use_scratch = false;
-  if (rhs != rcx) {
-    use_scratch = lhs == rcx ||
+  if (amount != rcx) {
+    use_scratch = src == rcx ||
                   assm->cache_state()->is_used(LiftoffRegister(rcx)) ||
                   pinned.has(LiftoffRegister(rcx));
    if (use_scratch) assm->movq(kScratchRegister, rcx);
-    if (lhs == rcx) lhs = kScratchRegister;
-    assm->movl(rcx, rhs);
+    if (src == rcx) src = kScratchRegister;
+    assm->Move(rcx, amount, type);
  }
 
   // Do the actual shift.
-  if (dst != lhs) assm->movl(dst, lhs);
+  if (dst != src) assm->Move(dst, src, type);
   (assm->*emit_shift)(dst);
 
   // Restore rcx if needed.
@@ -409,19 +456,22 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
 }
 }  // namespace liftoff
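EmitCommutativeBinOp replaces four copies of the COMMUTATIVE_I32_BINOP macro with one template over assembler member-function pointers, which the i64 section below reuses with the quadword ops. A toy model of the dispatch (MiniAssembler is invented for illustration; the real template takes V8's Assembler methods):

    #include <cstdio>

    // Invented stand-in for the slice of the Assembler interface used here.
    struct MiniAssembler {
      void andl(int dst, int src) { printf("andl r%d, r%d\n", dst, src); }
      void movl(int dst, int src) { printf("movl r%d, r%d\n", dst, src); }
    };

    // Same shape as liftoff::EmitCommutativeBinOp: if dst aliases rhs, the
    // commutative op can run in place; otherwise move lhs in first.
    template <void (MiniAssembler::*op)(int, int),
              void (MiniAssembler::*mov)(int, int)>
    void EmitCommutativeBinOp(MiniAssembler* assm, int dst, int lhs, int rhs) {
      if (dst == rhs) {
        (assm->*op)(dst, lhs);
      } else {
        if (dst != lhs) (assm->*mov)(dst, lhs);
        (assm->*op)(dst, rhs);
      }
    }

    int main() {
      MiniAssembler a;
      EmitCommutativeBinOp<&MiniAssembler::andl, &MiniAssembler::movl>(
          &a, /*dst=*/1, /*lhs=*/2, /*rhs=*/1);  // dst==rhs: andl r1, r2
      EmitCommutativeBinOp<&MiniAssembler::andl, &MiniAssembler::movl>(
          &a, /*dst=*/3, /*lhs=*/2, /*rhs=*/1);  // movl r3, r2; andl r3, r1
    }

The shift helper above is templatized over ValueType for the same reason: on x86-64 a variable shift count must sit in CL, so rcx has to be freed up regardless of operand width.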
 
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_shl(Register dst, Register src, Register amount,
                                     LiftoffRegList pinned) {
-  liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl, pinned);
+  liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
+                                        &Assembler::shll_cl, pinned);
 }
 
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_sar(Register dst, Register src, Register amount,
                                     LiftoffRegList pinned) {
-  liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl, pinned);
+  liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
+                                        &Assembler::sarl_cl, pinned);
 }
 
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_shr(Register dst, Register src, Register amount,
                                     LiftoffRegList pinned) {
-  liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl, pinned);
+  liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
+                                        &Assembler::shrl_cl, pinned);
 }
 
 bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -465,15 +515,62 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
   return true;
 }
 
-void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
-                                        Register rhs) {
-  if (lhs != dst) {
-    leap(dst, Operand(lhs, rhs, times_1, 0));
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+                                    LiftoffRegister rhs) {
+  if (lhs.gp() != dst.gp()) {
+    leap(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0));
+  } else {
+    addp(dst.gp(), rhs.gp());
+  }
+}
+
+void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
+                                    LiftoffRegister rhs) {
+  if (dst.gp() == rhs.gp()) {
+    negq(dst.gp());
+    addq(dst.gp(), lhs.gp());
   } else {
-    addp(dst, rhs);
+    if (dst.gp() != lhs.gp()) movq(dst.gp(), lhs.gp());
+    subq(dst.gp(), rhs.gp());
   }
 }
 
+void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
+                                    LiftoffRegister rhs) {
+  liftoff::EmitCommutativeBinOp<&Assembler::andq, &Assembler::movq>(
+      this, dst.gp(), lhs.gp(), rhs.gp());
+}
+
+void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
+                                   LiftoffRegister rhs) {
+  liftoff::EmitCommutativeBinOp<&Assembler::orq, &Assembler::movq>(
+      this, dst.gp(), lhs.gp(), rhs.gp());
+}
+
+void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
+                                    LiftoffRegister rhs) {
+  liftoff::EmitCommutativeBinOp<&Assembler::xorq, &Assembler::movq>(
+      this, dst.gp(), lhs.gp(), rhs.gp());
+}
+
+void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
+                                    Register amount, LiftoffRegList pinned) {
+  liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
+                                        &Assembler::shlq_cl, pinned);
+}
+
+void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
+                                    Register amount, LiftoffRegList pinned) {
+  liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
+                                        &Assembler::sarq_cl, pinned);
+}
+
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+                                    Register amount, LiftoffRegList pinned) {
+  liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
+                                        &Assembler::shrq_cl, pinned);
+}
+
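emit_i64_sub covers the dst == rhs alias without a temporary by rewriting lhs - rhs as -rhs + lhs (negq then addq), which holds under two's-complement wraparound; emit_i64_add instead uses leap as a non-destructive three-operand add when dst differs from lhs. A quick check of the subtraction identity, done on uint64_t since the CPU wraps where signed C++ overflow would be undefined:

    #include <cassert>
    #include <cstdint>

    // dst aliases rhs: negq dst; addq dst, lhs  ==>  lhs - rhs (mod 2^64).
    uint64_t SubViaNegAdd(uint64_t lhs, uint64_t rhs) {
      uint64_t dst = rhs;  // dst == rhs on entry
      dst = ~dst + 1;      // negq dst
      dst += lhs;          // addq dst, lhs
      return dst;
    }

    int main() {
      assert(SubViaNegAdd(5, 7) == uint64_t(5) - uint64_t(7));
      assert(SubViaNegAdd(0, UINT64_MAX) == 1);
      assert(SubViaNegAdd(uint64_t{1} << 63, 1) == (uint64_t{1} << 63) - 1);
    }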
 void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {
   if (CpuFeatures::IsSupported(AVX)) {
@@ -515,6 +612,32 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
   }
 }
 
+void LiftoffAssembler::emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vdivss(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    movss(kScratchDoubleReg, rhs);
+    movss(dst, lhs);
+    divss(dst, kScratchDoubleReg);
+  } else {
+    if (dst != lhs) movss(dst, lhs);
+    divss(dst, rhs);
+  }
+}
+
+void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
+  static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+  if (dst == src) {
+    TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+    Andps(dst, kScratchDoubleReg);
+  } else {
+    TurboAssembler::Move(dst, kSignBit - 1);
+    Andps(dst, src);
+  }
+}
+
 void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint32_t kSignBit = uint32_t{1} << 31;
   if (dst == src) {
@@ -526,6 +649,31 @@ void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
   }
 }
 
+void LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+  REQUIRE_CPU_FEATURE(SSE4_1);
+  Roundss(dst, src, kRoundUp);
+}
+
+void LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+  REQUIRE_CPU_FEATURE(SSE4_1);
+  Roundss(dst, src, kRoundDown);
+}
+
+void LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+  REQUIRE_CPU_FEATURE(SSE4_1);
+  Roundss(dst, src, kRoundToZero);
+}
+
+void LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+                                            DoubleRegister src) {
+  REQUIRE_CPU_FEATURE(SSE4_1);
+  Roundss(dst, src, kRoundToNearest);
+}
+
+void LiftoffAssembler::emit_f32_sqrt(DoubleRegister dst, DoubleRegister src) {
+  Sqrtss(dst, src);
+}
+
 void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {
   if (CpuFeatures::IsSupported(AVX)) {
@@ -567,6 +715,32 @@ void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
   }
 }
 
+void LiftoffAssembler::emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vdivsd(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    movsd(kScratchDoubleReg, rhs);
+    movsd(dst, lhs);
+    divsd(dst, kScratchDoubleReg);
+  } else {
+    if (dst != lhs) movsd(dst, lhs);
+    divsd(dst, rhs);
+  }
+}
+
+void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
+  static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+  if (dst == src) {
+    TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+    Andpd(dst, kScratchDoubleReg);
+  } else {
+    TurboAssembler::Move(dst, kSignBit - 1);
+    Andpd(dst, src);
+  }
+}
+
 void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint64_t kSignBit = uint64_t{1} << 63;
   if (dst == src) {
@@ -578,8 +752,98 @@ void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
   }
 }
 
+void LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
+  REQUIRE_CPU_FEATURE(SSE4_1);
+  Roundsd(dst, src, kRoundUp);
+}
+
+void LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
+  REQUIRE_CPU_FEATURE(SSE4_1);
+  Roundsd(dst, src, kRoundDown);
+}
+
+void LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
+  REQUIRE_CPU_FEATURE(SSE4_1);
+  Roundsd(dst, src, kRoundToZero);
+}
+
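Both abs emitters mask with kSignBit - 1, i.e. all bits set except the sign bit, which is the standard branchless float abs; the neg emitters XOR with kSignBit itself. The scalar equivalent of what Andps does here:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Branchless f32 abs: clear the sign bit, as Andps with the
    // (kSignBit - 1) mask does in emit_f32_abs.
    float F32Abs(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits &= (uint32_t{1} << 31) - 1;  // 0x7fffffff
      std::memcpy(&x, &bits, sizeof bits);
      return x;
    }

    int main() {
      assert(F32Abs(-1.5f) == 1.5f);
      assert(!std::signbit(F32Abs(-0.0f)));  // compare-based abs misses -0
      assert(std::isnan(F32Abs(NAN)));       // NaN stays NaN, sign cleared
    }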
+void LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+                                            DoubleRegister src) {
+  REQUIRE_CPU_FEATURE(SSE4_1);
+  Roundsd(dst, src, kRoundToNearest);
+}
+
+void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
+  Sqrtsd(dst, src);
+}
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+                                            LiftoffRegister dst,
+                                            LiftoffRegister src) {
+  switch (opcode) {
+    case kExprI32ConvertI64:
+      movl(dst.gp(), src.gp());
+      return true;
+    case kExprI32ReinterpretF32:
+      Movd(dst.gp(), src.fp());
+      return true;
+    case kExprI64SConvertI32:
+      movsxlq(dst.gp(), src.gp());
+      return true;
+    case kExprI64UConvertI32:
+      AssertZeroExtended(src.gp());
+      if (dst.gp() != src.gp()) movl(dst.gp(), src.gp());
+      return true;
+    case kExprI64ReinterpretF64:
+      Movq(dst.gp(), src.fp());
+      return true;
+    case kExprF32SConvertI32:
+      Cvtlsi2ss(dst.fp(), src.gp());
+      return true;
+    case kExprF32UConvertI32:
+      movl(kScratchRegister, src.gp());
+      Cvtqsi2ss(dst.fp(), kScratchRegister);
+      return true;
+    case kExprF32SConvertI64:
+      Cvtqsi2ss(dst.fp(), src.gp());
+      return true;
+    case kExprF32UConvertI64:
+      Cvtqui2ss(dst.fp(), src.gp(), kScratchRegister);
+      return true;
+    case kExprF32ConvertF64:
+      Cvtsd2ss(dst.fp(), src.fp());
+      return true;
+    case kExprF32ReinterpretI32:
+      Movd(dst.fp(), src.gp());
+      return true;
+    case kExprF64SConvertI32:
+      Cvtlsi2sd(dst.fp(), src.gp());
+      return true;
+    case kExprF64UConvertI32:
+      movl(kScratchRegister, src.gp());
+      Cvtqsi2sd(dst.fp(), kScratchRegister);
+      return true;
+    case kExprF64SConvertI64:
+      Cvtqsi2sd(dst.fp(), src.gp());
+      return true;
+    case kExprF64UConvertI64:
+      Cvtqui2sd(dst.fp(), src.gp(), kScratchRegister);
+      return true;
+    case kExprF64ConvertF32:
+      Cvtss2sd(dst.fp(), src.fp());
+      return true;
+    case kExprF64ReinterpretI64:
+      Movq(dst.fp(), src.gp());
+      return true;
+    default:
+      UNREACHABLE();
+  }
+}
+
 void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
 
+void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
+
 void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
                                       ValueType type, Register lhs,
                                       Register rhs) {
@@ -602,45 +866,76 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
   j(cond, label);
 }
 
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+  testl(src, src);
+  setcc(equal, dst);
+  movzxbl(dst, dst);
+}
+
 void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
                                          Register lhs, Register rhs) {
-  if (rhs != no_reg) {
-    cmpl(lhs, rhs);
-  } else {
-    testl(lhs, lhs);
-  }
+  cmpl(lhs, rhs);
+  setcc(cond, dst);
+  movzxbl(dst, dst);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+  testq(src.gp(), src.gp());
+  setcc(equal, dst);
+  movzxbl(dst, dst);
+}
+
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+                                         LiftoffRegister lhs,
+                                         LiftoffRegister rhs) {
+  cmpq(lhs.gp(), rhs.gp());
   setcc(cond, dst);
   movzxbl(dst, dst);
 }
 
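One conversion trick worth spelling out: the unsigned i32 cases have no dedicated SSE instruction, so the patch zero-extends the value with movl (writes to a 32-bit register clear the upper half) and then uses the signed 64-bit convert, which is exact because every uint32 fits in int64's non-negative range. The same reasoning in portable C++:

    #include <cassert>
    #include <cstdint>

    // uint32 -> f32 via the signed-int64 path of kExprF32UConvertI32.
    float F32FromU32(uint32_t v) {
      int64_t widened = static_cast<int64_t>(v);  // movl: zero-extend
      return static_cast<float>(widened);         // Cvtqsi2ss: i64 -> f32
    }

    int main() {
      assert(F32FromU32(0x80000000u) == 2147483648.0f);  // negative if read
                                                         // as int32
      assert(F32FromU32(UINT32_MAX) == 4294967296.0f);   // rounds to 2^32
    }

The widening trick cannot work for unsigned i64 sources, which is why kExprF32UConvertI64 and kExprF64UConvertI64 go through the dedicated Cvtqui2ss/Cvtqui2sd helpers with a scratch register instead.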
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
-                                         DoubleRegister lhs,
-                                         DoubleRegister rhs) {
+namespace liftoff {
+template <void (TurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
+void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
+                      DoubleRegister lhs, DoubleRegister rhs) {
   Label cont;
   Label not_nan;
-  Ucomiss(lhs, rhs);
+  (assm->*cmp_op)(lhs, rhs);
   // IF PF is one, one of the operands was Nan. This needs special handling.
-  j(parity_odd, &not_nan, Label::kNear);
+  assm->j(parity_odd, &not_nan, Label::kNear);
   // Return 1 for f32.ne, 0 for all other cases.
   if (cond == not_equal) {
-    movl(dst, Immediate(1));
+    assm->movl(dst, Immediate(1));
   } else {
-    xorl(dst, dst);
+    assm->xorl(dst, dst);
   }
-  jmp(&cont, Label::kNear);
-  bind(&not_nan);
+  assm->jmp(&cont, Label::kNear);
+  assm->bind(&not_nan);
 
-  setcc(cond, dst);
-  movzxbl(dst, dst);
-  bind(&cont);
+  assm->setcc(cond, dst);
+  assm->movzxbl(dst, dst);
+  assm->bind(&cont);
+}
+}  // namespace liftoff
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+                                         DoubleRegister lhs,
+                                         DoubleRegister rhs) {
+  liftoff::EmitFloatSetCond<&TurboAssembler::Ucomiss>(this, cond, dst, lhs,
+                                                      rhs);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+                                         DoubleRegister lhs,
+                                         DoubleRegister rhs) {
+  liftoff::EmitFloatSetCond<&TurboAssembler::Ucomisd>(this, cond, dst, lhs,
+                                                      rhs);
 }
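EmitFloatSetCond folds the f32 and f64 compare-and-set paths into one template and keeps the unordered special case: UCOMISS/UCOMISD set the parity flag when either operand is NaN, and of the wasm float comparisons only ne returns 1 on unordered inputs. The j(parity_odd, ...) therefore jumps past the fix-up when PF is clear (ordered operands); NaN inputs fall through to the movl/xorl pair. The intended truth table, checked in portable C++:

    #include <cassert>
    #include <cmath>

    // Wasm float comparison semantics on NaN: everything is 0 except ne.
    // This is exactly the early-out the parity-flag branch implements.
    int WasmF32Eq(float a, float b) { return a == b; }
    int WasmF32Ne(float a, float b) { return !(a == b); }
    int WasmF32Lt(float a, float b) { return a < b; }

    int main() {
      float nan = NAN;
      assert(WasmF32Eq(nan, 1.0f) == 0);
      assert(WasmF32Lt(nan, 1.0f) == 0);
      assert(WasmF32Ne(nan, 1.0f) == 1);  // the "return 1 for f32.ne" path
    }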
 
 void LiftoffAssembler::StackCheck(Label* ool_code) {
-  Register limit = GetUnusedRegister(kGpReg).gp();
-  LoadAddress(limit, ExternalReference::address_of_stack_limit(isolate()));
-  cmpp(rsp, Operand(limit, 0));
+  Operand stack_limit = ExternalOperand(
+      ExternalReference::address_of_stack_limit(isolate()), kScratchRegister);
+  cmpp(rsp, stack_limit);
   j(below_equal, ool_code);
 }
 
@@ -671,22 +966,7 @@ void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
 
 void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
                                            ValueType type) {
-  switch (type) {
-    case kWasmI32:
-    case kWasmI64:
-      pushq(reg.gp());
-      break;
-    case kWasmF32:
-      subp(rsp, Immediate(kPointerSize));
-      Movss(Operand(rsp, 0), reg.fp());
-      break;
-    case kWasmF64:
-      subp(rsp, Immediate(kPointerSize));
-      Movsd(Operand(rsp, 0), reg.fp());
-      break;
-    default:
-      UNREACHABLE();
-  }
+  liftoff::push(this, reg, type);
 }
 
 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -734,37 +1014,61 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   ret(static_cast<int>(num_stack_slots * kPointerSize));
 }
 
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
-  for (size_t param = 0; param < num_params; ++param) {
-    pushq(args[param]);
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+                                    const LiftoffRegister* args,
+                                    ValueType out_argument_type) {
+  for (ValueType param_type : sig->parameters()) {
+    liftoff::push(this, *args++, param_type);
+  }
+  if (out_argument_type != kWasmStmt) {
+    subq(rsp, Immediate(kPointerSize));
   }
-  movq(liftoff::kCCallLastArgAddrReg, rsp);
-  PrepareCallCFunction(num_params);
+  // Save the original sp (before the first push), such that we can later
+  // compute pointers to the pushed values. Do this only *after* pushing the
+  // values, because {kCCallLastArgAddrReg} might collide with an arg register.
+  int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
+                             (out_argument_type != kWasmStmt);
+  int pushed_bytes = kPointerSize * num_c_call_arguments;
+  leaq(liftoff::kCCallLastArgAddrReg, Operand(rsp, pushed_bytes));
+  PrepareCallCFunction(num_c_call_arguments);
 }
 
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
-                                            uint32_t num_params) {
-  int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
-  leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+                                            ValueType type) {
+  // Check that we don't accidentally override kCCallLastArgAddrReg.
+  DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
+  leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, -param_byte_offset));
 }
 
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
-                                              uint32_t param_idx,
-                                              uint32_t num_params) {
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
                                              int param_byte_offset,
+                                              ValueType type) {
   // On x64, all C call arguments fit in registers.
   UNREACHABLE();
 }
 
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+                                            int param_byte_offset) {
+  // Check that we don't accidentally override kCCallLastArgAddrReg.
+  DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
+  Operand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
+  liftoff::Load(this, dst, src, type);
+}
+
 void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
   CallCFunction(ext_ref, static_cast<int>(num_params));
 }
 
+void LiftoffAssembler::FinishCCall() {
+  movp(rsp, liftoff::kCCallLastArgAddrReg);
+}
+
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
   near_call(addr, RelocInfo::WASM_CALL);
 }
 
 void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
-  // Set context to zero.
+  // Set instance to zero.
   xorp(rsi, rsi);
   CallRuntimeDelayed(zone, fid);
 }
@@ -788,6 +1092,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
   addp(rsp, Immediate(size));
 }
 
+#undef REQUIRE_CPU_FEATURE
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
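The reworked C-call protocol: PrepareCCall pushes each argument (plus an optional out-argument slot), then records the pre-push rsp in the callee-saved kCCallLastArgAddrReg, so every staged value is reachable at a fixed negative byte offset from that base and FinishCCall can restore rsp from it afterwards. A sketch of the offset arithmetic, with the push index invented for illustration (the real param_byte_offset is supplied by the platform-independent caller):

    #include <cstdio>

    constexpr int kPointerSize = 8;  // x64

    // After n pushes, leaq sets kCCallLastArgAddrReg = rsp + n*8, i.e. the
    // address just above the first pushed value. The i-th pushed value
    // (0-based, in push order) then lives at base - (i + 1) * 8.
    int ParamByteOffset(int push_index) {
      return (push_index + 1) * kPointerSize;
    }

    int main() {
      for (int i = 0; i < 3; ++i) {
        printf("arg %d at [kCCallLastArgAddrReg - %d]\n", i,
               ParamByteOffset(i));
      }
    }

This also motivates the switch from rax to rbx at the top of the file: the base register has to survive the C call itself (FinishCCall and LoadCCallOutArgument read it after the call returns), hence the callee-saved requirement.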