about summary refs log tree commit diff
path: root/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h')
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 57
1 file changed, 40 insertions, 17 deletions
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 0fe0237653..b1d71dce2f 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -150,6 +150,26 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
#endif
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset, 1);
+#if V8_OS_WIN
+ if (bytes > kStackPageSize) {
+ // Generate OOL code (at the end of the function, where the current
+ // assembler is pointing) to do the explicit stack limit check (see
+ // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
+ // visual-studio-6.0/aa227153(v=vs.60)).
+ // At the function start, emit a jump to that OOL code (from {offset} to
+ // {pc_offset()}).
+ int ool_offset = pc_offset() - offset;
+ patching_assembler.b(ool_offset >> kInstrSizeLog2);
+
+ // Now generate the OOL code.
+ Claim(bytes, 1);
+ // Jump back to the start of the function (from {pc_offset()} to {offset +
+ // kInstrSize}).
+ int func_start_offset = offset + kInstrSize - pc_offset();
+ b(func_start_offset >> kInstrSizeLog2);
+ return;
+ }
+#endif
patching_assembler.PatchSubSp(bytes);
}
@@ -382,11 +402,23 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
Register rhs) { \
instruction(dst.W(), lhs.W(), rhs.W()); \
}
+#define I32_BINOP_I(name, instruction) \
+ I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst.W(), lhs.W(), Immediate(imm)); \
+ }
#define I64_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
instruction(dst.gp().X(), lhs.gp().X(), rhs.gp().X()); \
}
+#define I64_BINOP_I(name, instruction) \
+ I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t imm) { \
+ instruction(dst.gp().X(), lhs.gp().X(), imm); \
+ }
#define FP32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
@@ -439,21 +471,21 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
instruction(dst.gp().X(), src.gp().X(), amount); \
}
-I32_BINOP(i32_add, Add)
+I32_BINOP_I(i32_add, Add)
I32_BINOP(i32_sub, Sub)
I32_BINOP(i32_mul, Mul)
-I32_BINOP(i32_and, And)
-I32_BINOP(i32_or, Orr)
-I32_BINOP(i32_xor, Eor)
+I32_BINOP_I(i32_and, And)
+I32_BINOP_I(i32_or, Orr)
+I32_BINOP_I(i32_xor, Eor)
I32_SHIFTOP(i32_shl, Lsl)
I32_SHIFTOP(i32_sar, Asr)
I32_SHIFTOP_I(i32_shr, Lsr)
-I64_BINOP(i64_add, Add)
+I64_BINOP_I(i64_add, Add)
I64_BINOP(i64_sub, Sub)
I64_BINOP(i64_mul, Mul)
-I64_BINOP(i64_and, And)
-I64_BINOP(i64_or, Orr)
-I64_BINOP(i64_xor, Eor)
+I64_BINOP_I(i64_and, And)
+I64_BINOP_I(i64_or, Orr)
+I64_BINOP_I(i64_xor, Eor)
I64_SHIFTOP(i64_shl, Lsl)
I64_SHIFTOP(i64_sar, Asr)
I64_SHIFTOP_I(i64_shr, Lsr)
@@ -580,15 +612,6 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Msub(dst_w, scratch, rhs_w, lhs_w);
}
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- Add(dst.gp().X(), lhs.gp().X(), Immediate(imm));
-}
-
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
- Add(dst.W(), lhs.W(), Immediate(imm));
-}
-
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,