path: root/deps/v8/src/wasm/baseline
Diffstat (limited to 'deps/v8/src/wasm/baseline')
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 13
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 11
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 27
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler-defs.h | 11
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc | 6
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 20
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 213
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.h | 5
-rw-r--r--  deps/v8/src/wasm/baseline/mips/OWNERS | 5
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 263
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/OWNERS | 5
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 221
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 17
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 17
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 74
15 files changed, 427 insertions, 481 deletions
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index ca55fe5d52..725bed590f 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
BAILOUT("Store");
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessStore");
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -249,7 +238,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+ // This is a nop on arm.
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a8928210bb..cdc2dc2a45 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -281,17 +281,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index ae8c9e012f..1fef62542a 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -227,9 +227,10 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
// Wasm memory is limited to a size <2GB, so all offsets can be encoded as
// immediate value (in 31 bits, interpreted as signed value).
// If the offset is bigger, we always trap and this code is not reached.
- DCHECK(is_uint31(offset_imm));
+ // Note: We shouldn't have memories larger than 2GiB on 32-bit, but if we
+ // did, we encode {offset_imm} as signed, and it will simply wrap around.
Operand src_op = offset_reg == no_reg
- ? Operand(src_addr, offset_imm)
+ ? Operand(src_addr, bit_cast<int32_t>(offset_imm))
: Operand(src_addr, offset_reg, times_1, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
@@ -278,10 +279,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI64Load: {
// Compute the operand for the load of the upper half.
- DCHECK(is_uint31(offset_imm + 4));
Operand upper_src_op =
offset_reg == no_reg
- ? Operand(src_addr, offset_imm + 4)
+ ? Operand(src_addr, bit_cast<int32_t>(offset_imm + 4))
: Operand(src_addr, offset_reg, times_1, offset_imm + 4);
// The high word has to be mov'ed first, such that this is the protected
// instruction. The mov of the low word cannot segfault.
@@ -308,9 +308,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
// Wasm memory is limited to a size <2GB, so all offsets can be encoded as
// immediate value (in 31 bits, interpreted as signed value).
// If the offset is bigger, we always trap and this code is not reached.
- DCHECK(is_uint31(offset_imm));
Operand dst_op = offset_reg == no_reg
- ? Operand(dst_addr, offset_imm)
+ ? Operand(dst_addr, bit_cast<int32_t>(offset_imm))
: Operand(dst_addr, offset_reg, times_1, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset();
@@ -342,10 +341,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break;
case StoreType::kI64Store: {
// Compute the operand for the store of the upper half.
- DCHECK(is_uint31(offset_imm + 4));
Operand upper_dst_op =
offset_reg == no_reg
- ? Operand(dst_addr, offset_imm + 4)
+ ? Operand(dst_addr, bit_cast<int32_t>(offset_imm + 4))
: Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
// The high word has to be mov'ed first, such that this is the protected
// instruction. The mov of the low word cannot segfault.
@@ -364,17 +362,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -893,7 +880,7 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+ // This is a nop on ia32.
}
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
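Note on the ia32 hunks above: the dropped DCHECK(is_uint31(offset_imm)) checks are replaced by reinterpreting the unsigned offset as a signed 32-bit displacement. A minimal host-side sketch of why that is safe on a 32-bit target (illustrative only; bit_cast stands in for V8's helper of the same name, and an out-of-range offset still traps in the bounds check before the access is reached):

#include <cstdint>
#include <cstring>

// Reinterpret the unsigned offset as a signed displacement, as the patch does
// with bit_cast<int32_t>(offset_imm).
int32_t AsSignedDisplacement(uint32_t offset_imm) {
  int32_t disp;
  std::memcpy(&disp, &offset_imm, sizeof disp);
  return disp;
}

// On ia32, address arithmetic wraps modulo 2^32, so adding the reinterpreted
// displacement yields the same address bits as adding the unsigned offset.
uint32_t EffectiveAddress(uint32_t base, uint32_t offset_imm) {
  return base + static_cast<uint32_t>(AsSignedDisplacement(offset_imm));
}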
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index b7fdf5fe60..c8d8dab1d9 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -5,18 +5,9 @@
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
+#include "src/assembler-arch.h"
#include "src/reglist.h"
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64.h"
-#endif
-
namespace v8 {
namespace internal {
namespace wasm {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 0e913c19dc..1d604925cc 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -349,7 +349,7 @@ constexpr AssemblerOptions DefaultLiftoffOptions() {
LiftoffAssembler::LiftoffAssembler()
: TurboAssembler(nullptr, DefaultLiftoffOptions(), nullptr, 0,
CodeObjectRequired::kNo) {
- set_trap_on_abort(true); // Avoid calls to Abort.
+ set_abort_hard(true); // Avoid calls to Abort.
}
LiftoffAssembler::~LiftoffAssembler() {
@@ -446,7 +446,7 @@ void LiftoffAssembler::SpillAllRegisters() {
cache_state_.reset_used_registers();
}
-void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
+void LiftoffAssembler::PrepareCall(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
LiftoffRegister* target_instance) {
@@ -555,7 +555,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
}
-void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
+void LiftoffAssembler::FinishCall(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor) {
const size_t return_count = sig->return_count();
if (return_count != 0) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 822c620b82..cfc412d671 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -314,11 +314,11 @@ class LiftoffAssembler : public TurboAssembler {
// Load parameters into the right registers / stack slots for the call.
// Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack.
- void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
+ void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
Register* target = nullptr,
LiftoffRegister* target_instance = nullptr);
// Process return values of the call.
- void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ void FinishCall(FunctionSig*, compiler::CallDescriptor*);
// Move {src} into {dst}. {src} and {dst} must be different.
void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
@@ -362,10 +362,6 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src, StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc = nullptr,
bool is_store_mem = false);
- inline void ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned);
- inline void ChangeEndiannessStore(LiftoffRegister src, StoreType type,
- LiftoffRegList pinned);
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
@@ -448,6 +444,14 @@ class LiftoffAssembler : public TurboAssembler {
emit_i32_add(dst, lhs, rhs);
}
}
+ inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
+ if (kPointerSize == 8) {
+ emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
+ LiftoffRegister(rhs));
+ } else {
+ emit_i32_sub(dst, lhs, rhs);
+ }
+ }
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
@@ -532,13 +536,13 @@ class LiftoffAssembler : public TurboAssembler {
// this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers.
- inline void CallC(wasm::FunctionSig* sig, const LiftoffRegister* args,
+ inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
const LiftoffRegister* rets, ValueType out_argument_type,
int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr);
// Indirect call: If {target == no_reg}, then pop the target from the stack.
- inline void CallIndirect(wasm::FunctionSig* sig,
+ inline void CallIndirect(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target);
inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 1130cf0cdd..dbd106d481 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -16,6 +16,7 @@
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
@@ -62,9 +63,11 @@ constexpr LoadType::LoadTypeValue kPointerLoadType =
// thus store the label on the heap and keep a unique_ptr.
class MovableLabel {
public:
- Label* get() { return label_.get(); }
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
MovableLabel() : label_(new Label()) {}
+ Label* get() { return label_.get(); }
+
private:
std::unique_ptr<Label> label_;
};
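Note on MovableLabel above: on platforms where Label itself cannot safely be moved, the wrapper keeps it behind a unique_ptr, so the wrapper stays movable while the Label's address stays stable. A simplified stand-alone sketch of that pattern (Label is a placeholder type here, and the MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR macro is spelled out by hand):

#include <memory>

struct Label {};  // placeholder for the assembler's Label type

class MovableLabel {
 public:
  // What the move-only macro arranges: moves allowed, copies deleted.
  MovableLabel(MovableLabel&&) = default;
  MovableLabel& operator=(MovableLabel&&) = default;
  MovableLabel(const MovableLabel&) = delete;
  MovableLabel& operator=(const MovableLabel&) = delete;

  MovableLabel() : label_(new Label()) {}

  // The Label lives on the heap, so get() returns a stable address even after
  // the wrapper has been moved (e.g. when a std::vector reallocates).
  Label* get() { return label_.get(); }

 private:
  std::unique_ptr<Label> label_;
};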
@@ -72,6 +75,8 @@ class MovableLabel {
// On all other platforms, just store the Label directly.
class MovableLabel {
public:
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);
+
Label* get() { return &label_; }
private:
@@ -93,8 +98,7 @@ class LiftoffCompiler {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
// TODO(clemensh): Make this a template parameter.
- static constexpr wasm::Decoder::ValidateFlag validate =
- wasm::Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using Value = ValueBase;
@@ -111,7 +115,7 @@ class LiftoffCompiler {
MovableLabel label;
};
- using Decoder = WasmFullDecoder<validate, LiftoffCompiler>;
+ using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
struct OutOfLineCode {
MovableLabel label;
@@ -137,11 +141,6 @@ class LiftoffCompiler {
: descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
- min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
- max_size_(uint64_t{env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages} *
- wasm::kWasmPageSize),
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_) {}
@@ -165,20 +164,20 @@ class LiftoffCompiler {
return __ GetTotalFrameSlotCount();
}
- void unsupported(Decoder* decoder, const char* reason) {
+ void unsupported(FullDecoder* decoder, const char* reason) {
ok_ = false;
TRACE("unsupported: %s\n", reason);
decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s", reason);
BindUnboundLabels(decoder);
}
- bool DidAssemblerBailout(Decoder* decoder) {
+ bool DidAssemblerBailout(FullDecoder* decoder) {
if (decoder->failed() || !__ did_bailout()) return false;
unsupported(decoder, __ bailout_reason());
return true;
}
- bool CheckSupportedType(Decoder* decoder,
+ bool CheckSupportedType(FullDecoder* decoder,
Vector<const ValueType> supported_types,
ValueType type, const char* context) {
char buffer[128];
@@ -195,7 +194,7 @@ class LiftoffCompiler {
return safepoint_table_builder_.GetCodeOffset();
}
- void BindUnboundLabels(Decoder* decoder) {
+ void BindUnboundLabels(FullDecoder* decoder) {
#ifdef DEBUG
// Bind all labels now, otherwise their destructor will fire a DCHECK error
// if they were referenced before.
@@ -215,7 +214,7 @@ class LiftoffCompiler {
#endif
}
- void StartFunction(Decoder* decoder) {
+ void StartFunction(FullDecoder* decoder) {
int num_locals = decoder->NumLocals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
@@ -306,7 +305,7 @@ class LiftoffCompiler {
__ bind(ool.continuation.get());
}
- void StartFunctionBody(Decoder* decoder, Control* block) {
+ void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
return;
@@ -422,7 +421,7 @@ class LiftoffCompiler {
}
}
- void FinishFunction(Decoder* decoder) {
+ void FinishFunction(FullDecoder* decoder) {
if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(ool);
@@ -435,23 +434,23 @@ class LiftoffCompiler {
DidAssemblerBailout(decoder);
}
- void OnFirstError(Decoder* decoder) {
+ void OnFirstError(FullDecoder* decoder) {
ok_ = false;
BindUnboundLabels(decoder);
asm_.AbortCompilation();
}
- void NextInstruction(Decoder* decoder, WasmOpcode opcode) {
+ void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
TraceCacheState(decoder);
SLOW_DCHECK(__ ValidateCacheState());
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
}
- void Block(Decoder* decoder, Control* block) {
+ void Block(FullDecoder* decoder, Control* block) {
block->label_state.stack_base = __ cache_state()->stack_height();
}
- void Loop(Decoder* decoder, Control* loop) {
+ void Loop(FullDecoder* decoder, Control* loop) {
loop->label_state.stack_base = __ cache_state()->stack_height();
// Before entering a loop, spill all locals to the stack, in order to free
@@ -471,9 +470,11 @@ class LiftoffCompiler {
StackCheck(decoder->position());
}
- void Try(Decoder* decoder, Control* block) { unsupported(decoder, "try"); }
+ void Try(FullDecoder* decoder, Control* block) {
+ unsupported(decoder, "try");
+ }
- void If(Decoder* decoder, const Value& cond, Control* if_block) {
+ void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
@@ -493,7 +494,7 @@ class LiftoffCompiler {
if_block->else_state->state.Split(*__ cache_state());
}
- void FallThruTo(Decoder* decoder, Control* c) {
+ void FallThruTo(FullDecoder* decoder, Control* c) {
if (c->end_merge.reached) {
__ MergeFullStackWith(c->label_state);
} else if (c->is_onearmed_if()) {
@@ -506,7 +507,7 @@ class LiftoffCompiler {
TraceCacheState(decoder);
}
- void PopControl(Decoder* decoder, Control* c) {
+ void PopControl(FullDecoder* decoder, Control* c) {
if (!c->is_loop() && c->end_merge.reached) {
__ cache_state()->Steal(c->label_state);
}
@@ -515,7 +516,7 @@ class LiftoffCompiler {
}
}
- void EndControl(Decoder* decoder, Control* c) {}
+ void EndControl(FullDecoder* decoder, Control* c) {}
enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
@@ -588,7 +589,7 @@ class LiftoffCompiler {
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
: __ GetUnusedRegister(dst_rc);
- DCHECK_EQ(can_trap, trap_position > 0);
+ DCHECK_EQ(!!can_trap, trap_position > 0);
Label* trap = can_trap ? AddOutOfLineTrap(
trap_position,
WasmCode::kThrowWasmTrapFloatUnrepresentable)
@@ -614,7 +615,7 @@ class LiftoffCompiler {
__ PushRegister(dst_type, dst);
}
- void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
+ void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
@@ -747,7 +748,7 @@ class LiftoffCompiler {
}
}
- void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
+ void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
@@ -994,11 +995,11 @@ class LiftoffCompiler {
#undef CASE_CCALL_BINOP
}
- void I32Const(Decoder* decoder, Value* result, int32_t value) {
+ void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
__ cache_state()->stack_state.emplace_back(kWasmI32, value);
}
- void I64Const(Decoder* decoder, Value* result, int64_t value) {
+ void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
// The {VarState} stores constant values as int32_t, thus we only store
// 64-bit constants in this field if it fits in an int32_t. Larger values
// cannot be used as immediate value anyway, so we can also just put them in
@@ -1013,30 +1014,30 @@ class LiftoffCompiler {
}
}
- void F32Const(Decoder* decoder, Value* result, float value) {
+ void F32Const(FullDecoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
}
- void F64Const(Decoder* decoder, Value* result, double value) {
+ void F64Const(FullDecoder* decoder, Value* result, double value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
}
- void RefNull(Decoder* decoder, Value* result) {
+ void RefNull(FullDecoder* decoder, Value* result) {
unsupported(decoder, "ref_null");
}
- void Drop(Decoder* decoder, const Value& value) {
+ void Drop(FullDecoder* decoder, const Value& value) {
auto& slot = __ cache_state()->stack_state.back();
// If the dropped slot contains a register, decrement it's use count.
if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
__ cache_state()->stack_state.pop_back();
}
- void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
+ void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
if (implicit) {
DCHECK_EQ(1, decoder->control_depth());
Control* func_block = decoder->control_at(0);
@@ -1060,7 +1061,7 @@ class LiftoffCompiler {
static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
- void GetLocal(Decoder* decoder, Value* result,
+ void GetLocal(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto& slot = __ cache_state()->stack_state[imm.index];
DCHECK_EQ(slot.type(), imm.type);
@@ -1123,12 +1124,12 @@ class LiftoffCompiler {
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
- void SetLocal(Decoder* decoder, const Value& value,
+ void SetLocal(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
SetLocal(imm.index, false);
}
- void TeeLocal(Decoder* decoder, const Value& value, Value* result,
+ void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
SetLocal(imm.index, true);
}
@@ -1138,7 +1139,6 @@ class LiftoffCompiler {
uint32_t* offset) {
LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg));
if (global->mutability && global->imported) {
- DCHECK(FLAG_experimental_wasm_mut_global);
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerLoadType);
__ Load(addr, addr.gp(), no_reg, global->index * sizeof(Address),
kPointerLoadType, pinned);
@@ -1150,7 +1150,7 @@ class LiftoffCompiler {
return addr;
}
- void GetGlobal(Decoder* decoder, Value* result,
+ void GetGlobal(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
@@ -1165,7 +1165,7 @@ class LiftoffCompiler {
__ PushRegister(global->type, value);
}
- void SetGlobal(Decoder* decoder, const Value& value,
+ void SetGlobal(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
@@ -1178,14 +1178,14 @@ class LiftoffCompiler {
__ Store(addr.gp(), no_reg, offset, reg, type, pinned);
}
- void Unreachable(Decoder* decoder) {
+ void Unreachable(FullDecoder* decoder) {
Label* unreachable_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
__ emit_jump(unreachable_label);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
- void Select(Decoder* decoder, const Value& cond, const Value& fval,
+ void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
@@ -1219,11 +1219,9 @@ class LiftoffCompiler {
__ jmp(target->label.get());
}
- void Br(Decoder* decoder, Control* target) {
- Br(target);
- }
+ void Br(FullDecoder* decoder, Control* target) { Br(target); }
- void BrIf(Decoder* decoder, const Value& cond, Control* target) {
+ void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
@@ -1234,7 +1232,7 @@ class LiftoffCompiler {
// Generate a branch table case, potentially reusing previously generated
// stack transfer code.
- void GenerateBrCase(Decoder* decoder, uint32_t br_depth,
+ void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
std::map<uint32_t, MovableLabel>& br_targets) {
MovableLabel& label = br_targets[br_depth];
if (label.get()->is_bound()) {
@@ -1247,7 +1245,7 @@ class LiftoffCompiler {
// Generate a branch table for input in [min, max).
// TODO(wasm): Generate a real branch table (like TF TableSwitch).
- void GenerateBrTable(Decoder* decoder, LiftoffRegister tmp,
+ void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
BranchTableIterator<validate>& table_iterator,
std::map<uint32_t, MovableLabel>& br_targets) {
@@ -1273,7 +1271,7 @@ class LiftoffCompiler {
br_targets);
}
- void BrTable(Decoder* decoder, const BranchTableImmediate<validate>& imm,
+ void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
const Value& key) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
@@ -1298,7 +1296,7 @@ class LiftoffCompiler {
DCHECK(!table_iterator.has_next());
}
- void Else(Decoder* decoder, Control* if_block) {
+ void Else(FullDecoder* decoder, Control* if_block) {
if (if_block->reachable()) __ emit_jump(if_block->label.get());
__ bind(if_block->else_state->label.get());
__ cache_state()->Steal(if_block->else_state->state);
@@ -1318,17 +1316,17 @@ class LiftoffCompiler {
// Returns true if the memory access is statically known to be out of bounds
// (a jump to the trap was generated then); return false otherwise.
- bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
- Register index, LiftoffRegList pinned) {
- const bool statically_oob =
- access_size > max_size_ || offset > max_size_ - access_size;
+ bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
+ uint32_t offset, Register index, LiftoffRegList pinned) {
+ const bool statically_oob = access_size > env_->max_memory_size ||
+ offset > env_->max_memory_size - access_size;
if (!statically_oob &&
(FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
return false;
}
- // TODO(eholk): This adds protected instruction information for the jump
+ // TODO(wasm): This adds protected instruction information for the jump
// instruction we are about to generate. It would be better to just not add
// protected instruction info when the pc is 0.
Label* trap_label = AddOutOfLineTrap(
@@ -1347,7 +1345,7 @@ class LiftoffCompiler {
DCHECK(!env_->use_trap_handler);
DCHECK(!FLAG_wasm_no_bounds_checks);
- uint32_t end_offset = offset + access_size - 1;
+ uint64_t end_offset = uint64_t{offset} + access_size - 1u;
// If the end offset is larger than the smallest memory, dynamically check
// the end offset against the actual memory size, which is not known at
@@ -1355,19 +1353,30 @@ class LiftoffCompiler {
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
- __ LoadConstant(end_offset_reg, WasmValue(end_offset));
- if (end_offset >= min_size_) {
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32,
- end_offset_reg.gp(), mem_size.gp());
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerLoadType);
+
+ if (kPointerSize == 8) {
+ __ LoadConstant(end_offset_reg, WasmValue(end_offset));
+ } else {
+ __ LoadConstant(end_offset_reg,
+ WasmValue(static_cast<uint32_t>(end_offset)));
+ }
+
+ if (end_offset >= env_->min_memory_size) {
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
+ LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
+ mem_size.gp());
}
// Just reuse the end_offset register for computing the effective size.
LiftoffRegister effective_size_reg = end_offset_reg;
- __ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
- end_offset_reg.gp());
+ __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
+ end_offset_reg.gp());
+
+ __ emit_i32_to_intptr(index, index);
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index,
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
+ LiftoffAssembler::kWasmIntPtr, index,
effective_size_reg.gp());
return false;
}
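Note on the bounds-check hunk above: the end offset is now computed in 64 bits and compared against the pointer-sized memory size, which avoids the 32-bit overflow that offset + access_size - 1 could previously produce. A host-side sketch of the check the generated code performs (plain C++, not the emitted instruction sequence; the first comparison is only emitted when end_offset can actually reach the statically known minimum memory size):

#include <cstdint>

// Returns true if the access is in bounds; the generated code jumps to the
// out-of-line trap label in the two 'return false' cases below.
bool AccessInBounds(uint64_t mem_size, uint32_t index, uint32_t offset,
                    uint32_t access_size) {
  uint64_t end_offset = uint64_t{offset} + access_size - 1;
  if (end_offset >= mem_size) return false;         // first kUnsignedGreaterEqual check
  uint64_t effective_size = mem_size - end_offset;  // emit_ptrsize_sub
  return uint64_t{index} < effective_size;          // second kUnsignedGreaterEqual check
}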
@@ -1385,27 +1394,27 @@ class LiftoffCompiler {
__ LoadConstant(address, WasmValue(offset));
__ emit_i32_add(address.gp(), address.gp(), index);
- // Get a register to hold the stack slot for wasm::MemoryTracingInfo.
+ // Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // Allocate stack slot for wasm::MemoryTracingInfo.
- __ AllocateStackSlot(info.gp(), sizeof(wasm::MemoryTracingInfo));
+ // Allocate stack slot for MemoryTracingInfo.
+ __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
- // Now store all information into the wasm::MemoryTracingInfo struct.
- __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, address),
- address, StoreType::kI32Store, pinned);
+ // Now store all information into the MemoryTracingInfo struct.
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
+ StoreType::kI32Store, pinned);
__ LoadConstant(address, WasmValue(is_store ? 1 : 0));
- __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, is_store),
- address, StoreType::kI32Store8, pinned);
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
+ StoreType::kI32Store8, pinned);
__ LoadConstant(address, WasmValue(static_cast<int>(rep)));
- __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, mem_rep),
- address, StoreType::kI32Store8, pinned);
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
+ StoreType::kI32Store8, pinned);
source_position_table_builder_.AddPosition(__ pc_offset(),
SourcePosition(position), false);
Register args[] = {info.gp()};
GenerateRuntimeCall(Runtime::kWasmTraceMemory, arraysize(args), args);
- __ DeallocateStackSlot(sizeof(wasm::MemoryTracingInfo));
+ __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
void GenerateRuntimeCall(Runtime::FunctionId runtime_function, int num_args,
@@ -1462,7 +1471,7 @@ class LiftoffCompiler {
return index;
}
- void LoadMem(Decoder* decoder, LoadType type,
+ void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
@@ -1495,7 +1504,7 @@ class LiftoffCompiler {
}
}
- void StoreMem(Decoder* decoder, StoreType type,
+ void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
@@ -1525,7 +1534,7 @@ class LiftoffCompiler {
}
}
- void CurrentMemoryPages(Decoder* decoder, Value* result) {
+ void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
LiftoffRegList pinned;
LiftoffRegister mem_size = pinned.set(__ GetUnusedRegister(kGpReg));
LiftoffRegister tmp_const =
@@ -1533,12 +1542,12 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
// TODO(clemensh): Shift by immediate directly.
__ LoadConstant(tmp_const,
- WasmValue(int32_t{WhichPowerOf2(wasm::kWasmPageSize)}));
+ WasmValue(int32_t{WhichPowerOf2(kWasmPageSize)}));
__ emit_i32_shr(mem_size.gp(), mem_size.gp(), tmp_const.gp(), pinned);
__ PushRegister(kWasmI32, mem_size);
}
- void GrowMemory(Decoder* decoder, const Value& value, Value* result_val) {
+ void GrowMemory(FullDecoder* decoder, const Value& value, Value* result_val) {
// Pop the input, then spill all cache registers to make the runtime call.
LiftoffRegList pinned;
LiftoffRegister input = pinned.set(__ PopToRegister());
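Note on the CurrentMemoryPages hunk above: the byte size loaded from the instance is converted to pages by shifting right by log2 of the wasm page size. The same computation in plain C++ (kWasmPageSize is 64 KiB in V8):

#include <cstdint>

constexpr uint32_t kWasmPageSize = 64 * 1024;

uint32_t MemorySizeInPages(uint32_t mem_size_bytes) {
  // Matches the emitted shift: WhichPowerOf2(kWasmPageSize) == 16.
  return mem_size_bytes >> 16;
}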
@@ -1559,7 +1568,7 @@ class LiftoffCompiler {
Register param_reg = descriptor.GetRegisterParameter(0);
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
- __ CallRuntimeStub(wasm::WasmCode::kWasmGrowMemory);
+ __ CallRuntimeStub(WasmCode::kWasmGrowMemory);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
@@ -1570,7 +1579,8 @@ class LiftoffCompiler {
__ PushRegister(kWasmI32, result);
}
- void CallDirect(Decoder* decoder, const CallFunctionImmediate<validate>& imm,
+ void CallDirect(FullDecoder* decoder,
+ const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
@@ -1634,7 +1644,7 @@ class LiftoffCompiler {
}
}
- void CallIndirect(Decoder* decoder, const Value& index_val,
+ void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1) {
@@ -1758,36 +1768,36 @@ class LiftoffCompiler {
__ FinishCall(imm.sig, call_descriptor);
}
- void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
}
- void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
+ void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
const Vector<Value> inputs, Value* result) {
unsupported(decoder, "simd");
}
- void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
+ void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate>& imm, const Value& input,
Value* result) {
unsupported(decoder, "simd");
}
- void Simd8x16ShuffleOp(Decoder* decoder,
+ void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
unsupported(decoder, "simd");
}
- void Throw(Decoder* decoder, const ExceptionIndexImmediate<validate>&,
+ void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
Control* block, const Vector<Value>& args) {
unsupported(decoder, "throw");
}
- void CatchException(Decoder* decoder,
+ void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> caught_values) {
unsupported(decoder, "catch");
}
- void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
unsupported(decoder, "atomicop");
}
@@ -1796,9 +1806,6 @@ class LiftoffCompiler {
LiftoffAssembler asm_;
compiler::CallDescriptor* const descriptor_;
ModuleEnv* const env_;
- // {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
- const uint64_t min_size_;
- const uint64_t max_size_;
bool ok_ = true;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
@@ -1812,7 +1819,7 @@ class LiftoffCompiler {
// patch the actually needed stack size in the end.
uint32_t pc_offset_stack_frame_construction_ = 0;
- void TraceCacheState(Decoder* decoder) const {
+ void TraceCacheState(FullDecoder* decoder) const {
#ifdef DEBUG
if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
StdoutStream os;
@@ -1832,7 +1839,7 @@ class LiftoffCompiler {
} // namespace
-bool LiftoffCompilationUnit::ExecuteCompilation() {
+bool LiftoffCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteLiftoffCompilation");
base::ElapsedTimer compile_timer;
@@ -1841,18 +1848,18 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
}
Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
- const wasm::WasmModule* module =
+ const WasmModule* module =
wasm_unit_->env_ ? wasm_unit_->env_->module : nullptr;
auto call_descriptor =
compiler::GetWasmCallDescriptor(&zone, wasm_unit_->func_body_.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, wasm_unit_->counters_->liftoff_compile_time());
- wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
- decoder(&zone, module, wasm_unit_->func_body_, call_descriptor,
- wasm_unit_->env_, &zone);
+ WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+ &zone, module, wasm_unit_->native_module_->enabled_features(), detected,
+ wasm_unit_->func_body_, call_descriptor, wasm_unit_->env_, &zone);
decoder.Decode();
liftoff_compile_time_scope.reset();
- wasm::LiftoffCompiler* compiler = &decoder.interface();
+ LiftoffCompiler* compiler = &decoder.interface();
if (decoder.failed()) return false; // validation error
if (!compiler->ok()) {
// Liftoff compilation failed.
@@ -1883,13 +1890,13 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
code_ = wasm_unit_->native_module_->AddCode(
wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
0, std::move(protected_instructions), std::move(source_positions),
- wasm::WasmCode::kLiftoff);
+ WasmCode::kLiftoff);
wasm_unit_->native_module_->PublishCode(code_);
return true;
}
-wasm::WasmCode* LiftoffCompilationUnit::FinishCompilation(wasm::ErrorThrower*) {
+WasmCode* LiftoffCompilationUnit::FinishCompilation(ErrorThrower*) {
return code_;
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index ce828c459b..c7696cbb56 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -11,6 +11,7 @@ namespace v8 {
namespace internal {
namespace wasm {
+struct WasmFeatures;
class ErrorThrower;
class WasmCode;
class WasmCompilationUnit;
@@ -20,8 +21,8 @@ class LiftoffCompilationUnit final {
explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
: wasm_unit_(wasm_unit) {}
- bool ExecuteCompilation();
- wasm::WasmCode* FinishCompilation(wasm::ErrorThrower*);
+ bool ExecuteCompilation(WasmFeatures* detected);
+ WasmCode* FinishCompilation(ErrorThrower*);
private:
WasmCompilationUnit* const wasm_unit_;
diff --git a/deps/v8/src/wasm/baseline/mips/OWNERS b/deps/v8/src/wasm/baseline/mips/OWNERS
index cf2df277c9..c653ce404d 100644
--- a/deps/v8/src/wasm/baseline/mips/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index d2ea65211b..bb18994618 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -98,6 +98,135 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
}
}
+#if defined(V8_TARGET_BIG_ENDIAN)
+inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
+ LoadType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = dst;
+ switch (type.value()) {
+ case LoadType::kI64Load8U:
+ case LoadType::kI64Load8S:
+ // Swap low and high registers.
+ assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+ assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
+ assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
+ V8_FALLTHROUGH;
+ case LoadType::kI32Load8U:
+ case LoadType::kI32Load8S:
+ // No need to change endianness for byte size.
+ return;
+ case LoadType::kF32Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI32Load:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case LoadType::kI32Load16S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case LoadType::kI32Load16U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case LoadType::kF64Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load:
+ assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
+ break;
+ case LoadType::kI64Load16U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
+ assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
+ break;
+ case LoadType::kI64Load16S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2);
+ assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
+ break;
+ case LoadType::kI64Load32U:
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
+ break;
+ case LoadType::kI64Load32S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case LoadType::kF32Load:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
+ break;
+ case LoadType::kF64Load:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = src;
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ // Swap low and high registers.
+ assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+ assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
+ assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store8:
+ // No need to change endianness for byte size.
+ return;
+ case StoreType::kF32Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ case StoreType::kI32Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case StoreType::kF64Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI64Store:
+ case StoreType::kI64Store32:
+ case StoreType::kI64Store16:
+ assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case StoreType::kF32Store:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
+ break;
+ case StoreType::kF64Store:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+#endif // V8_TARGET_BIG_ENDIAN
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -248,7 +377,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_load_mem) {
- ChangeEndiannessLoad(dst, type, pinned);
+ liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
}
#endif
}
@@ -273,7 +402,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
src = tmp;
pinned.set(tmp);
- ChangeEndiannessStore(src, type, pinned);
+ liftoff::ChangeEndiannessStore(this, src, type, pinned);
}
#endif
@@ -316,134 +445,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- bool is_float = false;
- LiftoffRegister tmp = dst;
- switch (type.value()) {
- case LoadType::kI64Load8U:
- case LoadType::kI64Load8S:
- // Swap low and high registers.
- TurboAssembler::Move(kScratchReg, tmp.low_gp());
- TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
- TurboAssembler::Move(tmp.high_gp(), kScratchReg);
- V8_FALLTHROUGH;
- case LoadType::kI32Load8U:
- case LoadType::kI32Load8S:
- // No need to change endianness for byte size.
- return;
- case LoadType::kF32Load:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
- V8_FALLTHROUGH;
- case LoadType::kI32Load:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- break;
- case LoadType::kI32Load16S:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
- break;
- case LoadType::kI32Load16U:
- TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
- break;
- case LoadType::kF64Load:
- is_float = true;
- tmp = GetUnusedRegister(kGpRegPair, pinned);
- emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
- V8_FALLTHROUGH;
- case LoadType::kI64Load:
- TurboAssembler::Move(kScratchReg, tmp.low_gp());
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
- break;
- case LoadType::kI64Load16U:
- TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
- TurboAssembler::Move(tmp.high_gp(), zero_reg);
- break;
- case LoadType::kI64Load16S:
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2);
- sra(tmp.high_gp(), tmp.high_gp(), 31);
- break;
- case LoadType::kI64Load32U:
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- TurboAssembler::Move(tmp.high_gp(), zero_reg);
- break;
- case LoadType::kI64Load32S:
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- sra(tmp.high_gp(), tmp.high_gp(), 31);
- break;
- default:
- UNREACHABLE();
- }
-
- if (is_float) {
- switch (type.value()) {
- case LoadType::kF32Load:
- emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
- break;
- case LoadType::kF64Load:
- emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- bool is_float = false;
- LiftoffRegister tmp = src;
- switch (type.value()) {
- case StoreType::kI64Store8:
- // Swap low and high registers.
- TurboAssembler::Move(kScratchReg, tmp.low_gp());
- TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
- TurboAssembler::Move(tmp.high_gp(), kScratchReg);
- V8_FALLTHROUGH;
- case StoreType::kI32Store8:
- // No need to change endianness for byte size.
- return;
- case StoreType::kF32Store:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
- V8_FALLTHROUGH;
- case StoreType::kI32Store:
- case StoreType::kI32Store16:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- break;
- case StoreType::kF64Store:
- is_float = true;
- tmp = GetUnusedRegister(kGpRegPair, pinned);
- emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
- V8_FALLTHROUGH;
- case StoreType::kI64Store:
- case StoreType::kI64Store32:
- case StoreType::kI64Store16:
- TurboAssembler::Move(kScratchReg, tmp.low_gp());
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
- break;
- default:
- UNREACHABLE();
- }
-
- if (is_float) {
- switch (type.value()) {
- case StoreType::kF32Store:
- emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
- break;
- case StoreType::kF64Store:
- emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -745,7 +746,7 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+ // This is a nop on mips32.
}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
diff --git a/deps/v8/src/wasm/baseline/mips64/OWNERS b/deps/v8/src/wasm/baseline/mips64/OWNERS
index cf2df277c9..c653ce404d 100644
--- a/deps/v8/src/wasm/baseline/mips64/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index fdbbe0f7d4..4bbfc18251 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -88,6 +88,115 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
}
}
+#if defined(V8_TARGET_BIG_ENDIAN)
+inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
+ LoadType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = dst;
+ switch (type.value()) {
+ case LoadType::kI64Load8U:
+ case LoadType::kI64Load8S:
+ case LoadType::kI32Load8U:
+ case LoadType::kI32Load8S:
+ // No need to change endianness for byte size.
+ return;
+ case LoadType::kF32Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load32U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
+ assm->dsrl32(tmp.gp(), tmp.gp(), 0);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ assm->dsra32(tmp.gp(), tmp.gp(), 0);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ assm->dsra32(tmp.gp(), tmp.gp(), 0);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+ assm->dsrl32(tmp.gp(), tmp.gp(), 0);
+ break;
+ case LoadType::kF64Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case LoadType::kF32Load:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
+ break;
+ case LoadType::kF64Load:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = src;
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ // No need to change endianness for byte size.
+ return;
+ case StoreType::kF32Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ case StoreType::kI32Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case StoreType::kF64Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI64Store:
+ case StoreType::kI64Store32:
+ case StoreType::kI64Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case StoreType::kF32Store:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
+ break;
+ case StoreType::kF64Store:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+#endif // V8_TARGET_BIG_ENDIAN
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -212,7 +321,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_load_mem) {
- ChangeEndiannessLoad(dst, type, pinned);
+ liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
}
#endif
}
@@ -237,7 +346,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
src = tmp;
pinned.set(tmp);
- ChangeEndiannessStore(src, type, pinned);
+ liftoff::ChangeEndiannessStore(this, src, type, pinned);
}
#endif
@@ -269,114 +378,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- bool is_float = false;
- LiftoffRegister tmp = dst;
- switch (type.value()) {
- case LoadType::kI64Load8U:
- case LoadType::kI64Load8S:
- case LoadType::kI32Load8U:
- case LoadType::kI32Load8S:
- // No need to change endianness for byte size.
- return;
- case LoadType::kF32Load:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
- V8_FALLTHROUGH;
- case LoadType::kI64Load32U:
- TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
- dsrl32(tmp.gp(), tmp.gp(), 0);
- break;
- case LoadType::kI32Load:
- case LoadType::kI64Load32S:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- dsra32(tmp.gp(), tmp.gp(), 0);
- break;
- case LoadType::kI32Load16S:
- case LoadType::kI64Load16S:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
- dsra32(tmp.gp(), tmp.gp(), 0);
- break;
- case LoadType::kI32Load16U:
- case LoadType::kI64Load16U:
- TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
- dsrl32(tmp.gp(), tmp.gp(), 0);
- break;
- case LoadType::kF64Load:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
- V8_FALLTHROUGH;
- case LoadType::kI64Load:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
- break;
- default:
- UNREACHABLE();
- }
-
- if (is_float) {
- switch (type.value()) {
- case LoadType::kF32Load:
- emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
- break;
- case LoadType::kF64Load:
- emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- bool is_float = false;
- LiftoffRegister tmp = src;
- switch (type.value()) {
- case StoreType::kI64Store8:
- case StoreType::kI32Store8:
- // No need to change endianness for byte size.
- return;
- case StoreType::kF32Store:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
- V8_FALLTHROUGH;
- case StoreType::kI32Store:
- case StoreType::kI32Store16:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- break;
- case StoreType::kF64Store:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
- V8_FALLTHROUGH;
- case StoreType::kI64Store:
- case StoreType::kI64Store32:
- case StoreType::kI64Store16:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
- break;
- default:
- UNREACHABLE();
- }
-
- if (is_float) {
- switch (type.value()) {
- case StoreType::kF32Store:
- emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
- break;
- case StoreType::kF64Store:
- emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index a4bd20622e..9164db2188 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
BAILOUT("Store");
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessStore");
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -258,7 +247,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+#ifdef V8_TARGET_ARCH_PPC64
+ BAILOUT("emit_i32_to_intptr");
+#else
+// This is a nop on ppc32.
+#endif
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index ee142c7be4..e39dd90166 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
BAILOUT("Store");
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessStore");
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -258,7 +247,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+#ifdef V8_TARGET_ARCH_S390X
+ BAILOUT("emit_i32_to_intptr");
+#else
+// This is a nop on s390.
+#endif
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index b8d08c56aa..f6a8e09b4e 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -23,6 +23,17 @@ namespace wasm {
namespace liftoff {
+static_assert((kLiftoffAssemblerGpCacheRegs &
+ Register::ListOf<kScratchRegister>()) == 0,
+ "scratch register must not be used as cache registers");
+
+constexpr DoubleRegister kScratchDoubleReg2 = xmm14;
+static_assert(kScratchDoubleReg != kScratchDoubleReg2, "collision");
+static_assert(
+ (kLiftoffAssemblerFpCacheRegs &
+ DoubleRegister::ListOf<kScratchDoubleReg, kScratchDoubleReg2>()) == 0,
+ "scratch registers must not be used as cache registers");
+
// rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack
// slot is located at rbp-24.
constexpr int32_t kConstantStackSpace = 16;
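Note on the static_asserts above: the x64 hunks below start using kScratchRegister and a second scratch double register directly, which is only sound because neither can ever be handed out as a Liftoff cache register. The asserts state that invariant as a compile-time bit-set intersection; a stand-alone sketch with illustrative register codes (the real lists live in liftoff-assembler-defs.h):

#include <cstdint>

constexpr uint32_t RegBit(int code) { return uint32_t{1} << code; }

// Illustrative sets only: rax=0, rcx=1, rbx=3 as cache registers, r10 as scratch.
constexpr uint32_t kCacheRegs = RegBit(0) | RegBit(1) | RegBit(3);
constexpr uint32_t kScratchRegs = RegBit(10);

static_assert((kCacheRegs & kScratchRegs) == 0,
              "scratch registers must never double as cache registers");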
@@ -38,13 +49,18 @@ inline Operand GetStackSlot(uint32_t index) {
inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
- uint32_t offset_imm, LiftoffRegList pinned) {
- // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
- // immediate value (in 31 bits, interpreted as signed value).
- // If the offset is bigger, we always trap and this code is not reached.
- DCHECK(is_uint31(offset_imm));
- if (offset == no_reg) return Operand(addr, offset_imm);
- return Operand(addr, offset, times_1, offset_imm);
+ uint32_t offset_imm) {
+ if (is_uint31(offset_imm)) {
+ if (offset == no_reg) return Operand(addr, offset_imm);
+ return Operand(addr, offset, times_1, offset_imm);
+ }
+ // Offset immediate does not fit in 31 bits.
+ Register scratch = kScratchRegister;
+ assm->movl(scratch, Immediate(offset_imm));
+ if (offset != no_reg) {
+ assm->addq(scratch, offset);
+ }
+ return Operand(addr, scratch, times_1, 0);
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
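Note on the GetMemOp change above: when the immediate no longer fits in a signed 31-bit displacement, the offset is materialized in kScratchRegister (movl zero-extends it to 64 bits), the optional index register is added on top, and the access is encoded as [addr + scratch]. The effective address that fallback path computes, written as plain C++:

#include <cstdint>

uint64_t FallbackEffectiveAddress(uint64_t addr, uint64_t index_or_zero,
                                  uint32_t offset_imm) {
  uint64_t scratch = uint64_t{offset_imm};  // movl zero-extends into the scratch register
  scratch += index_or_zero;                 // addq, only emitted when an index register is used
  return addr + scratch;                    // Operand(addr, scratch, times_1, 0)
}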
@@ -192,8 +208,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
- Operand src_op =
- liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, pinned);
+ Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
case LoadType::kI32Load8U:
@@ -244,8 +259,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
- Operand dst_op =
- liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, pinned);
+ Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset();
switch (type.value()) {
case StoreType::kI32Store8:
@@ -274,17 +288,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -296,9 +299,8 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register(kGpReg)) {
- LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index, type);
- Spill(dst_index, reg, type);
+ Fill(LiftoffRegister{kScratchRegister}, src_index, type);
+ Spill(dst_index, LiftoffRegister{kScratchRegister}, type);
} else {
pushq(liftoff::GetStackSlot(src_index));
popq(liftoff::GetStackSlot(dst_index));
@@ -465,10 +467,8 @@ void EmitIntDivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
// unconditionally, as the cache state will also be modified unconditionally.
liftoff::SpillRegisters(assm, rdx, rax);
if (rhs == rax || rhs == rdx) {
- LiftoffRegList unavailable = LiftoffRegList::ForRegs(rax, rdx, lhs);
- Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp();
- iop(mov, tmp, rhs);
- rhs = tmp;
+ iop(mov, kScratchRegister, rhs);
+ rhs = kScratchRegister;
}
// Check for division by zero.
@@ -1098,10 +1098,8 @@ inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
}
CpuFeatureScope feature(assm, SSE4_1);
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
- DoubleRegister rounded =
- pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();
- DoubleRegister converted_back = assm->GetUnusedRegister(kFpReg, pinned).fp();
+ DoubleRegister rounded = kScratchDoubleReg;
+ DoubleRegister converted_back = kScratchDoubleReg2;
if (std::is_same<double, src_type>::value) { // f64
assm->Roundsd(rounded, src, kRoundToZero);
@@ -1380,14 +1378,8 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
}
DCHECK_LE(arg_bytes, stack_bytes);
-// Pass a pointer to the buffer with the arguments to the C function.
-// On win64, the first argument is in {rcx}, otherwise it is {rdi}.
-#ifdef _WIN64
- constexpr Register kFirstArgReg = rcx;
-#else
- constexpr Register kFirstArgReg = rdi;
-#endif
- movp(kFirstArgReg, rsp);
+ // Pass a pointer to the buffer with the arguments to the C function.
+ movp(arg_reg_1, rsp);
constexpr int kNumCCallArgs = 1;