path: root/deps/v8/src/wasm/wasm-interpreter.cc
Diffstat (limited to 'deps/v8/src/wasm/wasm-interpreter.cc')
-rw-r--r--    deps/v8/src/wasm/wasm-interpreter.cc    606
1 file changed, 500 insertions, 106 deletions
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index a692833e5e..581277cbab 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -969,16 +969,14 @@ class CodeMap {
InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
uint32_t saved_index;
USE(saved_index);
- if (table_index >= module_->function_tables.size()) return nullptr;
+ if (table_index >= module_->tables.size()) return nullptr;
// Mask table index for SSCA mitigation.
saved_index = table_index;
- table_index &=
- static_cast<int32_t>((table_index - module_->function_tables.size()) &
- ~static_cast<int32_t>(table_index)) >>
- 31;
+ table_index &= static_cast<int32_t>((table_index - module_->tables.size()) &
+ ~static_cast<int32_t>(table_index)) >>
+ 31;
DCHECK_EQ(table_index, saved_index);
- const WasmIndirectFunctionTable* table =
- &module_->function_tables[table_index];
+ const WasmTable* table = &module_->tables[table_index];
if (entry_index >= table->values.size()) return nullptr;
// Mask entry_index for SSCA mitigation.
saved_index = entry_index;
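
Note: the arithmetic above is a branchless clamp. Once the bounds check has passed, the index is ANDed with an all-ones mask; a (speculatively) out-of-bounds index is instead masked to zero, so a mispredicted branch cannot read outside the table. A minimal standalone sketch of the idea, with invented names:

#include <cstdint>
#include <cstdio>

// All-ones mask when index < size (and the top bit of index is clear),
// zero otherwise. Relies on arithmetic right shift of a negative value,
// just as the interpreter code does.
uint32_t ClampIndex(uint32_t index, uint32_t size) {
  uint32_t mask = static_cast<uint32_t>(
      static_cast<int32_t>((index - size) & ~index) >> 31);
  return index & mask;
}

int main() {
  printf("%u\n", ClampIndex(3, 10));   // 3  (in bounds, unchanged)
  printf("%u\n", ClampIndex(12, 10));  // 0  (out of bounds, masked to zero)
}
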
@@ -1081,7 +1079,6 @@ class ThreadImpl {
Handle<WasmInstanceObject> instance_object)
: codemap_(codemap),
instance_object_(instance_object),
- zone_(zone),
frames_(zone),
activations_(zone) {}
@@ -1123,7 +1120,7 @@ class ThreadImpl {
void Reset() {
TRACE("----- RESET -----\n");
- sp_ = stack_start_;
+ sp_ = stack_.get();
frames_.clear();
state_ = WasmInterpreter::STOPPED;
trap_reason_ = kTrapCount;
@@ -1146,12 +1143,12 @@ class ThreadImpl {
WasmValue GetStackValue(sp_t index) {
DCHECK_GT(StackHeight(), index);
- return stack_start_[index];
+ return stack_[index];
}
void SetStackValue(sp_t index, WasmValue value) {
DCHECK_GT(StackHeight(), index);
- stack_start_[index] = value;
+ stack_[index] = value;
}
TrapReason GetTrapReason() { return trap_reason_; }
@@ -1190,7 +1187,7 @@ class ThreadImpl {
// first).
DCHECK_EQ(activations_.back().fp, frames_.size());
DCHECK_LE(activations_.back().sp, StackHeight());
- sp_ = stack_start_ + activations_.back().sp;
+ sp_ = stack_.get() + activations_.back().sp;
activations_.pop_back();
}
@@ -1212,7 +1209,7 @@ class ThreadImpl {
DCHECK_LE(act.fp, frames_.size());
frames_.resize(act.fp);
DCHECK_LE(act.sp, StackHeight());
- sp_ = stack_start_ + act.sp;
+ sp_ = stack_.get() + act.sp;
state_ = WasmInterpreter::STOPPED;
return WasmInterpreter::Thread::UNWOUND;
}
@@ -1241,8 +1238,7 @@ class ThreadImpl {
CodeMap* codemap_;
Handle<WasmInstanceObject> instance_object_;
- Zone* zone_;
- WasmValue* stack_start_ = nullptr; // Start of allocated stack space.
+ std::unique_ptr<WasmValue[]> stack_;
WasmValue* stack_limit_ = nullptr; // End of allocated stack space.
WasmValue* sp_ = nullptr; // Current stack pointer.
ZoneVector<Frame> frames_;
@@ -1344,7 +1340,7 @@ class ThreadImpl {
bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
size_t arity) {
DCHECK_GT(frames_.size(), 0);
- WasmValue* sp_dest = stack_start_ + frames_.back().sp;
+ WasmValue* sp_dest = stack_.get() + frames_.back().sp;
frames_.pop_back();
if (frames_.size() == current_activation().fp) {
// A return from the last frame terminates the execution.
@@ -1458,14 +1454,14 @@ class ThreadImpl {
return true;
}
- template <typename type>
+ template <typename type, typename op_type>
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
Address& address, pc_t pc, int& len,
type* val = nullptr, type* val2 = nullptr) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
sizeof(type));
- if (val2) *val2 = Pop().to<uint32_t>();
- if (val) *val = Pop().to<uint32_t>();
+ if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
+ if (val) *val = static_cast<type>(Pop().to<op_type>());
uint32_t index = Pop().to<uint32_t>();
address = BoundsCheckMem<type>(imm.offset, index);
if (!address) {
@@ -1515,84 +1511,133 @@ class ThreadImpl {
InterpreterCode* code, pc_t pc, int& len) {
WasmValue result;
switch (opcode) {
-#define ATOMIC_BINOP_CASE(name, type, operation) \
+// Disabling on Mips as 32 bit atomics are not correctly laid out for load/store
+// on big endian and 64 bit atomics fail to compile.
+#if !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
+#define ATOMIC_BINOP_CASE(name, type, op_type, operation) \
case kExpr##name: { \
type val; \
Address addr; \
- if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
+ &val)) { \
return false; \
} \
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
"Size mismatch for types std::atomic<" #type \
">, and " #type); \
- result = WasmValue( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+ result = WasmValue(static_cast<op_type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val))); \
Push(result); \
break; \
}
- ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
- ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
- ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t,
+ atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
+ atomic_exchange);
+ ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t,
+ atomic_exchange);
+ ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
+ atomic_exchange);
+ ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
+ atomic_exchange);
#undef ATOMIC_BINOP_CASE
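
Note: the new op_type template parameter exists because sub-word atomics (for example I64AtomicAdd8U) operate on a narrow memory cell while exchanging a wider value with the operand stack. A rough, hypothetical illustration of that widening (not the interpreter's code):

#include <atomic>
#include <cstdint>
#include <cstdio>

// type is the width of the memory cell, op_type the width of the wasm value.
template <typename type, typename op_type>
op_type AtomicFetchAdd(void* addr, op_type operand) {
  static_assert(sizeof(std::atomic<type>) == sizeof(type), "layout assumption");
  type val = static_cast<type>(operand);  // truncate to the memory width
  type old =
      std::atomic_fetch_add(reinterpret_cast<std::atomic<type>*>(addr), val);
  return static_cast<op_type>(old);  // zero-extend back to the stack width
}

int main() {
  uint8_t cell = 250;
  uint64_t old = AtomicFetchAdd<uint8_t, uint64_t>(&cell, 10);
  printf("old=%llu new=%d\n", static_cast<unsigned long long>(old), cell);
  // old=250 new=4: the 8-bit variant wraps modulo 2^8, but both operands
  // travel across the operand stack as uint64_t.
}
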
-#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type) \
- case kExpr##name: { \
- type val; \
- type val2; \
- Address addr; \
- if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val, \
- &val2)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- std::atomic_compare_exchange_strong( \
- reinterpret_cast<std::atomic<type>*>(addr), &val, val2); \
- Push(WasmValue(val)); \
- break; \
- }
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t);
+#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
+ case kExpr##name: { \
+ type val; \
+ type val2; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
+ &val, &val2)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ std::atomic_compare_exchange_strong( \
+ reinterpret_cast<std::atomic<type>*>(addr), &val, val2); \
+ Push(WasmValue(static_cast<op_type>(val))); \
+ break; \
+ }
+ ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
+ uint32_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
+ uint32_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
+ uint32_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
+ uint64_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
+ uint64_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
+ uint64_t);
+ ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
+ uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, operation) \
+#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
case kExpr##name: { \
Address addr; \
- if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len)) { \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \
return false; \
} \
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
"Size mismatch for types std::atomic<" #type \
">, and " #type); \
- result = \
- WasmValue(std::operation(reinterpret_cast<std::atomic<type>*>(addr))); \
+ result = WasmValue(static_cast<op_type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr)))); \
Push(result); \
break; \
}
- ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, atomic_load);
- ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, atomic_load);
- ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
+ ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
+ ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
+ ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
+ ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
#undef ATOMIC_LOAD_CASE
-#define ATOMIC_STORE_CASE(name, type, operation) \
+#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
case kExpr##name: { \
type val; \
Address addr; \
- if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
+ &val)) { \
return false; \
} \
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
@@ -1601,10 +1646,15 @@ class ThreadImpl {
std::operation(reinterpret_cast<std::atomic<type>*>(addr), val); \
break; \
}
- ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, atomic_store);
- ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, atomic_store);
- ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
+ ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
+ ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
+ ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
+ ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
+#endif // !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
default:
UNREACHABLE();
return false;
@@ -1653,6 +1703,331 @@ class ThreadImpl {
EXTRACT_LANE_CASE(I16x8, i16x8)
EXTRACT_LANE_CASE(I8x16, i8x16)
#undef EXTRACT_LANE_CASE
+#define BINOP_CASE(op, name, stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s1.val[i]; \
+ auto b = s2.val[i]; \
+ res.val[i] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
+ BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
+ BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
+ BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
+ BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
+ BINOP_CASE(I32x4Add, i32x4, int4, 4, a + b)
+ BINOP_CASE(I32x4Sub, i32x4, int4, 4, a - b)
+ BINOP_CASE(I32x4Mul, i32x4, int4, 4, a * b)
+ BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
+ BINOP_CASE(I32x4MinU, i32x4, int4, 4,
+ static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
+ BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
+ BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
+ static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
+ BINOP_CASE(S128And, i32x4, int4, 4, a & b)
+ BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
+ BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
+ BINOP_CASE(I16x8Add, i16x8, int8, 8, a + b)
+ BINOP_CASE(I16x8Sub, i16x8, int8, 8, a - b)
+ BINOP_CASE(I16x8Mul, i16x8, int8, 8, a * b)
+ BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
+ BINOP_CASE(I16x8MinU, i16x8, int8, 8,
+ static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
+ BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
+ BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
+ static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
+ BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
+ BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
+ BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
+ BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
+ BINOP_CASE(I8x16Add, i8x16, int16, 16, a + b)
+ BINOP_CASE(I8x16Sub, i8x16, int16, 16, a - b)
+ BINOP_CASE(I8x16Mul, i8x16, int16, 16, a * b)
+ BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
+ BINOP_CASE(I8x16MinU, i8x16, int16, 16,
+ static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
+ BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
+ BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
+ static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
+ BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
+ BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
+ SaturateAdd<uint8_t>(a, b))
+ BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
+ BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
+ SaturateSub<uint8_t>(a, b))
+#undef BINOP_CASE
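
Note: SaturateAdd/SaturateSub above clamp to the lane type's range instead of wrapping. The real helpers live elsewhere in V8; this stand-in only sketches the clamping idea:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

template <typename T>
T SaturateAdd(T a, T b) {
  int64_t sum = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  sum = std::max<int64_t>(
      std::numeric_limits<T>::min(),
      std::min<int64_t>(std::numeric_limits<T>::max(), sum));
  return static_cast<T>(sum);
}

int main() {
  printf("%d\n", SaturateAdd<int16_t>(30000, 10000));  // 32767, clamped at max
  printf("%d\n", SaturateAdd<uint8_t>(200, 100));      // 255, clamped at max
  printf("%d\n", SaturateAdd<int8_t>(-100, -100));     // -128, clamped at min
}
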
+#define UNOP_CASE(op, name, stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v = Pop(); \
+ stype s = v.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s.val[i]; \
+ res.val[i] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
+ UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
+ UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, 1.0f / a)
+ UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, 1.0f / std::sqrt(a))
+ UNOP_CASE(I32x4Neg, i32x4, int4, 4, -a)
+ UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
+ UNOP_CASE(I16x8Neg, i16x8, int8, 8, -a)
+ UNOP_CASE(I8x16Neg, i8x16, int16, 16, -a)
+#undef UNOP_CASE
+#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ out_stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s1.val[i]; \
+ auto b = s2.val[i]; \
+ res.val[i] = expr ? -1 : 0; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
+ CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
+ CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
+ CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
+ CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
+ CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
+ CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
+ CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
+ CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
+ CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
+ CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
+ CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
+ CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
+ static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
+ CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
+ static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
+ CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
+ static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
+ CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
+ static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
+ CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
+ CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
+ CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
+ CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
+ CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
+ CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
+ CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
+ static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
+ CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
+ static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
+ CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
+ static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
+ CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
+ static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
+ CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
+ CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
+ CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
+ CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
+ CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
+ CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
+ CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
+ static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
+ CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
+ static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
+ CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
+ static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
+ CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
+ static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
+#undef CMPOP_CASE
+#define REPLACE_LANE_CASE(format, name, stype, ctype) \
+ case kExpr##format##ReplaceLane: { \
+ SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
+ ++len; \
+ WasmValue new_val = Pop(); \
+ WasmValue simd_val = Pop(); \
+ stype s = simd_val.to_s128().to_##name(); \
+ s.val[imm.lane] = new_val.to<ctype>(); \
+ Push(WasmValue(Simd128(s))); \
+ return true; \
+ }
+ REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
+ REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
+ REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
+ REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
+#undef REPLACE_LANE_CASE
+ case kExprS128LoadMem:
+ return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
+ MachineRepresentation::kSimd128);
+ case kExprS128StoreMem:
+ return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
+ MachineRepresentation::kSimd128);
+#define SHIFT_CASE(op, name, stype, count, expr) \
+ case kExpr##op: { \
+ SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
+ ++len; \
+ WasmValue v = Pop(); \
+ stype s = v.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s.val[i]; \
+ res.val[i] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ SHIFT_CASE(I32x4Shl, i32x4, int4, 4, a << imm.shift)
+ SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
+ SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
+ static_cast<uint32_t>(a) >> imm.shift)
+ SHIFT_CASE(I16x8Shl, i16x8, int8, 8, a << imm.shift)
+ SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
+ SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
+ static_cast<uint16_t>(a) >> imm.shift)
+ SHIFT_CASE(I8x16Shl, i8x16, int16, 16, a << imm.shift)
+ SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
+ SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
+ static_cast<uint8_t>(a) >> imm.shift)
+#undef SHIFT_CASE
+#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
+ expr) \
+ case kExpr##op: { \
+ WasmValue v = Pop(); \
+ src_type s = v.to_s128().to_##name(); \
+ dst_type res; \
+ for (size_t i = 0; i < count; ++i) { \
+ ctype a = s.val[start_index + i]; \
+ res.val[i] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
+ static_cast<float>(a))
+ CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
+ static_cast<float>(a))
+ CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0, double,
+ std::isnan(a) ? 0
+ : a<kMinInt ? kMinInt : a> kMaxInt
+ ? kMaxInt
+ : static_cast<int32_t>(a))
+ CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0, double,
+ std::isnan(a)
+ ? 0
+ : a<0 ? 0 : a> kMaxUInt32 ? kMaxUInt32
+ : static_cast<uint32_t>(a))
+ CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
+ a)
+ CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
+ a)
+ CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
+ CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t,
+ a)
+ CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t,
+ a)
+ CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
+ a)
+ CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
+ CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
+ a)
+#undef CONVERT_CASE
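
Note: the clang-formatted ternaries in the float-to-int conversions above are hard to read; per lane they amount to a saturating conversion with NaN mapped to zero. A readable scalar restatement for the I32x4SConvertF32x4 case (sketch only):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

int32_t SatConvert(double a) {
  if (std::isnan(a)) return 0;
  if (a < std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::min();
  if (a > std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(a);  // in range: plain truncation toward zero
}

int main() {
  printf("%d %d %d %d\n", SatConvert(1.9), SatConvert(-3e12), SatConvert(5e12),
         SatConvert(std::nan("")));
  // 1 -2147483648 2147483647 0
}
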
+#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype, \
+ is_unsigned) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ src_type s1 = v1.to_s128().to_##name(); \
+ src_type s2 = v2.to_s128().to_##name(); \
+ dst_type res; \
+ int64_t min = std::numeric_limits<ctype>::min(); \
+ int64_t max = std::numeric_limits<ctype>::max(); \
+ for (size_t i = 0; i < count; ++i) { \
+ int32_t v = i < count / 2 ? s1.val[i] : s2.val[i - count / 2]; \
+ int64_t a = is_unsigned ? static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \
+ res.val[i] = static_cast<dst_ctype>(std::max(min, std::min(max, a))); \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t,
+ false)
+ PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t,
+ true)
+ PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t,
+ false)
+ PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t,
+ true)
+#undef PACK_CASE
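
Note: the packing cases narrow lanes from both inputs with saturation into the destination lane type. A small sketch of that clamp for the I8x16UConvertI16x8 direction (illustrative values, not the interpreter's code):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  int16_t lanes[4] = {-5, 7, 300, 100};  // a few source lanes
  uint8_t packed[4];
  for (int i = 0; i < 4; ++i) {
    int64_t a = lanes[i];
    int64_t lo = 0, hi = 255;  // numeric_limits<uint8_t>::min()/max()
    packed[i] = static_cast<uint8_t>(std::max(lo, std::min(hi, a)));
  }
  for (int i = 0; i < 4; ++i) printf("%d ", packed[i]);  // 0 7 255 100
  printf("\n");
}
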
+ case kExprS128Select: {
+ int4 v2 = Pop().to_s128().to_i32x4();
+ int4 v1 = Pop().to_s128().to_i32x4();
+ int4 bool_val = Pop().to_s128().to_i32x4();
+ int4 res;
+ for (size_t i = 0; i < 4; ++i) {
+ res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
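
Note: v2 ^ ((v1 ^ v2) & mask) is a branchless bitwise select: where a mask bit is set the result bit comes from v1, otherwise from v2. This is why the compare opcodes above produce all-ones/all-zeros lanes. A tiny self-contained check:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t v1 = 0xAAAAAAAAu, v2 = 0x55555555u, mask = 0xFFFF0000u;
  uint32_t res = v2 ^ ((v1 ^ v2) & mask);
  printf("%08X\n", res);  // AAAA5555: upper half from v1, lower half from v2
}
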
+#define ADD_HORIZ_CASE(op, name, stype, count) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count / 2; ++i) { \
+ res.val[i] = s1.val[i * 2] + s1.val[i * 2 + 1]; \
+ res.val[i + count / 2] = s2.val[i * 2] + s2.val[i * 2 + 1]; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
+ ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
+ ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
+#undef ADD_HORIZ_CASE
+ case kExprS8x16Shuffle: {
+ Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc));
+ len += 16;
+ int16 v2 = Pop().to_s128().to_i8x16();
+ int16 v1 = Pop().to_s128().to_i8x16();
+ int16 res;
+ for (size_t i = 0; i < kSimd128Size; ++i) {
+ int lane = imm.shuffle[i];
+ res.val[i] =
+ lane < kSimd128Size ? v1.val[lane] : v2.val[lane - kSimd128Size];
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
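
Note: s8x16.shuffle picks each of the 16 output bytes from the concatenation of the two inputs (indices 0-15 select from the first operand, 16-31 from the second), which is exactly what the loop above does. A standalone sketch with an interleaving pattern:

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t v1[16], v2[16], res[16];
  for (int i = 0; i < 16; ++i) {
    v1[i] = static_cast<uint8_t>(i);        // 0..15
    v2[i] = static_cast<uint8_t>(100 + i);  // 100..115
  }
  const uint8_t shuffle[16] = {0, 16, 1, 17, 2, 18, 3, 19,
                               4, 20, 5, 21, 6, 22, 7, 23};  // interleave low halves
  for (int i = 0; i < 16; ++i) {
    int lane = shuffle[i];
    res[i] = lane < 16 ? v1[lane] : v2[lane - 16];
  }
  for (int i = 0; i < 16; ++i) printf("%d ", res[i]);
  printf("\n");  // 0 100 1 101 2 102 3 103 4 104 5 105 6 106 7 107
}
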
+#define REDUCTION_CASE(op, name, stype, count, operation) \
+ case kExpr##op: { \
+ stype s = Pop().to_s128().to_##name(); \
+ int32_t res = s.val[0]; \
+ for (size_t i = 1; i < count; ++i) { \
+ res = res operation static_cast<int32_t>(s.val[i]); \
+ } \
+ Push(WasmValue(res)); \
+ return true; \
+ }
+ REDUCTION_CASE(S1x4AnyTrue, i32x4, int4, 4, |)
+ REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
+ REDUCTION_CASE(S1x8AnyTrue, i16x8, int8, 8, |)
+ REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
+ REDUCTION_CASE(S1x16AnyTrue, i8x16, int16, 16, |)
+ REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
+#undef REDUCTION_CASE
default:
return false;
}
@@ -1672,7 +2047,7 @@ class ThreadImpl {
const size_t stack_size_limit = FLAG_stack_size * KB;
// Sum up the value stack size and the control stack size.
const size_t current_stack_size =
- (sp_ - stack_start_) + frames_.size() * sizeof(Frame);
+ (sp_ - stack_.get()) + frames_.size() * sizeof(Frame);
if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
return true;
}
@@ -1693,7 +2068,7 @@ class ThreadImpl {
DCHECK_LE(code->function->sig->parameter_count() +
code->locals.type_list.size() +
code->side_table->max_stack_height_,
- stack_limit_ - stack_start_ - frames_.back().sp);
+ stack_limit_ - stack_.get() - frames_.back().sp);
Decoder decoder(code->start, code->end);
pc_t limit = code->end - code->start;
@@ -1926,7 +2301,7 @@ class ThreadImpl {
code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
- DCHECK_LE(module()->function_tables.size(), 1u);
+ DCHECK_LE(module()->tables.size(), 1u);
ExternalCallResult result =
CallIndirectFunction(0, entry_index, imm.sig_index);
switch (result.type) {
@@ -2079,7 +2454,8 @@ class ThreadImpl {
MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
- Handle<WasmMemoryObject> memory(instance_object_->memory_object());
+ Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
+ instance_object_->GetIsolate());
Isolate* isolate = memory->GetIsolate();
int32_t result = WasmMemoryObject::Grow(isolate, memory, delta_pages);
Push(WasmValue(result));
@@ -2110,6 +2486,18 @@ class ThreadImpl {
Push(WasmValue(ExecuteI64ReinterpretF64(val)));
break;
}
+#define SIGN_EXTENSION_CASE(name, wtype, ntype) \
+ case kExpr##name: { \
+ ntype val = static_cast<ntype>(Pop().to<wtype>()); \
+ Push(WasmValue(static_cast<wtype>(val))); \
+ break; \
+ }
+ SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
+ SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
+ SIGN_EXTENSION_CASE(I64SExtendI8, int64_t, int8_t);
+ SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
+ SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
+#undef SIGN_EXTENSION_CASE
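
Note: the double cast in SIGN_EXTENSION_CASE is the whole trick: truncate to the narrow signed type, then widen back so the sign bit propagates. A standalone sketch of the i32.extend8_s case (relies on two's-complement narrowing, just as the interpreter does):

#include <cstdint>
#include <cstdio>

int32_t ExtendI8(int32_t v) {
  return static_cast<int32_t>(static_cast<int8_t>(v));
}

int main() {
  printf("%d\n", ExtendI8(0x80));  // -128: bit 7 is copied into bits 8..31
  printf("%d\n", ExtendI8(0x7F));  // 127: positive values pass through unchanged
}
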
case kNumericPrefix: {
++len;
if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
@@ -2238,18 +2626,18 @@ class ThreadImpl {
void EnsureStackSpace(size_t size) {
if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
- size_t old_size = stack_limit_ - stack_start_;
+ size_t old_size = stack_limit_ - stack_.get();
size_t requested_size =
- base::bits::RoundUpToPowerOfTwo64((sp_ - stack_start_) + size);
+ base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
- WasmValue* new_stack = zone_->NewArray<WasmValue>(new_size);
- memcpy(new_stack, stack_start_, old_size * sizeof(*sp_));
- sp_ = new_stack + (sp_ - stack_start_);
- stack_start_ = new_stack;
- stack_limit_ = new_stack + new_size;
+ std::unique_ptr<WasmValue[]> new_stack(new WasmValue[new_size]);
+ memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
+ sp_ = new_stack.get() + (sp_ - stack_.get());
+ stack_ = std::move(new_stack);
+ stack_limit_ = stack_.get() + new_size;
}
- sp_t StackHeight() { return sp_ - stack_start_; }
+ sp_t StackHeight() { return sp_ - stack_.get(); }
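
Note: EnsureStackSpace grows the operand stack to at least 8 slots, at least double the old capacity, and at least the next power of two covering the requested height. A sketch of just that size computation (RoundUpToPowerOfTwo64 is stood in for by a local helper):

#include <algorithm>
#include <cstdint>
#include <cstdio>

uint64_t RoundUpToPowerOfTwo64(uint64_t v) {
  uint64_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

size_t NewStackSize(size_t old_size, size_t used, size_t extra) {
  size_t requested = static_cast<size_t>(RoundUpToPowerOfTwo64(used + extra));
  return std::max<size_t>(8, std::max(2 * old_size, requested));
}

int main() {
  printf("%zu\n", NewStackSize(0, 0, 5));     // 8
  printf("%zu\n", NewStackSize(8, 7, 3));     // 16
  printf("%zu\n", NewStackSize(16, 15, 40));  // 64
}
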
void TraceValueStack() {
#ifdef DEBUG
@@ -2415,6 +2803,19 @@ class ThreadImpl {
return {ExternalCallResult::EXTERNAL_RETURNED};
}
+ static WasmCode* GetTargetCode(WasmCodeManager* code_manager,
+ Address target) {
+ NativeModule* native_module = code_manager->LookupNativeModule(target);
+ if (native_module->is_jump_table_slot(target)) {
+ uint32_t func_index =
+ native_module->GetFunctionIndexFromJumpTableSlot(target);
+ return native_module->code(func_index);
+ }
+ WasmCode* code = native_module->Lookup(target);
+ DCHECK_EQ(code->instruction_start(), target);
+ return code;
+ }
+
ExternalCallResult CallImportedFunction(uint32_t function_index) {
// Use a new HandleScope to avoid leaking / accumulating handles in the
// outer scope.
@@ -2423,13 +2824,10 @@ class ThreadImpl {
DCHECK_GT(module()->num_imported_functions, function_index);
Handle<WasmInstanceObject> instance;
- WasmCode* code;
- {
- ImportedFunctionEntry entry(instance_object_, function_index);
- instance = handle(entry.instance(), isolate);
- code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
- entry.target());
- }
+ ImportedFunctionEntry entry(instance_object_, function_index);
+ instance = handle(entry.instance(), isolate);
+ WasmCode* code =
+ GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
FunctionSig* sig = codemap()->module()->functions[function_index].sig;
return CallExternalWasmFunction(isolate, instance, code, sig);
}
@@ -2448,7 +2846,7 @@ class ThreadImpl {
module()->signature_ids[code->function->sig_index];
int expected_canonical_id = module()->signature_ids[sig_index];
DCHECK_EQ(function_canonical_id,
- module()->signature_map.Find(code->function->sig));
+ module()->signature_map.Find(*code->function->sig));
if (function_canonical_id != expected_canonical_id) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
@@ -2459,7 +2857,7 @@ class ThreadImpl {
Isolate* isolate = instance_object_->GetIsolate();
uint32_t expected_sig_id = module()->signature_ids[sig_index];
DCHECK_EQ(expected_sig_id,
- module()->signature_map.Find(module()->signatures[sig_index]));
+ module()->signature_map.Find(*module()->signatures[sig_index]));
// The function table is stored in the instance.
// TODO(wasm): the wasm interpreter currently supports only one table.
@@ -2469,20 +2867,16 @@ class ThreadImpl {
return {ExternalCallResult::INVALID_FUNC};
}
- WasmCode* code;
- Handle<WasmInstanceObject> instance;
- {
- IndirectFunctionTableEntry entry(instance_object_, entry_index);
- // Signature check.
- if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
-
- instance = handle(entry.instance(), isolate);
- code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
- entry.target());
+ IndirectFunctionTableEntry entry(instance_object_, entry_index);
+ // Signature check.
+ if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
}
+ Handle<WasmInstanceObject> instance = handle(entry.instance(), isolate);
+ WasmCode* code =
+ GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
+
// Call either an internal or external WASM function.
HandleScope scope(isolate);
FunctionSig* signature = module()->signatures[sig_index];