Diffstat (limited to 'deps/v8/src/wasm/wasm-interpreter.cc')
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 181
1 file changed, 131 insertions(+), 50 deletions(-)
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 2f8fb0bf4a..3bcb1b5ef6 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -477,6 +477,17 @@ int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
return output;
}
+int64_t ExecuteI64SConvertSatF32(float a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64SConvertF32(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<int64_t>::min()
+ : std::numeric_limits<int64_t>::max());
+}
+
int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
int64_t output;
if (!float64_to_int64_wrapper(&a, &output)) {
@@ -485,6 +496,17 @@ int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
return output;
}
+int64_t ExecuteI64SConvertSatF64(double a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64SConvertF64(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<int64_t>::min()
+ : std::numeric_limits<int64_t>::max());
+}
+
uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
uint64_t output;
if (!float32_to_uint64_wrapper(&a, &output)) {
@@ -493,6 +515,17 @@ uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
return output;
}
+uint64_t ExecuteI64UConvertSatF32(float a) {
+ TrapReason base_trap = kTrapCount;
+ uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
+ : std::numeric_limits<uint64_t>::max());
+}
+
uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
uint64_t output;
if (!float64_to_uint64_wrapper(&a, &output)) {
@@ -501,6 +534,17 @@ uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
return output;
}
+uint64_t ExecuteI64UConvertSatF64(double a) {
+ TrapReason base_trap = kTrapCount;
+ uint64_t val = ExecuteI64UConvertF64(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
+ : std::numeric_limits<uint64_t>::max());
+}
+
inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
return static_cast<int64_t>(a);
}
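
The four new ExecuteI64{S,U}ConvertSat{F32,F64} helpers above implement the saturating ("nontrapping") float-to-int64 conversions: each reuses the trapping conversion and, when it would trap, substitutes the saturated result instead, with NaN mapping to 0 and out-of-range inputs clamping to the destination type's minimum or maximum. A minimal standalone sketch of the same policy (the function name here is illustrative, not part of the patch):

#include <cmath>
#include <cstdint>
#include <limits>

// Saturating f64 -> i64: NaN becomes 0, out-of-range values clamp to the
// int64_t limits; everything else truncates as usual.
int64_t SatI64FromF64(double a) {
  if (std::isnan(a)) return 0;
  if (a < static_cast<double>(std::numeric_limits<int64_t>::min()))
    return std::numeric_limits<int64_t>::min();
  if (a >= static_cast<double>(std::numeric_limits<int64_t>::max()))
    return std::numeric_limits<int64_t>::max();
  return static_cast<int64_t>(a);
}
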
@@ -924,6 +968,9 @@ class CodeMap {
// This handle is set and reset by the SetInstanceObject() /
// ClearInstanceObject() method, which is used by the HeapObjectsScope.
Handle<WasmInstanceObject> instance_;
+ // TODO(wasm): Remove this testing wart. It is needed because interpreter
+ // entry stubs are not generated when testing the interpreter in cctests.
+ bool call_indirect_through_module_ = false;
public:
CodeMap(Isolate* isolate, const WasmModule* module,
@@ -942,6 +989,12 @@ class CodeMap {
}
}
+ bool call_indirect_through_module() { return call_indirect_through_module_; }
+
+ void set_call_indirect_through_module(bool val) {
+ call_indirect_through_module_ = val;
+ }
+
void SetInstanceObject(Handle<WasmInstanceObject> instance) {
DCHECK(instance_.is_null());
instance_ = instance;
@@ -987,12 +1040,34 @@ class CodeMap {
}
InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
+ uint32_t saved_index;
+ USE(saved_index);
if (table_index >= module_->function_tables.size()) return nullptr;
+ // Mask table index for SSCA mitigation.
+ saved_index = table_index;
+ table_index &=
+ static_cast<int32_t>((table_index - module_->function_tables.size()) &
+ ~static_cast<int32_t>(table_index)) >>
+ 31;
+ DCHECK_EQ(table_index, saved_index);
const WasmIndirectFunctionTable* table =
&module_->function_tables[table_index];
if (entry_index >= table->values.size()) return nullptr;
+ // Mask entry_index for SSCA mitigation.
+ saved_index = entry_index;
+ entry_index &= static_cast<int32_t>((entry_index - table->values.size()) &
+ ~static_cast<int32_t>(entry_index)) >>
+ 31;
+ DCHECK_EQ(entry_index, saved_index);
uint32_t index = table->values[entry_index];
if (index >= interpreter_code_.size()) return nullptr;
+ // Mask index for SSCA mitigation.
+ saved_index = index;
+ index &= static_cast<int32_t>((index - interpreter_code_.size()) &
+ ~static_cast<int32_t>(index)) >>
+ 31;
+ DCHECK_EQ(index, saved_index);
+
return GetCode(index);
}
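
The masking added to GetIndirectCode is a branchless clamp used as a speculative side-channel (SSCA/Spectre-style) mitigation: after the architectural bounds check has already passed, the index is additionally ANDed with a mask that is all ones for an in-bounds index and zero otherwise, so even a mis-speculated out-of-bounds index collapses to 0 (the DCHECK_EQ documents that architecturally the value is unchanged). A sketch of the same mask in isolation, assuming the table size fits in a positive int32:

#include <cstdint>

// Branchless clamp: if index < size the masked expression has its sign bit
// set and the arithmetic shift yields all ones, leaving index untouched;
// otherwise the mask is zero and index is forced to 0.
uint32_t ClampIndex(uint32_t index, uint32_t size) {
  uint32_t mask = static_cast<uint32_t>(
      static_cast<int32_t>((index - size) & ~index) >> 31);
  return index & mask;
}
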
@@ -1543,9 +1618,21 @@ class ThreadImpl {
case kExprI32UConvertSatF64:
Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
return true;
+ case kExprI64SConvertSatF32:
+ Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
+ return true;
+ case kExprI64UConvertSatF32:
+ Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
+ return true;
+ case kExprI64SConvertSatF64:
+ Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
+ return true;
+ case kExprI64UConvertSatF64:
+ Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
+ return true;
default:
- V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
- code->start[pc], OpcodeName(code->start[pc]));
+ FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
+ OpcodeName(code->start[pc]));
UNREACHABLE();
}
return false;
@@ -1912,7 +1999,7 @@ class ThreadImpl {
// Assume only one table for now.
DCHECK_LE(module()->function_tables.size(), 1u);
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, operand.index);
+ CallIndirectFunction(0, entry_index, operand.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
@@ -2071,6 +2158,9 @@ class ThreadImpl {
WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
Push(WasmValue(result));
len = 1 + operand.length;
+ // Treat one grow_memory instruction like 1000 other instructions,
+ // because it is a really expensive operation.
+ if (max > 0) max = std::max(0, max - 1000);
break;
}
case kExprMemorySize: {
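
The grow_memory change above charges the operation against the interpreter's remaining instruction budget ("max") as if it were 1000 ordinary instructions, so bounded runs and stepping account for its real cost. A sketch of that budgeting pattern, with an illustrative constant name; the guard mirrors the patch, which only charges when a positive budget is in force:

#include <algorithm>

// Charge an expensive operation as many "plain" instructions against the
// remaining budget, without letting the budget go negative.
constexpr int kGrowMemoryCost = 1000;

int ChargeGrowMemory(int remaining_budget) {
  if (remaining_budget > 0)
    remaining_budget = std::max(0, remaining_budget - kGrowMemoryCost);
  return remaining_budget;
}
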
@@ -2152,8 +2242,8 @@ class ThreadImpl {
#undef EXECUTE_UNOP
default:
- V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
- code->start[pc], OpcodeName(code->start[pc]));
+ FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
+ OpcodeName(code->start[pc]));
UNREACHABLE();
}
@@ -2386,18 +2476,24 @@ class ThreadImpl {
arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer data pointer in a handle. As this is an aligned
- // pointer, to the GC it will look like a Smi.
+ // Wrap the arg_buffer data pointer and the WasmContext* in handles. As
+ // these are aligned pointers, to the GC they will look like Smis.
Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
isolate);
DCHECK(!arg_buffer_obj->IsHeapObject());
+ static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
+ "code below needs adaption");
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
+ WasmContext* context = code.wasm_context();
+ Handle<Object> context_obj(reinterpret_cast<Object*>(context), isolate);
+ DCHECK(!context_obj->IsHeapObject());
args[compiler::CWasmEntryParameters::kCodeObject] =
code.IsCodeObject()
? Handle<Object>::cast(code.GetCode())
: Handle<Object>::cast(isolate->factory()->NewForeign(
code.GetWasmCode()->instructions().start(), TENURED));
+ args[compiler::CWasmEntryParameters::kWasmContext] = context_obj;
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
Handle<Object> receiver = isolate->factory()->undefined_value();
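
The CWasmEntry call above now takes three parameters: the code object, the WasmContext*, and the argument buffer. The raw C++ pointers are smuggled through Handle<Object> slots by relying on V8's pointer tagging: Smis carry a low tag bit of 0 while heap-object pointers carry 1, so any sufficiently aligned raw pointer looks like a Smi and is ignored by the GC (hence the DCHECK(!...->IsHeapObject()) checks). A tiny sketch of that invariant (the helper name is illustrative):

#include <cstdint>

// An aligned raw pointer has its low bit clear, so under V8's tagging scheme
// (Smi tag 0, heap-object tag 1) the GC treats it as a Smi and never tries
// to dereference or relocate it.
bool LooksLikeSmiToTheGC(const void* ptr) {
  return (reinterpret_cast<uintptr_t>(ptr) & 1) == 0;
}
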
@@ -2466,13 +2562,19 @@ class ThreadImpl {
DCHECK(AllowHeapAllocation::IsAllowed());
if (code->kind() == wasm::WasmCode::kFunction) {
- DCHECK_EQ(code->owner()->compiled_module()->owning_instance(),
- codemap()->instance());
+ if (code->owner()->compiled_module()->owning_instance() !=
+ codemap()->instance()) {
+ return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
+ signature);
+ }
return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
}
+
if (code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
- } else if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper) {
+ }
+ if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper ||
+ code->kind() == wasm::WasmCode::kInterpreterStub) {
return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
signature);
}
@@ -2501,23 +2603,8 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
- bool no_func_tables = !codemap()->has_instance();
- if (FLAG_wasm_jit_to_native) {
- no_func_tables = no_func_tables || codemap()
- ->instance()
- ->compiled_module()
- ->GetNativeModule()
- ->function_tables()
- .empty();
- } else {
- no_func_tables =
- no_func_tables ||
- !codemap()->instance()->compiled_module()->has_function_tables();
- }
- if (no_func_tables) {
- // No instance. Rely on the information stored in the WasmModule.
- // TODO(wasm): This is only needed for testing. Refactor testing to use
- // the same paths as production.
+ if (codemap()->call_indirect_through_module()) {
+ // Rely on the information stored in the WasmModule.
InterpreterCode* code =
codemap()->GetIndirectCode(table_index, entry_index);
if (!code) return {ExternalCallResult::INVALID_FUNC};
@@ -2551,7 +2638,7 @@ class ThreadImpl {
DCHECK_EQ(canonical_sig_index,
module()->signature_map.Find(module()->signatures[sig_index]));
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
// Check signature.
FixedArray* fun_tables = compiled_module->function_tables();
if (table_index >= static_cast<uint32_t>(fun_tables->length())) {
@@ -2578,33 +2665,23 @@ class ThreadImpl {
target_gc = Code::cast(fun_table->get(
compiler::FunctionTableCodeOffset(static_cast<int>(entry_index))));
} else {
- // Check signature.
- std::vector<GlobalHandleAddress>& fun_tables =
- compiled_module->GetNativeModule()->function_tables();
- if (table_index >= fun_tables.size()) {
+ // The function table is stored in the wasm context.
+ // TODO(wasm): the wasm interpreter currently supports only one table.
+ CHECK_EQ(0, table_index);
+ // Bounds check against table size.
+ if (entry_index >= wasm_context_->table_size) {
return {ExternalCallResult::INVALID_FUNC};
}
- // Reconstitute the global handle to the function table, from the
- // address stored in the respective table of tables.
- FixedArray* fun_table =
- *reinterpret_cast<FixedArray**>(fun_tables[table_index]);
- // Function tables store <smi, code> pairs.
- int num_funcs_in_table =
- fun_table->length() / compiler::kFunctionTableEntrySize;
- if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- int found_sig = Smi::ToInt(fun_table->get(
- compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
- if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ // Signature check.
+ int32_t entry_sig = wasm_context_->table[entry_index].sig_id;
+ if (entry_sig != static_cast<int32_t>(canonical_sig_index)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
-
+ // Load the target address (first instruction of code).
+ Address first_instr = wasm_context_->table[entry_index].target;
+ // TODO(titzer): load the wasm context instead of relying on the
+ // target code being specialized to the target instance.
// Get code object.
- Address first_instr =
- Foreign::cast(fun_table->get(compiler::FunctionTableCodeOffset(
- static_cast<int>(entry_index))))
- ->foreign_address();
target =
isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
first_instr);
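
In the WASM_CONTEXT_TABLES path above, an indirect call is resolved entirely from the flat table held by the WasmContext: bounds-check the entry index against table_size, compare the entry's canonical signature id, then take the entry's target address as the callee's first instruction. A sketch of that dispatch shape with illustrative types (the real entries live in WasmContext; these names are not from the patch):

#include <cstdint>

struct TableEntry {
  int32_t sig_id;         // canonicalized signature index
  const uint8_t* target;  // first instruction of the callee's code
};

enum class IndirectCallStatus { kOk, kInvalidFunc, kSignatureMismatch };

IndirectCallStatus ResolveIndirectCall(const TableEntry* table,
                                       uint32_t table_size,
                                       uint32_t entry_index,
                                       uint32_t canonical_sig_index,
                                       const uint8_t** target_out) {
  if (entry_index >= table_size) return IndirectCallStatus::kInvalidFunc;
  if (table[entry_index].sig_id != static_cast<int32_t>(canonical_sig_index))
    return IndirectCallStatus::kSignatureMismatch;
  *target_out = table[entry_index].target;  // caller resolves code from here
  return IndirectCallStatus::kOk;
}
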
@@ -2897,6 +2974,10 @@ void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
internals_->codemap_.SetFunctionCode(function, start, end);
}
+void WasmInterpreter::SetCallIndirectTestMode() {
+ internals_->codemap_.set_call_indirect_through_module(true);
+}
+
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
// Create some dummy structures, to avoid special-casing the implementation