author     Michaël Zasso <targos@protonmail.com>  2019-08-16 11:32:46 +0200
committer  Michaël Zasso <targos@protonmail.com>  2019-08-19 09:25:23 +0200
commit     e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4 (patch)
tree       6c6bed9804be9df6162b2483f0a56f371f66464d /deps/v8/src/wasm
parent     ec16fdae540adaf710b1a86c620170b2880088f0 (diff)
deps: update V8 to 7.7.299.4
PR-URL: https://github.com/nodejs/node/pull/28918
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--  deps/v8/src/wasm/OWNERS | 2
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 17
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 11
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 12
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 16
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 251
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.h | 32
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 50
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 40
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 139
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 139
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 12
-rw-r--r--  deps/v8/src/wasm/c-api.cc | 1058
-rw-r--r--  deps/v8/src/wasm/decoder.h | 4
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 510
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc | 9
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.h | 2
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc | 116
-rw-r--r--  deps/v8/src/wasm/function-compiler.h | 40
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc | 18
-rw-r--r--  deps/v8/src/wasm/js-to-wasm-wrapper-cache.h | 41
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.cc | 24
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.h | 100
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 6
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 335
-rw-r--r--  deps/v8/src/wasm/module-compiler.h | 30
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 223
-rw-r--r--  deps/v8/src/wasm/module-decoder.h | 4
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc | 212
-rw-r--r--  deps/v8/src/wasm/value-type.h | 86
-rw-r--r--  deps/v8/src/wasm/wasm-arguments.h | 73
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc | 103
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 19
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h | 6
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 23
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc | 74
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h | 7
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 5
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache.cc | 5
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache.h | 4
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 488
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 35
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc | 3
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 314
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h | 54
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 10
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 104
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 479
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 117
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 42
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 54
-rw-r--r--  deps/v8/src/wasm/wasm-result.cc | 18
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc | 2
-rw-r--r--  deps/v8/src/wasm/wasm-text.cc | 175
-rw-r--r--  deps/v8/src/wasm/wasm-text.h | 11
-rw-r--r--  deps/v8/src/wasm/wasm-value.h | 50
56 files changed, 3418 insertions(+), 2396 deletions(-)
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index c9b1aa4d78..8aa6e24739 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
ahaas@chromium.org
bbudge@chromium.org
binji@chromium.org
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index b2cd566873..834eb181d8 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("arm " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -223,7 +221,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
int LiftoffAssembler::PrepareStackFrame() {
if (!CpuFeatures::IsSupported(ARMv7)) {
- BAILOUT("Armv6 not supported");
+ bailout(kUnsupportedArchitecture, "Armv6 not supported");
return 0;
}
uint32_t offset = static_cast<uint32_t>(pc_offset());
@@ -247,7 +245,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) {
- BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
+ bailout(kOtherReason,
+ "Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
@@ -750,7 +749,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
if (!CpuFeatures::IsSupported(SUDIV)) {
- BAILOUT("i32_divs");
+ bailout(kMissingCPUFeature, "i32_divs");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -778,7 +777,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
if (!CpuFeatures::IsSupported(SUDIV)) {
- BAILOUT("i32_divu");
+ bailout(kMissingCPUFeature, "i32_divu");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -793,7 +792,7 @@ void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support.
- BAILOUT("i32_rems");
+ bailout(kMissingCPUFeature, "i32_rems");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -814,7 +813,7 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support.
- BAILOUT("i32_remu");
+ bailout(kMissingCPUFeature, "i32_remu");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -1564,6 +1563,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
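
Note on the pattern above: the old per-architecture macro baked the platform name into the detail string, so BAILOUT("i32_divs") expanded to bailout("arm " "i32_divs"), i.e. the single string "arm i32_divs". With the typed API, the enum carries the machine-readable category while the detail string keeps just the operation name. A before/after sketch of one call site:

    // before: one concatenated string carries all the information
    bailout("arm " "i32_divs");               // -> "arm i32_divs"
    // after: enum for metrics, short string for the error message
    bailout(kMissingCPUFeature, "i32_divs");
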
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index b1d71dce2f..57a157d3a7 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("arm64 " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -135,7 +133,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
if (!IsImmAddSub(bytes)) {
// Stack greater than 4M! Because this is a quite improbable case, we
// just fallback to Turbofan.
- BAILOUT("Stack too big");
+ bailout(kOtherReason, "Stack too big");
return;
}
}
@@ -144,7 +142,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) {
- BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
+ bailout(kOtherReason,
+ "Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
@@ -173,7 +172,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
patching_assembler.PatchSubSp(bytes);
}
-void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
+void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
@@ -1088,6 +1087,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 1b5ca87c3d..7bc3596d2e 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define REQUIRE_CPU_FEATURE(name, ...) \
- if (!CpuFeatures::IsSupported(name)) { \
- bailout("no " #name); \
- return __VA_ARGS__; \
- } \
+#define REQUIRE_CPU_FEATURE(name, ...) \
+ if (!CpuFeatures::IsSupported(name)) { \
+ bailout(kMissingCPUFeature, "no " #name); \
+ return __VA_ARGS__; \
+ } \
CpuFeatureScope feature(this, name);
namespace liftoff {
@@ -1390,7 +1390,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout("no SSE4.1");
+ assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
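
For reference, a use like REQUIRE_CPU_FEATURE(SSE4_1, true) inside a bool-returning emitter now expands to roughly the following (a sketch of the macro above, not new code):

    if (!CpuFeatures::IsSupported(SSE4_1)) {
      bailout(kMissingCPUFeature, "no SSE4_1");  // typed reason + "no " #name
      return true;                               // __VA_ARGS__ supplies the value
    }
    CpuFeatureScope feature(this, SSE4_1);       // code below may assume SSE4.1
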
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 40e1636b6e..766ce71db1 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -11,8 +11,8 @@
#include "src/base/bits.h"
#include "src/base/small-vector.h"
#include "src/codegen/macro-assembler.h"
-#include "src/execution/frames.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -635,13 +635,16 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
const CacheState* cache_state() const { return &cache_state_; }
- bool did_bailout() { return bailout_reason_ != nullptr; }
- const char* bailout_reason() const { return bailout_reason_; }
+ bool did_bailout() { return bailout_reason_ != kSuccess; }
+ LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
+ const char* bailout_detail() const { return bailout_detail_; }
- void bailout(const char* reason) {
- if (bailout_reason_ != nullptr) return;
+ void bailout(LiftoffBailoutReason reason, const char* detail) {
+ DCHECK_NE(kSuccess, reason);
+ if (bailout_reason_ != kSuccess) return;
AbortCompilation();
bailout_reason_ = reason;
+ bailout_detail_ = detail;
}
private:
@@ -655,7 +658,8 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
uint32_t num_used_spill_slots_ = 0;
- const char* bailout_reason_ = nullptr;
+ LiftoffBailoutReason bailout_reason_ = kSuccess;
+ const char* bailout_detail_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
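
The new state in liftoff-assembler.h is a small "first failure wins" latch keyed on the kSuccess sentinel: the first bailout() call records reason and detail and aborts code generation, and later calls are ignored so the recorded reason is the root cause. A self-contained sketch of the pattern with toy types (not the V8 classes):

    #include <cassert>
    #include <cstdio>

    enum LiftoffBailoutReason : signed char {
      kSuccess = 0, kMissingCPUFeature, kOtherReason
    };

    class Assembler {
     public:
      void bailout(LiftoffBailoutReason reason, const char* detail) {
        assert(reason != kSuccess);
        if (bailout_reason_ != kSuccess) return;  // keep the first reason
        bailout_reason_ = reason;                 // machine-readable, for metrics
        bailout_detail_ = detail;                 // human-readable, for error text
      }
      bool did_bailout() const { return bailout_reason_ != kSuccess; }
      LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
      const char* bailout_detail() const { return bailout_detail_; }

     private:
      LiftoffBailoutReason bailout_reason_ = kSuccess;
      const char* bailout_detail_ = nullptr;
    };

    int main() {
      Assembler a;
      a.bailout(kMissingCPUFeature, "i32_divs");
      a.bailout(kOtherReason, "ignored");  // second call is a no-op
      std::printf("%d %s\n", a.did_bailout(), a.bailout_detail());  // 1 i32_divs
    }
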
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index caf00a24ca..7a87ae1a95 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -174,7 +174,8 @@ class LiftoffCompiler {
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_) {}
- bool ok() const { return ok_; }
+ bool did_bailout() const { return bailout_reason_ != kSuccess; }
+ LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
void GetCode(CodeDesc* desc) {
asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
@@ -195,30 +196,51 @@ class LiftoffCompiler {
return __ GetTotalFrameSlotCount();
}
- void unsupported(FullDecoder* decoder, const char* reason) {
- ok_ = false;
- TRACE("unsupported: %s\n", reason);
+ void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
+ const char* detail) {
+ DCHECK_NE(kSuccess, reason);
+ if (did_bailout()) return;
+ bailout_reason_ = reason;
+ TRACE("unsupported: %s\n", detail);
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
- reason);
+ detail);
UnuseLabels(decoder);
}
bool DidAssemblerBailout(FullDecoder* decoder) {
if (decoder->failed() || !__ did_bailout()) return false;
- unsupported(decoder, __ bailout_reason());
+ unsupported(decoder, __ bailout_reason(), __ bailout_detail());
return true;
}
+ LiftoffBailoutReason BailoutReasonForType(ValueType type) {
+ switch (type) {
+ case kWasmS128:
+ return kSimd;
+ case kWasmAnyRef:
+ case kWasmFuncRef:
+ case kWasmNullRef:
+ return kAnyRef;
+ case kWasmExnRef:
+ return kExceptionHandling;
+ case kWasmBottom:
+ return kMultiValue;
+ default:
+ return kOtherReason;
+ }
+ }
+
bool CheckSupportedType(FullDecoder* decoder,
Vector<const ValueType> supported_types,
ValueType type, const char* context) {
- char buffer[128];
// Check supported types.
for (ValueType supported : supported_types) {
if (type == supported) return true;
}
- SNPrintF(ArrayVector(buffer), "%s %s", ValueTypes::TypeName(type), context);
- unsupported(decoder, buffer);
+ LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
+ EmbeddedVector<char, 128> buffer;
+ SNPrintF(buffer, "%s %s", ValueTypes::TypeName(type), context);
+ unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -394,17 +416,17 @@ class LiftoffCompiler {
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
}
- void GenerateOutOfLineCode(OutOfLineCode& ool) {
- __ bind(ool.label.get());
- const bool is_stack_check = ool.stub == WasmCode::kWasmStackGuard;
+ void GenerateOutOfLineCode(OutOfLineCode* ool) {
+ __ bind(ool->label.get());
+ const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;
const bool is_mem_out_of_bounds =
- ool.stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
+ ool->stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
if (is_mem_out_of_bounds && env_->use_trap_handler) {
uint32_t pc = static_cast<uint32_t>(__ pc_offset());
DCHECK_EQ(pc, __ pc_offset());
protected_instructions_.emplace_back(
- trap_handler::ProtectedInstructionData{ool.pc, pc});
+ trap_handler::ProtectedInstructionData{ool->pc, pc});
}
if (!env_->runtime_exception_support) {
@@ -419,16 +441,16 @@ class LiftoffCompiler {
return;
}
- if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
+ if (!ool->regs_to_save.is_empty()) __ PushRegisters(ool->regs_to_save);
source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(ool.position), false);
- __ CallRuntimeStub(ool.stub);
+ __ pc_offset(), SourcePosition(ool->position), false);
+ __ CallRuntimeStub(ool->stub);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
- DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
- if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
+ DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
+ if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
if (is_stack_check) {
- __ emit_jump(ool.continuation.get());
+ __ emit_jump(ool->continuation.get());
} else {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
@@ -437,7 +459,7 @@ class LiftoffCompiler {
void FinishFunction(FullDecoder* decoder) {
if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
- GenerateOutOfLineCode(ool);
+ GenerateOutOfLineCode(&ool);
}
__ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
__ GetTotalFrameSlotCount());
@@ -449,7 +471,7 @@ class LiftoffCompiler {
}
void OnFirstError(FullDecoder* decoder) {
- ok_ = false;
+ if (!did_bailout()) bailout_reason_ = kDecodeError;
UnuseLabels(decoder);
asm_.AbortCompilation();
}
@@ -481,19 +503,20 @@ class LiftoffCompiler {
}
void Try(FullDecoder* decoder, Control* block) {
- unsupported(decoder, "try");
+ unsupported(decoder, kExceptionHandling, "try");
}
void Catch(FullDecoder* decoder, Control* block, Value* exception) {
- unsupported(decoder, "catch");
+ unsupported(decoder, kExceptionHandling, "catch");
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
- if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
- return unsupported(decoder, "multi-value if");
+ if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1) {
+ return unsupported(decoder, kMultiValue, "multi-value if");
+ }
// Allocate the else state.
if_block->else_state = base::make_unique<ElseState>();
@@ -773,8 +796,23 @@ class LiftoffCompiler {
__ emit_i64_eqz(dst.gp(), src);
});
break;
+ case WasmOpcode::kExprI64Clz:
+ case WasmOpcode::kExprI64Ctz:
+ case WasmOpcode::kExprI64Popcnt:
+ return unsupported(decoder, kComplexOperation,
+ WasmOpcodes::OpcodeName(opcode));
+ case WasmOpcode::kExprI32SConvertSatF32:
+ case WasmOpcode::kExprI32UConvertSatF32:
+ case WasmOpcode::kExprI32SConvertSatF64:
+ case WasmOpcode::kExprI32UConvertSatF64:
+ case WasmOpcode::kExprI64SConvertSatF32:
+ case WasmOpcode::kExprI64UConvertSatF32:
+ case WasmOpcode::kExprI64SConvertSatF64:
+ case WasmOpcode::kExprI64UConvertSatF64:
+ return unsupported(decoder, kNonTrappingFloatToInt,
+ WasmOpcodes::OpcodeName(opcode));
default:
- return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ UNREACHABLE();
}
#undef CASE_I32_UNOP
#undef CASE_I32_SIGN_EXTENSION
@@ -1104,8 +1142,12 @@ class LiftoffCompiler {
}
});
break;
+ case WasmOpcode::kExprI64Rol:
+ case WasmOpcode::kExprI64Ror:
+ return unsupported(decoder, kComplexOperation,
+ WasmOpcodes::OpcodeName(opcode));
default:
- return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ UNREACHABLE();
}
#undef CASE_I32_BINOP
#undef CASE_I32_BINOPI
@@ -1153,11 +1195,11 @@ class LiftoffCompiler {
}
void RefNull(FullDecoder* decoder, Value* result) {
- unsupported(decoder, "ref_null");
+ unsupported(decoder, kAnyRef, "ref_null");
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- unsupported(decoder, "func");
+ unsupported(decoder, kAnyRef, "func");
}
void Drop(FullDecoder* decoder, const Value& value) {
@@ -1169,7 +1211,9 @@ class LiftoffCompiler {
void ReturnImpl(FullDecoder* decoder) {
size_t num_returns = decoder->sig_->return_count();
- if (num_returns > 1) return unsupported(decoder, "multi-return");
+ if (num_returns > 1) {
+ return unsupported(decoder, kMultiValue, "multi-return");
+ }
if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
@@ -1201,24 +1245,24 @@ class LiftoffCompiler {
}
}
- void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
+ void SetLocalFromStackSlot(LiftoffAssembler::VarState* dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
- ValueType type = dst_slot.type();
- if (dst_slot.is_reg()) {
- LiftoffRegister slot_reg = dst_slot.reg();
+ ValueType type = dst_slot->type();
+ if (dst_slot->is_reg()) {
+ LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
- __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
+ __ Fill(dst_slot->reg(), state.stack_height() - 1, type);
return;
}
state.dec_used(slot_reg);
- dst_slot.MakeStack();
+ dst_slot->MakeStack();
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
- dst_slot = LiftoffAssembler::VarState(type, dst_reg);
+ *dst_slot = LiftoffAssembler::VarState(type, dst_reg);
__ cache_state()->inc_used(dst_reg);
}
@@ -1237,7 +1281,7 @@ class LiftoffCompiler {
target_slot = source_slot;
break;
case kStack:
- SetLocalFromStackSlot(target_slot, local_index);
+ SetLocalFromStackSlot(&target_slot, local_index);
break;
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
@@ -1254,12 +1298,12 @@ class LiftoffCompiler {
}
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
- LiftoffRegList& pinned, uint32_t* offset) {
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
+ LiftoffRegList* pinned, uint32_t* offset) {
+ Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
- global->index * sizeof(Address), kPointerLoadType, pinned);
+ global->index * sizeof(Address), kPointerLoadType, *pinned);
*offset = 0;
} else {
LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
@@ -1275,7 +1319,7 @@ class LiftoffCompiler {
return;
LiftoffRegList pinned;
uint32_t offset = 0;
- Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
+ Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LoadType type = LoadType::ForValueType(global->type);
@@ -1290,20 +1334,20 @@ class LiftoffCompiler {
return;
LiftoffRegList pinned;
uint32_t offset = 0;
- Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
+ Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueType(global->type);
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
- void GetTable(FullDecoder* decoder, const Value& index, Value* result,
- TableIndexImmediate<validate>& imm) {
- unsupported(decoder, "table_get");
+ void TableGet(FullDecoder* decoder, const Value& index, Value* result,
+ const TableIndexImmediate<validate>& imm) {
+ unsupported(decoder, kAnyRef, "table_get");
}
- void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
- TableIndexImmediate<validate>& imm) {
- unsupported(decoder, "table_set");
+ void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
+ const TableIndexImmediate<validate>& imm) {
+ unsupported(decoder, kAnyRef, "table_set");
}
void Unreachable(FullDecoder* decoder) {
@@ -1370,8 +1414,8 @@ class LiftoffCompiler {
// Generate a branch table case, potentially reusing previously generated
// stack transfer code.
void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
- std::map<uint32_t, MovableLabel>& br_targets) {
- MovableLabel& label = br_targets[br_depth];
+ std::map<uint32_t, MovableLabel>* br_targets) {
+ MovableLabel& label = (*br_targets)[br_depth];
if (label.get()->is_bound()) {
__ jmp(label.get());
} else {
@@ -1384,13 +1428,13 @@ class LiftoffCompiler {
// TODO(wasm): Generate a real branch table (like TF TableSwitch).
void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
- BranchTableIterator<validate>& table_iterator,
- std::map<uint32_t, MovableLabel>& br_targets) {
+ BranchTableIterator<validate>* table_iterator,
+ std::map<uint32_t, MovableLabel>* br_targets) {
DCHECK_LT(min, max);
// Check base case.
if (max == min + 1) {
- DCHECK_EQ(min, table_iterator.cur_index());
- GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ DCHECK_EQ(min, table_iterator->cur_index());
+ GenerateBrCase(decoder, table_iterator->next(), br_targets);
return;
}
@@ -1422,14 +1466,14 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
value.gp(), tmp.gp());
- GenerateBrTable(decoder, tmp, value, 0, imm.table_count, table_iterator,
- br_targets);
+ GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
+ &br_targets);
__ bind(&case_default);
}
// Generate the default case.
- GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ GenerateBrCase(decoder, table_iterator.next(), &br_targets);
DCHECK(!table_iterator.has_next());
}
@@ -1593,7 +1637,7 @@ class LiftoffCompiler {
}
Register AddMemoryMasking(Register index, uint32_t* offset,
- LiftoffRegList& pinned) {
+ LiftoffRegList* pinned) {
if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
return index;
}
@@ -1601,11 +1645,11 @@ class LiftoffCompiler {
// Make sure that we can overwrite {index}.
if (__ cache_state()->is_used(LiftoffRegister(index))) {
Register old_index = index;
- pinned.clear(LiftoffRegister(old_index));
- index = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ pinned->clear(LiftoffRegister(old_index));
+ index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
if (index != old_index) __ Move(index, old_index, kWasmI32);
}
- Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
+ Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
__ emit_ptrsize_add(index, index, *offset);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
@@ -1625,7 +1669,7 @@ class LiftoffCompiler {
return;
}
uint32_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, pinned);
+ index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("Load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -1659,7 +1703,7 @@ class LiftoffCompiler {
return;
}
uint32_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, pinned);
+ index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("Store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -1720,12 +1764,14 @@ class LiftoffCompiler {
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- if (imm.sig->return_count() > 1)
- return unsupported(decoder, "multi-return");
+ if (imm.sig->return_count() > 1) {
+ return unsupported(decoder, kMultiValue, "multi-return");
+ }
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
- "return"))
+ "return")) {
return;
+ }
auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
@@ -1783,10 +1829,10 @@ class LiftoffCompiler {
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1) {
- return unsupported(decoder, "multi-return");
+ return unsupported(decoder, kMultiValue, "multi-return");
}
if (imm.table_index != 0) {
- return unsupported(decoder, "table index != 0");
+ return unsupported(decoder, kAnyRef, "table index != 0");
}
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
@@ -1918,96 +1964,99 @@ class LiftoffCompiler {
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- unsupported(decoder, "return_call");
+ unsupported(decoder, kTailCall, "return_call");
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- unsupported(decoder, "return_call_indirect");
+ unsupported(decoder, kTailCall, "return_call_indirect");
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
const Vector<Value> inputs, Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate>& imm, const Value& input,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
const Vector<Value>& args) {
- unsupported(decoder, "throw");
+ unsupported(decoder, kExceptionHandling, "throw");
}
void Rethrow(FullDecoder* decoder, const Value& exception) {
- unsupported(decoder, "rethrow");
+ unsupported(decoder, kExceptionHandling, "rethrow");
}
void BrOnException(FullDecoder* decoder, const Value& exception,
const ExceptionIndexImmediate<validate>& imm,
uint32_t depth, Vector<Value> values) {
- unsupported(decoder, "br_on_exn");
+ unsupported(decoder, kExceptionHandling, "br_on_exn");
}
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
- unsupported(decoder, "atomicop");
+ unsupported(decoder, kAtomics, "atomicop");
+ }
+ void AtomicFence(FullDecoder* decoder) {
+ unsupported(decoder, kAtomics, "atomic.fence");
}
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- unsupported(decoder, "memory.init");
+ unsupported(decoder, kBulkMemory, "memory.init");
}
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
- unsupported(decoder, "data.drop");
+ unsupported(decoder, kBulkMemory, "data.drop");
}
void MemoryCopy(FullDecoder* decoder,
const MemoryCopyImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- unsupported(decoder, "memory.copy");
+ unsupported(decoder, kBulkMemory, "memory.copy");
}
void MemoryFill(FullDecoder* decoder,
const MemoryIndexImmediate<validate>& imm, const Value& dst,
const Value& value, const Value& size) {
- unsupported(decoder, "memory.fill");
+ unsupported(decoder, kBulkMemory, "memory.fill");
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
Vector<Value> args) {
- unsupported(decoder, "table.init");
+ unsupported(decoder, kBulkMemory, "table.init");
}
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
- unsupported(decoder, "elem.drop");
+ unsupported(decoder, kBulkMemory, "elem.drop");
}
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- unsupported(decoder, "table.copy");
+ unsupported(decoder, kBulkMemory, "table.copy");
}
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- Value& value, Value& delta, Value* result) {
- unsupported(decoder, "table.grow");
+ const Value& value, const Value& delta, Value* result) {
+ unsupported(decoder, kAnyRef, "table.grow");
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value* result) {
- unsupported(decoder, "table.size");
+ unsupported(decoder, kAnyRef, "table.size");
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- Value& start, Value& value, Value& count) {
- unsupported(decoder, "table.fill");
+ const Value& start, const Value& value, const Value& count) {
+ unsupported(decoder, kAnyRef, "table.fill");
}
private:
LiftoffAssembler asm_;
compiler::CallDescriptor* const descriptor_;
CompilationEnv* const env_;
- bool ok_ = true;
+ LiftoffBailoutReason bailout_reason_ = kSuccess;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
@@ -2066,11 +2115,17 @@ WasmCompilationResult ExecuteLiftoffCompilation(AccountingAllocator* allocator,
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) {
- compiler->OnFirstError(&decoder);
- return WasmCompilationResult{};
- }
- if (!compiler->ok()) {
+ if (decoder.failed()) compiler->OnFirstError(&decoder);
+
+ // Check that the histogram for the bailout reasons has the correct size.
+ DCHECK_EQ(0, counters->liftoff_bailout_reasons()->min());
+ DCHECK_EQ(kNumBailoutReasons - 1, counters->liftoff_bailout_reasons()->max());
+ DCHECK_EQ(kNumBailoutReasons,
+ counters->liftoff_bailout_reasons()->num_buckets());
+ // Register the bailout reason (can also be {kSuccess}).
+ counters->liftoff_bailout_reasons()->AddSample(
+ static_cast<int>(compiler->bailout_reason()));
+ if (compiler->did_bailout()) {
// Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
return WasmCompilationResult{};
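
ExecuteLiftoffCompilation now records one histogram sample per compilation, successful or not, so the same histogram counts successes (bucket 0) and bailouts by reason. A self-contained sketch of that accounting, assuming a plain counter array in place of V8's Histogram type:

    #include <array>
    #include <cstdint>

    enum LiftoffBailoutReason : int8_t {
      kSuccess = 0, kDecodeError = 1, /* ... */ kOtherReason = 20,
      kNumBailoutReasons  // one bucket per possible sample value
    };

    std::array<uint32_t, kNumBailoutReasons> bailout_histogram{};

    bool FinishLiftoffCompilation(LiftoffBailoutReason reason) {
      bailout_histogram[static_cast<int>(reason)]++;  // kSuccess -> bucket 0
      return reason == kSuccess;  // on failure the function goes to TurboFan
    }
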
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index f310b9a54b..d40b92bef4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -19,6 +19,38 @@ struct CompilationEnv;
struct FunctionBody;
struct WasmFeatures;
+// Note: If this list changes, also the histogram "V8.LiftoffBailoutReasons"
+// on the chromium side needs to be updated.
+// Deprecating entries is always fine. Repurposing works if you don't care about
+// temporary mix-ups. Increasing the number of reasons {kNumBailoutReasons} is
+// more tricky, and might require introducing a new (updated) histogram.
+enum LiftoffBailoutReason : int8_t {
+ // Nothing actually failed.
+ kSuccess = 0,
+ // Compilation failed, but not because of Liftoff.
+ kDecodeError = 1,
+ // Liftoff is not implemented on that architecture.
+ kUnsupportedArchitecture = 2,
+ // More complex code would be needed because a CPU feature is not present.
+ kMissingCPUFeature = 3,
+ // Liftoff does not implement a complex (and rare) instruction.
+ kComplexOperation = 4,
+ // Unimplemented proposals:
+ kSimd = 5,
+ kAnyRef = 6,
+ kExceptionHandling = 7,
+ kMultiValue = 8,
+ kTailCall = 9,
+ kAtomics = 10,
+ kBulkMemory = 11,
+ kNonTrappingFloatToInt = 12,
+ // A little gap, for forward compatibility.
+ // Any other reason (use rarely; introduce new reasons if this spikes).
+ kOtherReason = 20,
+ // Marker:
+ kNumBailoutReasons
+};
+
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
Counters*, WasmFeatures* detected_features);
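
The gap between kNonTrappingFloatToInt (12) and kOtherReason (20) is what the comment means by forward compatibility: a future reason can take one of the values 13-19 without moving kNumBailoutReasons, so old and new binaries keep bucket-compatible histograms. A sketch of such an extension (the new entry is hypothetical):

    enum LiftoffBailoutReason : int8_t {
      kSuccess = 0,
      // ... entries 1..12 as listed above ...
      kHypotheticalNewProposal = 13,  // hypothetical: fills the reserved gap
      kOtherReason = 20,
      kNumBailoutReasons              // still 21, so bucket layout is unchanged
    };
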
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 5be769685c..e82ffe8f67 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("mips " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -854,7 +852,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f32_copysign");
+ bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -881,7 +879,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f64_copysign");
+ bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
@@ -1026,10 +1024,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
- } else {
- BAILOUT("emit_type_conversion kExprI32SConvertF64");
- return true;
}
+ bailout(kUnsupportedArchitecture, "kExprI32SConvertF64");
+ return true;
}
case kExprI32UConvertF64: {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -1049,10 +1046,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
- } else {
- BAILOUT("emit_type_conversion kExprI32UConvertF64");
- return true;
}
+ bailout(kUnsupportedArchitecture, "kExprI32UConvertF64");
+ return true;
}
case kExprI32ReinterpretF32:
mfc1(dst.gp(), src.fp());
@@ -1116,26 +1112,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -1239,29 +1235,29 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
- Condition condition) {
+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
+ bool* predicate) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kUnequal:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
@@ -1287,7 +1283,7 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1316,7 +1312,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1511,6 +1507,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
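
The ConditionToConditionCmpFPU hunk is part of a wider sweep in this commit replacing non-const reference out-parameters with pointers (see also GetGlobalBaseAndOffset, AddMemoryMasking, and GenerateBrTable in liftoff-compiler.cc), so that mutation is visible at the call site. Before/after at one call site:

    bool predicate;
    // before: nothing here hints that 'predicate' is written to
    //   FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
    // after: the explicit &predicate marks the out-parameter
    FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
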
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 1da72cb9b8..9c87dca733 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("mips64 " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -742,7 +740,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f32_copysign");
+ bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -769,7 +767,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f64_copysign");
+ bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
@@ -1010,26 +1008,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -1096,29 +1094,29 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
- Condition condition) {
+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
+ bool* predicate) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kUnequal:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
@@ -1144,7 +1142,7 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1173,7 +1171,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1351,6 +1349,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 577df835e8..a690a1c090 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -7,20 +7,19 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("ppc " reason)
namespace v8 {
namespace internal {
namespace wasm {
int LiftoffAssembler::PrepareStackFrame() {
- BAILOUT("PrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
- BAILOUT("PatchPrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
}
void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
@@ -29,136 +28,136 @@ void LiftoffAssembler::AbortCompilation() { FinishCode(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- BAILOUT("LoadConstant");
+ bailout(kUnsupportedArchitecture, "LoadConstant");
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
- BAILOUT("LoadFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
uint32_t offset) {
- BAILOUT("LoadTaggedPointerFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
void LiftoffAssembler::SpillInstance(Register instance) {
- BAILOUT("SpillInstance");
+ bailout(kUnsupportedArchitecture, "SpillInstance");
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- BAILOUT("FillInstanceInto");
+ bailout(kUnsupportedArchitecture, "FillInstanceInto");
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
LiftoffRegList pinned) {
- BAILOUT("LoadTaggedPointer");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- BAILOUT("Load");
+ bailout(kUnsupportedArchitecture, "Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- BAILOUT("Store");
+ bailout(kUnsupportedArchitecture, "Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- BAILOUT("MoveStackValue");
+ bailout(kUnsupportedArchitecture, "MoveStackValue");
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- BAILOUT("Move Register");
+ bailout(kUnsupportedArchitecture, "Move Register");
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
- BAILOUT("Move DoubleRegister");
+ bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
- BAILOUT("Spill register");
+ bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- BAILOUT("Spill value");
+ bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
- BAILOUT("Fill");
+ bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
- BAILOUT("FillI64Half");
+ bailout(kUnsupportedArchitecture, "FillI64Half");
}
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("i32 binop:: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop:: " #name); \
}
#define UNIMPLEMENTED_I32_BINOP_I(name) \
UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
int32_t imm) { \
- BAILOUT("i32 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- BAILOUT("i64 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP_I(name) \
UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t imm) { \
- BAILOUT("i64_i binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64_i binop: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i32 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
}
#define UNIMPLEMENTED_I64_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i64 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
}
UNIMPLEMENTED_I32_BINOP_I(i32_add)
@@ -227,65 +226,65 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i32_divs");
+ bailout(kUnsupportedArchitecture, "i32_divs");
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_divu");
+ bailout(kUnsupportedArchitecture, "i32_divu");
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_rems");
+ bailout(kUnsupportedArchitecture, "i32_rems");
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_remu");
+ bailout(kUnsupportedArchitecture, "i32_remu");
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
- BAILOUT("i32_shr");
+ bailout(kUnsupportedArchitecture, "i32_shr");
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i64_divs");
+ bailout(kUnsupportedArchitecture, "i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_divu");
+ bailout(kUnsupportedArchitecture, "i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_rems");
+ bailout(kUnsupportedArchitecture, "i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_remu");
+ bailout(kUnsupportedArchitecture, "i64_remu");
return true;
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
int amount) {
- BAILOUT("i64_shr");
+ bailout(kUnsupportedArchitecture, "i64_shr");
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_PPC64
- BAILOUT("emit_i32_to_intptr");
+ bailout(kUnsupportedArchitecture, "emit_i32_to_intptr");
#else
// This is a nop on ppc32.
#endif
@@ -294,96 +293,100 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- BAILOUT("emit_type_conversion");
+ bailout(kUnsupportedArchitecture, "emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
}
-void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
-void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
- BAILOUT("emit_cond_jump");
+ bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- BAILOUT("emit_i32_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i32_eqz");
}
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- BAILOUT("emit_i32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- BAILOUT("emit_i64_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i64_eqz");
}
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- BAILOUT("emit_i64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
}
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
}
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- BAILOUT("StackCheck");
+ bailout(kUnsupportedArchitecture, "StackCheck");
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- BAILOUT("CallTrapCallbackForTesting");
+ bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ bailout(kUnsupportedArchitecture, "AssertUnreachable");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ bailout(kUnsupportedArchitecture, "PushRegisters");
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ bailout(kUnsupportedArchitecture, "PopRegisters");
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- BAILOUT("DropStackSlotsAndRet");
+ bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -391,33 +394,33 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- BAILOUT("CallC");
+ bailout(kUnsupportedArchitecture, "CallC");
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ bailout(kUnsupportedArchitecture, "CallIndirect");
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- BAILOUT("CallRuntimeStub");
+ bailout(kUnsupportedArchitecture, "CallRuntimeStub");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ bailout(kUnsupportedArchitecture, "AllocateStackSlot");
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
- asm_->BAILOUT("LiftoffStackSlots::Construct");
+ asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
} // namespace wasm
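
Every Liftoff entry point on ppc is a stub that bails out with kUnsupportedArchitecture, leaving the function to TurboFan; the UNIMPLEMENTED_* macros above stamp these stubs out. For example, UNIMPLEMENTED_I32_BINOP(i32_add) expands to roughly this (sketch; the doubled colon comes from the macro's literal):

    void LiftoffAssembler::emit_i32_add(Register dst, Register lhs,
                                        Register rhs) {
      bailout(kUnsupportedArchitecture, "i32 binop:: i32_add");
    }
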
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1e01bec407..d17c7dada1 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -7,20 +7,19 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("s390 " reason)
namespace v8 {
namespace internal {
namespace wasm {
int LiftoffAssembler::PrepareStackFrame() {
- BAILOUT("PrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
- BAILOUT("PatchPrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
}
void LiftoffAssembler::FinishCode() {}
@@ -29,136 +28,136 @@ void LiftoffAssembler::AbortCompilation() {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- BAILOUT("LoadConstant");
+ bailout(kUnsupportedArchitecture, "LoadConstant");
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
- BAILOUT("LoadFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
uint32_t offset) {
- BAILOUT("LoadTaggedPointerFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
void LiftoffAssembler::SpillInstance(Register instance) {
- BAILOUT("SpillInstance");
+ bailout(kUnsupportedArchitecture, "SpillInstance");
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- BAILOUT("FillInstanceInto");
+ bailout(kUnsupportedArchitecture, "FillInstanceInto");
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
LiftoffRegList pinned) {
- BAILOUT("LoadTaggedPointer");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- BAILOUT("Load");
+ bailout(kUnsupportedArchitecture, "Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- BAILOUT("Store");
+ bailout(kUnsupportedArchitecture, "Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- BAILOUT("MoveStackValue");
+ bailout(kUnsupportedArchitecture, "MoveStackValue");
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- BAILOUT("Move Register");
+ bailout(kUnsupportedArchitecture, "Move Register");
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
- BAILOUT("Move DoubleRegister");
+ bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
- BAILOUT("Spill register");
+ bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- BAILOUT("Spill value");
+ bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
- BAILOUT("Fill");
+ bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
- BAILOUT("FillI64Half");
+ bailout(kUnsupportedArchitecture, "FillI64Half");
}
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("i32 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop: " #name); \
}
#define UNIMPLEMENTED_I32_BINOP_I(name) \
UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
int32_t imm) { \
- BAILOUT("i32 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- BAILOUT("i64 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP_I(name) \
UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t imm) { \
- BAILOUT("i64 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop_i: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i32 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
}
#define UNIMPLEMENTED_I64_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i64 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
}
UNIMPLEMENTED_I32_BINOP_I(i32_add)
@@ -227,65 +226,65 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i32_divs");
+ bailout(kUnsupportedArchitecture, "i32_divs");
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_divu");
+ bailout(kUnsupportedArchitecture, "i32_divu");
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_rems");
+ bailout(kUnsupportedArchitecture, "i32_rems");
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_remu");
+ bailout(kUnsupportedArchitecture, "i32_remu");
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
- BAILOUT("i32_shr");
+ bailout(kUnsupportedArchitecture, "i32_shr");
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i64_divs");
+ bailout(kUnsupportedArchitecture, "i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_divu");
+ bailout(kUnsupportedArchitecture, "i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_rems");
+ bailout(kUnsupportedArchitecture, "i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_remu");
+ bailout(kUnsupportedArchitecture, "i64_remu");
return true;
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
int amount) {
- BAILOUT("i64_shr");
+ bailout(kUnsupportedArchitecture, "i64_shr");
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
- BAILOUT("emit_i32_to_intptr");
+ bailout(kUnsupportedArchitecture, "emit_i32_to_intptr");
#else
// This is a nop on s390.
#endif
@@ -294,96 +293,100 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- BAILOUT("emit_type_conversion");
+ bailout(kUnsupportedArchitecture, "emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
}
-void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
-void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
- BAILOUT("emit_cond_jump");
+ bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- BAILOUT("emit_i32_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i32_eqz");
}
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- BAILOUT("emit_i32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- BAILOUT("emit_i64_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i64_eqz");
}
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- BAILOUT("emit_i64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
}
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
}
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- BAILOUT("StackCheck");
+ bailout(kUnsupportedArchitecture, "StackCheck");
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- BAILOUT("CallTrapCallbackForTesting");
+ bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ bailout(kUnsupportedArchitecture, "AssertUnreachable");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ bailout(kUnsupportedArchitecture, "PushRegisters");
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ bailout(kUnsupportedArchitecture, "PopRegisters");
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- BAILOUT("DropStackSlotsAndRet");
+ bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -391,33 +394,33 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- BAILOUT("CallC");
+ bailout(kUnsupportedArchitecture, "CallC");
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ bailout(kUnsupportedArchitecture, "CallIndirect");
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- BAILOUT("CallRuntimeStub");
+ bailout(kUnsupportedArchitecture, "CallRuntimeStub");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ bailout(kUnsupportedArchitecture, "AllocateStackSlot");
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
- asm_->BAILOUT("LiftoffStackSlots::Construct");
+ asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index cbff0d4da9..43637985d0 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define REQUIRE_CPU_FEATURE(name, ...) \
- if (!CpuFeatures::IsSupported(name)) { \
- bailout("no " #name); \
- return __VA_ARGS__; \
- } \
+#define REQUIRE_CPU_FEATURE(name, ...) \
+ if (!CpuFeatures::IsSupported(name)) { \
+ bailout(kMissingCPUFeature, "no " #name); \
+ return __VA_ARGS__; \
+ } \
CpuFeatureScope feature(this, name);
namespace liftoff {
@@ -1260,7 +1260,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout("no SSE4.1");
+ assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
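On x64 the reason is kMissingCPUFeature rather than kUnsupportedArchitecture: the port exists, but individual emitters may need an instruction-set extension. The macro expands at the top of an emitter; this is roughly how the in-tree f32.ceil emitter uses it (reconstructed from memory, so treat the body as a sketch):

// If SSE4.1 is absent, record a kMissingCPUFeature bailout and return the
// supplied fallback value; otherwise a CpuFeatureScope stays live while the
// rounding instruction is emitted.
bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
  REQUIRE_CPU_FEATURE(SSE4_1, true);
  Roundss(dst, src, kRoundUp);
  return true;
}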
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index e5c1fa4686..86bba189b8 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -28,577 +28,131 @@
#include "include/libplatform/libplatform.h"
#include "src/api/api-inl.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-instantiate.h"
+#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
-// BEGIN FILE wasm-bin.cc
-
namespace wasm {
-namespace bin {
-
-////////////////////////////////////////////////////////////////////////////////
-// Encoding
-
-void encode_header(char*& ptr) {
- std::memcpy(ptr,
- "\x00"
- "asm\x01\x00\x00\x00",
- 8);
- ptr += 8;
-}
-
-void encode_size32(char*& ptr, size_t n) {
- assert(n <= 0xffffffff);
- for (int i = 0; i < 5; ++i) {
- *ptr++ = (n & 0x7f) | (i == 4 ? 0x00 : 0x80);
- n = n >> 7;
- }
-}
-void encode_valtype(char*& ptr, const ValType* type) {
- switch (type->kind()) {
- case I32:
- *ptr++ = 0x7f;
- break;
- case I64:
- *ptr++ = 0x7e;
- break;
- case F32:
- *ptr++ = 0x7d;
- break;
- case F64:
- *ptr++ = 0x7c;
- break;
- case FUNCREF:
- *ptr++ = 0x70;
- break;
- case ANYREF:
- *ptr++ = 0x6f;
- break;
- default:
- UNREACHABLE();
- }
-}
-
-auto zero_size(const ValType* type) -> size_t {
- switch (type->kind()) {
- case I32:
- return 1;
- case I64:
- return 1;
- case F32:
- return 4;
- case F64:
- return 8;
- case FUNCREF:
- return 0;
- case ANYREF:
- return 0;
- default:
- UNREACHABLE();
- }
-}
-
-void encode_const_zero(char*& ptr, const ValType* type) {
- switch (type->kind()) {
- case I32:
- *ptr++ = 0x41;
- break;
- case I64:
- *ptr++ = 0x42;
- break;
- case F32:
- *ptr++ = 0x43;
- break;
- case F64:
- *ptr++ = 0x44;
- break;
- default:
- UNREACHABLE();
- }
- for (size_t i = 0; i < zero_size(type); ++i) *ptr++ = 0;
-}
-
-auto wrapper(const FuncType* type) -> vec<byte_t> {
- auto in_arity = type->params().size();
- auto out_arity = type->results().size();
- auto size = 39 + in_arity + out_arity;
- auto binary = vec<byte_t>::make_uninitialized(size);
- auto ptr = binary.get();
-
- encode_header(ptr);
-
- *ptr++ = i::wasm::kTypeSectionCode;
- encode_size32(ptr, 12 + in_arity + out_arity); // size
- *ptr++ = 1; // length
- *ptr++ = i::wasm::kWasmFunctionTypeCode;
- encode_size32(ptr, in_arity);
- for (size_t i = 0; i < in_arity; ++i) {
- encode_valtype(ptr, type->params()[i].get());
- }
- encode_size32(ptr, out_arity);
- for (size_t i = 0; i < out_arity; ++i) {
- encode_valtype(ptr, type->results()[i].get());
- }
-
- *ptr++ = i::wasm::kImportSectionCode;
- *ptr++ = 5; // size
- *ptr++ = 1; // length
- *ptr++ = 0; // module length
- *ptr++ = 0; // name length
- *ptr++ = i::wasm::kExternalFunction;
- *ptr++ = 0; // type index
-
- *ptr++ = i::wasm::kExportSectionCode;
- *ptr++ = 4; // size
- *ptr++ = 1; // length
- *ptr++ = 0; // name length
- *ptr++ = i::wasm::kExternalFunction;
- *ptr++ = 0; // func index
-
- assert(ptr - binary.get() == static_cast<ptrdiff_t>(size));
- return binary;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Decoding
-
-// Numbers
-
-auto u32(const byte_t*& pos) -> uint32_t {
- uint32_t n = 0;
- uint32_t shift = 0;
- byte_t b;
- do {
- b = *pos++;
- n += (b & 0x7f) << shift;
- shift += 7;
- } while ((b & 0x80) != 0);
- return n;
-}
+namespace {
-auto u64(const byte_t*& pos) -> uint64_t {
+auto ReadLebU64(const byte_t** pos) -> uint64_t {
uint64_t n = 0;
uint64_t shift = 0;
byte_t b;
do {
- b = *pos++;
+ b = **pos;
+ (*pos)++;
n += (b & 0x7f) << shift;
shift += 7;
} while ((b & 0x80) != 0);
return n;
}
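ReadLebU64 is a plain unsigned LEB128 decoder: each byte contributes seven payload bits, least-significant group first, and the top bit marks continuation. For example, 300 (binary 100101100) encodes as 0xAC 0x02: 0xAC carries the low seven bits 0101100 plus the continuation bit, and 0x02 carries the remaining bits 10. A standalone sketch of the same loop, widening the byte before shifting so the shift stays well defined at higher positions:

#include <cassert>
#include <cstdint>

// Unsigned LEB128 decoder mirroring ReadLebU64 above; advances *pos past the
// encoded value and returns it.
uint64_t DecodeLebU64(const uint8_t** pos) {
  uint64_t n = 0;
  unsigned shift = 0;
  uint8_t b;
  do {
    b = *(*pos)++;
    n |= static_cast<uint64_t>(b & 0x7f) << shift;
    shift += 7;
  } while (b & 0x80);
  return n;
}

int main() {
  const uint8_t bytes[] = {0xAC, 0x02};  // LEB128 encoding of 300
  const uint8_t* p = bytes;
  assert(DecodeLebU64(&p) == 300);
  assert(p == bytes + 2);
}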
-void u32_skip(const byte_t*& pos) { bin::u32(pos); }
-
-// Names
-
-auto name(const byte_t*& pos) -> Name {
- auto size = bin::u32(pos);
- auto start = pos;
- auto name = Name::make_uninitialized(size);
- std::memcpy(name.get(), start, size);
- pos += size;
- return name;
-}
-
-// Types
-
-auto valtype(const byte_t*& pos) -> own<wasm::ValType*> {
- switch (*pos++) {
- case i::wasm::kLocalI32:
- return ValType::make(I32);
- case i::wasm::kLocalI64:
- return ValType::make(I64);
- case i::wasm::kLocalF32:
- return ValType::make(F32);
- case i::wasm::kLocalF64:
- return ValType::make(F64);
- case i::wasm::kLocalAnyFunc:
- return ValType::make(FUNCREF);
- case i::wasm::kLocalAnyRef:
- return ValType::make(ANYREF);
+ValKind V8ValueTypeToWasm(i::wasm::ValueType v8_valtype) {
+ switch (v8_valtype) {
+ case i::wasm::kWasmI32:
+ return I32;
+ case i::wasm::kWasmI64:
+ return I64;
+ case i::wasm::kWasmF32:
+ return F32;
+ case i::wasm::kWasmF64:
+ return F64;
+ case i::wasm::kWasmFuncRef:
+ return FUNCREF;
+ case i::wasm::kWasmAnyRef:
+ return ANYREF;
default:
// TODO(wasm+): support new value types
UNREACHABLE();
}
- return {};
-}
-
-auto mutability(const byte_t*& pos) -> Mutability {
- return *pos++ ? VAR : CONST;
-}
-
-auto limits(const byte_t*& pos) -> Limits {
- auto tag = *pos++;
- auto min = bin::u32(pos);
- if ((tag & 0x01) == 0) {
- return Limits(min);
- } else {
- auto max = bin::u32(pos);
- return Limits(min, max);
- }
-}
-
-auto stacktype(const byte_t*& pos) -> vec<ValType*> {
- size_t size = bin::u32(pos);
- auto v = vec<ValType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) v[i] = bin::valtype(pos);
- return v;
-}
-
-auto functype(const byte_t*& pos) -> own<FuncType*> {
- assert(*pos == i::wasm::kWasmFunctionTypeCode);
- ++pos;
- auto params = bin::stacktype(pos);
- auto results = bin::stacktype(pos);
- return FuncType::make(std::move(params), std::move(results));
-}
-
-auto globaltype(const byte_t*& pos) -> own<GlobalType*> {
- auto content = bin::valtype(pos);
- auto mutability = bin::mutability(pos);
- return GlobalType::make(std::move(content), mutability);
-}
-
-auto tabletype(const byte_t*& pos) -> own<TableType*> {
- auto elem = bin::valtype(pos);
- auto limits = bin::limits(pos);
- return TableType::make(std::move(elem), limits);
}
-auto memorytype(const byte_t*& pos) -> own<MemoryType*> {
- auto limits = bin::limits(pos);
- return MemoryType::make(limits);
-}
-
-// Expressions
-
-void expr_skip(const byte_t*& pos) {
- switch (*pos++) {
- case i::wasm::kExprI32Const:
- case i::wasm::kExprI64Const:
- case i::wasm::kExprGetGlobal: {
- bin::u32_skip(pos);
- } break;
- case i::wasm::kExprF32Const: {
- pos += 4;
- } break;
- case i::wasm::kExprF64Const: {
- pos += 8;
- } break;
- default: {
- // TODO(wasm+): support new expression forms
+i::wasm::ValueType WasmValKindToV8(ValKind kind) {
+ switch (kind) {
+ case I32:
+ return i::wasm::kWasmI32;
+ case I64:
+ return i::wasm::kWasmI64;
+ case F32:
+ return i::wasm::kWasmF32;
+ case F64:
+ return i::wasm::kWasmF64;
+ case FUNCREF:
+ return i::wasm::kWasmFuncRef;
+ case ANYREF:
+ return i::wasm::kWasmAnyRef;
+ default:
+ // TODO(wasm+): support new value types
UNREACHABLE();
- }
}
- ++pos; // end
-}
-
-// Sections
-
-auto section(const vec<const byte_t>& binary, i::wasm::SectionCode sec)
- -> const byte_t* {
- const byte_t* end = binary.get() + binary.size();
- const byte_t* pos = binary.get() + 8; // skip header
- while (pos < end && *pos++ != sec) {
- auto size = bin::u32(pos);
- pos += size;
- }
- if (pos == end) return nullptr;
- bin::u32_skip(pos);
- return pos;
-}
-
-// Only for asserts/DCHECKs.
-auto section_end(const vec<const byte_t>& binary, i::wasm::SectionCode sec)
- -> const byte_t* {
- const byte_t* end = binary.get() + binary.size();
- const byte_t* pos = binary.get() + 8; // skip header
- while (pos < end && *pos != sec) {
- ++pos;
- auto size = bin::u32(pos);
- pos += size;
- }
- if (pos == end) return nullptr;
- ++pos;
- auto size = bin::u32(pos);
- return pos + size;
-}
-
-// Type section
-
-auto types(const vec<const byte_t>& binary) -> vec<FuncType*> {
- auto pos = bin::section(binary, i::wasm::kTypeSectionCode);
- if (pos == nullptr) return vec<FuncType*>::make();
- size_t size = bin::u32(pos);
- // TODO(wasm+): support new deftypes
- auto v = vec<FuncType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- v[i] = bin::functype(pos);
- }
- assert(pos == bin::section_end(binary, i::wasm::kTypeSectionCode));
- return v;
-}
-
-// Import section
-
-auto imports(const vec<const byte_t>& binary, const vec<FuncType*>& types)
- -> vec<ImportType*> {
- auto pos = bin::section(binary, i::wasm::kImportSectionCode);
- if (pos == nullptr) return vec<ImportType*>::make();
- size_t size = bin::u32(pos);
- auto v = vec<ImportType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- auto module = bin::name(pos);
- auto name = bin::name(pos);
- own<ExternType*> type;
- switch (*pos++) {
- case i::wasm::kExternalFunction:
- type = types[bin::u32(pos)]->copy();
- break;
- case i::wasm::kExternalTable:
- type = bin::tabletype(pos);
- break;
- case i::wasm::kExternalMemory:
- type = bin::memorytype(pos);
- break;
- case i::wasm::kExternalGlobal:
- type = bin::globaltype(pos);
- break;
- default:
- UNREACHABLE();
- }
- v[i] =
- ImportType::make(std::move(module), std::move(name), std::move(type));
- }
- assert(pos == bin::section_end(binary, i::wasm::kImportSectionCode));
- return v;
-}
-
-auto count(const vec<ImportType*>& imports, ExternKind kind) -> uint32_t {
- uint32_t n = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- if (imports[i]->type()->kind() == kind) ++n;
- }
- return n;
}
-// Function section
-
-auto funcs(const vec<const byte_t>& binary, const vec<ImportType*>& imports,
- const vec<FuncType*>& types) -> vec<FuncType*> {
- auto pos = bin::section(binary, i::wasm::kFunctionSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v =
- vec<FuncType*>::make_uninitialized(size + count(imports, EXTERN_FUNC));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_FUNC) {
- v[j++] = et->func()->copy();
- }
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = types[bin::u32(pos)]->copy();
- }
- assert(pos == bin::section_end(binary, i::wasm::kFunctionSectionCode));
- }
- return v;
+Name GetNameFromWireBytes(const i::wasm::WireBytesRef& ref,
+ const i::Vector<const uint8_t>& wire_bytes) {
+ DCHECK_LE(ref.offset(), wire_bytes.length());
+ DCHECK_LE(ref.end_offset(), wire_bytes.length());
+ Name name = Name::make_uninitialized(ref.length());
+ std::memcpy(name.get(), wire_bytes.begin() + ref.offset(), ref.length());
+ return name;
}
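Names in a decoded module are not C strings; a WireBytesRef is an (offset, length) pair into the module's wire bytes, which is why the helper memcpys the referenced range into an owning Name. The same idea with standard-library types, as a sketch:

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

struct ByteRange {  // stand-in for i::wasm::WireBytesRef
  size_t offset;
  size_t length;
};

// Copy a referenced byte range out of the wire bytes into owning storage.
std::string NameFromWireBytes(ByteRange ref, const std::vector<uint8_t>& wire) {
  return std::string(reinterpret_cast<const char*>(wire.data()) + ref.offset,
                     ref.length);
}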
-// Global section
-
-auto globals(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<GlobalType*> {
- auto pos = bin::section(binary, i::wasm::kGlobalSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v = vec<GlobalType*>::make_uninitialized(size +
- count(imports, EXTERN_GLOBAL));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_GLOBAL) {
- v[j++] = et->global()->copy();
- }
+own<FuncType*> FunctionSigToFuncType(const i::wasm::FunctionSig* sig) {
+ size_t param_count = sig->parameter_count();
+ vec<ValType*> params = vec<ValType*>::make_uninitialized(param_count);
+ for (size_t i = 0; i < param_count; i++) {
+ params[i] = ValType::make(V8ValueTypeToWasm(sig->GetParam(i)));
}
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::globaltype(pos);
- expr_skip(pos);
- }
- assert(pos == bin::section_end(binary, i::wasm::kGlobalSectionCode));
+ size_t return_count = sig->return_count();
+ vec<ValType*> results = vec<ValType*>::make_uninitialized(return_count);
+ for (size_t i = 0; i < return_count; i++) {
+ results[i] = ValType::make(V8ValueTypeToWasm(sig->GetReturn(i)));
}
- return v;
+ return FuncType::make(std::move(params), std::move(results));
}
-// Table section
-
-auto tables(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<TableType*> {
- auto pos = bin::section(binary, i::wasm::kTableSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v =
- vec<TableType*>::make_uninitialized(size + count(imports, EXTERN_TABLE));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_TABLE) {
- v[j++] = et->table()->copy();
- }
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::tabletype(pos);
+own<ExternType*> GetImportExportType(const i::wasm::WasmModule* module,
+ const i::wasm::ImportExportKindCode kind,
+ const uint32_t index) {
+ switch (kind) {
+ case i::wasm::kExternalFunction: {
+ return FunctionSigToFuncType(module->functions[index].sig);
}
- assert(pos == bin::section_end(binary, i::wasm::kTableSectionCode));
- }
- return v;
-}
-
-// Memory section
-
-auto memories(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<MemoryType*> {
- auto pos = bin::section(binary, i::wasm::kMemorySectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v = vec<MemoryType*>::make_uninitialized(size +
- count(imports, EXTERN_MEMORY));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_MEMORY) {
- v[j++] = et->memory()->copy();
+ case i::wasm::kExternalTable: {
+ const i::wasm::WasmTable& table = module->tables[index];
+ own<ValType*> elem = ValType::make(V8ValueTypeToWasm(table.type));
+ Limits limits(table.initial_size,
+ table.has_maximum_size ? table.maximum_size : -1);
+ return TableType::make(std::move(elem), limits);
}
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::memorytype(pos);
+ case i::wasm::kExternalMemory: {
+ DCHECK(module->has_memory);
+ Limits limits(module->initial_pages,
+ module->has_maximum_pages ? module->maximum_pages : -1);
+ return MemoryType::make(limits);
}
- assert(pos == bin::section_end(binary, i::wasm::kMemorySectionCode));
- }
- return v;
-}
-
-// Export section
-
-auto exports(const vec<const byte_t>& binary, const vec<FuncType*>& funcs,
- const vec<GlobalType*>& globals, const vec<TableType*>& tables,
- const vec<MemoryType*>& memories) -> vec<ExportType*> {
- auto pos = bin::section(binary, i::wasm::kExportSectionCode);
- if (pos == nullptr) return vec<ExportType*>::make();
- size_t size = bin::u32(pos);
- auto exports = vec<ExportType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- auto name = bin::name(pos);
- auto tag = *pos++;
- auto index = bin::u32(pos);
- own<ExternType*> type;
- switch (tag) {
- case i::wasm::kExternalFunction:
- type = funcs[index]->copy();
- break;
- case i::wasm::kExternalTable:
- type = tables[index]->copy();
- break;
- case i::wasm::kExternalMemory:
- type = memories[index]->copy();
- break;
- case i::wasm::kExternalGlobal:
- type = globals[index]->copy();
- break;
- default:
- UNREACHABLE();
+ case i::wasm::kExternalGlobal: {
+ const i::wasm::WasmGlobal& global = module->globals[index];
+ own<ValType*> content = ValType::make(V8ValueTypeToWasm(global.type));
+ Mutability mutability = global.mutability ? VAR : CONST;
+ return GlobalType::make(std::move(content), mutability);
}
- exports[i] = ExportType::make(std::move(name), std::move(type));
- }
- assert(pos == bin::section_end(binary, i::wasm::kExportSectionCode));
- return exports;
-}
-
-auto imports(const vec<const byte_t>& binary) -> vec<ImportType*> {
- return bin::imports(binary, bin::types(binary));
-}
-
-auto exports(const vec<const byte_t>& binary) -> vec<ExportType*> {
- auto types = bin::types(binary);
- auto imports = bin::imports(binary, types);
- auto funcs = bin::funcs(binary, imports, types);
- auto globals = bin::globals(binary, imports);
- auto tables = bin::tables(binary, imports);
- auto memories = bin::memories(binary, imports);
- return bin::exports(binary, funcs, globals, tables, memories);
-}
-
-} // namespace bin
-} // namespace wasm
-
-// BEGIN FILE wasm-v8-lowlevel.cc
-
-namespace v8 {
-namespace wasm {
-
-// Foreign pointers
-
-auto foreign_new(v8::Isolate* isolate, void* ptr) -> v8::Local<v8::Value> {
- auto foreign = v8::FromCData(reinterpret_cast<i::Isolate*>(isolate),
- reinterpret_cast<i::Address>(ptr));
- return v8::Utils::ToLocal(foreign);
-}
-
-auto foreign_get(v8::Local<v8::Value> val) -> void* {
- auto foreign = v8::Utils::OpenHandle(*val);
- if (!foreign->IsForeign()) return nullptr;
- auto addr = v8::ToCData<i::Address>(*foreign);
- return reinterpret_cast<void*>(addr);
-}
-
-// Types
-
-auto v8_valtype_to_wasm(i::wasm::ValueType v8_valtype) -> ::wasm::ValKind {
- switch (v8_valtype) {
- case i::wasm::kWasmI32:
- return ::wasm::I32;
- case i::wasm::kWasmI64:
- return ::wasm::I64;
- case i::wasm::kWasmF32:
- return ::wasm::F32;
- case i::wasm::kWasmF64:
- return ::wasm::F64;
- default:
- // TODO(wasm+): support new value types
- UNREACHABLE();
- }
-}
-
-i::wasm::ValueType wasm_valtype_to_v8(::wasm::ValKind type) {
- switch (type) {
- case ::wasm::I32:
- return i::wasm::kWasmI32;
- case ::wasm::I64:
- return i::wasm::kWasmI64;
- case ::wasm::F32:
- return i::wasm::kWasmF32;
- case ::wasm::F64:
- return i::wasm::kWasmF64;
- default:
- // TODO(wasm+): support new value types
+ case i::wasm::kExternalException:
UNREACHABLE();
+ return {};
}
}
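Note the -1 passed as the upper bound in the table and memory cases: Limits stores its maximum as a uint32_t, so -1 wraps to 0xFFFFFFFF, which (assuming the upstream wasm C-API header's Limits with a public uint32_t max) is the default "no maximum" sentinel. In isolation:

// Sketch under that assumption: -1 wraps to the "unbounded" value.
Limits unbounded(/*min=*/1, static_cast<uint32_t>(-1));
assert(unbounded.max == 0xFFFFFFFFu);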
-} // namespace wasm
-} // namespace v8
+} // namespace
// BEGIN FILE wasm-v8.cc
-namespace wasm {
-
///////////////////////////////////////////////////////////////////////////////
// Auxiliaries
@@ -695,6 +249,7 @@ void Engine::operator delete(void* p) { ::operator delete(p); }
auto Engine::make(own<Config*>&& config) -> own<Engine*> {
i::FLAG_expose_gc = true;
+ i::FLAG_experimental_wasm_anyref = true;
i::FLAG_experimental_wasm_bigint = true;
i::FLAG_experimental_wasm_mv = true;
auto engine = new (std::nothrow) EngineImpl;
@@ -714,7 +269,6 @@ StoreImpl::~StoreImpl() {
v8::kGCCallbackFlagForced);
#endif
context()->Exit();
- isolate_->Exit();
isolate_->Dispose();
delete create_params_.array_buffer_allocator;
}
@@ -739,7 +293,6 @@ auto Store::make(Engine*) -> own<Store*> {
if (!isolate) return own<Store*>();
{
- v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
// Create context.
@@ -750,8 +303,10 @@ auto Store::make(Engine*) -> own<Store*> {
store->isolate_ = isolate;
store->context_ = v8::Eternal<v8::Context>(isolate, context);
}
-
- store->isolate()->Enter();
+ // We intentionally do not call isolate->Enter() here, because that would
+ // prevent embedders from using stores with overlapping but non-nested
+ // lifetimes. The consequence is that Isolate::Current() is dysfunctional
+ // and hence must not be called by anything reachable via this file.
store->context()->Enter();
isolate->SetData(0, store.get());
@@ -831,7 +386,8 @@ struct FuncTypeImpl : ExternTypeImpl {
vec<ValType*> params;
vec<ValType*> results;
- FuncTypeImpl(vec<ValType*>& params, vec<ValType*>& results)
+ FuncTypeImpl(vec<ValType*>& params, // NOLINT(runtime/references)
+ vec<ValType*>& results) // NOLINT(runtime/references)
: ExternTypeImpl(EXTERN_FUNC),
params(std::move(params)),
results(std::move(results)) {}
@@ -884,7 +440,8 @@ struct GlobalTypeImpl : ExternTypeImpl {
own<ValType*> content;
Mutability mutability;
- GlobalTypeImpl(own<ValType*>& content, Mutability mutability)
+ GlobalTypeImpl(own<ValType*>& content, // NOLINT(runtime/references)
+ Mutability mutability)
: ExternTypeImpl(EXTERN_GLOBAL),
content(std::move(content)),
mutability(mutability) {}
@@ -936,7 +493,8 @@ struct TableTypeImpl : ExternTypeImpl {
own<ValType*> element;
Limits limits;
- TableTypeImpl(own<ValType*>& element, Limits limits)
+ TableTypeImpl(own<ValType*>& element, // NOLINT(runtime/references)
+ Limits limits)
: ExternTypeImpl(EXTERN_TABLE),
element(std::move(element)),
limits(limits) {}
@@ -1028,7 +586,9 @@ struct ImportTypeImpl {
Name name;
own<ExternType*> type;
- ImportTypeImpl(Name& module, Name& name, own<ExternType*>& type)
+ ImportTypeImpl(Name& module, // NOLINT(runtime/references)
+ Name& name, // NOLINT(runtime/references)
+ own<ExternType*>& type) // NOLINT(runtime/references)
: module(std::move(module)),
name(std::move(name)),
type(std::move(type)) {}
@@ -1071,7 +631,8 @@ struct ExportTypeImpl {
Name name;
own<ExternType*> type;
- ExportTypeImpl(Name& name, own<ExternType*>& type)
+ ExportTypeImpl(Name& name, // NOLINT(runtime/references)
+ own<ExternType*>& type) // NOLINT(runtime/references)
: name(std::move(name)), type(std::move(type)) {}
~ExportTypeImpl() {}
@@ -1103,89 +664,14 @@ auto ExportType::type() const -> const ExternType* {
return impl(this)->type.get();
}
-///////////////////////////////////////////////////////////////////////////////
-// Conversions of values from and to V8 objects
-
-auto val_to_v8(StoreImpl* store, const Val& v) -> v8::Local<v8::Value> {
- auto isolate = store->isolate();
- switch (v.kind()) {
- case I32:
- return v8::Integer::NewFromUnsigned(isolate, v.i32());
- case I64:
- return v8::BigInt::New(isolate, v.i64());
- case F32:
- return v8::Number::New(isolate, v.f32());
- case F64:
- return v8::Number::New(isolate, v.f64());
- case ANYREF:
- case FUNCREF: {
- if (v.ref() == nullptr) {
- return v8::Null(isolate);
- } else {
- WASM_UNIMPLEMENTED("ref value");
- }
- }
- default:
- UNREACHABLE();
- }
-}
-
-own<Val> v8_to_val(i::Isolate* isolate, i::Handle<i::Object> value,
- ValKind kind) {
- switch (kind) {
- case I32:
- do {
- if (value->IsSmi()) return Val(i::Smi::ToInt(*value));
- if (value->IsHeapNumber()) {
- return Val(i::DoubleToInt32(i::HeapNumber::cast(*value).value()));
- }
- value = i::Object::ToInt32(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case I64:
- if (value->IsBigInt()) return Val(i::BigInt::cast(*value).AsInt64());
- return Val(
- i::BigInt::FromObject(isolate, value).ToHandleChecked()->AsInt64());
- case F32:
- do {
- if (value->IsSmi()) {
- return Val(static_cast<float32_t>(i::Smi::ToInt(*value)));
- }
- if (value->IsHeapNumber()) {
- return Val(i::DoubleToFloat32(i::HeapNumber::cast(*value).value()));
- }
- value = i::Object::ToNumber(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case F64:
- do {
- if (value->IsSmi()) {
- return Val(static_cast<float64_t>(i::Smi::ToInt(*value)));
- }
- if (value->IsHeapNumber()) {
- return Val(i::HeapNumber::cast(*value).value());
- }
- value = i::Object::ToNumber(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case ANYREF:
- case FUNCREF: {
- if (value->IsNull(isolate)) {
- return Val(nullptr);
- } else {
- WASM_UNIMPLEMENTED("ref value");
- }
- }
- }
-}
-
i::Handle<i::String> VecToString(i::Isolate* isolate,
const vec<byte_t>& chars) {
+ size_t length = chars.size();
+ // Some, but not all, {chars} vectors we get here are null-terminated,
+ // so let's be robust to that.
+ if (length > 0 && chars[length - 1] == 0) length--;
return isolate->factory()
- ->NewStringFromUtf8({chars.get(), chars.size()})
+ ->NewStringFromUtf8({chars.get(), length})
.ToHandleChecked();
}
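The trimming guards against two provenances of Name vectors meeting in one lookup: names built from C string literals typically include the terminating NUL in their size, while names copied out of wire bytes never do, and "foo\0" would otherwise fail to match "foo". The rule it implements, in isolation:

// Drop exactly one trailing NUL, if present; leave everything else alone.
size_t EffectiveLength(const char* chars, size_t length) {
  return (length > 0 && chars[length - 1] == '\0') ? length - 1 : length;
}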
@@ -1327,11 +813,12 @@ Foreign::~Foreign() {}
auto Foreign::copy() const -> own<Foreign*> { return impl(this)->copy(); }
auto Foreign::make(Store* store_abs) -> own<Foreign*> {
- auto store = impl(store_abs);
- auto isolate = store->i_isolate();
+ StoreImpl* store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
- auto obj = i::Handle<i::JSReceiver>();
+ i::Handle<i::JSObject> obj =
+ isolate->factory()->NewJSObject(isolate->object_function());
return implement<Foreign>::type::make(store, obj);
}
@@ -1379,22 +866,37 @@ auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module*> {
}
auto Module::imports() const -> vec<ImportType*> {
- i::Vector<const uint8_t> wire_bytes =
- impl(this)->v8_object()->native_module()->wire_bytes();
- vec<const byte_t> binary = vec<const byte_t>::adopt(
- wire_bytes.size(), reinterpret_cast<const byte_t*>(wire_bytes.begin()));
- auto imports = wasm::bin::imports(binary);
- binary.release();
+ const i::wasm::NativeModule* native_module =
+ impl(this)->v8_object()->native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ const std::vector<i::wasm::WasmImport>& import_table = module->import_table;
+ size_t size = import_table.size();
+ vec<ImportType*> imports = vec<ImportType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; i++) {
+ const i::wasm::WasmImport& imp = import_table[i];
+ Name module_name = GetNameFromWireBytes(imp.module_name, wire_bytes);
+ Name name = GetNameFromWireBytes(imp.field_name, wire_bytes);
+ own<ExternType*> type = GetImportExportType(module, imp.kind, imp.index);
+ imports[i] = ImportType::make(std::move(module_name), std::move(name),
+ std::move(type));
+ }
return imports;
}
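Module::imports() thus no longer re-parses the binary with the deleted wasm-bin decoder; it walks the already-decoded WasmModule::import_table and resolves each entry's type via GetImportExportType. The embedder-facing API is unchanged, so usage remains the usual C-API pattern (hypothetical sketch; `store` and `binary` are assumed to exist):

// Hypothetical embedder code exercising the rewritten accessor.
own<Module*> module = Module::make(store, binary);
vec<ImportType*> imports = module->imports();
for (size_t i = 0; i < imports.size(); ++i) {
  const Name& module_name = imports[i]->module();  // e.g. "env"
  const Name& field_name = imports[i]->name();     // e.g. "memory"
  const ExternType* type = imports[i]->type();
  (void)module_name; (void)field_name; (void)type;
}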
vec<ExportType*> ExportsImpl(i::Handle<i::WasmModuleObject> module_obj) {
- i::Vector<const uint8_t> wire_bytes =
- module_obj->native_module()->wire_bytes();
- vec<const byte_t> binary = vec<const byte_t>::adopt(
- wire_bytes.size(), reinterpret_cast<const byte_t*>(wire_bytes.begin()));
- auto exports = wasm::bin::exports(binary);
- binary.release();
+ const i::wasm::NativeModule* native_module = module_obj->native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ const std::vector<i::wasm::WasmExport>& export_table = module->export_table;
+ size_t size = export_table.size();
+ vec<ExportType*> exports = vec<ExportType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; i++) {
+ const i::wasm::WasmExport& exp = export_table[i];
+ Name name = GetNameFromWireBytes(exp.name, wire_bytes);
+ own<ExternType*> type = GetImportExportType(module, exp.kind, exp.index);
+ exports[i] = ExportType::make(std::move(name), std::move(type));
+ }
return exports;
}
@@ -1430,7 +932,7 @@ auto Module::deserialize(Store* store_abs, const vec<byte_t>& serialized)
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
const byte_t* ptr = serialized.get();
- uint64_t binary_size = wasm::bin::u64(ptr);
+ uint64_t binary_size = ReadLebU64(&ptr);
ptrdiff_t size_size = ptr - serialized.get();
size_t serial_size = serialized.size() - size_size - binary_size;
i::Handle<i::WasmModuleObject> module_obj;
@@ -1597,16 +1099,14 @@ class SignatureHelper : public i::AllStatic {
int index = 0;
// TODO(jkummerow): Consider making vec<> range-based for-iterable.
for (size_t i = 0; i < type->results().size(); i++) {
- sig->set(index++,
- v8::wasm::wasm_valtype_to_v8(type->results()[i]->kind()));
+ sig->set(index++, WasmValKindToV8(type->results()[i]->kind()));
}
// {sig->set} needs to take the address of its second parameter,
// so we can't pass in the static const kMarker directly.
i::wasm::ValueType marker = kMarker;
sig->set(index++, marker);
for (size_t i = 0; i < type->params().size(); i++) {
- sig->set(index++,
- v8::wasm::wasm_valtype_to_v8(type->params()[i]->kind()));
+ sig->set(index++, WasmValKindToV8(type->params()[i]->kind()));
}
return sig;
}
@@ -1619,11 +1119,11 @@ class SignatureHelper : public i::AllStatic {
int i = 0;
for (; i < result_arity; ++i) {
- results[i] = ValType::make(v8::wasm::v8_valtype_to_wasm(sig.get(i)));
+ results[i] = ValType::make(V8ValueTypeToWasm(sig.get(i)));
}
i++; // Skip marker.
for (int p = 0; i < sig.length(); ++i, ++p) {
- params[p] = ValType::make(v8::wasm::v8_valtype_to_wasm(sig.get(i)));
+ params[p] = ValType::make(V8ValueTypeToWasm(sig.get(i)));
}
return FuncType::make(std::move(params), std::move(results));
}
@@ -1684,22 +1184,8 @@ auto Func::type() const -> own<FuncType*> {
DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*func));
i::Handle<i::WasmExportedFunction> function =
i::Handle<i::WasmExportedFunction>::cast(func);
- i::wasm::FunctionSig* sig =
- function->instance().module()->functions[function->function_index()].sig;
- uint32_t param_arity = static_cast<uint32_t>(sig->parameter_count());
- uint32_t result_arity = static_cast<uint32_t>(sig->return_count());
- auto params = vec<ValType*>::make_uninitialized(param_arity);
- auto results = vec<ValType*>::make_uninitialized(result_arity);
-
- for (size_t i = 0; i < params.size(); ++i) {
- auto kind = v8::wasm::v8_valtype_to_wasm(sig->GetParam(i));
- params[i] = ValType::make(kind);
- }
- for (size_t i = 0; i < results.size(); ++i) {
- auto kind = v8::wasm::v8_valtype_to_wasm(sig->GetReturn(i));
- results[i] = ValType::make(kind);
- }
- return FuncType::make(std::move(params), std::move(results));
+ return FunctionSigToFuncType(
+ function->instance().module()->functions[function->function_index()].sig);
}
auto Func::param_arity() const -> size_t {
@@ -1728,74 +1214,183 @@ auto Func::result_arity() const -> size_t {
return sig->return_count();
}
+namespace {
+
+void PrepareFunctionData(i::Isolate* isolate,
+ i::Handle<i::WasmExportedFunctionData> function_data,
+ i::wasm::FunctionSig* sig) {
+ // If the data is already populated, return immediately.
+ if (!function_data->c_wrapper_code().IsSmi()) return;
+ // Compile wrapper code.
+ i::Handle<i::Code> wrapper_code =
+ i::compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
+ function_data->set_c_wrapper_code(*wrapper_code);
+ // Compute packed args size.
+ function_data->set_packed_args_size(
+ i::wasm::CWasmArgumentsPacker::TotalSize(sig));
+ // Get call target (function table offset). This is an Address, we store
+ // it as a pseudo-Smi by shifting it by one bit, so the GC leaves it alone.
+ i::Address call_target =
+ function_data->instance().GetCallTarget(function_data->function_index());
+ i::Smi smi_target((call_target << i::kSmiTagSize) | i::kSmiTag);
+ function_data->set_wasm_call_target(smi_target);
+}
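The pseudo-Smi trick above is the subtle part: a raw code Address stored in a tagged field would be misread by the GC as an object pointer, but a value whose low bit matches kSmiTag (0, with kSmiTagSize == 1) is never visited. Encoding shifts the address up by one bit; Func::call below decodes it with the matching right shift. In isolation:

// Sketch of the encoding; the left shift drops the address's top bit, which
// is acceptable for call targets here.
uintptr_t EncodeAsPseudoSmi(uintptr_t addr) { return (addr << 1) | 0x0; }
uintptr_t DecodePseudoSmi(uintptr_t tagged) { return tagged >> 1; }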
+
+void PushArgs(i::wasm::FunctionSig* sig, const Val args[],
+ i::wasm::CWasmArgumentsPacker* packer) {
+ for (size_t i = 0; i < sig->parameter_count(); i++) {
+ i::wasm::ValueType type = sig->GetParam(i);
+ switch (type) {
+ case i::wasm::kWasmI32:
+ packer->Push(args[i].i32());
+ break;
+ case i::wasm::kWasmI64:
+ packer->Push(args[i].i64());
+ break;
+ case i::wasm::kWasmF32:
+ packer->Push(args[i].f32());
+ break;
+ case i::wasm::kWasmF64:
+ packer->Push(args[i].f64());
+ break;
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmFuncRef:
+ packer->Push(impl(args[i].ref())->v8_object()->ptr());
+ break;
+ case i::wasm::kWasmExnRef:
+ // TODO(jkummerow): Implement these.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
+
+void PopArgs(i::wasm::FunctionSig* sig, Val results[],
+ i::wasm::CWasmArgumentsPacker* packer, StoreImpl* store) {
+ packer->Reset();
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ i::wasm::ValueType type = sig->GetReturn(i);
+ switch (type) {
+ case i::wasm::kWasmI32:
+ results[i] = Val(packer->Pop<int32_t>());
+ break;
+ case i::wasm::kWasmI64:
+ results[i] = Val(packer->Pop<int64_t>());
+ break;
+ case i::wasm::kWasmF32:
+ results[i] = Val(packer->Pop<float>());
+ break;
+ case i::wasm::kWasmF64:
+ results[i] = Val(packer->Pop<double>());
+ break;
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmFuncRef: {
+ i::Address raw = packer->Pop<i::Address>();
+ if (raw == i::kNullAddress) {
+ results[i] = Val(nullptr);
+ } else {
+ i::JSReceiver raw_obj = i::JSReceiver::cast(i::Object(raw));
+ i::Handle<i::JSReceiver> obj(raw_obj, store->i_isolate());
+ results[i] = Val(implement<Ref>::type::make(store, obj));
+ }
+ break;
+ }
+ case i::wasm::kWasmExnRef:
+ // TODO(jkummerow): Implement these.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
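PushArgs and PopArgs share one flat buffer: arguments are packed in parameter order before the call, the wrapper overwrites the same buffer with return values, and PopArgs therefore starts with packer->Reset() to rewind the cursor before reading results. A minimal stand-in for that contract (the real CWasmArgumentsPacker additionally derives the buffer size from the signature via TotalSize):

#include <cstdint>
#include <cstring>
#include <vector>

// Sequential-cursor byte buffer: Push writes forward, Reset rewinds, Pop
// reads forward; callers must Pop in the exact type order they expect.
class ArgPackerSketch {
 public:
  explicit ArgPackerSketch(size_t size) : buf_(size) {}
  template <typename T>
  void Push(T val) {
    std::memcpy(buf_.data() + pos_, &val, sizeof(T));
    pos_ += sizeof(T);
  }
  template <typename T>
  T Pop() {
    T val;
    std::memcpy(&val, buf_.data() + pos_, sizeof(T));
    pos_ += sizeof(T);
    return val;
  }
  void Reset() { pos_ = 0; }
  uint8_t* argv() { return buf_.data(); }

 private:
  std::vector<uint8_t> buf_;
  size_t pos_ = 0;
};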
+
+own<Trap*> CallWasmCapiFunction(i::WasmCapiFunctionData data, const Val args[],
+ Val results[]) {
+ FuncData* func_data = reinterpret_cast<FuncData*>(data.embedder_data());
+ if (func_data->kind == FuncData::kCallback) {
+ return (func_data->callback)(args, results);
+ }
+ DCHECK(func_data->kind == FuncData::kCallbackWithEnv);
+ return (func_data->callback_with_env)(func_data->env, args, results);
+}
+
+} // namespace
+
auto Func::call(const Val args[], Val results[]) const -> own<Trap*> {
auto func = impl(this);
auto store = func->store();
- auto isolate = store->isolate();
- auto i_isolate = store->i_isolate();
- v8::HandleScope handle_scope(isolate);
-
- int num_params;
- int num_results;
- ValKind result_kind;
- i::Handle<i::JSFunction> v8_func = func->v8_object();
- if (i::WasmExportedFunction::IsWasmExportedFunction(*v8_func)) {
- i::WasmExportedFunction wef = i::WasmExportedFunction::cast(*v8_func);
- i::wasm::FunctionSig* sig =
- wef.instance().module()->functions[wef.function_index()].sig;
- num_params = static_cast<int>(sig->parameter_count());
- num_results = static_cast<int>(sig->return_count());
- if (num_results > 0) {
- result_kind = v8::wasm::v8_valtype_to_wasm(sig->GetReturn(0));
- }
-#if DEBUG
- for (int i = 0; i < num_params; i++) {
- DCHECK_EQ(args[i].kind(), v8::wasm::v8_valtype_to_wasm(sig->GetParam(i)));
+ auto isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
+ i::Object raw_function_data = func->v8_object()->shared().function_data();
+
+ // WasmCapiFunctions can be called directly.
+ if (raw_function_data.IsWasmCapiFunctionData()) {
+ return CallWasmCapiFunction(
+ i::WasmCapiFunctionData::cast(raw_function_data), args, results);
+ }
+
+ DCHECK(raw_function_data.IsWasmExportedFunctionData());
+ i::Handle<i::WasmExportedFunctionData> function_data(
+ i::WasmExportedFunctionData::cast(raw_function_data), isolate);
+ i::Handle<i::WasmInstanceObject> instance(function_data->instance(), isolate);
+ int function_index = function_data->function_index();
+ // Caching {sig} would give a ~10% reduction in overhead.
+ i::wasm::FunctionSig* sig = instance->module()->functions[function_index].sig;
+ PrepareFunctionData(isolate, function_data, sig);
+ i::Handle<i::Code> wrapper_code = i::Handle<i::Code>(
+ i::Code::cast(function_data->c_wrapper_code()), isolate);
+ i::Address call_target =
+ function_data->wasm_call_target().ptr() >> i::kSmiTagSize;
+
+ i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
+ PushArgs(sig, args, &packer);
+
+ i::Handle<i::Object> object_ref = instance;
+ if (function_index <
+ static_cast<int>(instance->module()->num_imported_functions)) {
+ object_ref = i::handle(
+ instance->imported_function_refs().get(function_index), isolate);
+ if (object_ref->IsTuple2()) {
+ i::JSFunction jsfunc =
+ i::JSFunction::cast(i::Tuple2::cast(*object_ref).value2());
+ i::Object data = jsfunc.shared().function_data();
+ if (data.IsWasmCapiFunctionData()) {
+ return CallWasmCapiFunction(i::WasmCapiFunctionData::cast(data), args,
+ results);
+ }
+ // TODO(jkummerow): Imported and then re-exported JavaScript functions
+ // are not supported yet. If we support C-API + JavaScript, we'll need
+ // to call those here.
+ UNIMPLEMENTED();
+ } else {
+ // A WasmFunction from another module.
+ DCHECK(object_ref->IsWasmInstanceObject());
}
-#endif
- } else {
- DCHECK(i::WasmCapiFunction::IsWasmCapiFunction(*v8_func));
- UNIMPLEMENTED();
- }
- // TODO(rossberg): cache v8_args array per thread.
- auto v8_args = std::unique_ptr<i::Handle<i::Object>[]>(
- new (std::nothrow) i::Handle<i::Object>[num_params]);
- for (int i = 0; i < num_params; ++i) {
- v8_args[i] = v8::Utils::OpenHandle(*val_to_v8(store, args[i]));
- }
-
- // TODO(jkummerow): Use Execution::TryCall instead of manual TryCatch.
- v8::TryCatch handler(isolate);
- i::MaybeHandle<i::Object> maybe_val = i::Execution::Call(
- i_isolate, func->v8_object(), i_isolate->factory()->undefined_value(),
- num_params, v8_args.get());
-
- if (handler.HasCaught()) {
- i_isolate->OptionalRescheduleException(true);
- i::Handle<i::Object> exception =
- v8::Utils::OpenHandle(*handler.Exception());
+ }
+
+ i::Execution::CallWasm(isolate, wrapper_code, call_target, object_ref,
+ packer.argv());
+
+ if (isolate->has_pending_exception()) {
+ i::Handle<i::Object> exception(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
if (!exception->IsJSReceiver()) {
i::MaybeHandle<i::String> maybe_string =
- i::Object::ToString(i_isolate, exception);
+ i::Object::ToString(isolate, exception);
i::Handle<i::String> string = maybe_string.is_null()
- ? i_isolate->factory()->empty_string()
+ ? isolate->factory()->empty_string()
: maybe_string.ToHandleChecked();
exception =
- i_isolate->factory()->NewError(i_isolate->error_function(), string);
+ isolate->factory()->NewError(isolate->error_function(), string);
}
return implement<Trap>::type::make(
store, i::Handle<i::JSReceiver>::cast(exception));
}
- auto val = maybe_val.ToHandleChecked();
- if (num_results == 0) {
- assert(val->IsUndefined(i_isolate));
- } else if (num_results == 1) {
- assert(!val->IsUndefined(i_isolate));
- new (&results[0]) Val(v8_to_val(i_isolate, val, result_kind));
- } else {
- WASM_UNIMPLEMENTED("multiple results");
- }
+ PopArgs(sig, results, &packer, store);
return nullptr;
}
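Two behavioral consequences of this rewrite are easy to miss in the noise: error handling moves from a v8::TryCatch around Execution::Call to checking isolate->has_pending_exception() after Execution::CallWasm, and because results come back through PopArgs rather than a single JavaScript return value, multi-value returns now work where the old code hit WASM_UNIMPLEMENTED("multiple results").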
@@ -1814,24 +1409,24 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
for (int i = 0; i < num_param_types; ++i) {
switch (param_types[i]->kind()) {
case I32:
- params[i] = Val(i::ReadUnalignedValue<int32_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<int32_t>(p));
p += 4;
break;
case I64:
- params[i] = Val(i::ReadUnalignedValue<int64_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<int64_t>(p));
p += 8;
break;
case F32:
- params[i] = Val(i::ReadUnalignedValue<float32_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<float32_t>(p));
p += 4;
break;
case F64:
- params[i] = Val(i::ReadUnalignedValue<float64_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<float64_t>(p));
p += 8;
break;
case ANYREF:
case FUNCREF: {
- i::Address raw = i::ReadUnalignedValue<i::Address>(p);
+ i::Address raw = v8::base::ReadUnalignedValue<i::Address>(p);
p += sizeof(raw);
if (raw == i::kNullAddress) {
params[i] = Val(nullptr);
@@ -1864,27 +1459,28 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
for (int i = 0; i < num_result_types; ++i) {
switch (result_types[i]->kind()) {
case I32:
- i::WriteUnalignedValue(p, results[i].i32());
+ v8::base::WriteUnalignedValue(p, results[i].i32());
p += 4;
break;
case I64:
- i::WriteUnalignedValue(p, results[i].i64());
+ v8::base::WriteUnalignedValue(p, results[i].i64());
p += 8;
break;
case F32:
- i::WriteUnalignedValue(p, results[i].f32());
+ v8::base::WriteUnalignedValue(p, results[i].f32());
p += 4;
break;
case F64:
- i::WriteUnalignedValue(p, results[i].f64());
+ v8::base::WriteUnalignedValue(p, results[i].f64());
p += 8;
break;
case ANYREF:
case FUNCREF: {
if (results[i].ref() == nullptr) {
- i::WriteUnalignedValue(p, i::kNullAddress);
+ v8::base::WriteUnalignedValue(p, i::kNullAddress);
} else {
- i::WriteUnalignedValue(p, impl(results[i].ref())->v8_object()->ptr());
+ v8::base::WriteUnalignedValue(
+ p, impl(results[i].ref())->v8_object()->ptr());
}
p += sizeof(i::Address);
break;
@@ -1917,8 +1513,7 @@ auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
DCHECK_EQ(type->content()->kind(), val.kind());
- i::wasm::ValueType i_type =
- v8::wasm::wasm_valtype_to_v8(type->content()->kind());
+ i::wasm::ValueType i_type = WasmValKindToV8(type->content()->kind());
bool is_mutable = (type->mutability() == VAR);
const int32_t offset = 0;
i::Handle<i::WasmGlobalObject> obj =
@@ -1935,7 +1530,7 @@ auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
auto Global::type() const -> own<GlobalType*> {
i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
- ValKind kind = v8::wasm::v8_valtype_to_wasm(v8_global->type());
+ ValKind kind = V8ValueTypeToWasm(v8_global->type());
Mutability mutability = v8_global->is_mutable() ? VAR : CONST;
return GlobalType::make(ValType::make(kind), mutability);
}
@@ -1951,9 +1546,16 @@ auto Global::get() const -> Val {
return Val(v8_global->GetF32());
case F64:
return Val(v8_global->GetF64());
- case ANYREF:
- case FUNCREF:
- WASM_UNIMPLEMENTED("globals of reference type");
+ case ANYREF: {
+ i::Handle<i::JSReceiver> obj =
+ i::Handle<i::JSReceiver>::cast(v8_global->GetRef());
+ return Val(RefImpl<Ref, i::JSReceiver>::make(impl(this)->store(), obj));
+ }
+ case FUNCREF: {
+ i::Handle<i::JSFunction> obj =
+ i::Handle<i::JSFunction>::cast(v8_global->GetRef());
+ return Val(implement<Func>::type::make(impl(this)->store(), obj));
+ }
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -1972,8 +1574,14 @@ void Global::set(const Val& val) {
case F64:
return v8_global->SetF64(val.f64());
case ANYREF:
- case FUNCREF:
- WASM_UNIMPLEMENTED("globals of reference type");
+ return v8_global->SetAnyRef(impl(val.ref())->v8_object());
+ case FUNCREF: {
+ bool result = v8_global->SetFuncRef(impl(this)->store()->i_isolate(),
+ impl(val.ref())->v8_object());
+ DCHECK(result);
+ USE(result);
+ return;
+ }
default:
// TODO(wasm+): support new value types
UNREACHABLE();
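With both the get and set cases filled in, reference-typed globals round-trip through the C-API instead of hitting WASM_UNIMPLEMENTED. A hypothetical usage sketch (API names taken from the upstream wasm C-API header; assumes a valid store):

// Create a mutable anyref global initialized to null, then exercise it.
own<GlobalType*> type = GlobalType::make(ValType::make(ANYREF), VAR);
own<Global*> global = Global::make(store, type.get(), Val(nullptr));
global->set(Val(nullptr));
Val current = global->get();  // yields a reference-typed Val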
@@ -2002,7 +1610,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
i::wasm::ValueType i_type;
switch (type->element()->kind()) {
case FUNCREF:
- i_type = i::wasm::kWasmAnyFunc;
+ i_type = i::wasm::kWasmFuncRef;
break;
case ANYREF:
if (enabled_features.anyref) {
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 440267bd25..abb7b8ee86 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -10,8 +10,8 @@
#include <memory>
#include "src/base/compiler-specific.h"
+#include "src/base/memory.h"
#include "src/codegen/signature.h"
-#include "src/common/v8memory.h"
#include "src/flags/flags.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -299,7 +299,7 @@ class Decoder {
} else if (!validate_size(pc, sizeof(IntType), msg)) {
return IntType{0};
}
- return ReadLittleEndianValue<IntType>(reinterpret_cast<Address>(pc));
+ return base::ReadLittleEndianValue<IntType>(reinterpret_cast<Address>(pc));
}
template <typename IntType>
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index eb895a25b3..9f1ca23c62 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -13,6 +13,7 @@
#include "src/utils/bit-vector.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -64,7 +65,7 @@ struct WasmException;
#define ATOMIC_OP_LIST(V) \
V(AtomicNotify, Uint32) \
V(I32AtomicWait, Uint32) \
- V(I64AtomicWait, Uint32) \
+ V(I64AtomicWait, Uint64) \
V(I32AtomicLoad, Uint32) \
V(I64AtomicLoad, Uint64) \
V(I32AtomicLoad8U, Uint8) \
@@ -229,17 +230,17 @@ inline bool decode_local_type(uint8_t val, ValueType* result) {
case kLocalS128:
*result = kWasmS128;
return true;
- case kLocalAnyFunc:
- *result = kWasmAnyFunc;
+ case kLocalFuncRef:
+ *result = kWasmFuncRef;
return true;
case kLocalAnyRef:
*result = kWasmAnyRef;
return true;
- case kLocalExceptRef:
- *result = kWasmExceptRef;
+ case kLocalExnRef:
+ *result = kWasmExnRef;
return true;
default:
- *result = kWasmVar;
+ *result = kWasmBottom;
return false;
}
}
@@ -296,20 +297,20 @@ struct BlockTypeImmediate {
}
uint32_t in_arity() const {
- if (type != kWasmVar) return 0;
+ if (type != kWasmBottom) return 0;
return static_cast<uint32_t>(sig->parameter_count());
}
uint32_t out_arity() const {
if (type == kWasmStmt) return 0;
- if (type != kWasmVar) return 1;
+ if (type != kWasmBottom) return 1;
return static_cast<uint32_t>(sig->return_count());
}
ValueType in_type(uint32_t index) {
- DCHECK_EQ(kWasmVar, type);
+ DCHECK_EQ(kWasmBottom, type);
return sig->GetParam(index);
}
ValueType out_type(uint32_t index) {
- if (type == kWasmVar) return sig->GetReturn(index);
+ if (type == kWasmBottom) return sig->GetReturn(index);
DCHECK_NE(kWasmStmt, type);
DCHECK_EQ(0, index);
return type;
@@ -573,14 +574,14 @@ struct ElemDropImmediate {
template <Decoder::ValidateFlag validate>
struct TableCopyImmediate {
- TableIndexImmediate<validate> table_src;
TableIndexImmediate<validate> table_dst;
+ TableIndexImmediate<validate> table_src;
unsigned length = 0;
inline TableCopyImmediate(Decoder* decoder, const byte* pc) {
- table_src = TableIndexImmediate<validate>(decoder, pc + 1);
- table_dst =
- TableIndexImmediate<validate>(decoder, pc + 1 + table_src.length);
+ table_dst = TableIndexImmediate<validate>(decoder, pc + 1);
+ table_src =
+ TableIndexImmediate<validate>(decoder, pc + 1 + table_dst.length);
length = table_src.length + table_dst.length;
}
};
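
The swap matters because the decoder consumes immediates in wire order. A sketch of the assumed bulk-memory encoding, with single-byte indices for simplicity (the destination table index comes first):

const uint8_t kTableCopyBytes[] = {
    0xFC, 0x0E,  // misc prefix, table.copy sub-opcode
    0x00,        // table_dst index, decoded at pc + 1
    0x00,        // table_src index, decoded after table_dst
};
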
@@ -718,9 +719,9 @@ struct ControlBase {
const LocalIndexImmediate<validate>& imm) \
F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \
F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(GetTable, const Value& index, Value* result, \
+ F(TableGet, const Value& index, Value* result, \
const TableIndexImmediate<validate>& imm) \
- F(SetTable, const Value& index, const Value& value, \
+ F(TableSet, const Value& index, const Value& value, \
const TableIndexImmediate<validate>& imm) \
F(Unreachable) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
@@ -759,6 +760,7 @@ struct ControlBase {
Vector<Value> values) \
F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(AtomicFence) \
F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
const Value& src, const Value& size) \
F(DataDrop, const DataDropImmediate<validate>& imm) \
@@ -849,18 +851,18 @@ class WasmDecoder : public Decoder {
}
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
- case kLocalAnyFunc:
+ case kLocalFuncRef:
if (enabled.anyref) {
- type = kWasmAnyFunc;
+ type = kWasmFuncRef;
break;
}
decoder->error(decoder->pc() - 1,
- "local type 'anyfunc' is not enabled with "
+ "local type 'funcref' is not enabled with "
"--experimental-wasm-anyref");
return false;
- case kLocalExceptRef:
+ case kLocalExnRef:
if (enabled.eh) {
- type = kWasmExceptRef;
+ type = kWasmExnRef;
break;
}
decoder->error(decoder->pc() - 1, "invalid local type");
@@ -1015,8 +1017,8 @@ class WasmDecoder : public Decoder {
return false;
}
if (!VALIDATE(module_ != nullptr &&
- module_->tables[imm.table_index].type == kWasmAnyFunc)) {
- error("table of call_indirect must be of type anyfunc");
+ module_->tables[imm.table_index].type == kWasmFuncRef)) {
+ error("table of call_indirect must be of type funcref");
return false;
}
if (!Complete(pc, imm)) {
@@ -1049,6 +1051,12 @@ class WasmDecoder : public Decoder {
SimdLaneImmediate<validate>& imm) {
uint8_t num_lanes = 0;
switch (opcode) {
+ case kExprF64x2ExtractLane:
+ case kExprF64x2ReplaceLane:
+ case kExprI64x2ExtractLane:
+ case kExprI64x2ReplaceLane:
+ num_lanes = 2;
+ break;
case kExprF32x4ExtractLane:
case kExprF32x4ReplaceLane:
case kExprI32x4ExtractLane:
@@ -1079,6 +1087,11 @@ class WasmDecoder : public Decoder {
SimdShiftImmediate<validate>& imm) {
uint8_t max_shift = 0;
switch (opcode) {
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU:
+ max_shift = 64;
+ break;
case kExprI32x4Shl:
case kExprI32x4ShrS:
case kExprI32x4ShrU:
@@ -1121,7 +1134,7 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(BlockTypeImmediate<validate>& imm) {
- if (imm.type != kWasmVar) return true;
+ if (imm.type != kWasmBottom) return true;
if (!VALIDATE(module_ && imm.sig_index < module_->signatures.size())) {
return false;
}
@@ -1238,8 +1251,8 @@ class WasmDecoder : public Decoder {
GlobalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
- case kExprGetTable:
- case kExprSetTable: {
+ case kExprTableGet:
+ case kExprTableSet: {
TableIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
@@ -1405,6 +1418,12 @@ class WasmDecoder : public Decoder {
MemoryAccessImmediate<validate> imm(decoder, pc + 1, UINT32_MAX);
return 2 + imm.length;
}
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ {
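+ // Two bytes for the prefix and sub-opcode, plus the reserved zero byte.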
+ return 2 + 1;
+ }
default:
decoder->error(pc, "invalid Atomics opcode");
return 2;
@@ -1428,11 +1447,11 @@ class WasmDecoder : public Decoder {
case kExprSelect:
case kExprSelectWithType:
return {3, 1};
- case kExprSetTable:
+ case kExprTableSet:
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
return {2, 0};
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- case kExprGetTable:
+ case kExprTableGet:
case kExprTeeLocal:
case kExprMemoryGrow:
return {1, 1};
@@ -1536,7 +1555,6 @@ template <Decoder::ValidateFlag validate, typename Interface>
class WasmFullDecoder : public WasmDecoder<validate> {
using Value = typename Interface::Value;
using Control = typename Interface::Control;
- using MergeValues = Merge<Value>;
using ArgVector = base::SmallVector<Value, 8>;
// All Value types should be trivially copyable for performance. We push, pop,
@@ -1658,7 +1676,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
static Value UnreachableValue(const uint8_t* pc) {
- return Value{pc, kWasmVar};
+ return Value{pc, kWasmBottom};
}
bool CheckHasMemory() {
@@ -1760,7 +1778,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprRethrow: {
CHECK_PROTOTYPE_OPCODE(eh);
- auto exception = Pop(0, kWasmExceptRef);
+ auto exception = Pop(0, kWasmExnRef);
CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
EndControl();
break;
@@ -1806,7 +1824,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
FallThruTo(c);
stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
c->reachability = control_at(1)->innerReachability();
- auto* exception = Push(kWasmExceptRef);
+ auto* exception = Push(kWasmExnRef);
CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
break;
}
@@ -1816,7 +1834,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_, imm.depth, control_.size())) break;
if (!this->Validate(this->pc_ + imm.depth.length, imm.index)) break;
Control* c = control_at(imm.depth.depth);
- auto exception = Pop(0, kWasmExceptRef);
+ auto exception = Pop(0, kWasmExnRef);
const WasmExceptionSig* sig = imm.index.exception->sig;
size_t value_count = sig->parameter_count();
// TODO(mstarzinger): This operand stack mutation is an ugly hack to
@@ -1825,15 +1843,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// special handling for both and do minimal/no stack mutation here.
for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
Vector<Value> values(stack_.data() + c->stack_depth, value_count);
- if (!TypeCheckBranch(c)) break;
- if (control_.back().reachable()) {
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrOnException, exception, imm.index, imm.depth.depth,
values);
c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
for (size_t i = 0; i < value_count; ++i) Pop();
- auto* pexception = Push(kWasmExceptRef);
+ auto* pexception = Push(kWasmExnRef);
*pexception = exception;
break;
}
@@ -1875,7 +1895,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error(this->pc_, "else already present for if");
break;
}
- if (!TypeCheckFallThru(c)) break;
+ if (!TypeCheckFallThru()) break;
c->kind = kControlIfElse;
CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
if (c->reachable()) c->end_merge.reached = true;
@@ -1902,7 +1922,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- if (!TypeCheckFallThru(c)) break;
+ if (!TypeCheckFallThru()) break;
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
@@ -1917,7 +1937,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
control_.clear();
break;
}
-
PopControl(c);
break;
}
@@ -1925,8 +1944,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto cond = Pop(2, kWasmI32);
auto fval = Pop();
auto tval = Pop(0, fval.type);
- ValueType type = tval.type == kWasmVar ? fval.type : tval.type;
- if (ValueTypes::IsSubType(kWasmAnyRef, type)) {
+ ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
+ if (ValueTypes::IsSubType(type, kWasmAnyRef)) {
this->error(
"select without type is only valid for value type inputs");
break;
@@ -1951,12 +1970,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchDepthImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm, control_.size())) break;
Control* c = control_at(imm.depth);
- if (!TypeCheckBranch(c)) break;
- if (imm.depth == control_.size() - 1) {
- DoReturn();
- } else if (control_.back().reachable()) {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, false);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ if (imm.depth == control_.size() - 1) {
+ DoReturn();
+ } else {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
+ }
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
EndControl();
@@ -1968,10 +1991,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (this->failed()) break;
if (!this->Validate(this->pc_, imm, control_.size())) break;
Control* c = control_at(imm.depth);
- if (!TypeCheckBranch(c)) break;
- if (control_.back().reachable()) {
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrIf, cond, imm.depth);
c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
break;
@@ -1982,42 +2007,45 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto key = Pop(0, kWasmI32);
if (this->failed()) break;
if (!this->Validate(this->pc_, imm, control_.size())) break;
- uint32_t br_arity = 0;
+
+ // Cache the branch targets during the iteration, so that we can set
+ // all branch targets as reachable after the {CALL_INTERFACE} call.
std::vector<bool> br_targets(control_.size());
+
+ // The result types of the br_table instruction. We have to check the
+ // stack against these types. Only needed during validation.
+ std::vector<ValueType> result_types;
+
while (iterator.has_next()) {
- const uint32_t i = iterator.cur_index();
+ const uint32_t index = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
- if (!VALIDATE(target < control_.size())) {
- this->errorf(pos,
- "improper branch in br_table target %u (depth %u)",
- i, target);
- break;
- }
+ if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) break;
// Avoid redundant branch target checks.
if (br_targets[target]) continue;
br_targets[target] = true;
- // Check that label types match up.
- Control* c = control_at(target);
- uint32_t arity = c->br_merge()->arity;
- if (i == 0) {
- br_arity = arity;
- } else if (!VALIDATE(br_arity == arity)) {
- this->errorf(pos,
- "inconsistent arity in br_table target %u"
- " (previous was %u, this one %u)",
- i, br_arity, arity);
+
+ if (validate) {
+ if (index == 0) {
+ // With the first branch target, initialize the result types.
+ result_types = InitializeBrTableResultTypes(target);
+ } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
+ index)) {
+ break;
+ }
}
- if (!TypeCheckBranch(c)) break;
}
- if (this->failed()) break;
+
+ if (!VALIDATE(TypeCheckBrTable(result_types))) break;
+
+ DCHECK(this->ok());
if (control_.back().reachable()) {
CALL_INTERFACE(BrTable, imm, key);
- for (uint32_t depth = control_depth(); depth-- > 0;) {
- if (!br_targets[depth]) continue;
- control_at(depth)->br_merge()->reached = true;
+ for (int i = 0, e = control_depth(); i < e; ++i) {
+ if (!br_targets[i]) continue;
+ control_at(i)->br_merge()->reached = true;
}
}
@@ -2026,8 +2054,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprReturn: {
- if (!TypeCheckReturn()) break;
- DoReturn();
+ if (V8_LIKELY(control_.back().reachable())) {
+ if (!VALIDATE(TypeCheckReturn())) break;
+ DoReturn();
+ } else {
+ // We pop all return values from the stack to check their type.
+ // Since we deal with unreachable code, we do not have to keep the
+ // values.
+ int num_returns = static_cast<int>(this->sig_->return_count());
+ for (int i = 0; i < num_returns; ++i) {
+ Pop(i, this->sig_->GetReturn(i));
+ }
+ }
+
EndControl();
break;
}
@@ -2075,7 +2114,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(anyref);
FunctionIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(kWasmAnyFunc);
+ auto* value = Push(kWasmFuncRef);
CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
len = 1 + imm.length;
break;
@@ -2131,7 +2170,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
break;
}
- case kExprGetTable: {
+ case kExprTableGet: {
CHECK_PROTOTYPE_OPCODE(anyref);
TableIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
@@ -2139,17 +2178,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK_NOT_NULL(this->module_);
auto index = Pop(0, kWasmI32);
auto* result = Push(this->module_->tables[imm.index].type);
- CALL_INTERFACE_IF_REACHABLE(GetTable, index, result, imm);
+ CALL_INTERFACE_IF_REACHABLE(TableGet, index, result, imm);
break;
}
- case kExprSetTable: {
+ case kExprTableSet: {
CHECK_PROTOTYPE_OPCODE(anyref);
TableIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(1, this->module_->tables[imm.index].type);
auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(SetTable, index, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(TableSet, index, value, imm);
break;
}
@@ -2328,7 +2367,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kAtomicPrefix: {
CHECK_PROTOTYPE_OPCODE(threads);
- if (!CheckHasSharedMemory()) break;
len++;
byte atomic_index =
this->template read_u8<validate>(this->pc_ + 1, "atomic index");
@@ -2348,8 +2386,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
default: {
// Deal with special asmjs opcodes.
- if (this->module_ != nullptr &&
- this->module_->origin == kAsmJsOrigin) {
+ if (this->module_ != nullptr && is_asmjs_module(this->module_)) {
FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) {
BuildSimpleOperator(opcode, sig);
@@ -2520,6 +2557,90 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
+ bool ValidateBrTableTarget(uint32_t target, const byte* pos, int index) {
+ if (!VALIDATE(target < this->control_.size())) {
+ this->errorf(pos, "improper branch in br_table target %u (depth %u)",
+ index, target);
+ return false;
+ }
+ return true;
+ }
+
+ std::vector<ValueType> InitializeBrTableResultTypes(uint32_t target) {
+ auto* merge = control_at(target)->br_merge();
+ int br_arity = merge->arity;
+ std::vector<ValueType> result(br_arity);
+ for (int i = 0; i < br_arity; ++i) {
+ result[i] = (*merge)[i].type;
+ }
+ return result;
+ }
+
+ bool UpdateBrTableResultTypes(std::vector<ValueType>* result_types,
+ uint32_t target, const byte* pos, int index) {
+ auto* merge = control_at(target)->br_merge();
+ int br_arity = merge->arity;
+ // First we check if the arities match.
+ if (br_arity != static_cast<int>(result_types->size())) {
+ this->errorf(pos,
+ "inconsistent arity in br_table target %u (previous was "
+ "%zu, this one is %u)",
+ index, result_types->size(), br_arity);
+ return false;
+ }
+
+ for (int i = 0; i < br_arity; ++i) {
+ if (this->enabled_.anyref) {
+ // The expected type is the biggest common subtype of all targets.
+ (*result_types)[i] =
+ ValueTypes::CommonSubType((*result_types)[i], (*merge)[i].type);
+ } else {
+ // All targets must have the same signature.
+ if ((*result_types)[i] != (*merge)[i].type) {
+ this->errorf(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, ValueTypes::TypeName((*result_types)[i]),
+ ValueTypes::TypeName((*merge)[i].type));
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
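
A toy model of the "biggest common subtype" merge used above when the anyref feature is enabled. This is not the ValueTypes implementation; it assumes the simplified 7.7-era lattice in which funcref and exnref are subtypes of anyref:

enum SketchType { kAnyRefT, kFuncRefT, kExnRefT, kBottomT };

// a <: b: every type is a subtype of itself and of anyref; bottom is a
// subtype of everything.
bool IsSubTypeSketch(SketchType a, SketchType b) {
  return a == b || b == kAnyRefT || a == kBottomT;
}

SketchType CommonSubTypeSketch(SketchType a, SketchType b) {
  if (IsSubTypeSketch(a, b)) return a;  // a is the more specific type
  if (IsSubTypeSketch(b, a)) return b;
  return kBottomT;  // incompatible targets; later type checks will fail
}
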
+ bool TypeCheckBrTable(const std::vector<ValueType>& result_types) {
+ int br_arity = static_cast<int>(result_types.size());
+ if (V8_LIKELY(control_.back().reachable())) {
+ int available =
+ static_cast<int>(stack_.size()) - control_.back().stack_depth;
+ // There have to be enough values on the stack.
+ if (available < br_arity) {
+ this->errorf(this->pc_,
+ "expected %u elements on the stack for branch to "
+ "@%d, found %u",
+ br_arity, startrel(control_.back().pc), available);
+ return false;
+ }
+ Value* stack_values = &*(stack_.end() - br_arity);
+ // Type-check the topmost br_arity values on the stack.
+ for (int i = 0; i < br_arity; ++i) {
+ Value& val = stack_values[i];
+ if (!ValueTypes::IsSubType(val.type, result_types[i])) {
+ this->errorf(this->pc_,
+ "type error in merge[%u] (expected %s, got %s)", i,
+ ValueTypes::TypeName(result_types[i]),
+ ValueTypes::TypeName(val.type));
+ return false;
+ }
+ }
+ } else { // !control_.back().reachable()
+ // Pop values from the stack, according to the expected signature.
+ for (int i = 0; i < br_arity; ++i) Pop(i + 1, result_types[i]);
+ }
+ return this->ok();
+ }
+
uint32_t SimdExtractLane(WasmOpcode opcode, ValueType type) {
SimdLaneImmediate<validate> imm(this, this->pc_);
if (this->Validate(this->pc_, opcode, imm)) {
@@ -2570,26 +2691,45 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t DecodeSimdOpcode(WasmOpcode opcode) {
uint32_t len = 0;
switch (opcode) {
+ case kExprF64x2ExtractLane: {
+ len = SimdExtractLane(opcode, kWasmF64);
+ break;
+ }
case kExprF32x4ExtractLane: {
len = SimdExtractLane(opcode, kWasmF32);
break;
}
+ case kExprI64x2ExtractLane: {
+ len = SimdExtractLane(opcode, kWasmI64);
+ break;
+ }
case kExprI32x4ExtractLane:
case kExprI16x8ExtractLane:
case kExprI8x16ExtractLane: {
len = SimdExtractLane(opcode, kWasmI32);
break;
}
+ case kExprF64x2ReplaceLane: {
+ len = SimdReplaceLane(opcode, kWasmF64);
+ break;
+ }
case kExprF32x4ReplaceLane: {
len = SimdReplaceLane(opcode, kWasmF32);
break;
}
+ case kExprI64x2ReplaceLane: {
+ len = SimdReplaceLane(opcode, kWasmI64);
+ break;
+ }
case kExprI32x4ReplaceLane:
case kExprI16x8ReplaceLane:
case kExprI8x16ReplaceLane: {
len = SimdReplaceLane(opcode, kWasmI32);
break;
}
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU:
case kExprI32x4Shl:
case kExprI32x4ShrS:
case kExprI32x4ShrU:
@@ -2631,16 +2771,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t len = 0;
ValueType ret_type;
FunctionSig* sig = WasmOpcodes::Signature(opcode);
- if (sig != nullptr) {
- MachineType memtype;
- switch (opcode) {
+ if (!VALIDATE(sig != nullptr)) {
+ this->error("invalid atomic opcode");
+ return 0;
+ }
+ MachineType memtype;
+ switch (opcode) {
#define CASE_ATOMIC_STORE_OP(Name, Type) \
case kExpr##Name: { \
memtype = MachineType::Type(); \
ret_type = kWasmStmt; \
break; \
}
- ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
+ ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
#undef CASE_ATOMIC_STORE_OP
#define CASE_ATOMIC_OP(Name, Type) \
case kExpr##Name: { \
@@ -2648,22 +2791,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ret_type = GetReturnType(sig); \
break; \
}
- ATOMIC_OP_LIST(CASE_ATOMIC_OP)
+ ATOMIC_OP_LIST(CASE_ATOMIC_OP)
#undef CASE_ATOMIC_OP
- default:
- this->error("invalid atomic opcode");
+ case kExprAtomicFence: {
+ byte zero = this->template read_u8<validate>(this->pc_ + 2, "zero");
+ if (!VALIDATE(zero == 0)) {
+ this->error(this->pc_ + 2, "invalid atomic operand");
return 0;
+ }
+ CALL_INTERFACE_IF_REACHABLE(AtomicFence);
+ return 1;
}
- MemoryAccessImmediate<validate> imm(
- this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
- len += imm.length;
- auto args = PopArgs(sig);
- auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
- result);
- } else {
- this->error("invalid atomic opcode");
- }
+ default:
+ this->error("invalid atomic opcode");
+ return 0;
+ }
+ if (!CheckHasSharedMemory()) return 0;
+ MemoryAccessImmediate<validate> imm(
+ this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
+ len += imm.length;
+ auto args = PopArgs(sig);
+ auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
+ CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
return len;
}
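
For reference, the byte sequence the new atomic.fence branch expects (per the threads proposal; the zero check above reads the byte at pc_ + 2):

const uint8_t kAtomicFenceBytes[] = {
    0xFE,  // atomic (threads) prefix
    0x03,  // atomic.fence sub-opcode
    0x00,  // reserved byte; anything else raises "invalid atomic operand"
};
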
@@ -2823,8 +2972,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
V8_INLINE Value Pop(int index, ValueType expected) {
auto val = Pop();
- if (!VALIDATE(ValueTypes::IsSubType(expected, val.type) ||
- val.type == kWasmVar || expected == kWasmVar)) {
+ if (!VALIDATE(ValueTypes::IsSubType(val.type, expected) ||
+ val.type == kWasmBottom || expected == kWasmBottom)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
SafeOpcodeNameAt(this->pc_), index,
ValueTypes::TypeName(expected), SafeOpcodeNameAt(val.pc),
@@ -2849,11 +2998,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return val;
}
+ // Pops values from the stack as defined by {merge}, thereby type-checking
+ // unreachable merges. Afterwards the values are pushed back onto the stack
+ // according to the signature in {merge}, so that follow-up validation
+ // remains possible.
+ bool TypeCheckUnreachableMerge(Merge<Value>& merge, bool conditional_branch) {
+ int arity = merge.arity;
+ // For conditional branches, stack value '0' is the condition of the branch,
+ // and the result values start at index '1'.
+ int index_offset = conditional_branch ? 1 : 0;
+ for (int i = 0; i < arity; ++i) Pop(index_offset + i, merge[i].type);
+ // Push values of the correct type back on the stack.
+ for (int i = arity - 1; i >= 0; --i) Push(merge[i].type);
+ return this->ok();
+ }
+
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
- if (!TypeCheckFallThru(c)) return;
+ if (!TypeCheckFallThru()) return;
if (!c->reachable()) return;
if (!c->is_loop()) CALL_INTERFACE(FallThruTo, c);
@@ -2861,6 +3025,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TypeCheckMergeValues(Control* c, Merge<Value>* merge) {
+ // {validate} is a constexpr, so a static_assert (rather than a runtime
+ // CHECK or DCHECK) guarantees this function is only instantiated within
+ // VALIDATE.
+ static_assert(validate, "Call this function only within VALIDATE");
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
// The computation of {stack_values} is only valid if {merge->arity} is >0.
@@ -2870,108 +3037,121 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
- if (ValueTypes::IsSubType(old.type, val.type)) continue;
- // If {val.type} is polymorphic, which results from unreachable, make
- // it more specific by using the merge value's expected type.
- // If it is not polymorphic, this is a type error.
- if (!VALIDATE(val.type == kWasmVar)) {
+ if (!ValueTypes::IsSubType(val.type, old.type)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
i, ValueTypes::TypeName(old.type),
ValueTypes::TypeName(val.type));
return false;
}
- val.type = old.type;
}
return true;
}
- bool TypeCheckFallThru(Control* c) {
- DCHECK_EQ(c, &control_.back());
- if (!validate) return true;
- uint32_t expected = c->end_merge.arity;
- DCHECK_GE(stack_.size(), c->stack_depth);
- uint32_t actual = static_cast<uint32_t>(stack_.size()) - c->stack_depth;
- // Fallthrus must match the arity of the control exactly.
- if (!InsertUnreachablesIfNecessary(expected, actual) || actual > expected) {
+ bool TypeCheckFallThru() {
+ Control& c = control_.back();
+ if (V8_LIKELY(c.reachable())) {
+ // We only do type-checking here. This is only needed during validation.
+ if (!validate) return true;
+
+ uint32_t expected = c.end_merge.arity;
+ DCHECK_GE(stack_.size(), c.stack_depth);
+ uint32_t actual = static_cast<uint32_t>(stack_.size()) - c.stack_depth;
+ // Fallthrus must match the arity of the control exactly.
+ if (actual != expected) {
+ this->errorf(
+ this->pc_,
+ "expected %u elements on the stack for fallthru to @%d, found %u",
+ expected, startrel(c.pc), actual);
+ return false;
+ }
+ if (expected == 0) return true; // Fast path.
+
+ return TypeCheckMergeValues(&c, &c.end_merge);
+ }
+
+ // Type-check an unreachable fallthru. First we do an arity check, then a
+ // type check. Note that type-checking may require an adjustment of the
+ // stack if values required by the block signature are missing.
+ Merge<Value>& merge = c.end_merge;
+ int arity = static_cast<int>(merge.arity);
+ int available = static_cast<int>(stack_.size()) - c.stack_depth;
+ // For fallthrus, there must not be more values available than needed.
+ if (available > arity) {
this->errorf(
this->pc_,
"expected %u elements on the stack for fallthru to @%d, found %u",
- expected, startrel(c->pc), actual);
+ arity, startrel(c.pc), available);
return false;
}
- if (expected == 0) return true; // Fast path.
-
- return TypeCheckMergeValues(c, &c->end_merge);
+ // Pop all values from the stack to type-check the values that are
+ // actually present.
+ return TypeCheckUnreachableMerge(merge, false);
}
- bool TypeCheckBranch(Control* c) {
- // Branches must have at least the number of values expected; can have more.
- uint32_t expected = c->br_merge()->arity;
- if (expected == 0) return true; // Fast path.
- DCHECK_GE(stack_.size(), control_.back().stack_depth);
- uint32_t actual =
- static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
- if (!InsertUnreachablesIfNecessary(expected, actual)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for br to @%d, found %u",
- expected, startrel(c->pc), actual);
- return false;
+ enum TypeCheckBranchResult {
+ kReachableBranch,
+ kUnreachableBranch,
+ kInvalidStack,
+ };
+
+ TypeCheckBranchResult TypeCheckBranch(Control* c, bool conditional_branch) {
+ if (V8_LIKELY(control_.back().reachable())) {
+ // We only do type-checking here. This is only needed during validation.
+ if (!validate) return kReachableBranch;
+
+ // Branches must have at least the number of values expected; can have
+ // more.
+ uint32_t expected = c->br_merge()->arity;
+ if (expected == 0) return kReachableBranch; // Fast path.
+ DCHECK_GE(stack_.size(), control_.back().stack_depth);
+ uint32_t actual =
+ static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
+ if (expected > actual) {
+ this->errorf(
+ this->pc_,
+ "expected %u elements on the stack for br to @%d, found %u",
+ expected, startrel(c->pc), actual);
+ return kInvalidStack;
+ }
+ return TypeCheckMergeValues(c, c->br_merge()) ? kReachableBranch
+ : kInvalidStack;
}
- return TypeCheckMergeValues(c, c->br_merge());
+
+ return TypeCheckUnreachableMerge(*c->br_merge(), conditional_branch)
+ ? kUnreachableBranch
+ : kInvalidStack;
}
bool TypeCheckReturn() {
+ int num_returns = static_cast<int>(this->sig_->return_count());
+ // No type checking is needed if there are no returns.
+ if (num_returns == 0) return true;
+
// Returns must have at least the number of values expected; can have more.
- uint32_t num_returns = static_cast<uint32_t>(this->sig_->return_count());
- DCHECK_GE(stack_.size(), control_.back().stack_depth);
- uint32_t actual =
- static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
- if (!InsertUnreachablesIfNecessary(num_returns, actual)) {
+ int num_available =
+ static_cast<int>(stack_.size()) - control_.back().stack_depth;
+ if (num_available < num_returns) {
this->errorf(this->pc_,
"expected %u elements on the stack for return, found %u",
- num_returns, actual);
+ num_returns, num_available);
return false;
}
// Typecheck the topmost {num_returns} values on the stack.
- if (num_returns == 0) return true;
// This line requires num_returns > 0.
Value* stack_values = &*(stack_.end() - num_returns);
- for (uint32_t i = 0; i < num_returns; ++i) {
+ for (int i = 0; i < num_returns; ++i) {
auto& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
- if (ValueTypes::IsSubType(expected_type, val.type)) continue;
- // If {val.type} is polymorphic, which results from unreachable,
- // make it more specific by using the return's expected type.
- // If it is not polymorphic, this is a type error.
- if (!VALIDATE(val.type == kWasmVar)) {
+ if (!ValueTypes::IsSubType(val.type, expected_type)) {
this->errorf(this->pc_,
"type error in return[%u] (expected %s, got %s)", i,
ValueTypes::TypeName(expected_type),
ValueTypes::TypeName(val.type));
return false;
}
- val.type = expected_type;
- }
- return true;
- }
-
- inline bool InsertUnreachablesIfNecessary(uint32_t expected,
- uint32_t actual) {
- if (V8_LIKELY(actual >= expected)) {
- return true; // enough actual values are there.
- }
- if (!VALIDATE(control_.back().unreachable())) {
- // There aren't enough values on the stack.
- return false;
}
- // A slow path. When the actual number of values on the stack is less
- // than the expected number of values and the current control is
- // unreachable, insert unreachable values below the actual values.
- // This simplifies {TypeCheckMergeValues}.
- auto pos = stack_.begin() + (stack_.size() - actual);
- stack_.insert(pos, expected - actual, UnreachableValue(this->pc_));
return true;
}
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index c1e8e541b5..0568d61f3f 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -45,7 +45,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module, WasmFeatures* detected,
- FunctionBody& body) {
+ const FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(
&zone, module, enabled, detected, body);
@@ -151,7 +151,12 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned length =
WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, i.pc());
+ unsigned offset = 1;
WasmOpcode opcode = i.current();
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = i.prefixed_opcode();
+ offset = 2;
+ }
if (line_numbers) line_numbers->push_back(i.position());
if (opcode == kExprElse || opcode == kExprCatch) {
control_depth--;
@@ -188,7 +193,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
#undef CASE_LOCAL_TYPE
} else {
- for (unsigned j = 1; j < length; ++j) {
+ for (unsigned j = offset; j < length; ++j) {
os << " 0x" << AsHex(i.pc()[j], 2) << ",";
}
}
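
Why {offset} starts at 2 for prefixed opcodes: the printed mnemonic already covers two bytes, so raw-byte printing of the immediates must skip both. A sketch, assuming standard encodings:

const uint8_t kPlainInsn[] = {0x41, 0x2A};           // i32.const 42, bytes from j = 1
const uint8_t kPrefixedInsn[] = {0xFE, 0x03, 0x00};  // atomic.fence, bytes from j = 2
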
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 16f90a41cb..eadc333dd5 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -38,7 +38,7 @@ V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module,
WasmFeatures* detected,
- FunctionBody& body);
+ const FunctionBody& body);
enum PrintLocals { kPrintLocals, kOmitLocals };
V8_EXPORT_PRIVATE
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index a5d7a08846..7df5abf5c8 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -4,9 +4,14 @@
#include "src/wasm/function-compiler.h"
+#include "src/codegen/compiler.h"
#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/diagnostics/code-tracer.h"
#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/wasm-code-manager.h"
@@ -107,12 +112,48 @@ ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier(
const WasmModule* module) {
// Liftoff does not support the special asm.js opcodes, thus always compile
// asm.js modules with TurboFan.
- if (module->origin == kAsmJsOrigin) return ExecutionTier::kTurbofan;
+ if (is_asmjs_module(module)) return ExecutionTier::kTurbofan;
if (FLAG_wasm_interpret_all) return ExecutionTier::kInterpreter;
return FLAG_liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
}
WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
+ WasmEngine* engine, CompilationEnv* env,
+ const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ Counters* counters, WasmFeatures* detected) {
+ WasmCompilationResult result;
+ if (func_index_ < static_cast<int>(env->module->num_imported_functions)) {
+ result = ExecuteImportWrapperCompilation(engine, env);
+ } else {
+ result = ExecuteFunctionCompilation(engine, env, wire_bytes_storage,
+ counters, detected);
+ }
+
+ if (result.succeeded()) {
+ counters->wasm_generated_code_size()->Increment(
+ result.code_desc.instr_size);
+ counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
+ }
+
+ result.func_index = func_index_;
+ result.requested_tier = tier_;
+
+ return result;
+}
+
+WasmCompilationResult WasmCompilationUnit::ExecuteImportWrapperCompilation(
+ WasmEngine* engine, CompilationEnv* env) {
+ FunctionSig* sig = env->module->functions[func_index_].sig;
+ // Assume the wrapper is going to be a JS function with matching arity at
+ // instantiation time.
+ auto kind = compiler::kDefaultImportCallKind;
+ bool source_positions = is_asmjs_module(env->module);
+ WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ engine, env, kind, sig, source_positions);
+ return result;
+}
+
+WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
WasmEngine* wasm_engine, CompilationEnv* env,
const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
Counters* counters, WasmFeatures* detected) {
@@ -167,17 +208,32 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
break;
}
- result.func_index = func_index_;
- result.requested_tier = tier_;
+ return result;
+}
- if (result.succeeded()) {
- counters->wasm_generated_code_size()->Increment(
- result.code_desc.instr_size);
- counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
- }
+namespace {
+bool must_record_function_compilation(Isolate* isolate) {
+ return isolate->logger()->is_listening_to_code_events() ||
+ isolate->is_profiling();
+}
- return result;
+PRINTF_FORMAT(3, 4)
+void RecordWasmHeapStubCompilation(Isolate* isolate, Handle<Code> code,
+ const char* format, ...) {
+ DCHECK(must_record_function_compilation(isolate));
+
+ ScopedVector<char> buffer(128);
+ va_list arguments;
+ va_start(arguments, format);
+ int len = VSNPrintF(buffer, format, arguments);
+ CHECK_LT(0, len);
+ va_end(arguments);
+ Handle<String> name_str =
+ isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ AbstractCode::cast(*code), *name_str));
}
+} // namespace
// static
void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
@@ -190,6 +246,8 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
+ DCHECK_LE(native_module->num_imported_functions(), function->func_index);
+ DCHECK_LT(function->func_index, native_module->num_functions());
WasmCompilationUnit unit(function->func_index, tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
@@ -204,6 +262,46 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
}
}
+JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(Isolate* isolate,
+ FunctionSig* sig,
+ bool is_import)
+ : job_(compiler::NewJSToWasmCompilationJob(isolate, sig, is_import)) {}
+
+JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
+
+void JSToWasmWrapperCompilationUnit::Prepare(Isolate* isolate) {
+ CompilationJob::Status status = job_->PrepareJob(isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+}
+
+void JSToWasmWrapperCompilationUnit::Execute() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileJSToWasmWrapper");
+ DCHECK_EQ(job_->state(), CompilationJob::State::kReadyToExecute);
+ CompilationJob::Status status = job_->ExecuteJob();
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+}
+
+Handle<Code> JSToWasmWrapperCompilationUnit::Finalize(Isolate* isolate) {
+ CompilationJob::Status status = job_->FinalizeJob(isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ Handle<Code> code = job_->compilation_info()->code();
+ if (must_record_function_compilation(isolate)) {
+ RecordWasmHeapStubCompilation(
+ isolate, code, "%s", job_->compilation_info()->GetDebugName().get());
+ }
+ return code;
+}
+
+// static
+Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ Isolate* isolate, FunctionSig* sig, bool is_import) {
+ // Run the compilation unit synchronously.
+ JSToWasmWrapperCompilationUnit unit(isolate, sig, is_import);
+ unit.Prepare(isolate);
+ unit.Execute();
+ return unit.Finalize(isolate);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
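
A usage sketch of the three-phase unit introduced above; this mirrors what the static CompileJSToWasmWrapper helper does synchronously (Prepare and Finalize must run on the main thread, Execute may run on a background thread):

Handle<Code> CompileWrapperSync(Isolate* isolate, FunctionSig* sig,
                                bool is_import) {
  JSToWasmWrapperCompilationUnit unit(isolate, sig, is_import);
  unit.Prepare(isolate);          // set up the compilation job
  unit.Execute();                 // run the (potentially parallel) phase
  return unit.Finalize(isolate);  // produce the Code object
}
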
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index e7d8ff9471..d0b47b91aa 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -18,6 +18,7 @@ namespace internal {
class AssemblerBuffer;
class Counters;
+class OptimizedCompilationJob;
namespace wasm {
@@ -34,6 +35,10 @@ class WasmInstructionBuffer final {
static std::unique_ptr<WasmInstructionBuffer> New();
+ // Override {operator delete} to avoid implicit instantiation of {operator
+ // delete} with {size_t} argument. The {size_t} argument would be incorrect.
+ void operator delete(void* ptr) { ::operator delete(ptr); }
+
private:
WasmInstructionBuffer() = delete;
DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
@@ -43,6 +48,12 @@ struct WasmCompilationResult {
public:
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
+ enum Kind : int8_t {
+ kFunction,
+ kWasmToJsWrapper,
+ kInterpreterEntry,
+ };
+
bool succeeded() const { return code_desc.buffer != nullptr; }
bool failed() const { return !succeeded(); }
operator bool() const { return succeeded(); }
@@ -53,9 +64,10 @@ struct WasmCompilationResult {
uint32_t tagged_parameter_slots = 0;
OwnedVector<byte> source_positions;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions;
- int func_index;
+ int func_index = static_cast<int>(kAnonymousFuncIndex);
ExecutionTier requested_tier;
ExecutionTier result_tier;
+ Kind kind = kFunction;
};
class V8_EXPORT_PRIVATE WasmCompilationUnit final {
@@ -77,6 +89,14 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
ExecutionTier);
private:
+ WasmCompilationResult ExecuteFunctionCompilation(
+ WasmEngine* wasm_engine, CompilationEnv* env,
+ const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ Counters* counters, WasmFeatures* detected);
+
+ WasmCompilationResult ExecuteImportWrapperCompilation(WasmEngine* engine,
+ CompilationEnv* env);
+
int func_index_;
ExecutionTier tier_;
};
@@ -86,6 +106,24 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
ASSERT_TRIVIALLY_COPYABLE(WasmCompilationUnit);
STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
+class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
+ public:
+ JSToWasmWrapperCompilationUnit(Isolate* isolate, FunctionSig* sig,
+ bool is_import);
+ ~JSToWasmWrapperCompilationUnit();
+
+ void Prepare(Isolate* isolate);
+ void Execute();
+ Handle<Code> Finalize(Isolate* isolate);
+
+ // Run a compilation unit synchronously.
+ static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
+ bool is_import);
+
+ private:
+ std::unique_ptr<OptimizedCompilationJob> job_;
+};
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 90d8749f2c..8efac18787 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -291,14 +291,14 @@ class WasmGraphBuildingInterface {
BUILD(SetGlobal, imm.index, value.node);
}
- void GetTable(FullDecoder* decoder, const Value& index, Value* result,
+ void TableGet(FullDecoder* decoder, const Value& index, Value* result,
const TableIndexImmediate<validate>& imm) {
- result->node = BUILD(GetTable, imm.index, index.node, decoder->position());
+ result->node = BUILD(TableGet, imm.index, index.node, decoder->position());
}
- void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
+ void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
const TableIndexImmediate<validate>& imm) {
- BUILD(SetTable, imm.index, index.node, value.node, decoder->position());
+ BUILD(TableSet, imm.index, index.node, value.node, decoder->position());
}
void Unreachable(FullDecoder* decoder) {
@@ -532,6 +532,8 @@ class WasmGraphBuildingInterface {
if (result) result->node = node;
}
+ void AtomicFence(FullDecoder* decoder) { BUILD(AtomicFence); }
+
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
@@ -567,7 +569,7 @@ class WasmGraphBuildingInterface {
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- BUILD(TableCopy, imm.table_src.index, imm.table_dst.index, args[0].node,
+ BUILD(TableCopy, imm.table_dst.index, imm.table_src.index, args[0].node,
args[1].node, args[2].node, decoder->position());
}
@@ -691,8 +693,8 @@ class WasmGraphBuildingInterface {
case kWasmS128:
return builder_->S128Zero();
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return builder_->RefNull();
default:
UNREACHABLE();
@@ -717,7 +719,7 @@ class WasmGraphBuildingInterface {
Value& val = stack_values[i];
Value& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
- DCHECK(val.type == kWasmVar ||
+ DCHECK(val.type == kWasmBottom ||
ValueTypes::MachineRepresentationFor(val.type) ==
ValueTypes::MachineRepresentationFor(old.type));
old.node = first ? val.node
diff --git a/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h b/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
deleted file mode 100644
index ba2093d2c1..0000000000
--- a/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
-#define V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
-
-#include "src/compiler/wasm-compiler.h"
-#include "src/logging/counters.h"
-#include "src/wasm/value-type.h"
-#include "src/wasm/wasm-code-manager.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-class JSToWasmWrapperCache {
- public:
- Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
- bool is_import) {
- std::pair<bool, FunctionSig> key(is_import, *sig);
- Handle<Code>& cached = cache_[key];
- if (cached.is_null()) {
- cached = compiler::CompileJSToWasmWrapper(isolate, sig, is_import)
- .ToHandleChecked();
- }
- return cached;
- }
-
- private:
- // We generate different code for calling imports than calling wasm functions
- // in this module. Both are cached separately.
- using CacheKey = std::pair<bool, FunctionSig>;
- std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 93ff8a9317..7c41c0a209 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -17,8 +17,8 @@ namespace wasm {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// Use a push, because mov to an extended register takes 6 bytes.
- pushq(Immediate(func_index)); // max 5 bytes
- EmitJumpSlot(lazy_compile_target); // always 5 bytes
+ pushq_imm32(func_index); // 5 bytes
+ EmitJumpSlot(lazy_compile_target); // 5 bytes
}
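
A quick sanity check of the byte accounting in the comments above, matching the 10-byte kLazyCompileTableSlotSize this patch introduces for x64 in jump-table-assembler.h:

constexpr int kPushqImm32Bytes = 5;  // pushq with a 32-bit immediate
constexpr int kNearJmpBytes = 5;     // jmp rel32
static_assert(kPushqImm32Bytes + kNearJmpBytes == 10,
              "fills the 10-byte x64 lazy compile table slot exactly");
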
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -43,7 +43,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
mov(kWasmCompileLazyFuncIndexRegister, func_index); // 5 bytes
- jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
+ jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -97,13 +97,17 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // max. 2 instr
- Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
+ int start = pc_offset();
+ Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // 1-2 instr
+ Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK(nop_bytes == 0 || nop_bytes == kInstrSize);
+ if (nop_bytes) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
JumpToInstructionStream(builtin_target);
- CheckConstPool(true, false); // force emit of const pool
+ ForceConstantPoolEmissionWithoutJump();
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
@@ -154,10 +158,14 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
+ int start = pc_offset();
li(kWasmCompileLazyFuncIndexRegister, func_index); // max. 2 instr
// Jump produces max. 4 instructions for 32-bit platform
// and max. 6 instructions for 64-bit platform.
Jump(lazy_compile_target, RelocInfo::NONE);
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -179,12 +187,16 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
+ int start = pc_offset();
// Load function index to register. max 5 instrs
mov(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
// Jump to {lazy_compile_target}. max 5 instrs
mov(r0, Operand(lazy_compile_target));
mtctr(r0);
bctr();
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index eef9fea167..379a547b55 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -17,7 +17,16 @@ namespace wasm {
// each slot containing a dispatch to the currently published {WasmCode} that
// corresponds to the function.
//
-// Note that the table is split into lines of fixed size, with lines laid out
+// In addition to this main jump table, there are special jump tables for
+// other purposes:
+// - the runtime stub table contains one entry per wasm runtime stub (see
+// {WasmCode::RuntimeStubId}), which jumps to the corresponding embedded
+// builtin.
+// - the lazy compile table contains one entry per wasm function, which jumps
+// to the common {WasmCompileLazy} builtin and passes the index of the
+// function that was invoked.
+//
+// The main jump table is split into lines of fixed size, with lines laid out
// consecutively within the executable memory of the {NativeModule}. The slots
// in turn are consecutive within a line, but do not cross line boundaries.
//
@@ -27,6 +36,7 @@ namespace wasm {
//
// The above illustrates jump table lines {Li} containing slots {Si} with each
// line containing {n} slots and some padding {x} for alignment purposes.
+// The other jump tables are laid out as plain consecutive slots.
class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
public:
// Translate an offset into the continuous jump table to a jump table index.
@@ -39,7 +49,7 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
}
// Translate a jump table index to an offset into the continuous jump table.
- static uint32_t SlotIndexToOffset(uint32_t slot_index) {
+ static uint32_t JumpSlotIndexToOffset(uint32_t slot_index) {
uint32_t line_index = slot_index / kJumpTableSlotsPerLine;
uint32_t line_offset =
(slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
@@ -60,40 +70,56 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
return slot_index * kJumpTableStubSlotSize;
}
+ // Translate a slot index to an offset into the lazy compile table.
+ static uint32_t LazyCompileSlotIndexToOffset(uint32_t slot_index) {
+ return slot_index * kLazyCompileTableSlotSize;
+ }
+
// Determine the size of a jump table containing only runtime stub slots.
static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) {
return slot_count * kJumpTableStubSlotSize;
}
- static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
- uint32_t func_index,
- Address lazy_compile_target,
- WasmCode::FlushICache flush_i_cache) {
- Address slot = base + SlotIndexToOffset(slot_index);
- JumpTableAssembler jtasm(slot);
- jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
- jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
- if (flush_i_cache) {
- FlushInstructionCache(slot, kJumpTableSlotSize);
+ // Determine the size of a lazy compile table.
+ static constexpr uint32_t SizeForNumberOfLazyFunctions(uint32_t slot_count) {
+ return slot_count * kLazyCompileTableSlotSize;
+ }
+
+ static void GenerateLazyCompileTable(Address base, uint32_t num_slots,
+ uint32_t num_imported_functions,
+ Address wasm_compile_lazy_target) {
+ uint32_t lazy_compile_table_size = num_slots * kLazyCompileTableSlotSize;
+ // Assume enough space, so the Assembler does not try to grow the buffer.
+ JumpTableAssembler jtasm(base, lazy_compile_table_size + 256);
+ for (uint32_t slot_index = 0; slot_index < num_slots; ++slot_index) {
+ DCHECK_EQ(slot_index * kLazyCompileTableSlotSize, jtasm.pc_offset());
+ jtasm.EmitLazyCompileJumpSlot(slot_index + num_imported_functions,
+ wasm_compile_lazy_target);
}
+ DCHECK_EQ(lazy_compile_table_size, jtasm.pc_offset());
+ FlushInstructionCache(base, lazy_compile_table_size);
}
- static void EmitRuntimeStubSlot(Address base, uint32_t slot_index,
- Address builtin_target,
- WasmCode::FlushICache flush_i_cache) {
- Address slot = base + StubSlotIndexToOffset(slot_index);
- JumpTableAssembler jtasm(slot);
- jtasm.EmitRuntimeStubSlot(builtin_target);
- jtasm.NopBytes(kJumpTableStubSlotSize - jtasm.pc_offset());
- if (flush_i_cache) {
- FlushInstructionCache(slot, kJumpTableStubSlotSize);
+ static void GenerateRuntimeStubTable(Address base, Address* targets,
+ int num_stubs) {
+ uint32_t table_size = num_stubs * kJumpTableStubSlotSize;
+ // Assume enough space, so the Assembler does not try to grow the buffer.
+ JumpTableAssembler jtasm(base, table_size + 256);
+ int offset = 0;
+ for (int index = 0; index < num_stubs; ++index) {
+ DCHECK_EQ(offset, StubSlotIndexToOffset(index));
+ DCHECK_EQ(offset, jtasm.pc_offset());
+ jtasm.EmitRuntimeStubSlot(targets[index]);
+ offset += kJumpTableStubSlotSize;
+ jtasm.NopBytes(offset - jtasm.pc_offset());
}
+ FlushInstructionCache(base, table_size);
}
static void PatchJumpTableSlot(Address base, uint32_t slot_index,
Address new_target,
WasmCode::FlushICache flush_i_cache) {
- Address slot = base + SlotIndexToOffset(slot_index);
+ Address slot = base + JumpSlotIndexToOffset(slot_index);
JumpTableAssembler jtasm(slot);
jtasm.EmitJumpSlot(new_target);
jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
@@ -115,44 +141,54 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
// boundaries. The jump table line size has been chosen to satisfy this.
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 10;
+ static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kLazyCompileTableSlotSize = 10;
static constexpr int kJumpTableStubSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 10;
+ static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kLazyCompileTableSlotSize = 10;
static constexpr int kJumpTableStubSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
- static constexpr int kJumpTableLineSize = 5 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
-#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableLineSize = 3 * kInstrSize;
static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 5 * kInstrSize;
+ static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
+#elif V8_TARGET_ARCH_ARM64
+ static constexpr int kJumpTableLineSize = 1 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 1 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 3 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
static constexpr int kJumpTableLineSize = 128;
- static constexpr int kJumpTableSlotSize = 20;
+ static constexpr int kJumpTableSlotSize = 14;
+ static constexpr int kLazyCompileTableSlotSize = 20;
static constexpr int kJumpTableStubSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 48;
+ static constexpr int kJumpTableSlotSize = 7 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS
static constexpr int kJumpTableLineSize = 6 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 6 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kJumpTableLineSize = 8 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#else
static constexpr int kJumpTableLineSize = 1;
static constexpr int kJumpTableSlotSize = 1;
+ static constexpr int kLazyCompileTableSlotSize = 1;
static constexpr int kJumpTableStubSlotSize = 1;
#endif
static constexpr int kJumpTableSlotsPerLine =
kJumpTableLineSize / kJumpTableSlotSize;
+ STATIC_ASSERT(kJumpTableSlotsPerLine >= 1);
// {JumpTableAssembler} is never used during snapshot generation, and its code
// must be independent of the code range of any isolate anyway. Just ensure
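The constants hunk above is the heart of this change: patchable jump-table slots shrink (on x64 from 10 to 5 bytes, i.e. a single near jump) because the larger lazy-compile stubs now live in a separate table sized by kLazyCompileTableSlotSize. Below is a minimal sketch of how such constants map a slot index to an offset, assuming the cache-line padding scheme the STATIC_ASSERT guards; the helper name mirrors the JumpSlotIndexToOffset call in PatchJumpTableSlot, but the body is an illustration, not the exact implementation:

    #include <cstdint>

    constexpr int kJumpTableLineSize = 64;  // x64 values from the hunk above
    constexpr int kJumpTableSlotSize = 5;   // one near "jmp rel32"
    constexpr int kJumpTableSlotsPerLine =
        kJumpTableLineSize / kJumpTableSlotSize;

    // Slots never straddle a cache line: address whole lines first, then
    // the slot within its line; leftover bytes per line are nop padding.
    constexpr uint32_t JumpSlotIndexToOffset(uint32_t slot_index) {
      uint32_t line_index = slot_index / kJumpTableSlotsPerLine;
      uint32_t line_offset =
          (slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
      return line_index * kJumpTableLineSize + line_offset;
    }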
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 10483cf8ea..b11a557195 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -6,7 +6,7 @@
#include <cinttypes>
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -22,9 +22,9 @@ void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
case MachineRepresentation::rep: \
SNPrintF(value, str ":" format, \
- ReadLittleEndianValue<ctype1>( \
+ base::ReadLittleEndianValue<ctype1>( \
reinterpret_cast<Address>(mem_start) + info->address), \
- ReadLittleEndianValue<ctype2>( \
+ base::ReadLittleEndianValue<ctype2>( \
reinterpret_cast<Address>(mem_start) + info->address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
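The include move from src/common/v8memory.h to src/base/memory.h is why the macro now qualifies the reads as base::ReadLittleEndianValue. As a rough stand-in for what that helper does on a little-endian host (the real one also byte-swaps on big-endian targets), consider:

    #include <cstdint>
    #include <cstring>

    template <typename T>
    T ReadLittleEndianValueSketch(const uint8_t* p) {
      T value;
      std::memcpy(&value, p, sizeof(T));  // memcpy sidesteps alignment UB
      return value;
    }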
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 3bb6eb1e58..b5a58d4f27 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -24,7 +24,6 @@
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/identity-map.h"
-#include "src/wasm/js-to-wasm-wrapper-cache.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -34,6 +33,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -152,6 +152,9 @@ class CompilationUnitQueues {
for (int task_id = 0; task_id < max_tasks; ++task_id) {
queues_[task_id].next_steal_task_id = next_task_id(task_id);
}
+ for (auto& atomic_counter : num_units_) {
+ std::atomic_init(&atomic_counter, size_t{0});
+ }
}
base::Optional<WasmCompilationUnit> GetNextUnit(
@@ -254,15 +257,14 @@ class CompilationUnitQueues {
};
struct BigUnitsQueue {
- BigUnitsQueue() = default;
+ BigUnitsQueue() {
+ for (auto& atomic : has_units) std::atomic_init(&atomic, false);
+ }
base::Mutex mutex;
// Can be read concurrently to check whether any elements are in the queue.
- std::atomic_bool has_units[kNumTiers] = {
- ATOMIC_VAR_INIT(false),
- ATOMIC_VAR_INIT(false)
- };
+ std::atomic<bool> has_units[kNumTiers];
// Protected by {mutex}:
std::priority_queue<BigUnit> units[kNumTiers];
@@ -271,11 +273,8 @@ class CompilationUnitQueues {
std::vector<Queue> queues_;
BigUnitsQueue big_units_queue_;
- std::atomic_size_t num_units_[kNumTiers] = {
- ATOMIC_VAR_INIT(0),
- ATOMIC_VAR_INIT(0)
- };
- std::atomic_int next_queue_to_add{0};
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<int> next_queue_to_add{0};
int next_task_id(int task_id) const {
int next = task_id + 1;
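Replacing the ATOMIC_VAR_INIT initializer lists with per-element std::atomic_init calls in the constructors is the portable way to set up arrays of atomics: std::atomic is neither copyable nor movable, so brace-initializing such arrays is not reliably portable across toolchains, and ATOMIC_VAR_INIT was later deprecated in C++20 anyway. A self-contained illustration of the pattern (the struct is a stand-in, not the real BigUnitsQueue):

    #include <atomic>

    struct BigUnitsQueueSketch {
      BigUnitsQueueSketch() {
        // Initialize each atomic in place; no copy or move is involved.
        for (auto& flag : has_units) std::atomic_init(&flag, false);
      }
      std::atomic<bool> has_units[2];  // one flag per tier, as above
    };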
@@ -382,7 +381,7 @@ class CompilationStateImpl {
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before {AddCompilationUnits}
// is invoked which triggers background compilation.
- void InitializeCompilationProgress(bool lazy_module);
+ void InitializeCompilationProgress(bool lazy_module, int num_import_wrappers);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@@ -411,13 +410,11 @@ class CompilationStateImpl {
bool baseline_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
- return outstanding_baseline_functions_ == 0;
+ return outstanding_baseline_units_ == 0;
}
bool top_tier_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
return outstanding_top_tier_functions_ == 0;
}
@@ -473,7 +470,7 @@ class CompilationStateImpl {
// Compilation error, atomically updated. This flag can be updated and read
// using relaxed semantics.
- std::atomic_bool compile_failed_{false};
+ std::atomic<bool> compile_failed_{false};
const int max_background_tasks_ = 0;
@@ -519,7 +516,7 @@ class CompilationStateImpl {
// Callback functions to be called on compilation events.
std::vector<CompilationState::callback_t> callbacks_;
- int outstanding_baseline_functions_ = 0;
+ int outstanding_baseline_units_ = 0;
int outstanding_top_tier_functions_ = 0;
std::vector<uint8_t> compilation_progress_;
@@ -701,6 +698,10 @@ class CompilationUnitBuilder {
native_module->module())) {}
void AddUnits(uint32_t func_index) {
+ if (func_index < native_module_->module()->num_imported_functions) {
+ baseline_units_.emplace_back(func_index, ExecutionTier::kNone);
+ return;
+ }
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
native_module_->enabled_features(), func_index);
@@ -823,7 +824,7 @@ void ValidateSequentially(
bool IsLazyModule(const WasmModule* module) {
return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
+ (FLAG_asm_wasm_lazy_compilation && is_asmjs_module(module));
}
} // namespace
@@ -848,6 +849,8 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
ExecutionTierPair tiers = GetRequestedExecutionTiers(
module, compilation_state->compile_mode(), enabled_features, func_index);
+ DCHECK_LE(native_module->num_imported_functions(), func_index);
+ DCHECK_LT(func_index, native_module->num_functions());
WasmCompilationUnit baseline_unit(func_index, tiers.baseline_tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
@@ -857,7 +860,7 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
// During lazy compilation, we can only get compilation errors when
// {--wasm-lazy-validation} is enabled. Otherwise, the module was fully
// verified before starting its execution.
- DCHECK_IMPLIES(result.failed(), FLAG_wasm_lazy_validation);
+ CHECK_IMPLIES(result.failed(), FLAG_wasm_lazy_validation);
const WasmFunction* func = &module->functions[func_index];
if (result.failed()) {
ErrorThrower thrower(isolate, nullptr);
@@ -972,6 +975,29 @@ bool ExecuteCompilationUnits(
std::vector<WasmCode*> code_vector =
compile_scope->native_module()->AddCompiledCode(
VectorOf(results_to_publish));
+
+ // For import wrapper compilation units, add result to the cache.
+ const NativeModule* native_module = compile_scope->native_module();
+ int num_imported_functions = native_module->num_imported_functions();
+ DCHECK_EQ(code_vector.size(), results_to_publish.size());
+ WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
+ for (WasmCode* code : code_vector) {
+ int func_index = code->index();
+ DCHECK_LE(0, func_index);
+ DCHECK_LT(func_index, native_module->num_functions());
+ if (func_index < num_imported_functions) {
+ FunctionSig* sig = native_module->module()->functions[func_index].sig;
+ WasmImportWrapperCache::CacheKey key(compiler::kDefaultImportCallKind,
+ sig);
+ // If two imported functions have the same key, only one of them should
+ // have been added as a compilation unit. So it is always the first time
+ // we compile a wrapper for this key here.
+ DCHECK_NULL((*cache)[key]);
+ (*cache)[key] = code;
+ code->IncRef();
+ }
+ }
+
compile_scope->compilation_state()->OnFinishedUnits(VectorOf(code_vector));
results_to_publish.clear();
};
@@ -1023,15 +1049,39 @@ bool ExecuteCompilationUnits(
return true;
}
+// Returns the number of units added.
+int AddImportWrapperUnits(NativeModule* native_module,
+ CompilationUnitBuilder* builder) {
+ std::unordered_set<WasmImportWrapperCache::CacheKey,
+ WasmImportWrapperCache::CacheKeyHash>
+ keys;
+ int num_imported_functions = native_module->num_imported_functions();
+ for (int func_index = 0; func_index < num_imported_functions; func_index++) {
+ FunctionSig* sig = native_module->module()->functions[func_index].sig;
+ bool has_bigint_feature = native_module->enabled_features().bigint;
+ if (!IsJSCompatibleSignature(sig, has_bigint_feature)) {
+ continue;
+ }
+ WasmImportWrapperCache::CacheKey key(compiler::kDefaultImportCallKind, sig);
+ auto it = keys.insert(key);
+ if (it.second) {
+ // Ensure that all keys exist in the cache, so that we can populate the
+ // cache later without locking.
+ (*native_module->import_wrapper_cache())[key] = nullptr;
+ builder->AddUnits(func_index);
+ }
+ }
+ return static_cast<int>(keys.size());
+}
+
void InitializeCompilationUnits(NativeModule* native_module) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
const bool lazy_module = IsLazyModule(native_module->module());
- compilation_state->InitializeCompilationProgress(lazy_module);
-
ModuleWireBytes wire_bytes(native_module->wire_bytes());
CompilationUnitBuilder builder(native_module);
auto* module = native_module->module();
+
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
@@ -1047,6 +1097,9 @@ void InitializeCompilationUnits(NativeModule* native_module) {
builder.AddUnits(func_index);
}
}
+ int num_import_wrappers = AddImportWrapperUnits(native_module, &builder);
+ compilation_state->InitializeCompilationProgress(lazy_module,
+ num_import_wrappers);
builder.Commit();
}
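AddImportWrapperUnits deduplicates wrappers by cache key: std::unordered_set::insert returns a pair whose .second is true only for a genuinely new key, so exactly one unit is scheduled per (call kind, signature), and the matching cache slot is pre-created as nullptr so that background workers can publish into it later without locking. A stripped-down model of that dedup step, with CacheKey and CacheKeyHash as illustrative stand-ins for the real cache types:

    #include <cstddef>
    #include <functional>
    #include <unordered_set>
    #include <utility>

    using CacheKey = std::pair<int, const void*>;  // (call kind, signature)

    struct CacheKeyHash {
      size_t operator()(const CacheKey& key) const {
        return std::hash<const void*>()(key.second) ^
               std::hash<int>()(key.first);
      }
    };

    // True (i.e. schedule a unit) only on the first sighting of a key.
    bool AddUnitIfNew(std::unordered_set<CacheKey, CacheKeyHash>* keys,
                      const CacheKey& key) {
      return keys->insert(key).second;
    }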
@@ -1111,9 +1164,12 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const bool lazy_module = IsLazyModule(wasm_module);
- if (!FLAG_wasm_lazy_validation &&
+ if (!FLAG_wasm_lazy_validation && wasm_module->origin == kWasmOrigin &&
MayCompriseLazyFunctions(wasm_module, native_module->enabled_features(),
lazy_module)) {
+ // Validate wasm modules for lazy compilation if requested. Never validate
+ // asm.js modules as these are valid by construction (additionally a CHECK
+ // will catch this during lazy compilation).
ValidateSequentially(wasm_module, native_module, isolate->counters(),
isolate->allocator(), thrower, lazy_module,
kOnlyLazyFunctions);
@@ -1256,6 +1312,7 @@ AsyncCompileJob::AsyncCompileJob(
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
resolver_(std::move(resolver)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "new AsyncCompileJob");
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
@@ -1386,6 +1443,8 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
void AsyncCompileJob::FinishCompile() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "AsyncCompileJob::FinishCompile");
bool is_after_deserialization = !module_object_.is_null();
if (!is_after_deserialization) {
PrepareRuntimeObjects();
@@ -1865,7 +1924,7 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
if (section_code == SectionCode::kUnknownSectionCode) {
Decoder decoder(bytes, offset);
section_code = ModuleDecoder::IdentifyUnknownSection(
- decoder, bytes.begin() + bytes.length());
+ &decoder, bytes.begin() + bytes.length());
if (section_code == SectionCode::kUnknownSectionCode) {
// Skip unknown sections that we do not know how to handle.
return true;
@@ -1902,13 +1961,19 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
const bool lazy_module = job_->wasm_lazy_compilation_;
- compilation_state->InitializeCompilationProgress(lazy_module);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.store(2);
compilation_unit_builder_.reset(
new CompilationUnitBuilder(job_->native_module_.get()));
+
+ NativeModule* native_module = job_->native_module_.get();
+
+ int num_import_wrappers =
+ AddImportWrapperUnits(native_module, compilation_unit_builder_.get());
+ compilation_state->InitializeCompilationProgress(lazy_module,
+ num_import_wrappers);
return true;
}
@@ -2079,16 +2144,16 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
-void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
+void CompilationStateImpl::InitializeCompilationProgress(
+ bool lazy_module, int num_import_wrappers) {
DCHECK(!failed());
auto enabled_features = native_module_->enabled_features();
auto* module = native_module_->module();
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_EQ(0, outstanding_baseline_functions_);
+ DCHECK_EQ(0, outstanding_baseline_units_);
DCHECK_EQ(0, outstanding_top_tier_functions_);
compilation_progress_.reserve(module->num_declared_functions);
-
int start = module->num_imported_functions;
int end = start + module->num_declared_functions;
for (int func_index = start; func_index < end; func_index++) {
@@ -2104,7 +2169,7 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
strategy == CompileStrategy::kLazyBaselineEagerTopTier);
// Count functions to complete baseline and top tier compilation.
- if (required_for_baseline) outstanding_baseline_functions_++;
+ if (required_for_baseline) outstanding_baseline_units_++;
if (required_for_top_tier) outstanding_top_tier_functions_++;
// Initialize function's compilation progress.
@@ -2120,24 +2185,25 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
RequiredTopTierField::update(function_progress, required_top_tier);
compilation_progress_.push_back(function_progress);
}
- DCHECK_IMPLIES(lazy_module, outstanding_baseline_functions_ == 0);
+ DCHECK_IMPLIES(lazy_module, outstanding_baseline_units_ == 0);
DCHECK_IMPLIES(lazy_module, outstanding_top_tier_functions_ == 0);
- DCHECK_LE(0, outstanding_baseline_functions_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+ DCHECK_LE(0, outstanding_baseline_units_);
+ DCHECK_LE(outstanding_baseline_units_, outstanding_top_tier_functions_);
+ outstanding_baseline_units_ += num_import_wrappers;
// Trigger callbacks if module needs no baseline or top tier compilation. This
// can be the case for an empty or fully lazy module.
- if (outstanding_baseline_functions_ == 0) {
+ if (outstanding_baseline_units_ == 0) {
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
- }
- if (outstanding_top_tier_functions_ == 0) {
- for (auto& callback : callbacks_) {
- callback(CompilationEvent::kFinishedTopTierCompilation);
+ if (outstanding_top_tier_functions_ == 0) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ // Clear the callbacks because no more events will be delivered.
+ callbacks_.clear();
}
- // Clear the callbacks because no more events will be delivered.
- callbacks_.clear();
}
}
@@ -2170,10 +2236,10 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
base::MutexGuard guard(&callbacks_mutex_);
- // In case of no outstanding functions we can return early.
+ // In case of no outstanding compilation units we can return early.
// This is especially important for lazy modules that were deserialized.
// Compilation progress was not set up in these cases.
- if (outstanding_baseline_functions_ == 0 &&
+ if (outstanding_baseline_units_ == 0 &&
outstanding_top_tier_functions_ == 0) {
return;
}
@@ -2190,49 +2256,61 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
for (WasmCode* code : code_vector) {
DCHECK_NOT_NULL(code);
- DCHECK_NE(code->tier(), ExecutionTier::kNone);
- native_module_->engine()->LogCode(code);
-
- // Read function's compilation progress.
- // This view on the compilation progress may differ from the actually
- // compiled code. Any lazily compiled function does not contribute to the
- // compilation progress but may publish code to the code manager.
- int slot_index =
- code->index() - native_module_->module()->num_imported_functions;
- uint8_t function_progress = compilation_progress_[slot_index];
- ExecutionTier required_baseline_tier =
- RequiredBaselineTierField::decode(function_progress);
- ExecutionTier required_top_tier =
- RequiredTopTierField::decode(function_progress);
- ExecutionTier reached_tier = ReachedTierField::decode(function_progress);
+ DCHECK_LT(code->index(), native_module_->num_functions());
bool completes_baseline_compilation = false;
bool completes_top_tier_compilation = false;
- // Check whether required baseline or top tier are reached.
- if (reached_tier < required_baseline_tier &&
- required_baseline_tier <= code->tier()) {
- DCHECK_GT(outstanding_baseline_functions_, 0);
- outstanding_baseline_functions_--;
- if (outstanding_baseline_functions_ == 0) {
+ if (code->index() < native_module_->num_imported_functions()) {
+ // Import wrapper.
+ DCHECK_EQ(code->tier(), ExecutionTier::kTurbofan);
+ outstanding_baseline_units_--;
+ if (outstanding_baseline_units_ == 0) {
completes_baseline_compilation = true;
}
- }
- if (reached_tier < required_top_tier && required_top_tier <= code->tier()) {
- DCHECK_GT(outstanding_top_tier_functions_, 0);
- outstanding_top_tier_functions_--;
- if (outstanding_top_tier_functions_ == 0) {
- completes_top_tier_compilation = true;
+ } else {
+ // Function.
+ DCHECK_NE(code->tier(), ExecutionTier::kNone);
+ native_module_->engine()->LogCode(code);
+
+ // Read function's compilation progress.
+ // This view on the compilation progress may differ from the actually
+ // compiled code. Any lazily compiled function does not contribute to the
+ // compilation progress but may publish code to the code manager.
+ int slot_index =
+ code->index() - native_module_->module()->num_imported_functions;
+ uint8_t function_progress = compilation_progress_[slot_index];
+ ExecutionTier required_baseline_tier =
+ RequiredBaselineTierField::decode(function_progress);
+ ExecutionTier required_top_tier =
+ RequiredTopTierField::decode(function_progress);
+ ExecutionTier reached_tier = ReachedTierField::decode(function_progress);
+
+ // Check whether required baseline or top tier are reached.
+ if (reached_tier < required_baseline_tier &&
+ required_baseline_tier <= code->tier()) {
+ DCHECK_GT(outstanding_baseline_units_, 0);
+ outstanding_baseline_units_--;
+ if (outstanding_baseline_units_ == 0) {
+ completes_baseline_compilation = true;
+ }
+ }
+ if (reached_tier < required_top_tier &&
+ required_top_tier <= code->tier()) {
+ DCHECK_GT(outstanding_top_tier_functions_, 0);
+ outstanding_top_tier_functions_--;
+ if (outstanding_top_tier_functions_ == 0) {
+ completes_top_tier_compilation = true;
+ }
}
- }
- // Update function's compilation progress.
- if (code->tier() > reached_tier) {
- compilation_progress_[slot_index] = ReachedTierField::update(
- compilation_progress_[slot_index], code->tier());
+ // Update function's compilation progress.
+ if (code->tier() > reached_tier) {
+ compilation_progress_[slot_index] = ReachedTierField::update(
+ compilation_progress_[slot_index], code->tier());
+ }
+ DCHECK_LE(0, outstanding_baseline_units_);
}
- DCHECK_LE(0, outstanding_baseline_functions_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
// Trigger callbacks.
if (completes_baseline_compilation) {
@@ -2240,8 +2318,11 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
+ if (outstanding_top_tier_functions_ == 0) {
+ completes_top_tier_compilation = true;
+ }
}
- if (completes_top_tier_compilation) {
+ if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
@@ -2335,24 +2416,83 @@ void CompilationStateImpl::SetError() {
callbacks_.clear();
}
+namespace {
+using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
+using JSToWasmWrapperQueue =
+ WrapperQueue<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>>;
+using JSToWasmWrapperUnitMap =
+ std::unordered_map<JSToWasmWrapperKey,
+ std::unique_ptr<JSToWasmWrapperCompilationUnit>,
+ base::hash<JSToWasmWrapperKey>>;
+
+class CompileJSToWasmWrapperTask final : public CancelableTask {
+ public:
+ CompileJSToWasmWrapperTask(CancelableTaskManager* task_manager,
+ JSToWasmWrapperQueue* queue,
+ JSToWasmWrapperUnitMap* compilation_units)
+ : CancelableTask(task_manager),
+ queue_(queue),
+ compilation_units_(compilation_units) {}
+
+ void RunInternal() override {
+ while (base::Optional<JSToWasmWrapperKey> key = queue_->pop()) {
+ JSToWasmWrapperCompilationUnit* unit = (*compilation_units_)[*key].get();
+ unit->Execute();
+ }
+ }
+
+ private:
+ JSToWasmWrapperQueue* const queue_;
+ JSToWasmWrapperUnitMap* const compilation_units_;
+};
+} // namespace
+
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers) {
- JSToWasmWrapperCache js_to_wasm_cache;
+ JSToWasmWrapperQueue queue;
+ JSToWasmWrapperUnitMap compilation_units;
- // TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
- // optimization we keep the code space unlocked to avoid repeated unlocking
- // because many such wrapper are allocated in sequence below.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ // Prepare compilation units in the main thread.
for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
auto& function = module->functions[exp.index];
- Handle<Code> wrapper_code = js_to_wasm_cache.GetOrCompileJSToWasmWrapper(
- isolate, function.sig, function.imported);
- int wrapper_index =
- GetExportWrapperIndex(module, function.sig, function.imported);
+ JSToWasmWrapperKey key(function.imported, *function.sig);
+ if (queue.insert(key)) {
+ auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>(
+ isolate, function.sig, function.imported);
+ unit->Prepare(isolate);
+ compilation_units.emplace(key, std::move(unit));
+ }
+ }
+
+ // Execute compilation jobs in the background.
+ CancelableTaskManager task_manager;
+ const int max_background_tasks = GetMaxBackgroundTasks();
+ for (int i = 0; i < max_background_tasks; ++i) {
+ auto task = base::make_unique<CompileJSToWasmWrapperTask>(
+ &task_manager, &queue, &compilation_units);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+ }
+
+ // Work in the main thread too.
+ while (base::Optional<JSToWasmWrapperKey> key = queue.pop()) {
+ JSToWasmWrapperCompilationUnit* unit = compilation_units[*key].get();
+ unit->Execute();
+ }
+ task_manager.CancelAndWait();
- export_wrappers->set(wrapper_index, *wrapper_code);
- RecordStats(*wrapper_code, isolate->counters());
+ // Finalize compilation jobs in the main thread.
+ // TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
+ // optimization we keep the code space unlocked to avoid repeated unlocking
+ // because many such wrappers are allocated in sequence below.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ for (auto& pair : compilation_units) {
+ JSToWasmWrapperKey key = pair.first;
+ JSToWasmWrapperCompilationUnit* unit = pair.second.get();
+ Handle<Code> code = unit->Finalize(isolate);
+ int wrapper_index = GetExportWrapperIndex(module, &key.second, key.first);
+ export_wrappers->set(wrapper_index, *code);
+ RecordStats(*code, isolate->counters());
}
}
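The rewritten CompileJsToWasmWrappers splits wrapper compilation into three phases: Prepare on the main thread, Execute concurrently (background tasks plus the main thread drain one shared queue), and Finalize back on the main thread, where code objects are allocated on the heap. A hedged, self-contained sketch of the same shape, with Unit standing in for JSToWasmWrapperCompilationUnit:

    #include <atomic>
    #include <cstddef>
    #include <thread>
    #include <vector>

    struct Unit {
      void Execute() { /* heap-independent compilation work */ }
      void Finalize() { /* allocates code objects; main thread only */ }
    };

    void CompileAll(std::vector<Unit>* units) {
      std::atomic<size_t> next{0};
      auto drain = [&] {
        for (size_t i; (i = next.fetch_add(1)) < units->size();) {
          (*units)[i].Execute();
        }
      };
      std::thread helper(drain);  // background task
      drain();                    // the main thread works too
      helper.join();              // cf. task_manager.CancelAndWait()
      for (Unit& unit : *units) unit.Finalize();
    }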
@@ -2365,17 +2505,24 @@ WasmCode* CompileImportWrapper(
// yet.
WasmImportWrapperCache::CacheKey key(kind, sig);
DCHECK_NULL((*cache_scope)[key]);
- bool source_positions = native_module->module()->origin == kAsmJsOrigin;
+ bool source_positions = is_asmjs_module(native_module->module());
// Keep the {WasmCode} alive until we explicitly call {IncRef}.
WasmCodeRefScope code_ref_scope;
- WasmCode* wasm_code = compiler::CompileWasmImportCallWrapper(
- wasm_engine, native_module, kind, sig, source_positions);
- (*cache_scope)[key] = wasm_code;
- wasm_code->IncRef();
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ wasm_engine, &env, kind, sig, source_positions);
+ std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
+ std::move(result.source_positions), GetCodeKind(result),
+ ExecutionTier::kNone);
+ WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
+ (*cache_scope)[key] = published_code;
+ published_code->IncRef();
counters->wasm_generated_code_size()->Increment(
- wasm_code->instructions().length());
- counters->wasm_reloc_size()->Increment(wasm_code->reloc_info().length());
- return wasm_code;
+ published_code->instructions().length());
+ counters->wasm_reloc_size()->Increment(published_code->reloc_info().length());
+ return published_code;
}
Handle<Script> CreateWasmScript(Isolate* isolate,
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index d465d6a322..27c7bff868 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -9,6 +9,7 @@
#include <functional>
#include <memory>
+#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/tasks/cancelable-task.h"
#include "src/wasm/compilation-environment.h"
@@ -67,6 +68,33 @@ bool CompileLazy(Isolate*, NativeModule*, int func_index);
int GetMaxBackgroundTasks();
+template <typename Key, typename Hash>
+class WrapperQueue {
+ public:
+ // Removes an arbitrary key from the queue and returns it.
+ // If the queue is empty, returns nullopt.
+ // Thread-safe.
+ base::Optional<Key> pop() {
+ base::Optional<Key> key = base::nullopt;
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ auto it = queue_.begin();
+ if (it != queue_.end()) {
+ key = *it;
+ queue_.erase(it);
+ }
+ return key;
+ }
+
+ // Adds the given key to the queue and returns true iff the insert was
+ // successful.
+ // Not thread-safe.
+ bool insert(const Key& key) { return queue_.insert(key).second; }
+
+ private:
+ base::Mutex mutex_;
+ std::unordered_set<Key, Hash> queue_;
+};
+
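Usage of WrapperQueue is deliberately asymmetric: insert may only run during single-threaded setup, while pop is safe from any number of workers until the queue drains. A minimal usage sketch with int keys; Process is a hypothetical per-key callback, not part of the class:

    WrapperQueue<int, std::hash<int>> queue;
    queue.insert(1);  // setup phase, single thread
    queue.insert(2);
    // ... hand &queue to worker tasks ...
    while (base::Optional<int> key = queue.pop()) {  // thread-safe drain
      Process(*key);  // placeholder for the per-key work
    }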
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
// as foreground and background tasks. Any phase that touches the V8 heap or
@@ -91,6 +119,8 @@ class AsyncCompileJob {
Isolate* isolate() const { return isolate_; }
+ Handle<Context> context() const { return native_context_; }
+
private:
class CompileTask;
class CompileStep;
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 4201b1e76c..56712977b1 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -123,7 +123,7 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
case WasmInitExpr::kRefNullConst:
return kWasmNullRef;
case WasmInitExpr::kRefFuncConst:
- return kWasmAnyFunc;
+ return kWasmFuncRef;
default:
UNREACHABLE();
}
@@ -131,35 +131,35 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
-WireBytesRef consume_string(Decoder& decoder, bool validate_utf8,
+WireBytesRef consume_string(Decoder* decoder, bool validate_utf8,
const char* name) {
- uint32_t length = decoder.consume_u32v("string length");
- uint32_t offset = decoder.pc_offset();
- const byte* string_start = decoder.pc();
+ uint32_t length = decoder->consume_u32v("string length");
+ uint32_t offset = decoder->pc_offset();
+ const byte* string_start = decoder->pc();
// Consume bytes before validation to guarantee that the string is not oob.
if (length > 0) {
- decoder.consume_bytes(length, name);
- if (decoder.ok() && validate_utf8 &&
+ decoder->consume_bytes(length, name);
+ if (decoder->ok() && validate_utf8 &&
!unibrow::Utf8::ValidateEncoding(string_start, length)) {
- decoder.errorf(string_start, "%s: no valid UTF-8 string", name);
+ decoder->errorf(string_start, "%s: no valid UTF-8 string", name);
}
}
- return {offset, decoder.failed() ? 0 : length};
+ return {offset, decoder->failed() ? 0 : length};
}
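The Decoder& to Decoder* conversion running through this file follows the Google C++ style rule V8 adheres to: parameters the callee mutates are passed by pointer, so the side effect is visible at each call site. A tiny self-contained example of the convention:

    #include <string>

    // Taking the mutable parameter by pointer makes the mutation obvious
    // at the call site: Consume(&buffer) rather than Consume(buffer).
    void Consume(std::string* buffer) { buffer->clear(); }

    int main() {
      std::string buffer = "payload";
      Consume(&buffer);
    }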
// An iterator over the sections in a wasm binary module.
// Automatically skips all unknown sections.
class WasmSectionIterator {
public:
- explicit WasmSectionIterator(Decoder& decoder)
+ explicit WasmSectionIterator(Decoder* decoder)
: decoder_(decoder),
section_code_(kUnknownSectionCode),
- section_start_(decoder.pc()),
- section_end_(decoder.pc()) {
+ section_start_(decoder->pc()),
+ section_end_(decoder->pc()) {
next();
}
- inline bool more() const { return decoder_.ok() && decoder_.more(); }
+ inline bool more() const { return decoder_->ok() && decoder_->more(); }
inline SectionCode section_code() const { return section_code_; }
@@ -184,23 +184,23 @@ class WasmSectionIterator {
// Advances to the next section, checking that decoding the current section
// stopped at {section_end_}.
void advance(bool move_to_section_end = false) {
- if (move_to_section_end && decoder_.pc() < section_end_) {
- decoder_.consume_bytes(
- static_cast<uint32_t>(section_end_ - decoder_.pc()));
- }
- if (decoder_.pc() != section_end_) {
- const char* msg = decoder_.pc() < section_end_ ? "shorter" : "longer";
- decoder_.errorf(decoder_.pc(),
- "section was %s than expected size "
- "(%u bytes expected, %zu decoded)",
- msg, section_length(),
- static_cast<size_t>(decoder_.pc() - section_start_));
+ if (move_to_section_end && decoder_->pc() < section_end_) {
+ decoder_->consume_bytes(
+ static_cast<uint32_t>(section_end_ - decoder_->pc()));
+ }
+ if (decoder_->pc() != section_end_) {
+ const char* msg = decoder_->pc() < section_end_ ? "shorter" : "longer";
+ decoder_->errorf(decoder_->pc(),
+ "section was %s than expected size "
+ "(%u bytes expected, %zu decoded)",
+ msg, section_length(),
+ static_cast<size_t>(decoder_->pc() - section_start_));
}
next();
}
private:
- Decoder& decoder_;
+ Decoder* decoder_;
SectionCode section_code_;
const byte* section_start_;
const byte* payload_start_;
@@ -209,17 +209,17 @@ class WasmSectionIterator {
// Reads the section code/name at the current position and sets up
// the embedder fields.
void next() {
- if (!decoder_.more()) {
+ if (!decoder_->more()) {
section_code_ = kUnknownSectionCode;
return;
}
- section_start_ = decoder_.pc();
- uint8_t section_code = decoder_.consume_u8("section code");
+ section_start_ = decoder_->pc();
+ uint8_t section_code = decoder_->consume_u8("section code");
// Read and check the section size.
- uint32_t section_length = decoder_.consume_u32v("section length");
+ uint32_t section_length = decoder_->consume_u32v("section length");
- payload_start_ = decoder_.pc();
- if (decoder_.checkAvailable(section_length)) {
+ payload_start_ = decoder_->pc();
+ if (decoder_->checkAvailable(section_length)) {
// Get the limit of the section within the module.
section_end_ = payload_start_ + section_length;
} else {
@@ -234,19 +234,19 @@ class WasmSectionIterator {
ModuleDecoder::IdentifyUnknownSection(decoder_, section_end_);
// As a side effect, the above function will forward the decoder to after
// the identifier string.
- payload_start_ = decoder_.pc();
+ payload_start_ = decoder_->pc();
} else if (!IsValidSectionCode(section_code)) {
- decoder_.errorf(decoder_.pc(), "unknown section code #0x%02x",
- section_code);
+ decoder_->errorf(decoder_->pc(), "unknown section code #0x%02x",
+ section_code);
section_code = kUnknownSectionCode;
}
- section_code_ = decoder_.failed() ? kUnknownSectionCode
- : static_cast<SectionCode>(section_code);
+ section_code_ = decoder_->failed() ? kUnknownSectionCode
+ : static_cast<SectionCode>(section_code);
- if (section_code_ == kUnknownSectionCode && section_end_ > decoder_.pc()) {
+ if (section_code_ == kUnknownSectionCode && section_end_ > decoder_->pc()) {
// skip to the end of the unknown section.
- uint32_t remaining = static_cast<uint32_t>(section_end_ - decoder_.pc());
- decoder_.consume_bytes(remaining, "section payload");
+ uint32_t remaining = static_cast<uint32_t>(section_end_ - decoder_->pc());
+ decoder_->consume_bytes(remaining, "section payload");
}
}
};
@@ -259,13 +259,13 @@ class ModuleDecoderImpl : public Decoder {
explicit ModuleDecoderImpl(const WasmFeatures& enabled, ModuleOrigin origin)
: Decoder(nullptr, nullptr),
enabled_features_(enabled),
- origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {}
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsSloppyOrigin : origin) {}
ModuleDecoderImpl(const WasmFeatures& enabled, const byte* module_start,
const byte* module_end, ModuleOrigin origin)
: Decoder(module_start, module_end),
enabled_features_(enabled),
- origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsSloppyOrigin : origin) {
if (end_ < start_) {
error(start_, "end is less than start");
end_ = start_;
@@ -520,8 +520,8 @@ class ModuleDecoderImpl : public Decoder {
});
WasmImport* import = &module_->import_table.back();
const byte* pos = pc_;
- import->module_name = consume_string(*this, true, "module name");
- import->field_name = consume_string(*this, true, "field name");
+ import->module_name = consume_string(this, true, "module name");
+ import->field_name = consume_string(this, true, "field name");
import->kind =
static_cast<ImportExportKindCode>(consume_u8("import kind"));
switch (import->kind) {
@@ -550,7 +550,7 @@ class ModuleDecoderImpl : public Decoder {
table->imported = true;
ValueType type = consume_reference_type();
if (!enabled_features_.anyref) {
- if (type != kWasmAnyFunc) {
+ if (type != kWasmFuncRef) {
error(pc_ - 1, "invalid table type");
break;
}
@@ -635,7 +635,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeTableSection() {
// TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the
// implementation of AnyRef landed.
- uint32_t max_count = enabled_features_.anyref ? 10 : kV8MaxWasmTables;
+ uint32_t max_count = enabled_features_.anyref ? 100000 : kV8MaxWasmTables;
uint32_t table_count = consume_count("table count", max_count);
for (uint32_t i = 0; ok() && i < table_count; i++) {
@@ -694,7 +694,7 @@ class ModuleDecoderImpl : public Decoder {
});
WasmExport* exp = &module_->export_table.back();
- exp->name = consume_string(*this, true, "field name");
+ exp->name = consume_string(this, true, "field name");
const byte* pos = pc();
exp->kind = static_cast<ImportExportKindCode>(consume_u8("export kind"));
@@ -746,7 +746,7 @@ class ModuleDecoderImpl : public Decoder {
}
}
// Check for duplicate exports (except for asm.js).
- if (ok() && origin_ != kAsmJsOrigin && module_->export_table.size() > 1) {
+ if (ok() && origin_ == kWasmOrigin && module_->export_table.size() > 1) {
std::vector<WasmExport> sorted_exports(module_->export_table);
auto cmp_less = [this](const WasmExport& a, const WasmExport& b) {
@@ -808,16 +808,16 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
- if (!ValueTypes::IsSubType(module_->tables[table_index].type,
- kWasmAnyFunc)) {
+ if (!ValueTypes::IsSubType(kWasmFuncRef,
+ module_->tables[table_index].type)) {
errorf(pos,
- "Invalid element segment. Table %u is not of type AnyFunc",
+ "Invalid element segment. Table %u is not of type FuncRef",
table_index);
break;
}
} else {
ValueType type = consume_reference_type();
- if (!ValueTypes::IsSubType(type, kWasmAnyFunc)) {
+ if (!ValueTypes::IsSubType(kWasmFuncRef, type)) {
error(pc_ - 1, "invalid element segment type");
break;
}
@@ -957,7 +957,7 @@ class ModuleDecoderImpl : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
if (name_type == NameSectionKindCode::kModule) {
- WireBytesRef name = consume_string(inner, false, "module name");
+ WireBytesRef name = consume_string(&inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
} else {
inner.consume_bytes(name_payload_len, "name subsection payload");
@@ -970,7 +970,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeSourceMappingURLSection() {
Decoder inner(start_, pc_, end_, buffer_offset_);
- WireBytesRef url = wasm::consume_string(inner, true, "module name");
+ WireBytesRef url = wasm::consume_string(&inner, true, "module name");
if (inner.ok() &&
!has_seen_unordered_section(kSourceMappingURLSectionCode)) {
const byte* url_start =
@@ -1128,7 +1128,7 @@ class ModuleDecoderImpl : public Decoder {
offset += 8;
Decoder decoder(start_ + offset, end_, offset);
- WasmSectionIterator section_iter(decoder);
+ WasmSectionIterator section_iter(&decoder);
while (ok() && section_iter.more()) {
// Shift the offset by the section header length
@@ -1269,7 +1269,7 @@ class ModuleDecoderImpl : public Decoder {
ValueTypes::TypeName(module->globals[other_index].type));
}
} else {
- if (!ValueTypes::IsSubType(global->type, TypeOf(module, global->init))) {
+ if (!ValueTypes::IsSubType(TypeOf(module, global->init), global->type)) {
errorf(pos, "type error in global initialization, expected %s, got %s",
ValueTypes::TypeName(global->type),
ValueTypes::TypeName(TypeOf(module, global->init)));
@@ -1373,32 +1373,33 @@ class ModuleDecoderImpl : public Decoder {
uint32_t consume_func_index(WasmModule* module, WasmFunction** func,
const char* name) {
- return consume_index(name, module->functions, func);
+ return consume_index(name, &module->functions, func);
}
uint32_t consume_global_index(WasmModule* module, WasmGlobal** global) {
- return consume_index("global index", module->globals, global);
+ return consume_index("global index", &module->globals, global);
}
uint32_t consume_table_index(WasmModule* module, WasmTable** table) {
- return consume_index("table index", module->tables, table);
+ return consume_index("table index", &module->tables, table);
}
uint32_t consume_exception_index(WasmModule* module, WasmException** except) {
- return consume_index("exception index", module->exceptions, except);
+ return consume_index("exception index", &module->exceptions, except);
}
template <typename T>
- uint32_t consume_index(const char* name, std::vector<T>& vector, T** ptr) {
+ uint32_t consume_index(const char* name, std::vector<T>* vector, T** ptr) {
const byte* pos = pc_;
uint32_t index = consume_u32v(name);
- if (index >= vector.size()) {
+ if (index >= vector->size()) {
errorf(pos, "%s %u out of bounds (%d entr%s)", name, index,
- static_cast<int>(vector.size()), vector.size() == 1 ? "y" : "ies");
+ static_cast<int>(vector->size()),
+ vector->size() == 1 ? "y" : "ies");
*ptr = nullptr;
return 0;
}
- *ptr = &vector[index];
+ *ptr = &(*vector)[index];
return index;
}
@@ -1594,14 +1595,14 @@ class ModuleDecoderImpl : public Decoder {
case kLocalS128:
if (enabled_features_.simd) return kWasmS128;
break;
- case kLocalAnyFunc:
- if (enabled_features_.anyref) return kWasmAnyFunc;
+ case kLocalFuncRef:
+ if (enabled_features_.anyref) return kWasmFuncRef;
break;
case kLocalAnyRef:
if (enabled_features_.anyref) return kWasmAnyRef;
break;
- case kLocalExceptRef:
- if (enabled_features_.eh) return kWasmExceptRef;
+ case kLocalExnRef:
+ if (enabled_features_.eh) return kWasmExnRef;
break;
default:
break;
@@ -1617,8 +1618,8 @@ class ModuleDecoderImpl : public Decoder {
byte val = consume_u8("reference type");
ValueTypeCode t = static_cast<ValueTypeCode>(val);
switch (t) {
- case kLocalAnyFunc:
- return kWasmAnyFunc;
+ case kLocalFuncRef:
+ return kWasmFuncRef;
case kLocalAnyRef:
if (!enabled_features_.anyref) {
error(pc_ - 1,
@@ -1680,45 +1681,41 @@ class ModuleDecoderImpl : public Decoder {
void consume_segment_header(const char* name, bool* is_active,
uint32_t* index, WasmInitExpr* offset) {
const byte* pos = pc();
- // In the MVP, this is a table or memory index field that must be 0, but
- // we've repurposed it as a flags field in the bulk memory proposal.
- uint32_t flags;
- if (enabled_features_.bulk_memory) {
- flags = consume_u32v("flags");
- if (failed()) return;
- } else {
- // Without the bulk memory proposal, we should still read the table
- // index. This is the same as reading the `ActiveWithIndex` flag with
- // the bulk memory proposal.
- flags = SegmentFlags::kActiveWithIndex;
+ uint32_t flag = consume_u32v("flag");
+
+ // Some flag values are only valid for specific proposals.
+ if (flag == SegmentFlags::kPassive) {
+ if (!enabled_features_.bulk_memory) {
+ error(
+ "Passive element segments require --experimental-wasm-bulk-memory");
+ return;
+ }
+ } else if (flag == SegmentFlags::kActiveWithIndex) {
+ if (!(enabled_features_.bulk_memory || enabled_features_.anyref)) {
+ error(
+ "Element segments with table indices require "
+ "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ return;
+ }
+ } else if (flag != SegmentFlags::kActiveNoIndex) {
+ errorf(pos, "illegal flag value %u. Must be 0, 1, or 2", flag);
+ return;
}
- bool read_index;
- bool read_offset;
- if (flags == SegmentFlags::kActiveNoIndex) {
+ // We know now that the flag is valid. Time to read the rest.
+ if (flag == SegmentFlags::kActiveNoIndex) {
*is_active = true;
- read_index = false;
- read_offset = true;
- } else if (flags == SegmentFlags::kPassive) {
+ *index = 0;
+ *offset = consume_init_expr(module_.get(), kWasmI32);
+ return;
+ }
+ if (flag == SegmentFlags::kPassive) {
*is_active = false;
- read_index = false;
- read_offset = false;
- } else if (flags == SegmentFlags::kActiveWithIndex) {
- *is_active = true;
- read_index = true;
- read_offset = true;
- } else {
- errorf(pos, "illegal flag value %u. Must be 0, 1, or 2", flags);
return;
}
-
- if (read_index) {
+ if (flag == SegmentFlags::kActiveWithIndex) {
+ *is_active = true;
*index = consume_u32v(name);
- } else {
- *index = 0;
- }
-
- if (read_offset) {
*offset = consume_init_expr(module_.get(), kWasmI32);
}
}
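For reference, these are the three flag values the rewritten consume_segment_header accepts, gated as above; the numeric values follow the bulk-memory encoding and the "Must be 0, 1, or 2" error message, and the enumerator names mirror SegmentFlags:

    #include <cstdint>

    enum SegmentFlags : uint32_t {
      kActiveNoIndex = 0,    // active; table/memory index implicitly 0
      kPassive = 1,          // requires --experimental-wasm-bulk-memory
      kActiveWithIndex = 2,  // explicit index; bulk-memory or anyref
    };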
@@ -1833,17 +1830,17 @@ ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
return impl_->FinishDecoding(verify_functions);
}
-SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
+SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder* decoder,
const byte* end) {
WireBytesRef string = consume_string(decoder, true, "section name");
- if (decoder.failed() || decoder.pc() > end) {
+ if (decoder->failed() || decoder->pc() > end) {
return kUnknownSectionCode;
}
const byte* section_name_start =
- decoder.start() + decoder.GetBufferRelativeOffset(string.offset());
+ decoder->start() + decoder->GetBufferRelativeOffset(string.offset());
TRACE(" +%d section name : \"%.*s\"\n",
- static_cast<int>(section_name_start - decoder.start()),
+ static_cast<int>(section_name_start - decoder->start()),
string.length() < 20 ? string.length() : 20, section_name_start);
if (string.length() == num_chars(kNameString) &&
@@ -1989,20 +1986,20 @@ std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
namespace {
-bool FindNameSection(Decoder& decoder) {
+bool FindNameSection(Decoder* decoder) {
static constexpr int kModuleHeaderSize = 8;
- decoder.consume_bytes(kModuleHeaderSize, "module header");
+ decoder->consume_bytes(kModuleHeaderSize, "module header");
WasmSectionIterator section_iter(decoder);
- while (decoder.ok() && section_iter.more() &&
+ while (decoder->ok() && section_iter.more() &&
section_iter.section_code() != kNameSectionCode) {
section_iter.advance(true);
}
if (!section_iter.more()) return false;
// Reset the decoder to not read beyond the name section end.
- decoder.Reset(section_iter.payload(), decoder.pc_offset());
+ decoder->Reset(section_iter.payload(), decoder->pc_offset());
return true;
}
@@ -2014,7 +2011,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
DCHECK(names->empty());
Decoder decoder(module_start, module_end);
- if (!FindNameSection(decoder)) return;
+ if (!FindNameSection(&decoder)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
@@ -2031,7 +2028,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
for (; decoder.ok() && functions_count > 0; --functions_count) {
uint32_t function_index = decoder.consume_u32v("function index");
- WireBytesRef name = consume_string(decoder, false, "function name");
+ WireBytesRef name = consume_string(&decoder, false, "function name");
// Be lenient with errors in the name section: Ignore non-UTF8 names. You
// can even assign to the same function multiple times (last valid one
@@ -2049,7 +2046,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
DCHECK(result->names.empty());
Decoder decoder(module_start, module_end);
- if (!FindNameSection(decoder)) return;
+ if (!FindNameSection(&decoder)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
@@ -2074,7 +2071,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
uint32_t num_names = decoder.consume_u32v("namings count");
for (uint32_t k = 0; k < num_names; ++k) {
uint32_t local_index = decoder.consume_u32v("local index");
- WireBytesRef name = consume_string(decoder, true, "local name");
+ WireBytesRef name = consume_string(&decoder, true, "local name");
if (!decoder.ok()) break;
if (local_index > kMaxInt) continue;
func_names.max_local_index =
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 07d6e66019..8e121c9d30 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -139,12 +139,12 @@ class ModuleDecoder {
// Translates the unknown section that decoder is pointing to to an extended
// SectionCode if the unknown section is known to decoder.
- // The decoder is expected to point after the section lenght and just before
+ // The decoder is expected to point after the section length and just before
// the identifier string of the unknown section.
// If a SectionCode other than kUnknownSectionCode is returned, the decoder
// will point right after the identifier string. Otherwise, the position is
// undefined.
- static SectionCode IdentifyUnknownSection(Decoder& decoder, const byte* end);
+ static SectionCode IdentifyUnknownSection(Decoder* decoder, const byte* end);
private:
const WasmFeatures enabled_features_;
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 8293674826..a4b0139ea4 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -25,6 +25,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadLittleEndianValue;
+using base::WriteLittleEndianValue;
+
namespace {
byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
@@ -48,35 +51,8 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
}
}
-// Queue of import wrapper keys to compile for an instance.
-class ImportWrapperQueue {
- public:
- // Removes an arbitrary cache key from the queue and returns it.
- // If the queue is empty, returns nullopt.
- // Thread-safe.
- base::Optional<WasmImportWrapperCache::CacheKey> pop() {
- base::Optional<WasmImportWrapperCache::CacheKey> key = base::nullopt;
- base::LockGuard<base::Mutex> lock(&mutex_);
- auto it = queue_.begin();
- if (it != queue_.end()) {
- key = *it;
- queue_.erase(it);
- }
- return key;
- }
-
- // Add the given key to the queue.
- // Not thread-safe.
- void insert(const WasmImportWrapperCache::CacheKey& key) {
- queue_.insert(key);
- }
-
- private:
- base::Mutex mutex_;
- std::unordered_set<WasmImportWrapperCache::CacheKey,
- WasmImportWrapperCache::CacheKeyHash>
- queue_;
-};
+using ImportWrapperQueue = WrapperQueue<WasmImportWrapperCache::CacheKey,
+ WasmImportWrapperCache::CacheKeyHash>;
class CompileImportWrapperTask final : public CancelableTask {
public:
@@ -200,9 +176,9 @@ class InstanceBuilder {
Handle<String> import_name,
Handle<Object> value);
- // Initialize imported tables of type anyfunc.
+ // Initialize imported tables of type funcref.
bool InitializeImportedIndirectFunctionTable(
- Handle<WasmInstanceObject> instance, int import_index,
+ Handle<WasmInstanceObject> instance, int table_index, int import_index,
Handle<WasmTableObject> table_object);
// Process a single imported table.
@@ -255,7 +231,7 @@ class InstanceBuilder {
// and globals.
void ProcessExports(Handle<WasmInstanceObject> instance);
- void InitializeTables(Handle<WasmInstanceObject> instance);
+ void InitializeIndirectFunctionTables(Handle<WasmInstanceObject> instance);
void LoadTableSegments(Handle<WasmInstanceObject> instance);
@@ -336,8 +312,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
memory->set_is_detachable(false);
DCHECK_IMPLIES(native_module->use_trap_handler(),
- module_->origin == kAsmJsOrigin ||
- memory->is_wasm_memory() ||
+ is_asmjs_module(module_) || memory->is_wasm_memory() ||
memory->backing_store() == nullptr);
} else if (initial_pages > 0 || native_module->use_trap_handler()) {
// We need to unconditionally create a guard region if using trap handlers,
@@ -421,15 +396,34 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Set up table storage space.
//--------------------------------------------------------------------------
int table_count = static_cast<int>(module_->tables.size());
- Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
- for (int i = module_->num_imported_tables; i < table_count; i++) {
- const WasmTable& table = module_->tables[i];
- Handle<WasmTableObject> table_obj = WasmTableObject::New(
- isolate_, table.type, table.initial_size, table.has_maximum_size,
- table.maximum_size, nullptr);
- tables->set(i, *table_obj);
+ {
+ Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
+ for (int i = module_->num_imported_tables; i < table_count; i++) {
+ const WasmTable& table = module_->tables[i];
+ Handle<WasmTableObject> table_obj = WasmTableObject::New(
+ isolate_, table.type, table.initial_size, table.has_maximum_size,
+ table.maximum_size, nullptr);
+ tables->set(i, *table_obj);
+ }
+ instance->set_tables(*tables);
+ }
+
+ {
+ Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
+ // Table 0 is handled specially. See {InitializeIndirectFunctionTable} for
+ // the initialization. All generated and runtime code will use this optimized
+ // shortcut in the instance. Hence it is safe to start with table 1 in the
+ // iteration below.
+ for (int i = 1; i < table_count; ++i) {
+ const WasmTable& table = module_->tables[i];
+ if (table.type == kWasmFuncRef) {
+ Handle<WasmIndirectFunctionTable> table_obj =
+ WasmIndirectFunctionTable::New(isolate_, table.initial_size);
+ tables->set(i, *table_obj);
+ }
+ }
+ instance->set_indirect_function_tables(*tables);
}
- instance->set_tables(*tables);
//--------------------------------------------------------------------------
// Process the imports for the module.
@@ -446,7 +440,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Initialize the indirect tables.
//--------------------------------------------------------------------------
if (table_count > 0) {
- InitializeTables(instance);
+ InitializeIndirectFunctionTables(instance);
}
//--------------------------------------------------------------------------
@@ -550,9 +544,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (module_->start_function_index >= 0) {
int start_index = module_->start_function_index;
auto& function = module_->functions[start_index];
- Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
- isolate_, function.sig, function.imported)
- .ToHandleChecked();
+ Handle<Code> wrapper_code =
+ JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ isolate_, function.sig, function.imported);
// TODO(clemensh): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
@@ -755,8 +749,8 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
tagged_globals_->set(global.offset, *value->GetRef());
break;
}
@@ -800,7 +794,7 @@ void InstanceBuilder::SanitizeImports() {
int int_index = static_cast<int>(index);
MaybeHandle<Object> result =
- module_->origin == kAsmJsOrigin
+ is_asmjs_module(module_)
? LookupImportAsm(int_index, import_name)
: LookupImport(int_index, module_name, import_name);
if (thrower_->error()) {
@@ -842,8 +836,10 @@ bool InstanceBuilder::ProcessImportedFunction(
}
auto js_receiver = Handle<JSReceiver>::cast(value);
FunctionSig* expected_sig = module_->functions[func_index].sig;
- auto kind = compiler::GetWasmImportCallKind(js_receiver, expected_sig,
- enabled_.bigint);
+ auto resolved = compiler::ResolveWasmImportCall(js_receiver, expected_sig,
+ enabled_.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
+ js_receiver = resolved.second;
switch (kind) {
case compiler::WasmImportCallKind::kLinkError:
ReportLinkError("imported function does not match the expected type",
@@ -851,7 +847,7 @@ bool InstanceBuilder::ProcessImportedFunction(
return false;
case compiler::WasmImportCallKind::kWasmToWasm: {
// The imported function is a WASM function from another instance.
- auto imported_function = Handle<WasmExportedFunction>::cast(value);
+ auto imported_function = Handle<WasmExportedFunction>::cast(js_receiver);
Handle<WasmInstanceObject> imported_instance(
imported_function->instance(), isolate_);
// The import reference is the instance object itself.
@@ -866,7 +862,8 @@ bool InstanceBuilder::ProcessImportedFunction(
}
case compiler::WasmImportCallKind::kWasmToCapi: {
NativeModule* native_module = instance->module_object().native_module();
- Address host_address = WasmCapiFunction::cast(*value).GetHostCallTarget();
+ Address host_address =
+ WasmCapiFunction::cast(*js_receiver).GetHostCallTarget();
WasmCodeRefScope code_ref_scope;
WasmCode* wasm_code = compiler::CompileWasmCapiCallWrapper(
isolate_->wasm_engine(), native_module, expected_sig, host_address);
@@ -904,14 +901,12 @@ bool InstanceBuilder::ProcessImportedFunction(
}
bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
- Handle<WasmInstanceObject> instance, int import_index,
+ Handle<WasmInstanceObject> instance, int table_index, int import_index,
Handle<WasmTableObject> table_object) {
int imported_table_size = table_object->entries().length();
// Allocate a new dispatch table.
- if (!instance->has_indirect_function_table()) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, imported_table_size);
- }
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table_index, imported_table_size);
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
for (int i = 0; i < imported_table_size; ++i) {
@@ -919,15 +914,22 @@ bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
bool is_null;
MaybeHandle<WasmInstanceObject> maybe_target_instance;
int function_index;
+ MaybeHandle<WasmJSFunction> maybe_js_function;
WasmTableObject::GetFunctionTableEntry(isolate_, table_object, i, &is_valid,
&is_null, &maybe_target_instance,
- &function_index);
+ &function_index, &maybe_js_function);
if (!is_valid) {
thrower_->LinkError("table import %d[%d] is not a wasm function",
import_index, i);
return false;
}
if (is_null) continue;
+ Handle<WasmJSFunction> js_function;
+ if (maybe_js_function.ToHandle(&js_function)) {
+ WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ isolate_, instance, table_index, i, js_function);
+ continue;
+ }
Handle<WasmInstanceObject> target_instance =
maybe_target_instance.ToHandleChecked();
@@ -939,7 +941,7 @@ bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
// Look up the signature's canonical id. If there is no canonical
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
- IndirectFunctionTableEntry(instance, i)
+ IndirectFunctionTableEntry(instance, table_index, i)
.Set(module_->signature_map.Find(*sig), target_instance,
function_index);
}
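// Illustrative sketch of the check that call_indirect later performs against
// entries like the one set above: each dispatch table entry carries a
// canonical signature id, and a mismatch traps. The -1 sentinel can never
// match a real id, so such entries always fail. Names and types here are
// invented for illustration.
#include <cassert>

struct DispatchEntry {
  int sig_id;         // canonical signature id, or -1 if absent
  void* call_target;  // ignored in this sketch
};

bool SignatureMatches(const DispatchEntry& entry, int expected_sig_id) {
  return entry.sig_id == expected_sig_id;  // mismatch => trap in real code
}

int main() {
  DispatchEntry absent{-1, nullptr};  // signature unknown to this module
  assert(!SignatureMatches(absent, 0));
  DispatchEntry present{0, nullptr};
  assert(SignatureMatches(present, 0));
  return 0;
}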
@@ -958,7 +960,6 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
}
const WasmTable& table = module_->tables[table_index];
- instance->tables().set(table_index, *value);
auto table_object = Handle<WasmTableObject>::cast(value);
int imported_table_size = table_object->entries().length();
@@ -995,13 +996,13 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
return false;
}
- // The indirect function table only exists for table 0.
- if (table.type == kWasmAnyFunc && table_index == 0 &&
- !InitializeImportedIndirectFunctionTable(instance, import_index,
- table_object)) {
+ if (table.type == kWasmFuncRef &&
+ !InitializeImportedIndirectFunctionTable(instance, table_index,
+ import_index, table_object)) {
return false;
}
+ instance->tables().set(table_index, *value);
return true;
}
@@ -1068,7 +1069,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
return false;
}
- bool is_sub_type = ValueTypes::IsSubType(global.type, global_object->type());
+ bool is_sub_type = ValueTypes::IsSubType(global_object->type(), global.type);
bool is_same_type = global_object->type() == global.type;
bool valid_type = global.mutability ? is_same_type : is_sub_type;
@@ -1129,7 +1130,7 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
module_name, import_name);
return false;
}
- if (module_->origin == kAsmJsOrigin) {
+ if (is_asmjs_module(module_)) {
// Accepting {JSFunction} on top of just primitive values here is a
// workaround to support legacy asm.js code with broken binding. Note
// that using {NaN} (or Smi::kZero) here is what using the observable
@@ -1162,11 +1163,11 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
if (ValueTypes::IsReferenceType(global.type)) {
// There shouldn't be any null-ref globals.
DCHECK_NE(ValueType::kWasmNullRef, global.type);
- if (global.type == ValueType::kWasmAnyFunc) {
+ if (global.type == ValueType::kWasmFuncRef) {
if (!value->IsNull(isolate_) &&
!WasmExportedFunction::IsWasmExportedFunction(*value)) {
ReportLinkError(
- "imported anyfunc global must be null or an exported function",
+ "imported funcref global must be null or an exported function",
import_index, module_name, import_name);
return false;
}
@@ -1217,8 +1218,9 @@ void InstanceBuilder::CompileImportWrappers(
auto js_receiver = Handle<JSReceiver>::cast(value);
uint32_t func_index = module_->import_table[index].index;
FunctionSig* sig = module_->functions[func_index].sig;
- auto kind =
- compiler::GetWasmImportCallKind(js_receiver, sig, enabled_.bigint);
+ auto resolved =
+ compiler::ResolveWasmImportCall(js_receiver, sig, enabled_.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
if (kind == compiler::WasmImportCallKind::kWasmToWasm ||
kind == compiler::WasmImportCallKind::kLinkError ||
kind == compiler::WasmImportCallKind::kWasmToCapi) {
@@ -1431,7 +1433,7 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages,
bool InstanceBuilder::NeedsWrappers() const {
if (module_->num_exported_functions > 0) return true;
for (auto& table : module_->tables) {
- if (table.type == kWasmAnyFunc) return true;
+ if (table.type == kWasmFuncRef) return true;
}
return false;
}
@@ -1458,6 +1460,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
Handle<JSObject> exports_object;
+ MaybeHandle<String> single_function_name;
bool is_asm_js = false;
switch (module_->origin) {
case kWasmOrigin: {
@@ -1465,10 +1468,13 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
exports_object = isolate_->factory()->NewJSObjectWithNullProto();
break;
}
- case kAsmJsOrigin: {
+ case kAsmJsSloppyOrigin:
+ case kAsmJsStrictOrigin: {
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate_->native_context()->object_function(), isolate_);
exports_object = isolate_->factory()->NewJSObject(object_function);
+ single_function_name = isolate_->factory()->InternalizeUtf8String(
+ AsmJs::kSingleFunctionName);
is_asm_js = true;
break;
}
@@ -1477,9 +1483,6 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
instance->set_exports_object(*exports_object);
- Handle<String> single_function_name =
- isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
-
PropertyDescriptor desc;
desc.set_writable(is_asm_js);
desc.set_enumerable(true);
@@ -1490,14 +1493,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
isolate_, module_object_, exp.name)
.ToHandleChecked();
- Handle<JSObject> export_to;
- if (is_asm_js && exp.kind == kExternalFunction &&
- String::Equals(isolate_, name, single_function_name)) {
- export_to = instance;
- } else {
- export_to = exports_object;
- }
-
+ Handle<JSObject> export_to = exports_object;
switch (exp.kind) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
@@ -1505,8 +1501,13 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
MaybeHandle<WasmExportedFunction> wasm_exported_function =
WasmInstanceObject::GetOrCreateWasmExportedFunction(
isolate_, instance, exp.index);
-
desc.set_value(wasm_exported_function.ToHandleChecked());
+
+ if (is_asm_js &&
+ String::Equals(isolate_, name,
+ single_function_name.ToHandleChecked())) {
+ export_to = instance;
+ }
break;
}
case kExternalTable: {
@@ -1611,21 +1612,21 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
}
-void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
- size_t table_count = module_->tables.size();
- for (size_t index = 0; index < table_count; ++index) {
- const WasmTable& table = module_->tables[index];
+void InstanceBuilder::InitializeIndirectFunctionTables(
+ Handle<WasmInstanceObject> instance) {
+ for (int i = 0; i < static_cast<int>(module_->tables.size()); ++i) {
+ const WasmTable& table = module_->tables[i];
- if (!instance->has_indirect_function_table() &&
- table.type == kWasmAnyFunc) {
+ if (table.type == kWasmFuncRef) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, table.initial_size);
+ instance, i, table.initial_size);
}
}
}
bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<WasmTableObject> table_object,
+ uint32_t table_index,
const WasmElemSegment& elem_segment, uint32_t dst,
uint32_t src, size_t count) {
// TODO(wasm): Move this functionality into wasm-objects, since it is used
@@ -1642,8 +1643,8 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
int entry_index = static_cast<int>(dst + i);
if (func_index == WasmElemSegment::kNullIndex) {
- if (table_object->type() == kWasmAnyFunc) {
- IndirectFunctionTableEntry(instance, entry_index).clear();
+ if (table_object->type() == kWasmFuncRef) {
+ IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
}
WasmTableObject::Set(isolate, table_object, entry_index,
isolate->factory()->null_value());
@@ -1652,13 +1653,10 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
const WasmFunction* function = &module->functions[func_index];
- // Update the local dispatch table first if necessary. We only have to
- // update the dispatch table if the first table of the instance is changed.
- // For all other tables, function calls do not use a dispatch table at
- // the moment.
- if (elem_segment.table_index == 0 && table_object->type() == kWasmAnyFunc) {
+ // Update the local dispatch table first if necessary.
+ if (table_object->type() == kWasmFuncRef) {
uint32_t sig_id = module->signature_ids[function->sig_index];
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, instance, func_index);
}
@@ -1699,6 +1697,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
// Passive segments are not copied during instantiation.
if (!elem_segment.active) continue;
+ uint32_t table_index = elem_segment.table_index;
uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
uint32_t src = 0;
size_t count = elem_segment.entries.size();
@@ -1708,7 +1707,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
handle(WasmTableObject::cast(
instance->tables().get(elem_segment.table_index)),
isolate_),
- elem_segment, dst, src, count);
+ table_index, elem_segment, dst, src, count);
if (enabled_.bulk_memory) {
if (!success) {
thrower_->LinkError("table initializer is out of bounds");
@@ -1724,7 +1723,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
int table_count = static_cast<int>(module_->tables.size());
for (int index = 0; index < table_count; ++index) {
- if (module_->tables[index].type == kWasmAnyFunc) {
+ if (module_->tables[index].type == kWasmFuncRef) {
auto table_object = handle(
WasmTableObject::cast(instance->tables().get(index)), isolate_);
@@ -1749,19 +1748,12 @@ void InstanceBuilder::InitializeExceptions(
bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
- // This code path is only used for passive element segments with the
- // table.init instruction. This instruction was introduced in the
- // bulk-memory-operations proposal. At the moment, table.init can only operate
- // on table-0. If table.init should work for tables with higher indices, then
- // we have to adjust the code in {LoadElemSegmentImpl}. The code there uses
- // {IndirectFunctionTableEntry} at the moment, which only works for table-0.
- CHECK_EQ(table_index, 0);
auto& elem_segment = instance->module()->elem_segments[segment_index];
return LoadElemSegmentImpl(
isolate, instance,
handle(WasmTableObject::cast(instance->tables().get(table_index)),
isolate),
- elem_segment, dst, src, count);
+ table_index, elem_segment, dst, src, count);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 49fd2892eb..bca5c2b941 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -16,6 +16,16 @@ class Signature;
namespace wasm {
+// Type lattice: For any two types connected by a line, the type at the bottom
+// is a subtype of the other type.
+//
+//                       AnyRef
+//                       /    \
+//                 FuncRef    ExnRef
+//                       \    /
+// I32  I64  F32  F64    NullRef
+//   \    \    \    \    /
+//   ------------  Bottom
enum ValueType : uint8_t {
kWasmStmt,
kWasmI32,
@@ -24,10 +34,10 @@ enum ValueType : uint8_t {
kWasmF64,
kWasmS128,
kWasmAnyRef,
- kWasmAnyFunc,
+ kWasmFuncRef,
kWasmNullRef,
- kWasmExceptRef,
- kWasmVar,
+ kWasmExnRef,
+ kWasmBottom,
};
using FunctionSig = Signature<ValueType>;
@@ -178,25 +188,31 @@ class StoreType {
// A collection of ValueType-related static methods.
class V8_EXPORT_PRIVATE ValueTypes {
public:
- static inline bool IsSubType(ValueType expected, ValueType actual) {
+ static inline bool IsSubType(ValueType actual, ValueType expected) {
return (expected == actual) ||
(expected == kWasmAnyRef && actual == kWasmNullRef) ||
- (expected == kWasmAnyRef && actual == kWasmAnyFunc) ||
- (expected == kWasmAnyRef && actual == kWasmExceptRef) ||
- (expected == kWasmAnyFunc && actual == kWasmNullRef) ||
- // TODO(mstarzinger): For now we treat "null_ref" as a sub-type of
- // "except_ref", which is correct but might change. See here:
+ (expected == kWasmAnyRef && actual == kWasmFuncRef) ||
+ (expected == kWasmAnyRef && actual == kWasmExnRef) ||
+ (expected == kWasmFuncRef && actual == kWasmNullRef) ||
+ // TODO(mstarzinger): For now we treat "nullref" as a sub-type of
+ // "exnref", which is correct but might change. See here:
// https://github.com/WebAssembly/exception-handling/issues/55
- (expected == kWasmExceptRef && actual == kWasmNullRef);
+ (expected == kWasmExnRef && actual == kWasmNullRef);
}
static inline bool IsReferenceType(ValueType type) {
- // This function assumes at the moment that it is never called with
- // {kWasmNullRef}. If this assumption is wrong, it should be added to the
- // result calculation below.
- DCHECK_NE(type, kWasmNullRef);
- return type == kWasmAnyRef || type == kWasmAnyFunc ||
- type == kWasmExceptRef;
+ return type == kWasmAnyRef || type == kWasmFuncRef || type == kWasmExnRef;
+ }
+
+ static inline ValueType CommonSubType(ValueType a, ValueType b) {
+ if (a == b) return a;
+ // The only sub type of any value type is {bot}.
+ if (!IsReferenceType(a) || !IsReferenceType(b)) return kWasmBottom;
+ if (IsSubType(a, b)) return a;
+ if (IsSubType(b, a)) return b;
+ // {a} and {b} are not each other's subtype. The biggest sub-type of all
+ // reference types is {kWasmNullRef}.
+ return kWasmNullRef;
+ }
static byte MemSize(MachineType type) {
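// A minimal standalone sketch of the subtype rules encoded by IsSubType()
// above; the enum mirrors the lattice comment from this diff, while the
// assertions are illustrative only.
#include <cassert>
#include <cstdint>

namespace lattice_demo {

enum ValueType : uint8_t { kI32, kAnyRef, kFuncRef, kNullRef, kExnRef, kBottom };

inline bool IsSubType(ValueType actual, ValueType expected) {
  return (expected == actual) ||
         (expected == kAnyRef &&
          (actual == kNullRef || actual == kFuncRef || actual == kExnRef)) ||
         (expected == kFuncRef && actual == kNullRef) ||
         (expected == kExnRef && actual == kNullRef);
}

}  // namespace lattice_demo

int main() {
  using namespace lattice_demo;
  assert(IsSubType(kNullRef, kFuncRef));  // NullRef sits below FuncRef.
  assert(IsSubType(kFuncRef, kAnyRef));   // All reference types sit below AnyRef.
  assert(!IsSubType(kFuncRef, kExnRef));  // Siblings are unrelated.
  return 0;
}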
@@ -214,8 +230,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmS128:
return 16;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return kSystemPointerSize;
default:
UNREACHABLE();
@@ -232,6 +248,10 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 3;
case kWasmS128:
return 4;
+ case kWasmAnyRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
+ return kSystemPointerSizeLog2;
default:
UNREACHABLE();
}
@@ -253,10 +273,10 @@ class V8_EXPORT_PRIVATE ValueTypes {
return kLocalS128;
case kWasmAnyRef:
return kLocalAnyRef;
- case kWasmAnyFunc:
- return kLocalAnyFunc;
- case kWasmExceptRef:
- return kLocalExceptRef;
+ case kWasmFuncRef:
+ return kLocalFuncRef;
+ case kWasmExnRef:
+ return kLocalExnRef;
case kWasmStmt:
return kLocalVoid;
default:
@@ -275,8 +295,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return MachineType::Float64();
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return MachineType::TaggedPointer();
case kWasmS128:
return MachineType::Simd128();
@@ -298,9 +318,9 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return MachineRepresentation::kFloat64;
case kWasmAnyRef:
- case kWasmAnyFunc:
+ case kWasmFuncRef:
case kWasmNullRef:
- case kWasmExceptRef:
+ case kWasmExnRef:
return MachineRepresentation::kTaggedPointer;
case kWasmS128:
return MachineRepresentation::kSimd128;
@@ -344,13 +364,13 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 'd';
case kWasmAnyRef:
return 'r';
- case kWasmAnyFunc:
+ case kWasmFuncRef:
return 'a';
case kWasmS128:
return 's';
case kWasmStmt:
return 'v';
- case kWasmVar:
+ case kWasmBottom:
return '*';
default:
return '?';
@@ -369,18 +389,18 @@ class V8_EXPORT_PRIVATE ValueTypes {
return "f64";
case kWasmAnyRef:
return "anyref";
- case kWasmAnyFunc:
- return "anyfunc";
+ case kWasmFuncRef:
+ return "funcref";
case kWasmNullRef:
return "nullref";
- case kWasmExceptRef:
+ case kWasmExnRef:
return "exn";
case kWasmS128:
return "s128";
case kWasmStmt:
return "<stmt>";
- case kWasmVar:
- return "<var>";
+ case kWasmBottom:
+ return "<bot>";
default:
return "<unknown>";
}
diff --git a/deps/v8/src/wasm/wasm-arguments.h b/deps/v8/src/wasm/wasm-arguments.h
new file mode 100644
index 0000000000..822f46addd
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-arguments.h
@@ -0,0 +1,73 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_ARGUMENTS_H_
+#define V8_WASM_WASM_ARGUMENTS_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "src/base/memory.h"
+#include "src/codegen/signature.h"
+#include "src/common/globals.h"
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Helper class for {Push}ing Wasm value arguments onto the stack in the format
+// that the CWasmEntryStub expects, as well as for {Pop}ping return values.
+// {Reset} must be called if a packer instance used for pushing is then
+// reused for popping: it resets the internal pointer to the beginning of
+// the stack region.
+class CWasmArgumentsPacker {
+ public:
+ explicit CWasmArgumentsPacker(size_t buffer_size)
+ : heap_buffer_(buffer_size <= kMaxOnStackBuffer ? 0 : buffer_size),
+ buffer_((buffer_size <= kMaxOnStackBuffer) ? on_stack_buffer_
+ : heap_buffer_.data()) {}
+ i::Address argv() const { return reinterpret_cast<i::Address>(buffer_); }
+ void Reset() { offset_ = 0; }
+
+ template <typename T>
+ void Push(T val) {
+ Address address = reinterpret_cast<Address>(buffer_ + offset_);
+ offset_ += sizeof(val);
+ base::WriteUnalignedValue(address, val);
+ }
+
+ template <typename T>
+ T Pop() {
+ Address address = reinterpret_cast<Address>(buffer_ + offset_);
+ offset_ += sizeof(T);
+ return base::ReadUnalignedValue<T>(address);
+ }
+
+ static int TotalSize(FunctionSig* sig) {
+ int return_size = 0;
+ for (ValueType t : sig->returns()) {
+ return_size += ValueTypes::ElementSizeInBytes(t);
+ }
+ int param_size = 0;
+ for (ValueType t : sig->parameters()) {
+ param_size += ValueTypes::ElementSizeInBytes(t);
+ }
+ return std::max(return_size, param_size);
+ }
+
+ private:
+ static const size_t kMaxOnStackBuffer = 10 * i::kSystemPointerSize;
+
+ uint8_t on_stack_buffer_[kMaxOnStackBuffer];
+ std::vector<uint8_t> heap_buffer_;
+ uint8_t* buffer_;
+ size_t offset_ = 0;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_ARGUMENTS_H_
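// A standalone analogue of the packer protocol described above: push the
// parameters, hand the buffer to the callee, then Reset() and pop the results
// from the same region. std::memcpy stands in for the unaligned read/write
// helpers, and the (i32, i32) -> i64 signature is made up for illustration.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

class MiniPacker {
 public:
  explicit MiniPacker(size_t buffer_size) : buffer_(buffer_size) {}
  void Reset() { offset_ = 0; }

  template <typename T>
  void Push(T val) {
    std::memcpy(buffer_.data() + offset_, &val, sizeof(val));
    offset_ += sizeof(val);
  }

  template <typename T>
  T Pop() {
    T val;
    std::memcpy(&val, buffer_.data() + offset_, sizeof(T));
    offset_ += sizeof(T);
    return val;
  }

 private:
  std::vector<uint8_t> buffer_;
  size_t offset_ = 0;
};

int main() {
  MiniPacker packer(8);     // TotalSize = max(param bytes, return bytes) = 8
  packer.Push<int32_t>(7);  // parameters of the call
  packer.Push<int32_t>(35);
  // The entry stub would consume the params and overwrite the buffer with the
  // results; emulate that by writing the return value ourselves.
  packer.Reset();
  packer.Push<int64_t>(42);
  packer.Reset();
  assert(packer.Pop<int64_t>() == 42);
  return 0;
}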
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 2eddce3d95..3d0cde0cce 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -150,7 +150,8 @@ bool WasmCode::ShouldBeLogged(Isolate* isolate) {
// The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
// to call {WasmEngine::EnableCodeLogging} if this return value would change
// for any isolate. Otherwise we might lose code events.
- return isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
+ return isolate->logger()->is_listening_to_code_events() ||
+ isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
isolate->is_profiling();
}
@@ -286,7 +287,8 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
if (handler_table_size() > 0) {
- HandlerTable table(handler_table(), handler_table_size());
+ HandlerTable table(handler_table(), handler_table_size(),
+ HandlerTable::kReturnAddressBasedEncoding);
os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
<< "):\n";
table.HandlerTableReturnPrint(os);
@@ -403,12 +405,15 @@ void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
VirtualMemory code_space,
- bool can_request_more)
+ bool can_request_more,
+ std::shared_ptr<Counters> async_counters)
: code_manager_(code_manager),
free_code_space_(code_space.region()),
- can_request_more_memory_(can_request_more) {
+ can_request_more_memory_(can_request_more),
+ async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(can_request_more ? 4 : 1);
owned_code_space_.emplace_back(std::move(code_space));
+ async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
WasmCodeAllocator::~WasmCodeAllocator() {
@@ -487,6 +492,8 @@ Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
owned_code_space_.emplace_back(std::move(new_mem));
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
+ async_counters_->wasm_module_num_code_spaces()->AddSample(
+ static_cast<int>(owned_code_space_.size()));
}
const Address commit_page_size = page_allocator->CommitPageSize();
Address commit_start = RoundUp(code_space.begin(), commit_page_size);
@@ -613,7 +620,7 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this)
: code_allocator_(engine->code_manager(), std::move(code_space),
- can_request_more),
+ can_request_more, async_counters),
enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
@@ -694,12 +701,26 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
+ if (!lazy_compile_table_) {
+ uint32_t num_slots = module_->num_declared_functions;
+ WasmCodeRefScope code_ref_scope;
+ lazy_compile_table_ = CreateEmptyJumpTable(
+ JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots));
+ JumpTableAssembler::GenerateLazyCompileTable(
+ lazy_compile_table_->instruction_start(), num_slots,
+ module_->num_imported_functions,
+ runtime_stub_entry(WasmCode::kWasmCompileLazy));
+ }
+
// Add jump table entry for jump to the lazy compile stub.
uint32_t slot_index = func_index - module_->num_imported_functions;
DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
- JumpTableAssembler::EmitLazyCompileJumpSlot(
- jump_table_->instruction_start(), slot_index, func_index,
- runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
+ Address lazy_compile_target =
+ lazy_compile_table_->instruction_start() +
+ JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
+ JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
+ slot_index, lazy_compile_target,
+ WasmCode::kFlushICache);
}
// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
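// A toy model of the indirection set up above: every jump table slot starts
// out pointing at a per-function lazy-compile entry; the first call compiles
// the function and patches the slot, so later calls run the compiled code
// directly. Plain function pointers stand in for the generated jump table.
#include <cstdio>

using Target = int (*)(int);
static Target jump_table[1];

static int compiled_square(int x) { return x * x; }

static int lazy_stub(int x) {
  std::puts("lazily compiling function 0");
  jump_table[0] = &compiled_square;  // what PatchJumpTableSlot does for real
  return jump_table[0](x);
}

int main() {
  jump_table[0] = &lazy_stub;             // UseLazyStub, in miniature
  std::printf("%d\n", jump_table[0](5));  // first call: compile, then run
  std::printf("%d\n", jump_table[0](6));  // later calls: straight to the code
  return 0;
}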
@@ -713,23 +734,22 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
WasmCode::kRuntimeStubCount));
Address base = jump_table->instruction_start();
EmbeddedData embedded_data = EmbeddedData::FromBlob();
-#define RUNTIME_STUB(Name) {Builtins::k##Name, WasmCode::k##Name},
+#define RUNTIME_STUB(Name) Builtins::k##Name,
#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
- std::pair<Builtins::Name, WasmCode::RuntimeStubId> wasm_runtime_stubs[] = {
+ Builtins::Name wasm_runtime_stubs[WasmCode::kRuntimeStubCount] = {
WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
- for (auto pair : wasm_runtime_stubs) {
- CHECK(embedded_data.ContainsBuiltin(pair.first));
- Address builtin = embedded_data.InstructionStartOfBuiltin(pair.first);
- JumpTableAssembler::EmitRuntimeStubSlot(base, pair.second, builtin,
- WasmCode::kNoFlushICache);
- uint32_t slot_offset =
- JumpTableAssembler::StubSlotIndexToOffset(pair.second);
- runtime_stub_entries_[pair.second] = base + slot_offset;
+ Address builtin_address[WasmCode::kRuntimeStubCount];
+ for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
+ Builtins::Name builtin = wasm_runtime_stubs[i];
+ CHECK(embedded_data.ContainsBuiltin(builtin));
+ builtin_address[i] = embedded_data.InstructionStartOfBuiltin(builtin);
+ runtime_stub_entries_[i] =
+ base + JumpTableAssembler::StubSlotIndexToOffset(i);
}
- FlushInstructionCache(jump_table->instructions().begin(),
- jump_table->instructions().size());
+ JumpTableAssembler::GenerateRuntimeStubTable(base, builtin_address,
+ WasmCode::kRuntimeStubCount);
DCHECK_NULL(runtime_stub_table_);
runtime_stub_table_ = jump_table;
#else // V8_EMBEDDED_BUILTINS
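// The RUNTIME_STUB/RUNTIME_STUB_TRAP pair above is an X-macro expansion: one
// list macro gets expanded once per representation needed. A minimal sketch
// with an invented stub list:
#include <cstdio>

#define STUB_LIST(V) V(StackGuard) V(Throw) V(MemoryGrow)

#define AS_ENUM(Name) kStub##Name,
enum StubId { STUB_LIST(AS_ENUM) kStubCount };
#undef AS_ENUM

#define AS_NAME(Name) #Name,
static const char* const kStubNames[kStubCount] = {STUB_LIST(AS_NAME)};
#undef AS_NAME

int main() {
  for (int i = 0; i < kStubCount; ++i) std::printf("%d: %s\n", i, kStubNames[i]);
  return 0;
}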
@@ -822,7 +842,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry);
std::unique_ptr<WasmCode> new_code{new WasmCode{
this, // native_module
- WasmCode::kAnonymousFuncIndex, // index
+ kAnonymousFuncIndex, // index
dst_code_bytes, // instructions
stack_slots, // stack_slots
0, // tagged_parameter_slots
@@ -920,8 +940,6 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
code->MaybePrint();
code->Validate();
- code->RegisterTrapHandlerData();
-
return code;
}
@@ -930,27 +948,28 @@ WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
return PublishCodeLocked(std::move(code));
}
-namespace {
-WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
- switch (tier) {
- case ExecutionTier::kInterpreter:
+WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
+ switch (result.kind) {
+ case WasmCompilationResult::kWasmToJsWrapper:
+ return WasmCode::Kind::kWasmToJsWrapper;
+ case WasmCompilationResult::kInterpreterEntry:
return WasmCode::Kind::kInterpreterEntry;
- case ExecutionTier::kLiftoff:
- case ExecutionTier::kTurbofan:
+ case WasmCompilationResult::kFunction:
return WasmCode::Kind::kFunction;
- case ExecutionTier::kNone:
+ default:
UNREACHABLE();
}
}
-} // namespace
WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock());
- if (!code->IsAnonymous()) {
+ if (!code->IsAnonymous() &&
+ code->index() >= module_->num_imported_functions) {
DCHECK_LT(code->index(), num_functions());
- DCHECK_LE(module_->num_imported_functions, code->index());
+
+ code->RegisterTrapHandlerData();
// Assume an order of execution tiers that represents the quality of their
// generated code.
@@ -1017,8 +1036,6 @@ WasmCode* NativeModule::AddDeserializedCode(
std::move(protected_instructions), std::move(reloc_info),
std::move(source_position_table), kind, tier}};
- code->RegisterTrapHandlerData();
-
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
@@ -1056,7 +1073,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{new WasmCode{
this, // native_module
- WasmCode::kAnonymousFuncIndex, // index
+ kAnonymousFuncIndex, // index
code_space, // instructions
0, // stack_slots
0, // tagged_parameter_slots
@@ -1112,11 +1129,16 @@ WasmCode* NativeModule::Lookup(Address pc) const {
return candidate;
}
+uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
+ uint32_t slot_idx = func_index - module_->num_imported_functions;
+ DCHECK_GT(module_->num_declared_functions, slot_idx);
+ return JumpTableAssembler::JumpSlotIndexToOffset(slot_idx);
+}
+
Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
// Return the jump table slot for that function index.
DCHECK_NOT_NULL(jump_table_);
- uint32_t slot_idx = func_index - module_->num_imported_functions;
- uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx);
+ uint32_t slot_offset = GetJumpTableOffset(func_index);
DCHECK_LT(slot_offset, jump_table_->instructions().size());
return jump_table_->instruction_start() + slot_offset;
}
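// The mapping implemented by GetJumpTableOffset() above, in isolation:
// imported functions own no jump table slots, so the slot index is the
// function index minus the import count, scaled by a fixed per-slot size.
// The constants here are invented; the real slot size is per-architecture.
#include <cassert>
#include <cstdint>

constexpr uint32_t kNumImportedFunctions = 3;
constexpr uint32_t kJumpTableSlotSize = 8;  // illustrative only

uint32_t JumpSlotIndexToOffset(uint32_t slot_idx) {
  return slot_idx * kJumpTableSlotSize;
}

uint32_t GetJumpTableOffset(uint32_t func_index) {
  assert(func_index >= kNumImportedFunctions);
  return JumpSlotIndexToOffset(func_index - kNumImportedFunctions);
}

int main() {
  assert(GetJumpTableOffset(3) == 0);   // first declared function
  assert(GetJumpTableOffset(5) == 16);  // third declared function
  return 0;
}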
@@ -1416,9 +1438,8 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
generated_code.emplace_back(AddCodeWithCodeSpace(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions),
- GetCodeKindForExecutionTier(result.result_tier), result.result_tier,
- this_code_space));
+ std::move(result.source_positions), GetCodeKind(result),
+ result.result_tier, this_code_space));
}
DCHECK_EQ(0, code_space.size());
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 49c287df2c..db7b4f061d 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -176,7 +176,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
- static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
private:
@@ -270,6 +269,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
+WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
+
// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);
@@ -277,7 +278,8 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
class WasmCodeAllocator {
public:
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
- bool can_request_more);
+ bool can_request_more,
+ std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
size_t committed_code_space() const {
@@ -315,7 +317,7 @@ class WasmCodeAllocator {
// Code space that was allocated for code (subset of {owned_code_space_}).
DisjointAllocationPool allocated_code_space_;
// Code space that was allocated before but is dead now. Full pages within
- // this region are discarded. It's still a subset of {owned_code_space_}).
+ // this region are discarded. It's still a subset of {owned_code_space_}.
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
@@ -329,6 +331,8 @@ class WasmCodeAllocator {
bool is_executable_ = false;
const bool can_request_more_memory_;
+
+ std::shared_ptr<Counters> async_counters_;
};
class V8_EXPORT_PRIVATE NativeModule final {
@@ -399,10 +403,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
}
- ptrdiff_t jump_table_offset(uint32_t func_index) const {
- DCHECK_GE(func_index, num_imported_functions());
- return GetCallTargetForFunction(func_index) - jump_table_start();
- }
+ uint32_t GetJumpTableOffset(uint32_t func_index) const;
bool is_jump_table_slot(Address address) const {
return jump_table_->contains(address);
@@ -558,6 +559,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Jump table used to easily redirect wasm function calls.
WasmCode* jump_table_ = nullptr;
+ // Lazy compile stub table, containing entries to jump to the
+ // {WasmCompileLazy} builtin, passing the function index.
+ WasmCode* lazy_compile_table_ = nullptr;
+
// The compilation state keeps track of compilation tasks for this module.
// Note that its destructor blocks until all tasks are finished/aborted and
// hence needs to be destructed first when this native module dies.
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index fce60cb593..fbbe19396c 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -26,9 +26,9 @@ enum ValueTypeCode : uint8_t {
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
kLocalS128 = 0x7b,
- kLocalAnyFunc = 0x70,
+ kLocalFuncRef = 0x70,
kLocalAnyRef = 0x6f,
- kLocalExceptRef = 0x68,
+ kLocalExnRef = 0x68,
};
// Binary encoding of other types.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
@@ -106,6 +106,8 @@ constexpr WasmCodePosition kNoCodePosition = -1;
constexpr uint32_t kExceptionAttribute = 0;
+constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 33d9a64bf4..2955bc602f 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -617,8 +617,8 @@ Handle<JSObject> WasmDebugInfo::GetLocalScopeObject(
}
// static
-Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
- Handle<WasmDebugInfo> debug_info, wasm::FunctionSig* sig) {
+Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
+ wasm::FunctionSig* sig) {
Isolate* isolate = debug_info->GetIsolate();
DCHECK_EQ(debug_info->has_c_wasm_entries(),
debug_info->has_c_wasm_entry_map());
@@ -642,24 +642,9 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
DCHECK(entries->get(index).IsUndefined(isolate));
Handle<Code> new_entry_code =
compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
- Handle<WasmExportedFunctionData> function_data =
- Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct(
- WASM_EXPORTED_FUNCTION_DATA_TYPE, AllocationType::kOld));
- function_data->set_wrapper_code(*new_entry_code);
- function_data->set_instance(debug_info->wasm_instance());
- function_data->set_jump_table_offset(-1);
- function_data->set_function_index(-1);
- Handle<String> name =
- isolate->factory()->InternalizeString(StaticCharVector("c-wasm-entry"));
- NewFunctionArgs args = NewFunctionArgs::ForWasm(
- name, function_data, isolate->sloppy_function_map());
- Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
- new_entry->set_context(debug_info->wasm_instance().native_context());
- new_entry->shared().set_internal_formal_parameter_count(
- compiler::CWasmEntryParameters::kNumParameters);
- entries->set(index, *new_entry);
+ entries->set(index, *new_entry_code);
}
- return handle(JSFunction::cast(entries->get(index)), isolate);
+ return handle(Code::cast(entries->get(index)), isolate);
}
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 83053fd71f..7b91b16b80 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -8,6 +8,7 @@
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
+#include "src/execution/v8threads.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
@@ -88,24 +89,24 @@ class LogCodesTask : public Task {
WasmEngine* const engine_;
};
-class WasmGCForegroundTask : public Task {
- public:
- explicit WasmGCForegroundTask(Isolate* isolate) : isolate_(isolate) {
- DCHECK_NOT_NULL(isolate);
- }
-
- ~WasmGCForegroundTask() {
- // If the isolate is already shutting down, the platform can delete this
- // task without ever executing it. For that case, we need to deregister the
- // task from the engine to avoid UAF.
- if (isolate_) {
- WasmEngine* engine = isolate_->wasm_engine();
- engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
+void CheckNoArchivedThreads(Isolate* isolate) {
+ class ArchivedThreadsVisitor : public ThreadVisitor {
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
+ // Archived threads are rarely used, and not combined with Wasm at the
+ // moment. Implement this and test it properly once we have a use case for
+ // that.
+ FATAL("archived threads in combination with wasm not supported");
}
- }
+ } archived_threads_visitor;
+ isolate->thread_manager()->IterateArchivedThreads(&archived_threads_visitor);
+}
+
+class WasmGCForegroundTask : public CancelableTask {
+ public:
+ explicit WasmGCForegroundTask(Isolate* isolate)
+ : CancelableTask(isolate->cancelable_task_manager()), isolate_(isolate) {}
- void Run() final {
- if (isolate_ == nullptr) return; // cancelled.
+ void RunInternal() final {
WasmEngine* engine = isolate_->wasm_engine();
// If the foreground task is executing, there is no wasm code active. Just
// report an empty set of live wasm code.
@@ -114,13 +115,10 @@ class WasmGCForegroundTask : public Task {
DCHECK_NE(StackFrame::WASM_COMPILED, it.frame()->type());
}
#endif
+ CheckNoArchivedThreads(isolate_);
engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
- // Cancel to signal to the destructor that this task executed.
- Cancel();
}
- void Cancel() { isolate_ = nullptr; }
-
private:
Isolate* isolate_;
};
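// Sketch of the lifetime hazard the CancelableTask change above addresses: a
// platform may drop a posted task without ever running it, so the task must
// not touch a possibly-dead isolate. A weak_ptr makes the check explicit in
// this sketch; the real code relies on the isolate's cancelable task manager.
#include <cstdio>
#include <memory>

struct IsolateLike {
  void ReportLiveCodeForGC() { std::puts("reported live code"); }
};

struct GCTask {
  std::weak_ptr<IsolateLike> isolate;
  void Run() {
    if (auto alive = isolate.lock()) alive->ReportLiveCodeForGC();
    // else: isolate already torn down; touching it would be a use-after-free.
  }
};

int main() {
  auto isolate = std::make_shared<IsolateLike>();
  GCTask task{isolate};
  task.Run();       // isolate alive: reports
  isolate.reset();  // isolate torn down before the task ran again
  task.Run();       // safely does nothing
  return 0;
}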
@@ -240,10 +238,13 @@ bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
- Handle<HeapNumber> uses_bitset) {
+ Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
+ ModuleOrigin origin = language_mode == LanguageMode::kSloppy
+ ? kAsmJsSloppyOrigin
+ : kAsmJsStrictOrigin;
ModuleResult result =
DecodeWasmModule(kAsmjsWasmFeatures, bytes.start(), bytes.end(), false,
- kAsmJsOrigin, isolate->counters(), allocator());
+ origin, isolate->counters(), allocator());
if (result.failed()) {
// This happens once in a while when we have missed some limit check
// in the asm parser. Output an error message to help diagnose, but crash.
@@ -465,6 +466,9 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
DCHECK_EQ(1, native_modules_.count(native_module));
native_modules_[native_module]->isolates.insert(isolate);
}
+
+ // Finish the Wasm script now and make it public to the debugger.
+ isolate->debug()->OnAfterCompile(script);
return module_object;
}
@@ -524,6 +528,24 @@ bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
return false;
}
+void WasmEngine::DeleteCompileJobsOnContext(Handle<Context> context) {
+ // Under the mutex get all jobs to delete. Then delete them without holding
+ // the mutex, such that deletion can reenter the WasmEngine.
+ std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
+ {
+ base::MutexGuard guard(&mutex_);
+ for (auto it = async_compile_jobs_.begin();
+ it != async_compile_jobs_.end();) {
+ if (!it->first->context().is_identical_to(context)) {
+ ++it;
+ continue;
+ }
+ jobs_to_delete.push_back(std::move(it->second));
+ it = async_compile_jobs_.erase(it);
+ }
+ }
+}
+
void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
// Under the mutex get all jobs to delete. Then delete them without holding
// the mutex, such that deletion can reenter the WasmEngine.
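// The collect-then-delete pattern used by both DeleteCompileJobs functions,
// in generic form: move the doomed entries out of the map while holding the
// mutex, then destroy them after the guard is released so their destructors
// may re-enter this object. All names here are illustrative.
#include <map>
#include <memory>
#include <mutex>
#include <vector>

struct Job {
  ~Job() { /* may call back into the registry */ }
};

class JobRegistry {
 public:
  void DeleteJobsForKey(int key) {
    std::vector<std::unique_ptr<Job>> jobs_to_delete;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      for (auto it = jobs_.begin(); it != jobs_.end();) {
        if (it->first != key) {
          ++it;
          continue;
        }
        jobs_to_delete.push_back(std::move(it->second));
        it = jobs_.erase(it);
      }
    }
    // jobs_to_delete is destroyed here, outside the lock.
  }

 private:
  std::mutex mutex_;
  std::multimap<int, std::unique_ptr<Job>> jobs_;
};

int main() {
  JobRegistry registry;
  registry.DeleteJobsForKey(1);
  return 0;
}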
@@ -775,6 +797,8 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
live_wasm_code.insert(WasmCompiledFrame::cast(frame)->wasm_code());
}
+ CheckNoArchivedThreads(isolate);
+
ReportLiveCodeForGC(isolate,
OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
}
@@ -876,11 +900,7 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
DCHECK(!mutex_.TryLock());
DCHECK_NOT_NULL(current_gc_info_);
- auto it = current_gc_info_->outstanding_isolates.find(isolate);
- if (it == current_gc_info_->outstanding_isolates.end()) return false;
- if (auto* fg_task = it->second) fg_task->Cancel();
- current_gc_info_->outstanding_isolates.erase(it);
- return true;
+ return current_gc_info_->outstanding_isolates.erase(isolate) != 0;
}
void WasmEngine::PotentiallyFinishCurrentGC() {
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 2ae3e81368..69e6cdae6e 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -62,7 +62,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
MaybeHandle<AsmWasmData> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
- Handle<HeapNumber> uses_bitset);
+ Handle<HeapNumber> uses_bitset, LanguageMode language_mode);
Handle<WasmModuleObject> FinalizeTranslatedAsmJs(
Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
Handle<Script> script);
@@ -140,6 +140,11 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Isolate is currently running.
bool HasRunningCompileJob(Isolate* isolate);
+ // Deletes all AsyncCompileJobs that belong to the given context. All
+ // compilation is aborted, no more callbacks will be triggered. This is used
+ // when a context is disposed, e.g. because of browser navigation.
+ void DeleteCompileJobsOnContext(Handle<Context> context);
+
// Deletes all AsyncCompileJobs that belong to the given Isolate. All
// compilation is aborted, no more callbacks will be triggered. This is used
// for tearing down an isolate, or to clean it up to be reused.
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 997cf83bb7..08e6139abe 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -29,7 +29,7 @@
#include "src/trap-handler/trap-handler.h"
#endif
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/utils.h"
#include "src/wasm/wasm-external-refs.h"
@@ -37,6 +37,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadUnalignedValue;
+using base::WriteUnalignedValue;
+
void f32_trunc_wrapper(Address data) {
WriteUnalignedValue<float>(data, truncf(ReadUnalignedValue<float>(data)));
}
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
index b586d07ff4..9630fa76dd 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -18,6 +18,11 @@ WasmCode*& WasmImportWrapperCache::ModificationScope::operator[](
return cache_->entry_map_[key];
}
+WasmCode*& WasmImportWrapperCache::operator[](
+ const WasmImportWrapperCache::CacheKey& key) {
+ return entry_map_[key];
+}
+
WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
FunctionSig* sig) const {
auto it = entry_map_.find({kind, sig});
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index 62f27cd9a4..e9e60faad4 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -45,6 +45,10 @@ class WasmImportWrapperCache {
base::MutexGuard guard_;
};
+ // Not thread-safe; use ModificationScope to get exclusive write access to the
+ // cache.
+ V8_EXPORT_PRIVATE WasmCode*& operator[](const CacheKey& key);
+
// Assumes the key exists in the map.
V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
FunctionSig* sig) const;
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index f06cead069..4449439896 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -12,7 +12,6 @@
#include "src/compiler/wasm-compiler.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects-inl.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/utils/boxed-float.h"
#include "src/utils/identity-map.h"
#include "src/utils/utils.h"
@@ -21,12 +20,12 @@
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
@@ -34,6 +33,11 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadLittleEndianValue;
+using base::ReadUnalignedValue;
+using base::WriteLittleEndianValue;
+using base::WriteUnalignedValue;
+
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
@@ -582,7 +586,7 @@ inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
}
inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
- return static_cast<float>(a);
+ return DoubleToFloat32(a);
}
inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
@@ -816,7 +820,7 @@ class SideTable : public ZoneObject {
bool is_loop = opcode == kExprLoop;
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
@@ -832,7 +836,7 @@ class SideTable : public ZoneObject {
case kExprIf: {
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
@@ -865,7 +869,7 @@ class SideTable : public ZoneObject {
case kExprTry: {
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
@@ -1279,8 +1283,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
@@ -1460,8 +1464,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
val = WasmValue(isolate_->factory()->null_value());
break;
}
@@ -1658,8 +1662,8 @@ class ThreadImpl {
}
template <typename ctype, typename mtype>
- bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
- MachineRepresentation rep) {
+ bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int* const len, MachineRepresentation rep) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
@@ -1672,7 +1676,7 @@ class ThreadImpl {
converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
Push(result);
- len = 1 + imm.length;
+ *len = 1 + imm.length;
if (FLAG_trace_wasm_memory) {
MemoryTracingInfo info(imm.offset + index, false, rep);
@@ -1685,8 +1689,8 @@ class ThreadImpl {
}
template <typename ctype, typename mtype>
- bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
- MachineRepresentation rep) {
+ bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int* const len, MachineRepresentation rep) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
sizeof(ctype));
ctype val = Pop().to<ctype>();
@@ -1698,7 +1702,7 @@ class ThreadImpl {
return false;
}
WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
- len = 1 + imm.length;
+ *len = 1 + imm.length;
if (FLAG_trace_wasm_memory) {
MemoryTracingInfo info(imm.offset + index, true, rep);
@@ -1730,24 +1734,24 @@ class ThreadImpl {
template <typename type, typename op_type>
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
- Address& address, pc_t pc, int& len,
+ Address* address, pc_t pc, int* const len,
type* val = nullptr, type* val2 = nullptr) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
sizeof(type));
if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
if (val) *val = static_cast<type>(Pop().to<op_type>());
uint32_t index = Pop().to<uint32_t>();
- address = BoundsCheckMem<type>(imm.offset, index);
+ *address = BoundsCheckMem<type>(imm.offset, index);
- if (!address) {
+ if (!*address) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
- len = 2 + imm.length;
+ *len = 2 + imm.length;
return true;
}
bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int& len) {
+ InterpreterCode* code, pc_t pc, int* const len) {
switch (opcode) {
case kExprI32SConvertSatF32:
Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
@@ -1776,7 +1780,7 @@ class ThreadImpl {
case kExprMemoryInit: {
MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
- len += imm.length;
+ *len += imm.length;
if (!CheckDataSegmentIsPassiveAndNotDropped(imm.data_segment_index,
pc)) {
return false;
@@ -1784,6 +1788,9 @@ class ThreadImpl {
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
auto src_max =
@@ -1799,7 +1806,7 @@ class ThreadImpl {
}
case kExprDataDrop: {
DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckDataSegmentIsPassiveAndNotDropped(imm.index, pc)) {
return false;
}
@@ -1808,11 +1815,15 @@ class ThreadImpl {
}
case kExprMemoryCopy: {
MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ *len += imm.length;
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
- bool copy_backward = src < dst && dst - src < size;
+ bool copy_backward = src < dst;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
// Trap without copying any bytes if we are copying backward and the
// copy is partially out-of-bounds. We only need to check that the dst
@@ -1825,25 +1836,27 @@ class ThreadImpl {
memory_copy_wrapper(dst_addr, src_addr, size);
}
if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
- len += imm.length;
return ok;
}
case kExprMemoryFill: {
MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
code->at(pc + 1));
+ *len += imm.length;
auto size = Pop().to<uint32_t>();
auto value = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
memory_fill_wrapper(dst_addr, value, size);
if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
- len += imm.length;
return ok;
}
case kExprTableInit: {
TableInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckElemSegmentIsPassiveAndNotDropped(imm.elem_segment_index,
pc)) {
return false;
@@ -1860,7 +1873,7 @@ class ThreadImpl {
}
case kExprElemDrop: {
ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckElemSegmentIsPassiveAndNotDropped(imm.index, pc)) {
return false;
}
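// The memory.copy and memory.fill hunks above both gained an early return for
// size == 0: per the semantics implemented here, a zero-length operation
// succeeds without any bounds check. A simplified model follows, with linear
// memory as a vector and the trap reduced to a bool; the partial-copy
// behavior of the real interpreter is not modeled.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

static bool MemoryCopy(std::vector<uint8_t>& mem, uint32_t dst, uint32_t src,
                       uint32_t size) {
  if (size == 0) return true;  // never traps, even for wild indices
  uint64_t mem_size = mem.size();
  if (uint64_t{dst} + size > mem_size || uint64_t{src} + size > mem_size) {
    return false;  // trap: out of bounds
  }
  std::memmove(mem.data() + dst, mem.data() + src, size);  // overlap-safe
  return true;
}

int main() {
  std::vector<uint8_t> mem(8, 0);
  assert(MemoryCopy(mem, 100, 200, 0));  // zero length: ok regardless of bounds
  assert(!MemoryCopy(mem, 6, 0, 4));     // runs past the end: trap
  assert(MemoryCopy(mem, 2, 0, 4));      // overlapping regions: ok
  return 0;
}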
@@ -1877,9 +1890,64 @@ class ThreadImpl {
isolate_, instance_object_, imm.table_dst.index,
imm.table_src.index, dst, src, size);
if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
- len += imm.length;
+ *len += imm.length;
return ok;
}
+ case kExprTableGrow: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ auto delta = Pop().to<uint32_t>();
+ auto value = Pop().to_anyref();
+ int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
+ Push(WasmValue(result));
+ *len += imm.length;
+ return true;
+ }
+ case kExprTableSize: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ Push(WasmValue(table_size));
+ *len += imm.length;
+ return true;
+ }
+ case kExprTableFill: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto count = Pop().to<uint32_t>();
+ auto value = Pop().to_anyref();
+ auto start = Pop().to<uint32_t>();
+
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ if (start > table_size) {
+ DoTrap(kTrapTableOutOfBounds, pc);
+ return false;
+ }
+
+ // Even when table.fill goes out-of-bounds, as many entries as possible
+ // are put into the table. Only afterwards do we trap.
+ uint32_t fill_count = std::min(count, table_size - start);
+ WasmTableObject::Fill(isolate_, table, start, value, fill_count);
+
+ if (fill_count < count) {
+ DoTrap(kTrapTableOutOfBounds, pc);
+ return false;
+ }
+ *len += imm.length;
+ return true;
+ }
default:
FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
OpcodeName(code->start[pc]));
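// The table.fill case above writes as many entries as fit before trapping.
// A sketch of that fill-then-trap behavior, with table entries reduced to
// plain ints instead of reference values:
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

static bool TableFill(std::vector<int>& table, uint32_t start, int value,
                      uint32_t count) {
  uint32_t table_size = static_cast<uint32_t>(table.size());
  if (start > table_size) return false;  // trap, nothing written
  uint32_t fill_count = std::min(count, table_size - start);
  std::fill_n(table.begin() + start, fill_count, value);
  return fill_count == count;  // a partial fill still traps afterwards
}

int main() {
  std::vector<int> table(4, 0);
  assert(!TableFill(table, 2, 7, 5));      // fills slots 2 and 3, then traps
  assert(table[2] == 7 && table[3] == 7);  // the in-bounds prefix was written
  assert(TableFill(table, 0, 9, 4));       // exact fit: no trap
  return 0;
}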
@@ -1911,7 +1979,7 @@ class ThreadImpl {
}
bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int& len) {
+ InterpreterCode* code, pc_t pc, int* const len) {
#if V8_TARGET_BIG_ENDIAN
constexpr bool kBigEndian = true;
#else
@@ -1919,27 +1987,27 @@ class ThreadImpl {
#endif
WasmValue result;
switch (opcode) {
-#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- op_type result; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- if (kBigEndian) { \
- auto oplambda = [](type a, type b) { return a op b; }; \
- result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
- } else { \
- result = static_cast<op_type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
- } \
- Push(WasmValue(result)); \
- break; \
+#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ op_type result; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ if (kBigEndian) { \
+ auto oplambda = [](type a, type b) { return a op b; }; \
+ result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
+ } else { \
+ result = static_cast<op_type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+ } \
+ Push(WasmValue(result)); \
+ break; \
}
ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
@@ -2003,24 +2071,24 @@ class ThreadImpl {
ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
atomic_exchange, =);
#undef ATOMIC_BINOP_CASE
-#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
- case kExpr##name: { \
- type old_val; \
- type new_val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &old_val, &new_val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- old_val = AdjustByteOrder<type>(old_val); \
- new_val = AdjustByteOrder<type>(new_val); \
- std::atomic_compare_exchange_strong( \
- reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
- Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
- break; \
+#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
+ case kExpr##name: { \
+ type old_val; \
+ type new_val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &old_val, &new_val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ old_val = AdjustByteOrder<type>(old_val); \
+ new_val = AdjustByteOrder<type>(new_val); \
+ std::atomic_compare_exchange_strong( \
+ reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
+ Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
+ break; \
}
ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
uint32_t);
@@ -2037,19 +2105,20 @@ class ThreadImpl {
ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
- Push(result); \
- break; \
+#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
+ case kExpr##name: { \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
+ len)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
+ Push(result); \
+ break; \
}
ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
@@ -2059,20 +2128,20 @@ class ThreadImpl {
ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
#undef ATOMIC_LOAD_CASE
-#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
- AdjustByteOrder<type>(val)); \
- break; \
+#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
+ AdjustByteOrder<type>(val)); \
+ break; \
}
ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
@@ -2082,6 +2151,10 @@ class ThreadImpl {
ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
+ case kExprAtomicFence:
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ *len += 2;
+ break;
default:
UNREACHABLE();
return false;
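
The new kExprAtomicFence case is the whole story for wasm's atomic.fence: it lowers to a single sequentially consistent C++ fence, and the `*len += 2` steps the pc over the fence's opcode byte and its reserved immediate. In isolation:

    #include <atomic>

    // wasm's atomic.fence touches no memory; it only constrains ordering.
    void WasmAtomicFence() {
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }
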
@@ -2118,7 +2191,7 @@ class ThreadImpl {
}
bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
- pc_t pc, int& len) {
+ pc_t pc, int* const len) {
switch (opcode) {
#define SPLAT_CASE(format, sType, valType, num) \
case kExpr##format##Splat: { \
@@ -2129,23 +2202,27 @@ class ThreadImpl {
Push(WasmValue(Simd128(s))); \
return true; \
}
- SPLAT_CASE(I32x4, int4, int32_t, 4)
+ SPLAT_CASE(F64x2, float2, double, 2)
SPLAT_CASE(F32x4, float4, float, 4)
+ SPLAT_CASE(I64x2, int2, int64_t, 2)
+ SPLAT_CASE(I32x4, int4, int32_t, 4)
SPLAT_CASE(I16x8, int8, int32_t, 8)
SPLAT_CASE(I8x16, int16, int32_t, 16)
#undef SPLAT_CASE
#define EXTRACT_LANE_CASE(format, name) \
case kExpr##format##ExtractLane: { \
SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue val = Pop(); \
Simd128 s = val.to_s128(); \
auto ss = s.to_##name(); \
Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
return true; \
}
- EXTRACT_LANE_CASE(I32x4, i32x4)
+ EXTRACT_LANE_CASE(F64x2, f64x2)
EXTRACT_LANE_CASE(F32x4, f32x4)
+ EXTRACT_LANE_CASE(I64x2, i64x2)
+ EXTRACT_LANE_CASE(I32x4, i32x4)
EXTRACT_LANE_CASE(I16x8, i16x8)
EXTRACT_LANE_CASE(I8x16, i8x16)
#undef EXTRACT_LANE_CASE
@@ -2169,6 +2246,9 @@ class ThreadImpl {
BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
+ BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
+ BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
+ BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
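
The new I64x2 arithmetic reuses base::AddWithWraparound and friends because overflow on signed types is undefined behavior in C++. A hypothetical stand-in showing the usual trick:

    #include <cstdint>

    // Do the arithmetic in the unsigned domain, where wraparound is
    // well-defined, then cast back to the signed lane type.
    int64_t AddWithWraparoundSketch(int64_t a, int64_t b) {
      return static_cast<int64_t>(static_cast<uint64_t>(a) +
                                  static_cast<uint64_t>(b));
    }
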
@@ -2222,10 +2302,13 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
+ UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
+ UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
@@ -2246,12 +2329,32 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
+ CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
+ CMPOP_CASE(F64x2Gt, f64x2, float2, int2, 2, a > b)
+ CMPOP_CASE(F64x2Ge, f64x2, float2, int2, 2, a >= b)
+ CMPOP_CASE(F64x2Lt, f64x2, float2, int2, 2, a < b)
+ CMPOP_CASE(F64x2Le, f64x2, float2, int2, 2, a <= b)
CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
+ CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
+ CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
+ CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
+ CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
+ CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
+ CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
+ CMPOP_CASE(I64x2GtU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) > static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2GeU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) >= static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2LtU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) < static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2LeU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) <= static_cast<uint64_t>(b))
CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
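
For the unsigned I64x2 comparisons there is no unsigned lane type, hence the casts. Per lane, each comparison widens to the all-ones/all-zero mask wasm SIMD requires (the widening lives in the elided CMPOP_CASE body); a one-lane model:

    #include <cstdint>

    // Per-lane model of I64x2LtU: compare after casting to unsigned, then
    // widen the boolean to the lane mask (-1 for true, 0 for false).
    int64_t LaneLtU(int64_t a, int64_t b) {
      return static_cast<uint64_t>(a) < static_cast<uint64_t>(b) ? -1 : 0;
    }
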
@@ -2298,7 +2401,7 @@ class ThreadImpl {
#define REPLACE_LANE_CASE(format, name, stype, ctype) \
case kExpr##format##ReplaceLane: { \
SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue new_val = Pop(); \
WasmValue simd_val = Pop(); \
stype s = simd_val.to_s128().to_##name(); \
@@ -2306,7 +2409,9 @@ class ThreadImpl {
Push(WasmValue(Simd128(s))); \
return true; \
}
+ REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
+ REPLACE_LANE_CASE(I64x2, i64x2, int2, int64_t)
REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
@@ -2320,7 +2425,7 @@ class ThreadImpl {
#define SHIFT_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue v = Pop(); \
stype s = v.to_s128().to_##name(); \
stype res; \
@@ -2331,6 +2436,11 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
+ static_cast<uint64_t>(a) << imm.shift)
+ SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> imm.shift)
+ SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
+ static_cast<uint64_t>(a) >> imm.shift)
SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
static_cast<uint32_t>(a) << imm.shift)
SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
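
The three new I64x2 shifts differ only in the type the shift is performed in. One lane at a time, with `shift` assumed pre-masked to 0..63:

    #include <cstdint>

    // Logical shifts run in the unsigned type; arithmetic right shift stays
    // signed so the sign bit is replicated into the vacated positions.
    int64_t Shl(int64_t a, int shift) {
      return static_cast<int64_t>(static_cast<uint64_t>(a) << shift);
    }
    int64_t ShrS(int64_t a, int shift) { return a >> shift; }
    int64_t ShrU(int64_t a, int shift) {
      return static_cast<int64_t>(static_cast<uint64_t>(a) >> shift);
    }
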
@@ -2452,7 +2562,7 @@ class ThreadImpl {
case kExprS8x16Shuffle: {
Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
code->at(pc));
- len += 16;
+ *len += 16;
int16 v2 = Pop().to_s128().to_i8x16();
int16 v1 = Pop().to_s128().to_i8x16();
int16 res;
@@ -2465,6 +2575,7 @@ class ThreadImpl {
Push(WasmValue(Simd128(res)));
return true;
}
+ case kExprS1x2AnyTrue:
case kExprS1x4AnyTrue:
case kExprS1x8AnyTrue:
case kExprS1x16AnyTrue: {
@@ -2483,6 +2594,7 @@ class ThreadImpl {
Push(WasmValue(res)); \
return true; \
}
+ REDUCTION_CASE(S1x2AllTrue, i64x2, int2, 2, &)
REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
@@ -2583,8 +2695,8 @@ class ThreadImpl {
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
Handle<Object> anyref = value.to_anyref();
encoded_values->set(encoded_index++, *anyref);
break;
@@ -2683,8 +2795,8 @@ class ThreadImpl {
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
Handle<Object> anyref(encoded_values->get(encoded_index++), isolate_);
value = WasmValue(anyref);
break;
@@ -3005,11 +3117,9 @@ class ThreadImpl {
CallIndirectImmediate<Decoder::kNoValidate> imm(
kAllWasmFeatures, &decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
- // Assume only one table for now.
- DCHECK_LE(module()->tables.size(), 1u);
CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, imm.sig_index);
+ CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
@@ -3077,14 +3187,12 @@ class ThreadImpl {
CallIndirectImmediate<Decoder::kNoValidate> imm(
kAllWasmFeatures, &decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
- // Assume only one table for now.
- DCHECK_LE(module()->tables.size(), 1u);
CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
// TODO(wasm): Calling functions needs some refactoring to avoid
// multi-exit code like this.
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, imm.sig_index);
+ CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL: {
InterpreterCode* target = result.interpreter_code;
@@ -3141,8 +3249,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
@@ -3156,10 +3264,42 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
-
+ case kExprTableGet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ uint32_t entry_index = Pop().to<uint32_t>();
+ if (entry_index >= table_size) {
+ return DoTrap(kTrapTableOutOfBounds, pc);
+ }
+ Handle<Object> value =
+ WasmTableObject::Get(isolate_, table, entry_index);
+ Push(WasmValue(value));
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprTableSet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ Handle<Object> value = Pop().to_anyref();
+ uint32_t entry_index = Pop().to<uint32_t>();
+ if (entry_index >= table_size) {
+ return DoTrap(kTrapTableOutOfBounds, pc);
+ }
+ WasmTableObject::Set(isolate_, table, entry_index, value);
+ len = 1 + imm.length;
+ break;
+ }
#define LOAD_CASE(name, ctype, mtype, rep) \
case kExpr##name: { \
- if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len, \
+ if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, &len, \
MachineRepresentation::rep)) \
return; \
break; \
@@ -3183,7 +3323,7 @@ class ThreadImpl {
#define STORE_CASE(name, ctype, mtype, rep) \
case kExpr##name: { \
- if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len, \
+ if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, &len, \
MachineRepresentation::rep)) \
return; \
break; \
@@ -3300,16 +3440,16 @@ class ThreadImpl {
}
case kNumericPrefix: {
++len;
- if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteNumericOp(opcode, &decoder, code, pc, &len)) return;
break;
}
case kAtomicPrefix: {
- if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteAtomicOp(opcode, &decoder, code, pc, &len)) return;
break;
}
case kSimdPrefix: {
++len;
- if (!ExecuteSimdOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len)) return;
break;
}
@@ -3547,118 +3687,71 @@ class ThreadImpl {
}
Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
- Handle<JSFunction> wasm_entry =
- WasmDebugInfo::GetCWasmEntry(debug_info, sig);
+ Handle<Code> wasm_entry = WasmDebugInfo::GetCWasmEntry(debug_info, sig);
TRACE(" => Calling external wasm function\n");
// Copy the arguments to one buffer.
- // TODO(clemensh): Introduce a helper for all argument buffer
- // con-/destruction.
- std::vector<uint8_t> arg_buffer(num_args * 8);
- size_t offset = 0;
+ CWasmArgumentsPacker packer(CWasmArgumentsPacker::TotalSize(sig));
sp_t base_index = StackHeight() - num_args;
for (int i = 0; i < num_args; ++i) {
- int param_size = ValueTypes::ElementSizeInBytes(sig->GetParam(i));
- if (arg_buffer.size() < offset + param_size) {
- arg_buffer.resize(std::max(2 * arg_buffer.size(), offset + param_size));
- }
- Address address = reinterpret_cast<Address>(arg_buffer.data()) + offset;
WasmValue arg = GetStackValue(base_index + i);
switch (sig->GetParam(i)) {
case kWasmI32:
- WriteUnalignedValue(address, arg.to<uint32_t>());
+ packer.Push(arg.to<uint32_t>());
break;
case kWasmI64:
- WriteUnalignedValue(address, arg.to<uint64_t>());
+ packer.Push(arg.to<uint64_t>());
break;
case kWasmF32:
- WriteUnalignedValue(address, arg.to<float>());
+ packer.Push(arg.to<float>());
break;
case kWasmF64:
- WriteUnalignedValue(address, arg.to<double>());
+ packer.Push(arg.to<double>());
break;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
- DCHECK_EQ(kSystemPointerSize, param_size);
- WriteUnalignedValue<Object>(address, *arg.to_anyref());
+ case kWasmFuncRef:
+ case kWasmExnRef:
+ packer.Push(arg.to_anyref()->ptr());
break;
default:
UNIMPLEMENTED();
}
- offset += param_size;
- }
-
- // Ensure that there is enough space in the arg_buffer to hold the return
- // value(s).
- size_t return_size = 0;
- for (ValueType t : sig->returns()) {
- return_size += ValueTypes::ElementSizeInBytes(t);
- }
- if (arg_buffer.size() < return_size) {
- arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer and the code target data pointers in handles. As
- // these are aligned pointers, to the GC it will look like Smis.
- Handle<Object> arg_buffer_obj(
- Object(reinterpret_cast<Address>(arg_buffer.data())), isolate);
- DCHECK(!arg_buffer_obj->IsHeapObject());
- Handle<Object> code_entry_obj(Object(code->instruction_start()), isolate);
- DCHECK(!code_entry_obj->IsHeapObject());
-
- static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
- "code below needs adaption");
- Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
- args[compiler::CWasmEntryParameters::kCodeEntry] = code_entry_obj;
- args[compiler::CWasmEntryParameters::kObjectRef] = object_ref;
- args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
-
- Handle<Object> receiver = isolate->factory()->undefined_value();
- trap_handler::SetThreadInWasm();
- MaybeHandle<Object> maybe_retval =
- Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
+ Address call_target = code->instruction_start();
+ Execution::CallWasm(isolate, wasm_entry, call_target, object_ref,
+ packer.argv());
TRACE(" => External wasm function returned%s\n",
- maybe_retval.is_null() ? " with exception" : "");
+ isolate->has_pending_exception() ? " with exception" : "");
// Pop arguments off the stack.
Drop(num_args);
- if (maybe_retval.is_null()) {
- // JSEntry may throw a stack overflow before we actually get to wasm code
- // or back to the interpreter, meaning the thread-in-wasm flag won't be
- // cleared.
- if (trap_handler::IsThreadInWasm()) {
- trap_handler::ClearThreadInWasm();
- }
+ if (isolate->has_pending_exception()) {
return TryHandleException(isolate);
}
- trap_handler::ClearThreadInWasm();
-
// Push return values.
- if (sig->return_count() > 0) {
- // TODO(wasm): Handle multiple returns.
- DCHECK_EQ(1, sig->return_count());
- Address address = reinterpret_cast<Address>(arg_buffer.data());
- switch (sig->GetReturn()) {
+ packer.Reset();
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ switch (sig->GetReturn(i)) {
case kWasmI32:
- Push(WasmValue(ReadUnalignedValue<uint32_t>(address)));
+ Push(WasmValue(packer.Pop<uint32_t>()));
break;
case kWasmI64:
- Push(WasmValue(ReadUnalignedValue<uint64_t>(address)));
+ Push(WasmValue(packer.Pop<uint64_t>()));
break;
case kWasmF32:
- Push(WasmValue(ReadUnalignedValue<float>(address)));
+ Push(WasmValue(packer.Pop<float>()));
break;
case kWasmF64:
- Push(WasmValue(ReadUnalignedValue<double>(address)));
+ Push(WasmValue(packer.Pop<double>()));
break;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
- Handle<Object> ref(ReadUnalignedValue<Object>(address), isolate);
+ case kWasmFuncRef:
+ case kWasmExnRef: {
+ Handle<Object> ref(Object(packer.Pop<Address>()), isolate);
Push(WasmValue(ref));
break;
}
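
The rewrite above replaces the hand-rolled arg_buffer with CWasmArgumentsPacker: push all parameters into one flat buffer, call through the C wasm entry, Reset(), then pop each return value in declaration order, which is what lets multiple returns fall out naturally. A toy packer assuming a similar Push/Reset/Pop protocol (layout details here are guesses, not V8's):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    class ToyArgPacker {
     public:
      explicit ToyArgPacker(size_t size) : buf_(size) {}
      template <typename T>
      void Push(T val) {  // append one argument at the write cursor
        std::memcpy(buf_.data() + pos_, &val, sizeof(T));
        pos_ += sizeof(T);
      }
      template <typename T>
      T Pop() {  // read one return value at the (reset) cursor
        T val;
        std::memcpy(&val, buf_.data() + pos_, sizeof(T));
        pos_ += sizeof(T);
        return val;
      }
      void Reset() { pos_ = 0; }   // reuse the same storage for returns
      uint8_t* argv() { return buf_.data(); }

     private:
      std::vector<uint8_t> buf_;
      size_t pos_ = 0;
    };
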
@@ -3710,25 +3803,24 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
uint32_t expected_sig_id = module()->signature_ids[sig_index];
DCHECK_EQ(expected_sig_id,
module()->signature_map.Find(*module()->signatures[sig_index]));
-
- // The function table is stored in the instance.
- // TODO(wasm): the wasm interpreter currently supports only one table.
- CHECK_EQ(0, table_index);
// Bounds check against table size.
- if (entry_index >= instance_object_->indirect_function_table_size()) {
+ if (entry_index >=
+ static_cast<uint32_t>(WasmInstanceObject::IndirectFunctionTableSize(
+ isolate_, instance_object_, table_index))) {
return {ExternalCallResult::INVALID_FUNC};
}
- IndirectFunctionTableEntry entry(instance_object_, entry_index);
+ IndirectFunctionTableEntry entry(instance_object_, table_index,
+ entry_index);
// Signature check.
if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
FunctionSig* signature = module()->signatures[sig_index];
Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
WasmCode* code = GetTargetCode(isolate_, entry.target());
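
With the hard-coded table 0 gone, dispatch still runs the same two guards before jumping. Condensed into a standalone sketch with hypothetical types:

    #include <cstdint>

    enum class CallResult { kOk, kInvalidFunc, kSignatureMismatch };

    struct Entry { int32_t sig_id; /* target, object_ref, ... */ };

    // The checks of CallIndirectFunction, in order: bounds first, then the
    // signature id stored in the entry against the caller's expectation.
    CallResult CheckIndirectCall(const Entry* table, uint32_t table_size,
                                 uint32_t entry_index,
                                 int32_t expected_sig_id) {
      if (entry_index >= table_size) return CallResult::kInvalidFunc;
      if (table[entry_index].sig_id != expected_sig_id)
        return CallResult::kSignatureMismatch;
      return CallResult::kOk;  // safe to jump to the entry's call target
    }
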
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index fb633c6c26..1ee76fc11d 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -1094,7 +1094,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!value->ToString(context).ToLocal(&string)) return;
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
- type = i::wasm::kWasmAnyFunc;
+ type = i::wasm::kWasmFuncRef;
} else if (enabled_features.anyref &&
string->StringEquals(v8_str(isolate, "anyref"))) {
type = i::wasm::kWasmAnyRef;
@@ -1222,7 +1222,7 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
*type = i::wasm::kWasmAnyRef;
} else if (enabled_features.anyref &&
string->StringEquals(v8_str(isolate, "anyfunc"))) {
- *type = i::wasm::kWasmAnyFunc;
+ *type = i::wasm::kWasmFuncRef;
} else {
// Unrecognized type.
*type = i::wasm::kWasmStmt;
@@ -1322,7 +1322,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Number> number_value;
if (!value->ToNumber(context).ToLocal(&number_value)) return;
if (!number_value->NumberValue(context).To(&f64_value)) return;
- f32_value = static_cast<float>(f64_value);
+ f32_value = i::DoubleToFloat32(f64_value);
}
global_obj->SetF32(f32_value);
break;
@@ -1347,15 +1347,15 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetAnyRef(Utils::OpenHandle(*value));
break;
}
- case i::wasm::kWasmAnyFunc: {
+ case i::wasm::kWasmFuncRef: {
if (args.Length() < 2) {
        // When no initial value is provided, we have to use the WebAssembly
// default value 'null', and not the JS default value 'undefined'.
- global_obj->SetAnyFunc(i_isolate, i_isolate->factory()->null_value());
+ global_obj->SetFuncRef(i_isolate, i_isolate->factory()->null_value());
break;
}
- if (!global_obj->SetAnyFunc(i_isolate, Utils::OpenHandle(*value))) {
+ if (!global_obj->SetFuncRef(i_isolate, Utils::OpenHandle(*value))) {
thrower.TypeError(
"The value of anyfunc globals must be null or an "
"exported function");
@@ -1437,7 +1437,7 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Decode the function type and construct a signature.
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
- i::wasm::FunctionSig::Builder builder(&zone, parameters_len, results_len);
+ i::wasm::FunctionSig::Builder builder(&zone, results_len, parameters_len);
for (uint32_t i = 0; i < parameters_len; ++i) {
i::wasm::ValueType type;
MaybeLocal<Value> maybe = parameters->Get(context, i);
@@ -1513,13 +1513,12 @@ void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Function.type()");
i::wasm::FunctionSig* sig;
+ i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
if (i::WasmExportedFunction::IsWasmExportedFunction(*arg0)) {
sig = i::Handle<i::WasmExportedFunction>::cast(arg0)->sig();
} else if (i::WasmJSFunction::IsWasmJSFunction(*arg0)) {
- // TODO(7742): Implement deserialization of signature.
- sig = nullptr;
- UNIMPLEMENTED();
+ sig = i::Handle<i::WasmJSFunction>::cast(arg0)->GetSignature(&zone);
} else {
thrower.TypeError("Argument 0 must be a WebAssembly.Function");
return;
@@ -1686,7 +1685,7 @@ void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<String> element;
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
- if (table->type() == i::wasm::ValueType::kWasmAnyFunc) {
+ if (table->type() == i::wasm::ValueType::kWasmFuncRef) {
element = v8_str(isolate, "anyfunc");
} else if (enabled_features.anyref &&
table->type() == i::wasm::ValueType::kWasmAnyRef) {
@@ -1694,7 +1693,6 @@ void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
} else {
UNREACHABLE();
}
- // TODO(aseemgarg): update anyfunc to funcref
if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
v8_str(isolate, "element"), element)
.IsJust()) {
@@ -1865,8 +1863,8 @@ void WebAssemblyGlobalGetValueCommon(
return_value.Set(receiver->GetF64());
break;
case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmAnyFunc:
- case i::wasm::kWasmExceptRef:
+ case i::wasm::kWasmFuncRef:
+ case i::wasm::kWasmExnRef:
return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
default:
@@ -1925,7 +1923,7 @@ void WebAssemblyGlobalSetValue(
case i::wasm::kWasmF32: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
- receiver->SetF32(static_cast<float>(f64_value));
+ receiver->SetF32(i::DoubleToFloat32(f64_value));
break;
}
case i::wasm::kWasmF64: {
@@ -1935,12 +1933,12 @@ void WebAssemblyGlobalSetValue(
break;
}
case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmExceptRef: {
+ case i::wasm::kWasmExnRef: {
receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
break;
}
- case i::wasm::kWasmAnyFunc: {
- if (!receiver->SetAnyFunc(i_isolate, Utils::OpenHandle(*args[0]))) {
+ case i::wasm::kWasmFuncRef: {
+ if (!receiver->SetFuncRef(i_isolate, Utils::OpenHandle(*args[0]))) {
thrower.TypeError(
"value of an anyfunc reference must be either null or an "
"exported function");
@@ -2245,7 +2243,6 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
if (enabled_features.type_reflection) {
Handle<JSFunction> function_constructor = InstallConstructorFunc(
isolate, webassembly, "Function", WebAssemblyFunction);
- context->set_wasm_function_constructor(*function_constructor);
SetDummyInstanceTemplate(isolate, function_constructor);
JSFunction::EnsureHasInitialMap(function_constructor);
Handle<JSObject> function_proto(
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 8633a61504..f203649542 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -40,6 +40,9 @@ bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
*did_retry = true;
if (trial == kAllocationRetries) return false;
// Otherwise, collect garbage and retry.
+ // TODO(wasm): Since reservation limits are engine-wide, we should do an
+ // engine-wide GC here (i.e. trigger a GC in each isolate using the engine,
+ // and wait for them all to finish). See https://crbug.com/v8/9405.
heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
}
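
The TODO lands inside RunWithGCAndRetry, whose shape (visible from the context lines) is: try the allocation, and on each failure trigger a critical memory-pressure GC before retrying, up to a fixed budget. A hedged sketch of that pattern with illustrative names:

    #include <functional>

    bool AllocateWithRetry(const std::function<bool()>& try_alloc,
                           const std::function<void()>& collect_garbage,
                           int max_retries) {
      for (int trial = 0;; ++trial) {
        if (try_alloc()) return true;
        if (trial == max_retries) return false;
        collect_garbage();  // free reservations, then try again
      }
    }
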
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index eb253219ad..7dd6b1c7b2 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -16,7 +16,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
namespace v8 {
namespace internal {
@@ -26,18 +26,18 @@ namespace {
// Emit a section code and the size as a padded varint that can be patched
// later.
-size_t EmitSection(SectionCode code, ZoneBuffer& buffer) {
+size_t EmitSection(SectionCode code, ZoneBuffer* buffer) {
// Emit the section code.
- buffer.write_u8(code);
+ buffer->write_u8(code);
// Emit a placeholder for the length.
- return buffer.reserve_u32v();
+ return buffer->reserve_u32v();
}
// Patch the size of a section after it's finished.
-void FixupSection(ZoneBuffer& buffer, size_t start) {
- buffer.patch_u32v(start, static_cast<uint32_t>(buffer.offset() - start -
- kPaddedVarInt32Size));
+void FixupSection(ZoneBuffer* buffer, size_t start) {
+ buffer->patch_u32v(start, static_cast<uint32_t>(buffer->offset() - start -
+ kPaddedVarInt32Size));
}
} // namespace
@@ -186,22 +186,22 @@ void WasmFunctionBuilder::DeleteCodeAfter(size_t position) {
body_.Truncate(position);
}
-void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
- buffer.write_u32v(signature_index_);
+void WasmFunctionBuilder::WriteSignature(ZoneBuffer* buffer) const {
+ buffer->write_u32v(signature_index_);
}
-void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
+void WasmFunctionBuilder::WriteBody(ZoneBuffer* buffer) const {
size_t locals_size = locals_.Size();
- buffer.write_size(locals_size + body_.size());
- buffer.EnsureSpace(locals_size);
- byte** ptr = buffer.pos_ptr();
+ buffer->write_size(locals_size + body_.size());
+ buffer->EnsureSpace(locals_size);
+ byte** ptr = buffer->pos_ptr();
locals_.Emit(*ptr);
(*ptr) += locals_size; // UGLY: manual bump of position pointer
if (body_.size() > 0) {
- size_t base = buffer.offset();
- buffer.write(body_.begin(), body_.size());
+ size_t base = buffer->offset();
+ buffer->write(body_.begin(), body_.size());
for (DirectCallIndex call : direct_calls_) {
- buffer.patch_u32v(
+ buffer->patch_u32v(
base + call.offset,
call.direct_index +
static_cast<uint32_t>(builder_->function_imports_.size()));
@@ -209,29 +209,29 @@ void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
}
}
-void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const {
+void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer* buffer) const {
if (asm_func_start_source_position_ == 0 && asm_offsets_.size() == 0) {
- buffer.write_size(0);
+ buffer->write_size(0);
return;
}
size_t locals_enc_size = LEBHelper::sizeof_u32v(locals_.Size());
size_t func_start_size =
LEBHelper::sizeof_u32v(asm_func_start_source_position_);
- buffer.write_size(asm_offsets_.size() + locals_enc_size + func_start_size);
+ buffer->write_size(asm_offsets_.size() + locals_enc_size + func_start_size);
// Offset of the recorded byte offsets.
DCHECK_GE(kMaxUInt32, locals_.Size());
- buffer.write_u32v(static_cast<uint32_t>(locals_.Size()));
+ buffer->write_u32v(static_cast<uint32_t>(locals_.Size()));
// Start position of the function.
- buffer.write_u32v(asm_func_start_source_position_);
- buffer.write(asm_offsets_.begin(), asm_offsets_.size());
+ buffer->write_u32v(asm_func_start_source_position_);
+ buffer->write(asm_offsets_.begin(), asm_offsets_.size());
}
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
function_imports_(zone),
- function_exports_(zone),
global_imports_(zone),
+ exports_(zone),
functions_(zone),
data_segments_(zone),
indirect_functions_(zone),
@@ -274,7 +274,10 @@ uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
if (count > FLAG_wasm_max_table_size - index) {
return std::numeric_limits<uint32_t>::max();
}
- indirect_functions_.resize(indirect_functions_.size() + count);
+ DCHECK(max_table_size_ == 0 ||
+ indirect_functions_.size() + count <= max_table_size_);
+ indirect_functions_.resize(indirect_functions_.size() + count,
+ WasmElemSegment::kNullIndex);
return index;
}
@@ -283,15 +286,23 @@ void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
indirect_functions_[indirect] = direct;
}
+void WasmModuleBuilder::SetMaxTableSize(uint32_t max) {
+ DCHECK_GE(FLAG_wasm_max_table_size, max);
+ DCHECK_GE(max, indirect_functions_.size());
+ max_table_size_ = max;
+}
+
uint32_t WasmModuleBuilder::AddImport(Vector<const char> name,
FunctionSig* sig) {
+ DCHECK(adding_imports_allowed_);
function_imports_.push_back({name, AddSignature(sig)});
return static_cast<uint32_t>(function_imports_.size() - 1);
}
uint32_t WasmModuleBuilder::AddGlobalImport(Vector<const char> name,
- ValueType type) {
- global_imports_.push_back({name, ValueTypes::ValueTypeCodeFor(type)});
+ ValueType type, bool mutability) {
+ global_imports_.push_back(
+ {name, ValueTypes::ValueTypeCodeFor(type), mutability});
return static_cast<uint32_t>(global_imports_.size() - 1);
}
@@ -300,14 +311,33 @@ void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
}
void WasmModuleBuilder::AddExport(Vector<const char> name,
- WasmFunctionBuilder* function) {
- function_exports_.push_back({name, function->func_index()});
+ ImportExportKindCode kind, uint32_t index) {
+ DCHECK_LE(index, std::numeric_limits<int>::max());
+ exports_.push_back({name, kind, static_cast<int>(index)});
+}
+
+uint32_t WasmModuleBuilder::AddExportedGlobal(ValueType type, bool mutability,
+ const WasmInitExpr& init,
+ Vector<const char> name) {
+ uint32_t index = AddGlobal(type, mutability, init);
+ AddExport(name, kExternalGlobal, index);
+ return index;
+}
+
+void WasmModuleBuilder::ExportImportedFunction(Vector<const char> name,
+ int import_index) {
+#if DEBUG
+ // The size of function_imports_ must not change any more.
+ adding_imports_allowed_ = false;
+#endif
+ exports_.push_back(
+ {name, kExternalFunction,
+ import_index - static_cast<int>(function_imports_.size())});
}
-uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool exported,
- bool mutability,
+uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool mutability,
const WasmInitExpr& init) {
- globals_.push_back({type, exported, mutability, init});
+ globals_.push_back({type, mutability, init});
return static_cast<uint32_t>(globals_.size() - 1);
}
@@ -322,25 +352,25 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
-void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
+void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// == Emit magic =============================================================
- buffer.write_u32(kWasmMagic);
- buffer.write_u32(kWasmVersion);
+ buffer->write_u32(kWasmMagic);
+ buffer->write_u32(kWasmVersion);
// == Emit signatures ========================================================
if (signatures_.size() > 0) {
size_t start = EmitSection(kTypeSectionCode, buffer);
- buffer.write_size(signatures_.size());
+ buffer->write_size(signatures_.size());
for (FunctionSig* sig : signatures_) {
- buffer.write_u8(kWasmFunctionTypeCode);
- buffer.write_size(sig->parameter_count());
+ buffer->write_u8(kWasmFunctionTypeCode);
+ buffer->write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(param));
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(param));
}
- buffer.write_size(sig->return_count());
+ buffer->write_size(sig->return_count());
for (auto ret : sig->returns()) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(ret));
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(ret));
}
}
FixupSection(buffer, start);
@@ -349,19 +379,19 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit imports ===========================================================
if (global_imports_.size() + function_imports_.size() > 0) {
size_t start = EmitSection(kImportSectionCode, buffer);
- buffer.write_size(global_imports_.size() + function_imports_.size());
+ buffer->write_size(global_imports_.size() + function_imports_.size());
for (auto import : global_imports_) {
- buffer.write_u32v(0); // module name (length)
- buffer.write_string(import.name); // field name
- buffer.write_u8(kExternalGlobal);
- buffer.write_u8(import.type_code);
- buffer.write_u8(0); // immutable
+ buffer->write_u32v(0); // module name (length)
+ buffer->write_string(import.name); // field name
+ buffer->write_u8(kExternalGlobal);
+ buffer->write_u8(import.type_code);
+ buffer->write_u8(import.mutability ? 1 : 0);
}
for (auto import : function_imports_) {
- buffer.write_u32v(0); // module name (length)
- buffer.write_string(import.name); // field name
- buffer.write_u8(kExternalFunction);
- buffer.write_u32v(import.sig_index);
+ buffer->write_u32v(0); // module name (length)
+ buffer->write_string(import.name); // field name
+ buffer->write_u8(kExternalFunction);
+ buffer->write_u32v(import.sig_index);
}
FixupSection(buffer, start);
}
@@ -370,7 +400,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
uint32_t num_function_names = 0;
if (functions_.size() > 0) {
size_t start = EmitSection(kFunctionSectionCode, buffer);
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
for (auto* function : functions_) {
function->WriteSignature(buffer);
if (!function->name_.empty()) ++num_function_names;
@@ -381,28 +411,31 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit function table ====================================================
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kTableSectionCode, buffer);
- buffer.write_u8(1); // table count
- buffer.write_u8(kLocalAnyFunc);
- buffer.write_u8(kHasMaximumFlag);
- buffer.write_size(indirect_functions_.size());
- buffer.write_size(indirect_functions_.size());
+ buffer->write_u8(1); // table count
+ buffer->write_u8(kLocalFuncRef);
+ buffer->write_u8(kHasMaximumFlag);
+ buffer->write_size(indirect_functions_.size());
+ size_t max =
+ max_table_size_ > 0 ? max_table_size_ : indirect_functions_.size();
+ DCHECK_GE(max, indirect_functions_.size());
+ buffer->write_size(max);
FixupSection(buffer, start);
}
// == emit memory declaration ================================================
{
size_t start = EmitSection(kMemorySectionCode, buffer);
- buffer.write_u8(1); // memory count
+ buffer->write_u8(1); // memory count
if (has_shared_memory_) {
- buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kSharedAndMaximum
- : MemoryFlags::kSharedNoMaximum);
+ buffer->write_u8(has_max_memory_size_ ? MemoryFlags::kSharedAndMaximum
+ : MemoryFlags::kSharedNoMaximum);
} else {
- buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kMaximum
- : MemoryFlags::kNoMaximum);
+ buffer->write_u8(has_max_memory_size_ ? MemoryFlags::kMaximum
+ : MemoryFlags::kNoMaximum);
}
- buffer.write_u32v(min_memory_size_);
+ buffer->write_u32v(min_memory_size_);
if (has_max_memory_size_) {
- buffer.write_u32v(max_memory_size_);
+ buffer->write_u32v(max_memory_size_);
}
FixupSection(buffer, start);
}
@@ -410,76 +443,90 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit globals ===========================================================
if (globals_.size() > 0) {
size_t start = EmitSection(kGlobalSectionCode, buffer);
- buffer.write_size(globals_.size());
+ buffer->write_size(globals_.size());
for (auto global : globals_) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(global.type));
- buffer.write_u8(global.mutability ? 1 : 0);
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(global.type));
+ buffer->write_u8(global.mutability ? 1 : 0);
switch (global.init.kind) {
case WasmInitExpr::kI32Const:
DCHECK_EQ(kWasmI32, global.type);
- buffer.write_u8(kExprI32Const);
- buffer.write_i32v(global.init.val.i32_const);
+ buffer->write_u8(kExprI32Const);
+ buffer->write_i32v(global.init.val.i32_const);
break;
case WasmInitExpr::kI64Const:
DCHECK_EQ(kWasmI64, global.type);
- buffer.write_u8(kExprI64Const);
- buffer.write_i64v(global.init.val.i64_const);
+ buffer->write_u8(kExprI64Const);
+ buffer->write_i64v(global.init.val.i64_const);
break;
case WasmInitExpr::kF32Const:
DCHECK_EQ(kWasmF32, global.type);
- buffer.write_u8(kExprF32Const);
- buffer.write_f32(global.init.val.f32_const);
+ buffer->write_u8(kExprF32Const);
+ buffer->write_f32(global.init.val.f32_const);
break;
case WasmInitExpr::kF64Const:
DCHECK_EQ(kWasmF64, global.type);
- buffer.write_u8(kExprF64Const);
- buffer.write_f64(global.init.val.f64_const);
+ buffer->write_u8(kExprF64Const);
+ buffer->write_f64(global.init.val.f64_const);
break;
case WasmInitExpr::kGlobalIndex:
- buffer.write_u8(kExprGetGlobal);
- buffer.write_u32v(global.init.val.global_index);
+ buffer->write_u8(kExprGetGlobal);
+ buffer->write_u32v(global.init.val.global_index);
break;
default: {
// No initializer, emit a default value.
switch (global.type) {
case kWasmI32:
- buffer.write_u8(kExprI32Const);
+ buffer->write_u8(kExprI32Const);
// LEB encoding of 0.
- buffer.write_u8(0);
+ buffer->write_u8(0);
break;
case kWasmI64:
- buffer.write_u8(kExprI64Const);
+ buffer->write_u8(kExprI64Const);
// LEB encoding of 0.
- buffer.write_u8(0);
+ buffer->write_u8(0);
break;
case kWasmF32:
- buffer.write_u8(kExprF32Const);
- buffer.write_f32(0.f);
+ buffer->write_u8(kExprF32Const);
+ buffer->write_f32(0.f);
break;
case kWasmF64:
- buffer.write_u8(kExprF64Const);
- buffer.write_f64(0.);
+ buffer->write_u8(kExprF64Const);
+ buffer->write_f64(0.);
break;
default:
UNREACHABLE();
}
}
}
- buffer.write_u8(kExprEnd);
+ buffer->write_u8(kExprEnd);
}
FixupSection(buffer, start);
}
// == emit exports ===========================================================
- if (!function_exports_.empty()) {
+ if (exports_.size() > 0) {
size_t start = EmitSection(kExportSectionCode, buffer);
- buffer.write_size(function_exports_.size());
- for (auto function_export : function_exports_) {
- buffer.write_string(function_export.name);
- buffer.write_u8(kExternalFunction);
- buffer.write_size(function_export.function_index +
- function_imports_.size());
+ buffer->write_size(exports_.size());
+ for (auto ex : exports_) {
+ buffer->write_string(ex.name);
+ buffer->write_u8(ex.kind);
+ switch (ex.kind) {
+ case kExternalFunction:
+ buffer->write_size(ex.index + function_imports_.size());
+ break;
+ case kExternalGlobal:
+ buffer->write_size(ex.index + global_imports_.size());
+ break;
+ case kExternalMemory:
+ case kExternalTable:
+ // The WasmModuleBuilder doesn't support importing tables or memories
+ // yet, so there is no index offset to add.
+ buffer->write_size(ex.index);
+ break;
+ case kExternalException:
+ UNREACHABLE();
+ }
}
FixupSection(buffer, start);
}
@@ -487,22 +534,33 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit start function index ==============================================
if (start_function_index_ >= 0) {
size_t start = EmitSection(kStartSectionCode, buffer);
- buffer.write_size(start_function_index_ + function_imports_.size());
+ buffer->write_size(start_function_index_ + function_imports_.size());
FixupSection(buffer, start);
}
// == emit function table elements ===========================================
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kElementSectionCode, buffer);
- buffer.write_u8(1); // count of entries
- buffer.write_u8(0); // table index
- buffer.write_u8(kExprI32Const); // offset
- buffer.write_u32v(0);
- buffer.write_u8(kExprEnd);
- buffer.write_size(indirect_functions_.size()); // element count
-
- for (auto index : indirect_functions_) {
- buffer.write_size(index + function_imports_.size());
+ buffer->write_u8(1); // count of entries
+ buffer->write_u8(0); // table index
+ uint32_t first_element = 0;
+ while (first_element < indirect_functions_.size() &&
+ indirect_functions_[first_element] == WasmElemSegment::kNullIndex) {
+ first_element++;
+ }
+ uint32_t last_element =
+ static_cast<uint32_t>(indirect_functions_.size() - 1);
+ while (last_element >= first_element &&
+ indirect_functions_[last_element] == WasmElemSegment::kNullIndex) {
+ last_element--;
+ }
+ buffer->write_u8(kExprI32Const); // offset
+ buffer->write_u32v(first_element);
+ buffer->write_u8(kExprEnd);
+ uint32_t element_count = last_element - first_element + 1;
+ buffer->write_size(element_count);
+ for (uint32_t i = first_element; i <= last_element; i++) {
+ buffer->write_size(indirect_functions_[i] + function_imports_.size());
}
FixupSection(buffer, start);
@@ -518,18 +576,18 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
if (emit_compilation_hints) {
// Emit the section code.
- buffer.write_u8(kUnknownSectionCode);
+ buffer->write_u8(kUnknownSectionCode);
// Emit a placeholder for section length.
- size_t start = buffer.reserve_u32v();
+ size_t start = buffer->reserve_u32v();
// Emit custom section name.
- buffer.write_string(CStrVector("compilationHints"));
+ buffer->write_string(CStrVector("compilationHints"));
// Emit hint count.
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
// Emit hint bytes.
for (auto* fn : functions_) {
uint8_t hint_byte =
fn->hint_ != kNoCompilationHint ? fn->hint_ : kDefaultCompilationHint;
- buffer.write_u8(hint_byte);
+ buffer->write_u8(hint_byte);
}
FixupSection(buffer, start);
}
@@ -537,7 +595,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit code ==============================================================
if (functions_.size() > 0) {
size_t start = EmitSection(kCodeSectionCode, buffer);
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
for (auto* function : functions_) {
function->WriteBody(buffer);
}
@@ -547,15 +605,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit data segments =====================================================
if (data_segments_.size() > 0) {
size_t start = EmitSection(kDataSectionCode, buffer);
- buffer.write_size(data_segments_.size());
+ buffer->write_size(data_segments_.size());
for (auto segment : data_segments_) {
- buffer.write_u8(0); // linear memory segment
- buffer.write_u8(kExprI32Const); // initializer expression for dest
- buffer.write_u32v(segment.dest);
- buffer.write_u8(kExprEnd);
- buffer.write_u32v(static_cast<uint32_t>(segment.data.size()));
- buffer.write(&segment.data[0], segment.data.size());
+ buffer->write_u8(0); // linear memory segment
+ buffer->write_u8(kExprI32Const); // initializer expression for dest
+ buffer->write_u32v(segment.dest);
+ buffer->write_u8(kExprEnd);
+ buffer->write_u32v(static_cast<uint32_t>(segment.data.size()));
+ buffer->write(&segment.data[0], segment.data.size());
}
FixupSection(buffer, start);
}
@@ -563,33 +621,33 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit names =============================================================
if (num_function_names > 0 || !function_imports_.empty()) {
// Emit the section code.
- buffer.write_u8(kUnknownSectionCode);
+ buffer->write_u8(kUnknownSectionCode);
// Emit a placeholder for the length.
- size_t start = buffer.reserve_u32v();
+ size_t start = buffer->reserve_u32v();
// Emit the section string.
- buffer.write_string(CStrVector("name"));
+ buffer->write_string(CStrVector("name"));
// Emit a subsection for the function names.
- buffer.write_u8(NameSectionKindCode::kFunction);
+ buffer->write_u8(NameSectionKindCode::kFunction);
// Emit a placeholder for the subsection length.
- size_t functions_start = buffer.reserve_u32v();
+ size_t functions_start = buffer->reserve_u32v();
// Emit the function names.
// Imports are always named.
uint32_t num_imports = static_cast<uint32_t>(function_imports_.size());
- buffer.write_size(num_imports + num_function_names);
+ buffer->write_size(num_imports + num_function_names);
uint32_t function_index = 0;
for (; function_index < num_imports; ++function_index) {
const WasmFunctionImport* import = &function_imports_[function_index];
DCHECK(!import->name.empty());
- buffer.write_u32v(function_index);
- buffer.write_string(import->name);
+ buffer->write_u32v(function_index);
+ buffer->write_string(import->name);
}
if (num_function_names > 0) {
for (auto* function : functions_) {
DCHECK_EQ(function_index,
function->func_index() + function_imports_.size());
if (!function->name_.empty()) {
- buffer.write_u32v(function_index);
- buffer.write_string(function->name_);
+ buffer->write_u32v(function_index);
+ buffer->write_string(function->name_);
}
++function_index;
}
@@ -599,15 +657,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
}
-void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer& buffer) const {
+void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer* buffer) const {
// == Emit asm.js offset table ===============================================
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
// Emit the offset table per function.
for (auto* function : functions_) {
function->WriteAsmWasmOffsetTable(buffer);
}
// Append a 0 to indicate that this is an encoded table.
- buffer.write_u8(0);
+ buffer->write_u8(0);
}
} // namespace wasm
} // namespace internal
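
EmitSection/FixupSection work because the length placeholder has a fixed width: reserve_u32v leaves a padded LEB128 slot (5 bytes for a u32, matching kPaddedVarInt32Size), so the real byte length can be patched in once the section body has been emitted. A self-contained sketch of such a patch, assuming the 5-byte padded encoding:

    #include <cstdint>
    #include <vector>

    // Overwrite a reserved 5-byte slot with `val` as padded LEB128: the
    // continuation bit is set on all but the last byte, so short values
    // still occupy the full placeholder width.
    void PatchPaddedU32LEB(std::vector<uint8_t>& buf, size_t at, uint32_t val) {
      for (int i = 0; i < 5; ++i) {
        uint8_t byte = val & 0x7f;
        val >>= 7;
        if (i < 4) byte |= 0x80;
        buf[at + i] = byte;
      }
    }
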
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 750dafa227..9e6a8933e2 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -8,7 +8,7 @@
#include "src/codegen/signature.h"
#include "src/zone/zone-containers.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/vector.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
@@ -36,19 +36,19 @@ class ZoneBuffer : public ZoneObject {
void write_u16(uint16_t x) {
EnsureSpace(2);
- WriteLittleEndianValue<uint16_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint16_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 2;
}
void write_u32(uint32_t x) {
EnsureSpace(4);
- WriteLittleEndianValue<uint32_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint32_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 4;
}
void write_u64(uint64_t x) {
EnsureSpace(8);
- WriteLittleEndianValue<uint64_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint64_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 8;
}
@@ -187,9 +187,9 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
}
void DeleteCodeAfter(size_t position);
- void WriteSignature(ZoneBuffer& buffer) const;
- void WriteBody(ZoneBuffer& buffer) const;
- void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
+ void WriteSignature(ZoneBuffer* buffer) const;
+ void WriteBody(ZoneBuffer* buffer) const;
+ void WriteAsmWasmOffsetTable(ZoneBuffer* buffer) const;
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
@@ -231,22 +231,34 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Building methods.
uint32_t AddImport(Vector<const char> name, FunctionSig* sig);
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
- uint32_t AddGlobal(ValueType type, bool exported, bool mutability = true,
+ uint32_t AddGlobal(ValueType type, bool mutability = true,
const WasmInitExpr& init = WasmInitExpr());
- uint32_t AddGlobalImport(Vector<const char> name, ValueType type);
+ uint32_t AddGlobalImport(Vector<const char> name, ValueType type,
+ bool mutability);
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
+ // The current implementation allows uninitialized slots at the beginning
+ // and/or end of the indirect function table, as long as the filled slots
+ // form a contiguous block in the middle.
uint32_t AllocateIndirectFunctions(uint32_t count);
void SetIndirectFunction(uint32_t indirect, uint32_t direct);
+ void SetMaxTableSize(uint32_t max);
void MarkStartFunction(WasmFunctionBuilder* builder);
- void AddExport(Vector<const char> name, WasmFunctionBuilder* builder);
+ void AddExport(Vector<const char> name, ImportExportKindCode kind,
+ uint32_t index);
+ void AddExport(Vector<const char> name, WasmFunctionBuilder* builder) {
+ AddExport(name, kExternalFunction, builder->func_index());
+ }
+ uint32_t AddExportedGlobal(ValueType type, bool mutability,
+ const WasmInitExpr& init, Vector<const char> name);
+ void ExportImportedFunction(Vector<const char> name, int import_index);
void SetMinMemorySize(uint32_t value);
void SetMaxMemorySize(uint32_t value);
void SetHasSharedMemory();
// Writing methods.
- void WriteTo(ZoneBuffer& buffer) const;
- void WriteAsmJsOffsetTable(ZoneBuffer& buffer) const;
+ void WriteTo(ZoneBuffer* buffer) const;
+ void WriteAsmJsOffsetTable(ZoneBuffer* buffer) const;
Zone* zone() { return zone_; }
@@ -258,19 +270,20 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
uint32_t sig_index;
};
- struct WasmFunctionExport {
+ struct WasmGlobalImport {
Vector<const char> name;
- uint32_t function_index;
+ ValueTypeCode type_code;
+ bool mutability;
};
- struct WasmGlobalImport {
+ struct WasmExport {
Vector<const char> name;
- ValueTypeCode type_code;
+ ImportExportKindCode kind;
+ int index; // Can be negative for re-exported imports.
};
struct WasmGlobal {
ValueType type;
- bool exported;
bool mutability;
WasmInitExpr init;
};
@@ -284,18 +297,23 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone_;
ZoneVector<FunctionSig*> signatures_;
ZoneVector<WasmFunctionImport> function_imports_;
- ZoneVector<WasmFunctionExport> function_exports_;
ZoneVector<WasmGlobalImport> global_imports_;
+ ZoneVector<WasmExport> exports_;
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmDataSegment> data_segments_;
ZoneVector<uint32_t> indirect_functions_;
ZoneVector<WasmGlobal> globals_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
+ uint32_t max_table_size_ = 0;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
bool has_shared_memory_;
+#if DEBUG
+ // Once ExportImportedFunction is called, no more imports can be added.
+ bool adding_imports_allowed_ = true;
+#endif
};
inline FunctionSig* WasmFunctionBuilder::signature() {
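
Why WasmExport::index is a signed int: ExportImportedFunction stores `import_index - function_imports_.size()`, so that WriteTo's unconditional `+ function_imports_.size()` for kExternalFunction yields the import's final index again, while ordinary function exports get shifted past the imports. The arithmetic, in isolation:

    #include <cstdint>

    // In the emitted binary, imported functions occupy the low indices, so
    // exported local functions are offset by the import count; re-exported
    // imports are pre-biased negatively so the same offset restores them.
    uint32_t EmittedFunctionIndex(int stored_index,
                                  size_t num_function_imports) {
      return static_cast<uint32_t>(stored_index +
                                   static_cast<int>(num_function_imports));
    }
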
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index eb40c51dd3..7dea208d8e 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -164,7 +164,11 @@ struct WasmCompilationHint {
WasmCompilationHintTier top_tier;
};
-enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
+enum ModuleOrigin : uint8_t {
+ kWasmOrigin,
+ kAsmJsSloppyOrigin,
+ kAsmJsStrictOrigin
+};
#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \
((origin) == kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \
@@ -221,6 +225,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
void AddFunctionNameForTesting(int function_index, WireBytesRef name);
};
+inline bool is_asmjs_module(const WasmModule* module) {
+ return module->origin != kWasmOrigin;
+}
+
size_t EstimateStoredSize(const WasmModule* module);
// Returns the number of possible export wrappers for a given module.
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index e1fc2d2410..7a80b7ea2b 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -7,7 +7,7 @@
#include "src/wasm/wasm-objects.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/contexts-inl.h"
#include "src/objects/foreign-inl.h"
@@ -51,10 +51,11 @@ CAST_ACCESSOR(WasmModuleObject)
CAST_ACCESSOR(WasmTableObject)
CAST_ACCESSOR(AsmWasmData)
-#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
- bool holder::has_##name() { \
- return !READ_FIELD(*this, offset).IsUndefined(); \
- } \
+#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
+ DEF_GETTER(holder, has_##name, bool) { \
+ Object value = TaggedField<Object, offset>::load(isolate, *this); \
+ return !value.IsUndefined(GetReadOnlyRoots(isolate)); \
+ } \
ACCESSORS(holder, name, type, offset)
#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
@@ -65,7 +66,7 @@ CAST_ACCESSOR(AsmWasmData)
/* kTaggedSize aligned so we have to use unaligned pointer friendly */ \
/* way of accessing them in order to avoid undefined behavior in C++ */ \
/* code. */ \
- return ReadUnalignedValue<type>(FIELD_ADDR(*this, offset)); \
+ return base::ReadUnalignedValue<type>(FIELD_ADDR(*this, offset)); \
} else { \
return *reinterpret_cast<type const*>(FIELD_ADDR(*this, offset)); \
} \
@@ -77,7 +78,7 @@ CAST_ACCESSOR(AsmWasmData)
/* kTaggedSize aligned so we have to use unaligned pointer friendly */ \
/* way of accessing them in order to avoid undefined behavior in C++ */ \
/* code. */ \
- WriteUnalignedValue<type>(FIELD_ADDR(*this, offset), value); \
+ base::WriteUnalignedValue<type>(FIELD_ADDR(*this, offset), value); \
} else { \
*reinterpret_cast<type*>(FIELD_ADDR(*this, offset)) = value; \
} \
@@ -110,7 +111,7 @@ void WasmModuleObject::reset_breakpoint_infos() {
GetReadOnlyRoots().undefined_value());
}
bool WasmModuleObject::is_asm_js() {
- bool asm_js = module()->origin == wasm::kAsmJsOrigin;
+ bool asm_js = is_asmjs_module(module());
DCHECK_EQ(asm_js, script().IsUserJavaScript());
DCHECK_EQ(asm_js, has_asm_js_offset_table());
return asm_js;
@@ -148,53 +149,54 @@ Address WasmGlobalObject::address() const {
}
int32_t WasmGlobalObject::GetI32() {
- return ReadLittleEndianValue<int32_t>(address());
+ return base::ReadLittleEndianValue<int32_t>(address());
}
int64_t WasmGlobalObject::GetI64() {
- return ReadLittleEndianValue<int64_t>(address());
+ return base::ReadLittleEndianValue<int64_t>(address());
}
float WasmGlobalObject::GetF32() {
- return ReadLittleEndianValue<float>(address());
+ return base::ReadLittleEndianValue<float>(address());
}
double WasmGlobalObject::GetF64() {
- return ReadLittleEndianValue<double>(address());
+ return base::ReadLittleEndianValue<double>(address());
}
Handle<Object> WasmGlobalObject::GetRef() {
- // We use this getter for anyref, anyfunc, and except_ref.
+ // We use this getter for anyref, funcref, and exnref.
DCHECK(wasm::ValueTypes::IsReferenceType(type()));
return handle(tagged_buffer().get(offset()), GetIsolate());
}
void WasmGlobalObject::SetI32(int32_t value) {
- WriteLittleEndianValue<int32_t>(address(), value);
+ base::WriteLittleEndianValue<int32_t>(address(), value);
}
void WasmGlobalObject::SetI64(int64_t value) {
- WriteLittleEndianValue<int64_t>(address(), value);
+ base::WriteLittleEndianValue<int64_t>(address(), value);
}
void WasmGlobalObject::SetF32(float value) {
- WriteLittleEndianValue<float>(address(), value);
+ base::WriteLittleEndianValue<float>(address(), value);
}
void WasmGlobalObject::SetF64(double value) {
- WriteLittleEndianValue<double>(address(), value);
+ base::WriteLittleEndianValue<double>(address(), value);
}
void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
- // We use this getter anyref and except_ref.
- DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExceptRef);
+  // We use this getter for anyref and exnref.
+ DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExnRef);
tagged_buffer().set(offset(), *value);
}
-bool WasmGlobalObject::SetAnyFunc(Isolate* isolate, Handle<Object> value) {
- DCHECK_EQ(type(), wasm::kWasmAnyFunc);
+bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
+ DCHECK_EQ(type(), wasm::kWasmFuncRef);
if (!value->IsNull(isolate) &&
- !WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ !WasmExportedFunction::IsWasmExportedFunction(*value) &&
+ !WasmCapiFunction::IsWasmCapiFunction(*value)) {
return false;
}
tagged_buffer().set(offset(), *value);
@@ -249,6 +251,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, imported_mutable_globals_buffers,
OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tables, FixedArray, kTablesOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_tables, FixedArray,
+ kIndirectFunctionTablesOffset)
ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
kImportedFunctionRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
@@ -257,16 +261,10 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
kExceptionsTableOffset)
-ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
-ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_exported_functions, FixedArray,
kWasmExportedFunctionsOffset)
-inline bool WasmInstanceObject::has_indirect_function_table() {
- return indirect_function_table_sig_ids() != nullptr;
-}
-
void WasmInstanceObject::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
@@ -276,10 +274,29 @@ void WasmInstanceObject::clear_padding() {
}
IndirectFunctionTableEntry::IndirectFunctionTableEntry(
- Handle<WasmInstanceObject> instance, int index)
- : instance_(instance), index_(index) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, instance->indirect_function_table_size());
+ Handle<WasmInstanceObject> instance, int table_index, int entry_index)
+ : instance_(table_index == 0 ? instance
+ : Handle<WasmInstanceObject>::null()),
+ table_(table_index != 0
+ ? handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(
+ table_index)),
+ instance->GetIsolate())
+ : Handle<WasmIndirectFunctionTable>::null()),
+ index_(entry_index) {
+ DCHECK_GE(entry_index, 0);
+ DCHECK_LT(entry_index, table_index == 0
+ ? instance->indirect_function_table_size()
+ : table_->size());
+}
+
+IndirectFunctionTableEntry::IndirectFunctionTableEntry(
+ Handle<WasmIndirectFunctionTable> table, int entry_index)
+ : instance_(Handle<WasmInstanceObject>::null()),
+ table_(table),
+ index_(entry_index) {
+ DCHECK_GE(entry_index, 0);
+ DCHECK_LT(entry_index, table_->size());
}
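The two constructors above encode the scheme this patch introduces: entries of table 0 stay inlined in the instance (the fast path for generated code), while entries of tables 1..N live in a boxed {WasmIndirectFunctionTable}, so exactly one of {instance_} and {table_} is non-null for any entry. Every accessor then dispatches on that invariant; a sketch with illustrative names, not V8's literal code:

// Sketch only: each entry method picks the backing store chosen at
// construction time.
template <typename Instance, typename Table>
int SigIdOf(const Instance* instance, const Table* table, int index) {
  return instance != nullptr ? instance->sig_ids[index]  // table 0
                             : table->sig_ids[index];    // tables 1..N
}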
ImportedFunctionEntry::ImportedFunctionEntry(
@@ -307,6 +324,10 @@ ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
+ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
+ACCESSORS(WasmExportedFunctionData, wasm_call_target, Smi,
+ kWasmCallTargetOffset)
+SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
// WasmJSFunction
WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
@@ -317,6 +338,13 @@ CAST_ACCESSOR(WasmJSFunction)
// WasmJSFunctionData
OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, Struct)
CAST_ACCESSOR(WasmJSFunctionData)
+SMI_ACCESSORS(WasmJSFunctionData, serialized_return_count,
+ kSerializedReturnCountOffset)
+SMI_ACCESSORS(WasmJSFunctionData, serialized_parameter_count,
+ kSerializedParameterCountOffset)
+ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
+ kSerializedSignatureOffset)
+ACCESSORS(WasmJSFunctionData, callable, JSReceiver, kCallableOffset)
ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
// WasmCapiFunction
@@ -336,6 +364,18 @@ ACCESSORS(WasmCapiFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmCapiFunctionData, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
+// WasmIndirectFunctionTable
+OBJECT_CONSTRUCTORS_IMPL(WasmIndirectFunctionTable, Struct)
+CAST_ACCESSOR(WasmIndirectFunctionTable)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, size, uint32_t, kSizeOffset)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, sig_ids, uint32_t*,
+ kSigIdsOffset)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, targets, Address*,
+ kTargetsOffset)
+OPTIONAL_ACCESSORS(WasmIndirectFunctionTable, managed_native_allocations,
+ Foreign, kManagedNativeAllocationsOffset)
+ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
+
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
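The new {WasmIndirectFunctionTable} accessors above describe three parallel arrays: entry i of a table is the triple (sig_ids[i], targets[i], refs[i]). A standalone model of that layout, with the types simplified:

#include <cstdint>
#include <vector>

// Sketch: entry i is (sig_ids[i], targets[i], refs[i]).
struct IndirectTableModel {
  std::vector<uint32_t> sig_ids;   // canonical signature ids; -1 means empty
  std::vector<uintptr_t> targets;  // code entry points
  std::vector<void*> refs;         // instance or (instance, callable) tuple
};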
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 27a56695c2..f44f8326ad 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -139,7 +139,9 @@ class WasmInstanceNativeAllocations {
instance->set_indirect_function_table_refs(*new_refs);
for (uint32_t j = old_size; j < new_size; j++) {
- IndirectFunctionTableEntry(instance, static_cast<int>(j)).clear();
+ // {WasmInstanceNativeAllocations} only manages the memory of table 0.
+ // Therefore we pass the {table_index} as a constant here.
+ IndirectFunctionTableEntry(instance, 0, static_cast<int>(j)).clear();
}
}
uint32_t* indirect_function_table_sig_ids_ = nullptr;
@@ -509,7 +511,7 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object,
Isolate* isolate = module_object->GetIsolate();
const WasmModule* module = module_object->module();
- if (module->origin != wasm::kAsmJsOrigin) {
+ if (module->origin == wasm::kWasmOrigin) {
// for non-asm.js modules, we just add the function's start offset
// to make a module-relative position.
return byte_offset + module_object->GetFunctionOffset(func_index);
@@ -789,19 +791,21 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
backing_store->set(i, null);
}
+ Handle<Object> max;
+ if (has_maximum) {
+ max = isolate->factory()->NewNumberFromUint(maximum);
+ } else {
+ max = isolate->factory()->undefined_value();
+ }
+
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor(), isolate);
auto table_obj = Handle<WasmTableObject>::cast(
isolate->factory()->NewJSObject(table_ctor));
+ DisallowHeapAllocation no_gc;
table_obj->set_raw_type(static_cast<int>(type));
table_obj->set_entries(*backing_store);
- Handle<Object> max;
- if (has_maximum) {
- max = isolate->factory()->NewNumberFromUint(maximum);
- } else {
- max = isolate->factory()->undefined_value();
- }
table_obj->set_maximum_length(*max);
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
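The shuffle in this hunk is about GC safety rather than behavior: {NewNumberFromUint} can allocate, so {max} is now computed before the new {DisallowHeapAllocation} scope, and the raw field writes on the freshly created table object all happen inside that scope. The same pattern appears again in {WasmGlobalObject::New} further below; schematically:

// Initialization pattern (sketch):
//   1. Perform every allocation up front; handles keep the results alive.
//   2. Open a DisallowHeapAllocation scope.
//   3. Write the raw fields while no GC can observe the half-built object.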
@@ -865,15 +869,14 @@ int WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- continue;
- }
- // For Table 0 we have to update the indirect function table.
+
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- DCHECK_EQ(old_size, instance->indirect_function_table_size());
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(instance,
- new_size);
+
+ DCHECK_EQ(old_size, WasmInstanceObject::IndirectFunctionTableSize(
+ isolate, instance, table_index));
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table_index, new_size);
}
for (uint32_t entry = old_size; entry < new_size; ++entry) {
@@ -895,10 +898,11 @@ bool WasmTableObject::IsValidElement(Isolate* isolate,
Handle<Object> entry) {
// Anyref tables take everything.
if (table->type() == wasm::kWasmAnyRef) return true;
- // Anyfunc tables can store {null} or {WasmExportedFunction} or
- // {WasmCapiFunction} objects.
+ // FuncRef tables can store {null}, {WasmExportedFunction}, {WasmJSFunction},
+ // or {WasmCapiFunction} objects.
if (entry->IsNull(isolate)) return true;
return WasmExportedFunction::IsWasmExportedFunction(*entry) ||
+ WasmJSFunction::IsWasmJSFunction(*entry) ||
WasmCapiFunction::IsWasmCapiFunction(*entry);
}
@@ -932,6 +936,9 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
DCHECK_NOT_NULL(wasm_function->sig);
UpdateDispatchTables(isolate, table, entry_index, wasm_function->sig,
target_instance, func_index);
+ } else if (WasmJSFunction::IsWasmJSFunction(*entry)) {
+ UpdateDispatchTables(isolate, table, entry_index,
+ Handle<WasmJSFunction>::cast(entry));
} else {
DCHECK(WasmCapiFunction::IsWasmCapiFunction(*entry));
UpdateDispatchTables(isolate, table, entry_index,
@@ -955,7 +962,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
// First we handle the easy anyref table case.
if (table->type() == wasm::kWasmAnyRef) return entry;
- // Now we handle the anyfunc case.
+ // Now we handle the funcref case.
if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
WasmCapiFunction::IsWasmCapiFunction(*entry)) {
return entry;
@@ -1005,11 +1012,6 @@ void WasmTableObject::UpdateDispatchTables(
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- // TODO(ahaas): Introduce dispatch tables for the other tables as well.
- continue;
- }
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
@@ -1017,11 +1019,33 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(*sig);
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, target_instance, target_func_index);
}
}
+void WasmTableObject::UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index,
+ Handle<WasmJSFunction> function) {
+ // We simply need to update the IFTs for each instance that imports
+ // this table.
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset)),
+ isolate);
+ WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ isolate, instance, table_index, entry_index, function);
+ }
+}
+
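All three {UpdateDispatchTables} overloads walk the same flat registry: {dispatch_tables} holds one fixed-width record per importing instance, decoded via the stride and field constants ({kDispatchTableNumElements}, {kDispatchTableIndexOffset}, {kDispatchTableInstanceOffset}). A standalone model of that walk, with illustrative constants rather than V8's actual ones:

#include <cstddef>
#include <vector>

constexpr size_t kNumElements = 2;     // elements per record (sketch value)
constexpr size_t kInstanceOffset = 0;  // field 0: the importing instance
constexpr size_t kIndexOffset = 1;     // field 1: this table's index there

void ForEachImporter(const std::vector<void*>& flat_records) {
  for (size_t i = 0; i < flat_records.size(); i += kNumElements) {
    void* instance = flat_records[i + kInstanceOffset];
    void* table_index = flat_records[i + kIndexOffset];
    (void)instance;
    (void)table_index;  // ...update this importer's indirect table here...
  }
}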
void WasmTableObject::UpdateDispatchTables(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
Handle<WasmCapiFunction> capi_function) {
@@ -1052,11 +1076,6 @@ void WasmTableObject::UpdateDispatchTables(
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- // TODO(ahaas): Introduce dispatch tables for the other tables as well.
- continue;
- }
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
@@ -1077,7 +1096,7 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(sig);
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, wasm_code->instruction_start(), *tuple);
}
}
@@ -1091,16 +1110,13 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- continue;
- }
Handle<WasmInstanceObject> target_instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
isolate);
- DCHECK_LT(index, target_instance->indirect_function_table_size());
- IndirectFunctionTableEntry(target_instance, index).clear();
+ DCHECK_LT(index, WasmInstanceObject::IndirectFunctionTableSize(
+ isolate, target_instance, table_index));
+ IndirectFunctionTableEntry(target_instance, table_index, index).clear();
}
}
@@ -1118,8 +1134,8 @@ void WasmTableObject::SetFunctionTablePlaceholder(
void WasmTableObject::GetFunctionTableEntry(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
- int* function_index) {
- DCHECK_EQ(table->type(), wasm::kWasmAnyFunc);
+ int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function) {
+ DCHECK_EQ(table->type(), wasm::kWasmFuncRef);
DCHECK_LT(entry_index, table->entries().length());
// We initialize {is_valid} with {true}. We may change it later.
*is_valid = true;
@@ -1132,17 +1148,91 @@ void WasmTableObject::GetFunctionTableEntry(
auto target_func = Handle<WasmExportedFunction>::cast(element);
*instance = handle(target_func->instance(), isolate);
*function_index = target_func->function_index();
+ *maybe_js_function = MaybeHandle<WasmJSFunction>();
+ return;
+ }
+ if (WasmJSFunction::IsWasmJSFunction(*element)) {
+ *instance = MaybeHandle<WasmInstanceObject>();
+ *maybe_js_function = Handle<WasmJSFunction>::cast(element);
return;
- } else if (element->IsTuple2()) {
+ }
+ if (element->IsTuple2()) {
auto tuple = Handle<Tuple2>::cast(element);
*instance = handle(WasmInstanceObject::cast(tuple->value1()), isolate);
*function_index = Smi::cast(tuple->value2()).value();
+ *maybe_js_function = MaybeHandle<WasmJSFunction>();
return;
}
*is_valid = false;
}
namespace {
+class IftNativeAllocations {
+ public:
+ IftNativeAllocations(Handle<WasmIndirectFunctionTable> table, uint32_t size)
+ : sig_ids_(size), targets_(size) {
+ table->set_sig_ids(sig_ids_.data());
+ table->set_targets(targets_.data());
+ }
+
+ static size_t SizeInMemory(uint32_t size) {
+ return size * (sizeof(Address) + sizeof(uint32_t));
+ }
+
+ void resize(Handle<WasmIndirectFunctionTable> table, uint32_t new_size) {
+ DCHECK_GE(new_size, sig_ids_.size());
+ DCHECK_EQ(this, Managed<IftNativeAllocations>::cast(
+ table->managed_native_allocations())
+ .raw());
+ sig_ids_.resize(new_size);
+ targets_.resize(new_size);
+ table->set_sig_ids(sig_ids_.data());
+ table->set_targets(targets_.data());
+ }
+
+ private:
+ std::vector<uint32_t> sig_ids_;
+ std::vector<Address> targets_;
+};
+} // namespace
+
+Handle<WasmIndirectFunctionTable> WasmIndirectFunctionTable::New(
+ Isolate* isolate, uint32_t size) {
+ auto refs = isolate->factory()->NewFixedArray(static_cast<int>(size));
+ auto table = Handle<WasmIndirectFunctionTable>::cast(
+ isolate->factory()->NewStruct(WASM_INDIRECT_FUNCTION_TABLE_TYPE));
+ table->set_size(size);
+ table->set_refs(*refs);
+ auto native_allocations = Managed<IftNativeAllocations>::Allocate(
+ isolate, IftNativeAllocations::SizeInMemory(size), table, size);
+ table->set_managed_native_allocations(*native_allocations);
+ for (uint32_t i = 0; i < size; ++i) {
+ IndirectFunctionTableEntry(table, static_cast<int>(i)).clear();
+ }
+ return table;
+}
+
+void WasmIndirectFunctionTable::Resize(Isolate* isolate,
+ Handle<WasmIndirectFunctionTable> table,
+ uint32_t new_size) {
+ uint32_t old_size = table->size();
+ if (old_size >= new_size) return; // Nothing to do.
+
+ Managed<IftNativeAllocations>::cast(table->managed_native_allocations())
+ .raw()
+ ->resize(table, new_size);
+
+ Handle<FixedArray> old_refs(table->refs(), isolate);
+ Handle<FixedArray> new_refs = isolate->factory()->CopyFixedArrayAndGrow(
+ old_refs, static_cast<int>(new_size - old_size));
+ table->set_refs(*new_refs);
+ table->set_size(new_size);
+ for (uint32_t i = old_size; i < new_size; ++i) {
+ IndirectFunctionTableEntry(table, static_cast<int>(i)).clear();
+ }
+}
+
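{IftNativeAllocations} is the keep-alive trick for the off-heap arrays: the table holds a {Managed<IftNativeAllocations>} whose destructor frees the vectors, and since {std::vector::resize} may reallocate, {resize} re-publishes the fresh {data()} pointers into the table's raw fields on every growth. A standalone illustration of why the re-publishing is mandatory:

#include <cstdint>
#include <vector>

int main() {
  std::vector<uint32_t> sig_ids(4);
  uint32_t* raw = sig_ids.data();
  sig_ids.resize(1024);  // may reallocate the backing store...
  // ...so `raw` may now dangle; always re-read data() after a resize.
  raw = sig_ids.data();
  return raw != nullptr ? 0 : 1;
}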
+namespace {
bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer,
size_t new_size) {
if (new_size > old_buffer->allocation_length()) return false;
@@ -1380,6 +1470,15 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
isolate->native_context()->wasm_global_constructor(), isolate);
auto global_obj = Handle<WasmGlobalObject>::cast(
isolate->factory()->NewJSObject(global_ctor));
+ {
+ // Disallow GC until all fields have acceptable types.
+ DisallowHeapAllocation no_gc;
+
+ global_obj->set_flags(0);
+ global_obj->set_type(type);
+ global_obj->set_offset(offset);
+ global_obj->set_is_mutable(is_mutable);
+ }
if (wasm::ValueTypes::IsReferenceType(type)) {
DCHECK(maybe_untagged_buffer.is_null());
@@ -1412,19 +1511,24 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_untagged_buffer(*untagged_buffer);
}
- global_obj->set_flags(0);
- global_obj->set_type(type);
- global_obj->set_offset(offset);
- global_obj->set_is_mutable(is_mutable);
return global_obj;
}
void IndirectFunctionTableEntry::clear() {
- instance_->indirect_function_table_sig_ids()[index_] = -1;
- instance_->indirect_function_table_targets()[index_] = 0;
- instance_->indirect_function_table_refs().set(
- index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
+ if (!instance_.is_null()) {
+ instance_->indirect_function_table_sig_ids()[index_] = -1;
+ instance_->indirect_function_table_targets()[index_] = 0;
+ instance_->indirect_function_table_refs().set(
+ index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
+ } else {
+ DCHECK(!table_.is_null());
+ table_->sig_ids()[index_] = -1;
+ table_->targets()[index_] = 0;
+ table_->refs().set(
+ index_,
+ ReadOnlyRoots(GetIsolateFromWritableObject(*table_)).undefined_value());
+ }
}
void IndirectFunctionTableEntry::Set(int sig_id,
@@ -1455,31 +1559,34 @@ void IndirectFunctionTableEntry::Set(int sig_id,
void IndirectFunctionTableEntry::Set(int sig_id, Address call_target,
Object ref) {
- instance_->indirect_function_table_sig_ids()[index_] = sig_id;
- instance_->indirect_function_table_targets()[index_] = call_target;
- instance_->indirect_function_table_refs().set(index_, ref);
-}
-
-Object IndirectFunctionTableEntry::object_ref() {
- return instance_->indirect_function_table_refs().get(index_);
+ if (!instance_.is_null()) {
+ instance_->indirect_function_table_sig_ids()[index_] = sig_id;
+ instance_->indirect_function_table_targets()[index_] = call_target;
+ instance_->indirect_function_table_refs().set(index_, ref);
+ } else {
+ DCHECK(!table_.is_null());
+ table_->sig_ids()[index_] = sig_id;
+ table_->targets()[index_] = call_target;
+ table_->refs().set(index_, ref);
+ }
}
-int IndirectFunctionTableEntry::sig_id() {
- return instance_->indirect_function_table_sig_ids()[index_];
+Object IndirectFunctionTableEntry::object_ref() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_refs().get(index_)
+ : table_->refs().get(index_);
}
-Address IndirectFunctionTableEntry::target() {
- return instance_->indirect_function_table_targets()[index_];
+int IndirectFunctionTableEntry::sig_id() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_sig_ids()[index_]
+ : table_->sig_ids()[index_];
}
-void IndirectFunctionTableEntry::CopyFrom(
- const IndirectFunctionTableEntry& that) {
- instance_->indirect_function_table_sig_ids()[index_] =
- that.instance_->indirect_function_table_sig_ids()[that.index_];
- instance_->indirect_function_table_targets()[index_] =
- that.instance_->indirect_function_table_targets()[that.index_];
- instance_->indirect_function_table_refs().set(
- index_, that.instance_->indirect_function_table_refs().get(that.index_));
+Address IndirectFunctionTableEntry::target() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_targets()[index_]
+ : table_->targets()[index_];
}
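After this rewrite every entry accessor has the same two-way shape: use the instance-inlined arrays for table 0, the {WasmIndirectFunctionTable} arrays otherwise. The cleared state rests on a sentinel: {clear()} stores a sig id of -1, and since {SignatureMap::Find} only hands out non-negative ids for known signatures, a call_indirect signature check against a cleared slot can never pass. A minimal model of that check:

#include <cstdint>

// Sketch: -1 marks an empty slot; real canonical ids are non-negative.
inline bool SignatureCheckPasses(int32_t entry_sig_id, int32_t expected_id) {
  return entry_sig_id == expected_id;  // never true for a cleared (-1) slot
}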
void ImportedFunctionEntry::SetWasmToJs(
@@ -1535,11 +1642,21 @@ constexpr uint16_t WasmInstanceObject::kTaggedFieldOffsets[];
// static
bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- Handle<WasmInstanceObject> instance, uint32_t minimum_size) {
+ Handle<WasmInstanceObject> instance, int table_index,
+ uint32_t minimum_size) {
+ Isolate* isolate = instance->GetIsolate();
+ if (table_index > 0) {
+ DCHECK_LT(table_index, instance->indirect_function_tables().length());
+ auto table =
+ handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(table_index)),
+ isolate);
+ WasmIndirectFunctionTable::Resize(isolate, table, minimum_size);
+ return true;
+ }
uint32_t old_size = instance->indirect_function_table_size();
if (old_size >= minimum_size) return false; // Nothing to do.
- Isolate* isolate = instance->GetIsolate();
HandleScope scope(isolate);
auto native_allocations = GetNativeAllocations(*instance);
native_allocations->resize_indirect_function_table(isolate, instance,
@@ -1624,8 +1741,6 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_indirect_function_table_targets(nullptr);
instance->set_native_context(*isolate->native_context());
instance->set_module_object(*module_object);
- instance->set_undefined_value(ReadOnlyRoots(isolate).undefined_value());
- instance->set_null_value(ReadOnlyRoots(isolate).null_value());
instance->set_jump_table_start(
module_object->native_module()->jump_table_start());
@@ -1695,83 +1810,55 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
return native_module->GetCallTargetForFunction(func_index);
}
-namespace {
-void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
- uint32_t src, uint32_t count, bool copy_backward) {
- DCHECK(IsInBounds(dst, count, instance->indirect_function_table_size()));
- if (copy_backward) {
- for (uint32_t i = count; i > 0; i--) {
- auto to_entry = IndirectFunctionTableEntry(instance, dst + i - 1);
- auto from_entry = IndirectFunctionTableEntry(instance, src + i - 1);
- to_entry.CopyFrom(from_entry);
- }
- } else {
- for (uint32_t i = 0; i < count; i++) {
- auto to_entry = IndirectFunctionTableEntry(instance, dst + i);
- auto from_entry = IndirectFunctionTableEntry(instance, src + i);
- to_entry.CopyFrom(from_entry);
- }
+int WasmInstanceObject::IndirectFunctionTableSize(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t table_index) {
+ if (table_index == 0) {
+ return instance->indirect_function_table_size();
}
+ auto table =
+ handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(table_index)),
+ isolate);
+ return table->size();
}
-} // namespace
// static
bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- uint32_t table_src_index,
uint32_t table_dst_index,
+ uint32_t table_src_index,
uint32_t dst, uint32_t src,
uint32_t count) {
- if (static_cast<int>(table_dst_index) >= instance->tables().length()) {
- return false;
- }
- if (static_cast<int>(table_src_index) >= instance->tables().length()) {
- return false;
- }
-
- // TODO(titzer): multiple tables in TableCopy
- CHECK_EQ(0, table_src_index);
- CHECK_EQ(0, table_dst_index);
- auto max = instance->indirect_function_table_size();
- bool copy_backward = src < dst && dst - src < count;
- bool ok = ClampToBounds(dst, &count, max);
+ // Copying 0 elements is a no-op.
+ if (count == 0) return true;
+ CHECK_LT(table_dst_index, instance->tables().length());
+ CHECK_LT(table_src_index, instance->tables().length());
+ auto table_dst = handle(
+ WasmTableObject::cast(instance->tables().get(table_dst_index)), isolate);
+ auto table_src = handle(
+ WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
+ uint32_t max_dst = static_cast<uint32_t>(table_dst->entries().length());
+ uint32_t max_src = static_cast<uint32_t>(table_src->entries().length());
+ bool copy_backward = src < dst;
+ bool ok = ClampToBounds(dst, &count, max_dst);
// Use & instead of && so the clamp is not short-circuited.
- ok &= ClampToBounds(src, &count, max);
+ ok &= ClampToBounds(src, &count, max_src);
// If performing a partial copy when copying backward, then the first access
// will be out-of-bounds, so no entries should be copied.
if (copy_backward && !ok) return ok;
- if (dst == src || count == 0) return ok; // no-op
-
- // TODO(titzer): multiple tables in TableCopy
- auto table = handle(
- WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
- // Broadcast table copy operation to all instances that import this table.
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
- for (int i = 0; i < dispatch_tables->length();
- i += kDispatchTableNumElements) {
- Handle<WasmInstanceObject> target_instance(
- WasmInstanceObject::cast(
- dispatch_tables->get(i + kDispatchTableInstanceOffset)),
- isolate);
- CopyTableEntriesImpl(target_instance, dst, src, count, copy_backward);
+ // no-op
+ if ((dst == src && table_dst_index == table_src_index) || count == 0) {
+ return ok;
}
- // Copy the function entries.
- auto dst_table = handle(
- WasmTableObject::cast(instance->tables().get(table_dst_index)), isolate);
- auto src_table = handle(
- WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
- if (copy_backward) {
- for (uint32_t i = count; i > 0; i--) {
- dst_table->entries().set(dst + i - 1,
- src_table->entries().get(src + i - 1));
- }
- } else {
- for (uint32_t i = 0; i < count; i++) {
- dst_table->entries().set(dst + i, src_table->entries().get(src + i));
- }
+ for (uint32_t i = 0; i < count; ++i) {
+ uint32_t src_index = copy_backward ? (src + count - i - 1) : src + i;
+ uint32_t dst_index = copy_backward ? (dst + count - i - 1) : dst + i;
+ auto value = WasmTableObject::Get(isolate, table_src, src_index);
+ WasmTableObject::Set(isolate, table_dst, dst_index, value);
}
return ok;
}
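The rewritten {CopyTableEntries} swaps the old instance-local copy loop for element-wise {WasmTableObject::Get}/{Set}, which also broadcasts every write to all importing instances. The bounds logic deserves spelling out: both ranges are clamped independently (hence the non-short-circuiting {&}), and when {src < dst} (the only direction where an overlapping copy could clobber entries not yet read) the copy runs backward. A standalone model, assuming {ClampToBounds} shrinks {*count} so that {[index, index + count)} fits in {[0, max)} and returns whether nothing was cut:

#include <algorithm>
#include <cstdint>

bool ClampToBounds(uint32_t index, uint32_t* count, uint32_t max) {
  if (index > max) { *count = 0; return false; }
  uint32_t available = max - index;
  bool ok = *count <= available;
  *count = std::min(*count, available);
  return ok;
}

void CopyRange(uint32_t* table, uint32_t dst, uint32_t src, uint32_t count) {
  bool backward = src < dst;  // overlapping tail: copy the last element first
  for (uint32_t i = 0; i < count; ++i) {
    uint32_t s = backward ? src + count - i - 1 : src + i;
    uint32_t d = backward ? dst + count - i - 1 : dst + i;
    table[d] = table[s];
  }
}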
@@ -1782,6 +1869,8 @@ bool WasmInstanceObject::InitTableEntries(Isolate* isolate,
uint32_t table_index,
uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
+ // Copying 0 elements is a no-op.
+ if (count == 0) return true;
// Note that this implementation just calls through to module instantiation.
// This is intentional, so that the runtime only depends on the object
// methods, and not the module instantiation logic.
@@ -1830,9 +1919,8 @@ WasmInstanceObject::GetOrCreateWasmExportedFunction(
// The wrapper may not exist yet if no function in the exports section has
// this signature. We compile it and store the wrapper in the module for
// later use.
- wrapper = compiler::CompileJSToWasmWrapper(isolate, function.sig,
- function.imported)
- .ToHandleChecked();
+ wrapper = wasm::JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ isolate, function.sig, function.imported);
module_object->export_wrappers().set(wrapper_index, *wrapper);
}
result = WasmExportedFunction::New(
@@ -1861,6 +1949,55 @@ void WasmInstanceObject::SetWasmExportedFunction(
}
// static
+void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int table_index,
+ int entry_index, Handle<WasmJSFunction> js_function) {
+ // Deserialize the signature encapsulated with the {WasmJSFunction}.
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ wasm::FunctionSig* sig = js_function->GetSignature(&zone);
+ auto sig_id = instance->module()->signature_map.Find(*sig);
+
+ // Compile a wrapper for the target callable.
+ Handle<JSReceiver> callable(js_function->GetCallable(), isolate);
+ wasm::WasmCodeRefScope code_ref_scope;
+ Address call_target = kNullAddress;
+ if (sig_id >= 0) {
+ wasm::NativeModule* native_module =
+ instance->module_object().native_module();
+ // TODO(mstarzinger): Cache and reuse wrapper code.
+ const wasm::WasmFeatures enabled = native_module->enabled_features();
+ auto resolved =
+ compiler::ResolveWasmImportCall(callable, sig, enabled.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
+ callable = resolved.second; // Update to ultimate target.
+ DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
+ wasm::CompilationEnv env = native_module->CreateCompilationEnv();
+ wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ isolate->wasm_engine(), &env, kind, sig, false);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
+ std::move(result.source_positions), GetCodeKind(result),
+ wasm::ExecutionTier::kNone);
+ wasm::WasmCode* published_code =
+ native_module->PublishCode(std::move(wasm_code));
+ isolate->counters()->wasm_generated_code_size()->Increment(
+ published_code->instructions().length());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ published_code->reloc_info().length());
+ call_target = published_code->instruction_start();
+ }
+
+ // Update the dispatch table.
+ Handle<Tuple2> tuple =
+ isolate->factory()->NewTuple2(instance, callable, AllocationType::kOld);
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
+ .Set(sig_id, call_target, *tuple);
+}
+
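{ImportWasmJSFunctionIntoTable} is the glue that lets a plain JS callable sit in a funcref table: it deserializes the signature, compiles an import-call wrapper (uncached for now, per the TODO), and installs the usual entry triple. A sketch of what lands in the entry:

// Entry contents after the call above (sketch):
//   sig_id      -> canonical signature id, or -1 if this module never
//                  declared the signature (such an entry always traps)
//   call_target -> entry point of the freshly compiled import wrapper
//   ref         -> Tuple2{instance, callable}, giving the wrapper both
//                  its context object and the JS target at call time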
+// static
Handle<WasmExceptionObject> WasmExceptionObject::New(
Isolate* isolate, const wasm::FunctionSig* sig,
Handle<HeapObject> exception_tag) {
@@ -2013,8 +2150,8 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
encoded_size += 8;
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
encoded_size += 1;
break;
default:
@@ -2080,10 +2217,10 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
int num_imported_functions = instance->module()->num_imported_functions;
int jump_table_offset = -1;
if (func_index >= num_imported_functions) {
- ptrdiff_t jump_table_diff =
- instance->module_object().native_module()->jump_table_offset(
+ uint32_t jump_table_diff =
+ instance->module_object().native_module()->GetJumpTableOffset(
func_index);
- DCHECK(jump_table_diff >= 0 && jump_table_diff <= INT_MAX);
+ DCHECK_GE(kMaxInt, jump_table_diff);
jump_table_offset = static_cast<int>(jump_table_diff);
}
Handle<WasmExportedFunctionData> function_data =
@@ -2093,9 +2230,13 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
function_data->set_instance(*instance);
function_data->set_jump_table_offset(jump_table_offset);
function_data->set_function_index(func_index);
+ function_data->set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
+ function_data->set_wasm_call_target(Smi::zero(), SKIP_WRITE_BARRIER);
+ function_data->set_packed_args_size(0);
MaybeHandle<String> maybe_name;
- if (instance->module()->origin == wasm::kAsmJsOrigin) {
+ bool is_asm_js_module = instance->module_object().is_asm_js();
+ if (is_asm_js_module) {
// We can use the function name only for asm.js. For WebAssembly, the
// function name is specified as the function_index.toString().
maybe_name = WasmModuleObject::GetFunctionNameOrNull(
@@ -2110,10 +2251,18 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
}
- bool is_asm_js_module = instance->module_object().is_asm_js();
- Handle<Map> function_map = is_asm_js_module
- ? isolate->sloppy_function_map()
- : isolate->wasm_exported_function_map();
+ Handle<Map> function_map;
+ switch (instance->module()->origin) {
+ case wasm::kWasmOrigin:
+ function_map = isolate->wasm_exported_function_map();
+ break;
+ case wasm::kAsmJsSloppyOrigin:
+ function_map = isolate->sloppy_function_map();
+ break;
+ case wasm::kAsmJsStrictOrigin:
+ function_map = isolate->strict_function_map();
+ break;
+ }
NewFunctionArgs args =
NewFunctionArgs::ForWasm(name, function_data, function_map);
Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
@@ -2143,9 +2292,22 @@ bool WasmJSFunction::IsWasmJSFunction(Object object) {
Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
wasm::FunctionSig* sig,
Handle<JSReceiver> callable) {
+ DCHECK_LE(sig->all().size(), kMaxInt);
+ int sig_size = static_cast<int>(sig->all().size());
+ int return_count = static_cast<int>(sig->return_count());
+ int parameter_count = static_cast<int>(sig->parameter_count());
+ Handle<PodArray<wasm::ValueType>> serialized_sig =
+ PodArray<wasm::ValueType>::New(isolate, sig_size, AllocationType::kOld);
+ if (sig_size > 0) {
+ serialized_sig->copy_in(0, sig->all().begin(), sig_size);
+ }
Handle<WasmJSFunctionData> function_data =
Handle<WasmJSFunctionData>::cast(isolate->factory()->NewStruct(
WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
+ function_data->set_serialized_return_count(return_count);
+ function_data->set_serialized_parameter_count(parameter_count);
+ function_data->set_serialized_signature(*serialized_sig);
+ function_data->set_callable(*callable);
// TODO(7742): Make this callable by using a proper wrapper code.
function_data->set_wrapper_code(
isolate->builtins()->builtin(Builtins::kIllegal));
@@ -2160,6 +2322,37 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
return Handle<WasmJSFunction>::cast(js_function);
}
+JSReceiver WasmJSFunction::GetCallable() const {
+ return shared().wasm_js_function_data().callable();
+}
+
+wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
+ WasmJSFunctionData function_data = shared().wasm_js_function_data();
+ int sig_size = function_data.serialized_signature().length();
+ wasm::ValueType* types = zone->NewArray<wasm::ValueType>(sig_size);
+ if (sig_size > 0) {
+ function_data.serialized_signature().copy_out(0, types, sig_size);
+ }
+ int return_count = function_data.serialized_return_count();
+ int parameter_count = function_data.serialized_parameter_count();
+ return new (zone) wasm::FunctionSig(return_count, parameter_count, types);
+}
+
+bool WasmJSFunction::MatchesSignature(wasm::FunctionSig* sig) {
+ DCHECK_LE(sig->all().size(), kMaxInt);
+ int sig_size = static_cast<int>(sig->all().size());
+ int return_count = static_cast<int>(sig->return_count());
+ int parameter_count = static_cast<int>(sig->parameter_count());
+ WasmJSFunctionData function_data = shared().wasm_js_function_data();
+ if (return_count != function_data.serialized_return_count() ||
+ parameter_count != function_data.serialized_parameter_count()) {
+ return false;
+ }
+ if (sig_size == 0) return true; // Prevent undefined behavior.
+ const wasm::ValueType* expected = sig->all().begin();
+ return function_data.serialized_signature().matches(expected, sig_size);
+}
+
Address WasmCapiFunction::GetHostCallTarget() const {
return shared().wasm_capi_function_data().call_target();
}
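{WasmJSFunction} keeps its signature alive without a module by storing it flat: a {PodArray<wasm::ValueType>} with all types, returns first and parameters after, plus the two counts. {GetSignature} reverses exactly that encoding and {MatchesSignature} compares against it. A standalone model of the round-trip:

#include <cstdint>
#include <cstring>
#include <vector>

enum class ValueType : uint8_t { kI32, kI64, kF32, kF64 };

struct SerializedSig {
  int return_count;
  int parameter_count;
  std::vector<ValueType> all;  // returns first, then parameters
};

bool Matches(const SerializedSig& stored, const SerializedSig& expected) {
  if (stored.return_count != expected.return_count ||
      stored.parameter_count != expected.parameter_count) {
    return false;
  }
  if (expected.all.empty()) return true;  // avoid memcmp on empty data
  return std::memcmp(stored.all.data(), expected.all.data(),
                     expected.all.size() * sizeof(ValueType)) == 0;
}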
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 1e6ced0b76..1200f7040a 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -40,15 +40,17 @@ class SeqOneByteString;
class WasmCapiFunction;
class WasmDebugInfo;
class WasmExceptionTag;
+class WasmExportedFunction;
class WasmInstanceObject;
+class WasmJSFunction;
class WasmModuleObject;
-class WasmExportedFunction;
+class WasmIndirectFunctionTable;
template <class CppType>
class Managed;
#define DECL_OPTIONAL_ACCESSORS(name, type) \
- V8_INLINE bool has_##name(); \
+ DECL_GETTER(has_##name, bool) \
DECL_ACCESSORS(name, type)
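The tweak to {DECL_OPTIONAL_ACCESSORS} replaces a hand-written inline declaration with the {DECL_GETTER} macro; the surface it declares stays the same. Roughly (a sketch, not the literal V8 expansion), {DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)} yields:

// bool has_debug_info() const;               // from DECL_GETTER
// WasmDebugInfo debug_info() const;          // from DECL_ACCESSORS
// void set_debug_info(WasmDebugInfo value);  // from DECL_ACCESSORS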
// A helper for an entry in an indirect function table (IFT).
@@ -60,7 +62,11 @@ class Managed;
// - target = entrypoint to Wasm code or import wrapper code
class IndirectFunctionTableEntry {
public:
- inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int index);
+ inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int table_index,
+ int entry_index);
+
+ inline IndirectFunctionTableEntry(Handle<WasmIndirectFunctionTable> table,
+ int entry_index);
void clear();
V8_EXPORT_PRIVATE void Set(int sig_id,
@@ -68,14 +74,13 @@ class IndirectFunctionTableEntry {
int target_func_index);
void Set(int sig_id, Address call_target, Object ref);
- void CopyFrom(const IndirectFunctionTableEntry& that);
-
- Object object_ref();
- int sig_id();
- Address target();
+ Object object_ref() const;
+ int sig_id() const;
+ Address target() const;
private:
Handle<WasmInstanceObject> const instance_;
+ Handle<WasmIndirectFunctionTable> const table_;
int const index_;
};
@@ -292,6 +297,7 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
static void Fill(Isolate* isolate, Handle<WasmTableObject> table,
uint32_t start, Handle<Object> entry, uint32_t count);
+ // TODO(mstarzinger): Unify these three methods into one.
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int entry_index, wasm::FunctionSig* sig,
@@ -300,6 +306,10 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int entry_index,
+ Handle<WasmJSFunction> function);
+ static void UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index,
Handle<WasmCapiFunction> capi_function);
static void ClearDispatchTables(Isolate* isolate,
@@ -312,14 +322,12 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
int func_index);
// This function reads the content of a function table entry and returns it
- // through the out parameters {is_valid}, {is_null}, {instance}, and
- // {function_index}.
- static void GetFunctionTableEntry(Isolate* isolate,
- Handle<WasmTableObject> table,
- int entry_index, bool* is_valid,
- bool* is_null,
- MaybeHandle<WasmInstanceObject>* instance,
- int* function_index);
+ // through the out parameters {is_valid}, {is_null}, {instance},
+ // {function_index}, and {maybe_js_function}.
+ static void GetFunctionTableEntry(
+ Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
+ bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
+ int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function);
OBJECT_CONSTRUCTORS(WasmTableObject, JSObject);
};
@@ -406,7 +414,7 @@ class WasmGlobalObject : public JSObject {
inline void SetF32(float value);
inline void SetF64(double value);
inline void SetAnyRef(Handle<Object> value);
- inline bool SetAnyFunc(Isolate* isolate, Handle<Object> value);
+ inline bool SetFuncRef(Isolate* isolate, Handle<Object> value);
private:
// This function returns the address of the global's data in the
@@ -431,12 +439,11 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(tables, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(indirect_function_tables, FixedArray)
DECL_ACCESSORS(imported_function_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
- DECL_ACCESSORS(undefined_value, Oddball)
- DECL_ACCESSORS(null_value, Oddball)
DECL_ACCESSORS(centry_stub, Code)
DECL_OPTIONAL_ACCESSORS(wasm_exported_functions, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
@@ -482,7 +489,6 @@ class WasmInstanceObject : public JSObject {
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kGlobalsStartOffset, kSystemPointerSize) \
V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
- V(kUndefinedValueOffset, kTaggedSize) \
V(kIsolateRootOffset, kSystemPointerSize) \
V(kJumpTableStartOffset, kSystemPointerSize) \
/* End of often-accessed fields. */ \
@@ -495,9 +501,9 @@ class WasmInstanceObject : public JSObject {
V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
V(kDebugInfoOffset, kTaggedSize) \
V(kTablesOffset, kTaggedSize) \
+ V(kIndirectFunctionTablesOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
V(kExceptionsTableOffset, kTaggedSize) \
- V(kNullValueOffset, kTaggedSize) \
V(kCEntryStubOffset, kTaggedSize) \
V(kWasmExportedFunctionsOffset, kTaggedSize) \
V(kRealStackLimitAddressOffset, kSystemPointerSize) \
@@ -526,7 +532,6 @@ class WasmInstanceObject : public JSObject {
static constexpr uint16_t kTaggedFieldOffsets[] = {
kImportedFunctionRefsOffset,
kIndirectFunctionTableRefsOffset,
- kUndefinedValueOffset,
kModuleObjectOffset,
kExportsObjectOffset,
kNativeContextOffset,
@@ -536,18 +541,17 @@ class WasmInstanceObject : public JSObject {
kImportedMutableGlobalsBuffersOffset,
kDebugInfoOffset,
kTablesOffset,
+ kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
kExceptionsTableOffset,
- kNullValueOffset,
kCEntryStubOffset,
kWasmExportedFunctionsOffset};
V8_EXPORT_PRIVATE const wasm::WasmModule* module();
V8_EXPORT_PRIVATE static bool EnsureIndirectFunctionTableWithMinimumSize(
- Handle<WasmInstanceObject> instance, uint32_t minimum_size);
-
- bool has_indirect_function_table();
+ Handle<WasmInstanceObject> instance, int table_index,
+ uint32_t minimum_size);
V8_EXPORT_PRIVATE void SetRawMemory(byte* mem_start, size_t mem_size);
@@ -561,11 +565,15 @@ class WasmInstanceObject : public JSObject {
Address GetCallTarget(uint32_t func_index);
+ static int IndirectFunctionTableSize(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index);
+
// Copies table entries. Returns {false} if the ranges are out-of-bounds.
static bool CopyTableEntries(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- uint32_t table_src_index,
- uint32_t table_dst_index, uint32_t dst,
+ uint32_t table_dst_index,
+ uint32_t table_src_index, uint32_t dst,
uint32_t src,
uint32_t count) V8_WARN_UNUSED_RESULT;
@@ -597,6 +605,14 @@ class WasmInstanceObject : public JSObject {
int index,
Handle<WasmExportedFunction> val);
+ // Imports a constructed {WasmJSFunction} into the indirect function table of
+ // this instance. Note that this might trigger wrapper compilation, since a
+ // {WasmJSFunction} is instance-independent and just wraps a JS callable.
+ static void ImportWasmJSFunctionIntoTable(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ int table_index, int entry_index,
+ Handle<WasmJSFunction> js_function);
+
OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
@@ -681,6 +697,12 @@ class WasmJSFunction : public JSFunction {
static Handle<WasmJSFunction> New(Isolate* isolate, wasm::FunctionSig* sig,
Handle<JSReceiver> callable);
+ JSReceiver GetCallable() const;
+ // Deserializes the signature of this function using the provided zone. Note
+ // that the lifetime of the signature is hence directly coupled to the zone.
+ wasm::FunctionSig* GetSignature(Zone* zone);
+ bool MatchesSignature(wasm::FunctionSig* sig);
+
DECL_CAST(WasmJSFunction)
OBJECT_CONSTRUCTORS(WasmJSFunction, JSFunction);
};
@@ -704,6 +726,34 @@ class WasmCapiFunction : public JSFunction {
OBJECT_CONSTRUCTORS(WasmCapiFunction, JSFunction);
};
+class WasmIndirectFunctionTable : public Struct {
+ public:
+ DECL_PRIMITIVE_ACCESSORS(size, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(sig_ids, uint32_t*)
+ DECL_PRIMITIVE_ACCESSORS(targets, Address*)
+ DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
+ DECL_ACCESSORS(refs, FixedArray)
+
+ V8_EXPORT_PRIVATE static Handle<WasmIndirectFunctionTable> New(
+ Isolate* isolate, uint32_t size);
+ static void Resize(Isolate* isolate, Handle<WasmIndirectFunctionTable> table,
+ uint32_t new_size);
+
+ DECL_CAST(WasmIndirectFunctionTable)
+
+ DECL_PRINTER(WasmIndirectFunctionTable)
+ DECL_VERIFIER(WasmIndirectFunctionTable)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WASM_INDIRECT_FUNCTION_TABLE_FIELDS)
+
+ STATIC_ASSERT(kStartOfStrongFieldsOffset == kManagedNativeAllocationsOffset);
+ using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
+
+ OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
+};
+
class WasmCapiFunctionData : public Struct {
public:
DECL_PRIMITIVE_ACCESSORS(call_target, Address)
@@ -734,6 +784,9 @@ class WasmExportedFunctionData : public Struct {
DECL_ACCESSORS(instance, WasmInstanceObject)
DECL_INT_ACCESSORS(jump_table_offset)
DECL_INT_ACCESSORS(function_index)
+ DECL_ACCESSORS(c_wrapper_code, Object)
+ DECL_ACCESSORS(wasm_call_target, Smi)
+ DECL_INT_ACCESSORS(packed_args_size)
DECL_CAST(WasmExportedFunctionData)
@@ -754,6 +807,10 @@ class WasmExportedFunctionData : public Struct {
// {SharedFunctionInfo::HasWasmJSFunctionData} predicate.
class WasmJSFunctionData : public Struct {
public:
+ DECL_INT_ACCESSORS(serialized_return_count)
+ DECL_INT_ACCESSORS(serialized_parameter_count)
+ DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
+ DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(wrapper_code, Code)
DECL_CAST(WasmJSFunctionData)
@@ -847,8 +904,8 @@ class WasmDebugInfo : public Struct {
Address frame_pointer,
int frame_index);
- V8_EXPORT_PRIVATE static Handle<JSFunction> GetCWasmEntry(
- Handle<WasmDebugInfo>, wasm::FunctionSig*);
+ V8_EXPORT_PRIVATE static Handle<Code> GetCWasmEntry(Handle<WasmDebugInfo>,
+ wasm::FunctionSig*);
OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct);
};
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 88b9e90381..d3fb4c42cf 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -23,7 +23,9 @@ namespace wasm {
#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
+#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
+#define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
@@ -31,6 +33,7 @@ namespace wasm {
#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
+#define CASE_S1x2_OP(name, str) CASE_OP(S1x2##name, "s1x2." str)
#define CASE_S1x4_OP(name, str) CASE_OP(S1x4##name, "s1x4." str)
#define CASE_S1x8_OP(name, str) CASE_OP(S1x8##name, "s1x8." str)
#define CASE_S1x16_OP(name, str) CASE_OP(S1x16##name, "s1x16." str)
@@ -148,8 +151,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(TeeLocal, "local.tee")
CASE_OP(GetGlobal, "global.get")
CASE_OP(SetGlobal, "global.set")
- CASE_OP(GetTable, "table.get")
- CASE_OP(SetTable, "table.set")
+ CASE_OP(TableGet, "table.get")
+ CASE_OP(TableSet, "table.set")
CASE_ALL_OP(Const, "const")
CASE_OP(MemorySize, "memory.size")
CASE_OP(MemoryGrow, "memory.grow")
@@ -217,11 +220,26 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// SIMD opcodes.
CASE_SIMD_OP(Splat, "splat")
CASE_SIMD_OP(Neg, "neg")
+ CASE_F64x2_OP(Neg, "neg")
+ CASE_I64x2_OP(Neg, "neg")
CASE_SIMD_OP(Eq, "eq")
+ CASE_F64x2_OP(Eq, "eq")
+ CASE_I64x2_OP(Eq, "eq")
CASE_SIMD_OP(Ne, "ne")
+ CASE_F64x2_OP(Ne, "ne")
+ CASE_I64x2_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
+ CASE_I64x2_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
+ CASE_I64x2_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
+ CASE_I64x2_OP(Mul, "mul")
+ CASE_F64x2_OP(Splat, "splat")
+ CASE_F64x2_OP(Lt, "lt")
+ CASE_F64x2_OP(Le, "le")
+ CASE_F64x2_OP(Gt, "gt")
+ CASE_F64x2_OP(Ge, "ge")
+ CASE_F64x2_OP(Abs, "abs")
CASE_F32x4_OP(Abs, "abs")
CASE_F32x4_OP(AddHoriz, "add_horizontal")
CASE_F32x4_OP(RecipApprox, "recip_approx")
@@ -240,18 +258,29 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
+ CASE_F64x2_OP(ExtractLane, "extract_lane")
+ CASE_F64x2_OP(ReplaceLane, "replace_lane")
CASE_F32x4_OP(ExtractLane, "extract_lane")
CASE_F32x4_OP(ReplaceLane, "replace_lane")
+ CASE_I64x2_OP(ExtractLane, "extract_lane")
+ CASE_I64x2_OP(ReplaceLane, "replace_lane")
CASE_SIMDI_OP(ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI, Min, "min")
CASE_SIGN_OP(SIMDI, Max, "max")
CASE_SIGN_OP(SIMDI, Lt, "lt")
+ CASE_SIGN_OP(I64x2, Lt, "lt")
CASE_SIGN_OP(SIMDI, Le, "le")
+ CASE_SIGN_OP(I64x2, Le, "le")
CASE_SIGN_OP(SIMDI, Gt, "gt")
+ CASE_SIGN_OP(I64x2, Gt, "gt")
CASE_SIGN_OP(SIMDI, Ge, "ge")
+ CASE_SIGN_OP(I64x2, Ge, "ge")
CASE_SIGN_OP(SIMDI, Shr, "shr")
+ CASE_SIGN_OP(I64x2, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
+ CASE_I64x2_OP(Shl, "shl")
+ CASE_I64x2_OP(Splat, "splat")
CASE_I32x4_OP(AddHoriz, "add_horizontal")
CASE_I16x8_OP(AddHoriz, "add_horizontal")
CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
@@ -264,6 +293,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Not, "not")
CASE_S128_OP(Select, "select")
CASE_S8x16_OP(Shuffle, "shuffle")
+ CASE_S1x2_OP(AnyTrue, "any_true")
+ CASE_S1x2_OP(AllTrue, "all_true")
CASE_S1x4_OP(AnyTrue, "any_true")
CASE_S1x4_OP(AllTrue, "all_true")
CASE_S1x8_OP(AnyTrue, "any_true")
@@ -274,6 +305,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
+ CASE_OP(AtomicFence, "atomic.fence")
CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
@@ -295,7 +327,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_F32_OP
#undef CASE_F64_OP
#undef CASE_REF_OP
+#undef CASE_F64x2_OP
#undef CASE_F32x4_OP
+#undef CASE_I64x2_OP
#undef CASE_I32x4_OP
#undef CASE_I16x8_OP
#undef CASE_I8x16_OP
@@ -303,6 +337,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_S32x4_OP
#undef CASE_S16x8_OP
#undef CASE_S8x16_OP
+#undef CASE_S1x2_OP
#undef CASE_S1x4_OP
#undef CASE_S1x8_OP
#undef CASE_S1x16_OP
@@ -474,7 +509,8 @@ struct GetSimdOpcodeSigIndex {
struct GetAtomicOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_ATOMIC_OPCODE(CASE) kSigEnum_None;
+ return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
+ kSigEnum_None;
#undef CASE
}
};
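The fix above also shows how these lookups stay constexpr: each {CASE} expands to `opcode == (opc & 0xFF) ? kSigEnum_sig :`, so the whole macro list folds into one chained conditional expression terminated by the default value, and appending {FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)} before that default simply lengthens the chain. A compilable miniature of the trick:

#include <cstdint>

#define FOREACH_DEMO_OPCODE(V) V(Add, 0x01, ii) V(Neg, 0x02, i)
enum DemoSig { kSigEnum_None, kSigEnum_ii, kSigEnum_i };

constexpr DemoSig GetDemoSig(uint8_t opcode) {
#define CASE(name, opc, sig) opcode == (opc) ? kSigEnum_##sig:
  return FOREACH_DEMO_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
static_assert(GetDemoSig(0x02) == kSigEnum_i, "the chain picks the match");
#undef FOREACH_DEMO_OPCODE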
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 6f9cb70141..22bd47d54b 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -6,7 +6,7 @@
#define V8_WASM_WASM_OPCODES_H_
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-constants.h"
@@ -51,8 +51,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(TeeLocal, 0x22, _) \
V(GetGlobal, 0x23, _) \
V(SetGlobal, 0x24, _) \
- V(GetTable, 0x25, _) \
- V(SetTable, 0x26, _) \
+ V(TableGet, 0x25, _) \
+ V(TableSet, 0x26, _) \
V(I32Const, 0x41, _) \
V(I64Const, 0x42, _) \
V(F32Const, 0x43, _) \
@@ -272,7 +272,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I8x16Splat, 0xfd04, s_i) \
V(I16x8Splat, 0xfd08, s_i) \
V(I32x4Splat, 0xfd0c, s_i) \
+ V(I64x2Splat, 0xfd0f, s_l) \
V(F32x4Splat, 0xfd12, s_f) \
+ V(F64x2Splat, 0xfd15, s_d) \
V(I8x16Eq, 0xfd18, s_ss) \
V(I8x16Ne, 0xfd19, s_ss) \
V(I8x16LtS, 0xfd1a, s_ss) \
@@ -303,12 +305,28 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I32x4LeU, 0xfd33, s_ss) \
V(I32x4GeS, 0xfd34, s_ss) \
V(I32x4GeU, 0xfd35, s_ss) \
+ V(I64x2Eq, 0xfd36, s_ss) \
+ V(I64x2Ne, 0xfd37, s_ss) \
+ V(I64x2LtS, 0xfd38, s_ss) \
+ V(I64x2LtU, 0xfd39, s_ss) \
+ V(I64x2GtS, 0xfd3a, s_ss) \
+ V(I64x2GtU, 0xfd3b, s_ss) \
+ V(I64x2LeS, 0xfd3c, s_ss) \
+ V(I64x2LeU, 0xfd3d, s_ss) \
+ V(I64x2GeS, 0xfd3e, s_ss) \
+ V(I64x2GeU, 0xfd3f, s_ss) \
V(F32x4Eq, 0xfd40, s_ss) \
V(F32x4Ne, 0xfd41, s_ss) \
V(F32x4Lt, 0xfd42, s_ss) \
V(F32x4Gt, 0xfd43, s_ss) \
V(F32x4Le, 0xfd44, s_ss) \
V(F32x4Ge, 0xfd45, s_ss) \
+ V(F64x2Eq, 0xfd46, s_ss) \
+ V(F64x2Ne, 0xfd47, s_ss) \
+ V(F64x2Lt, 0xfd48, s_ss) \
+ V(F64x2Gt, 0xfd49, s_ss) \
+ V(F64x2Le, 0xfd4a, s_ss) \
+ V(F64x2Ge, 0xfd4b, s_ss) \
V(S128Not, 0xfd4c, s_s) \
V(S128And, 0xfd4d, s_ss) \
V(S128Or, 0xfd4e, s_ss) \
@@ -352,6 +370,12 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I32x4MinU, 0xfd81, s_ss) \
V(I32x4MaxS, 0xfd82, s_ss) \
V(I32x4MaxU, 0xfd83, s_ss) \
+ V(I64x2Neg, 0xfd84, s_s) \
+ V(S1x2AnyTrue, 0xfd85, i_s) \
+ V(S1x2AllTrue, 0xfd86, i_s) \
+ V(I64x2Add, 0xfd8a, s_ss) \
+ V(I64x2Sub, 0xfd8d, s_ss) \
+ V(I64x2Mul, 0xfd8c, s_ss) \
V(F32x4Abs, 0xfd95, s_s) \
V(F32x4Neg, 0xfd96, s_s) \
V(F32x4RecipApprox, 0xfd98, s_s) \
@@ -361,6 +385,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(F32x4Mul, 0xfd9c, s_ss) \
V(F32x4Min, 0xfd9e, s_ss) \
V(F32x4Max, 0xfd9f, s_ss) \
+ V(F64x2Abs, 0xfda0, s_s) \
+ V(F64x2Neg, 0xfda1, s_s) \
V(I32x4SConvertF32x4, 0xfdab, s_s) \
V(I32x4UConvertF32x4, 0xfdac, s_s) \
V(F32x4SConvertI32x4, 0xfdaf, s_s) \
@@ -385,7 +411,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I8x16ExtractLane, 0xfd05, _) \
V(I16x8ExtractLane, 0xfd09, _) \
V(I32x4ExtractLane, 0xfd0d, _) \
+ V(I64x2ExtractLane, 0xfd10, _) \
V(F32x4ExtractLane, 0xfd13, _) \
+ V(F64x2ExtractLane, 0xfd16, _) \
V(I8x16Shl, 0xfd54, _) \
V(I8x16ShrS, 0xfd55, _) \
V(I8x16ShrU, 0xfd56, _) \
@@ -394,13 +422,18 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I16x8ShrU, 0xfd67, _) \
V(I32x4Shl, 0xfd76, _) \
V(I32x4ShrS, 0xfd77, _) \
- V(I32x4ShrU, 0xfd78, _)
+ V(I32x4ShrU, 0xfd78, _) \
+ V(I64x2Shl, 0xfd87, _) \
+ V(I64x2ShrS, 0xfd88, _) \
+ V(I64x2ShrU, 0xfd89, _)
#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
V(I8x16ReplaceLane, 0xfd07, _) \
V(I16x8ReplaceLane, 0xfd0b, _) \
V(I32x4ReplaceLane, 0xfd0e, _) \
- V(F32x4ReplaceLane, 0xfd14, _)
+ V(I64x2ReplaceLane, 0xfd11, _) \
+ V(F32x4ReplaceLane, 0xfd14, _) \
+ V(F64x2ReplaceLane, 0xfd17, _)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
@@ -424,7 +457,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(TableCopy, 0xfc0e, v_iii) \
V(TableGrow, 0xfc0f, i_ai) \
V(TableSize, 0xfc10, i_v) \
- /*TableFill is polymorph in the second parameter. It's anyref or anyfunc.*/ \
+ /*TableFill is polymorphic in the second parameter. It's anyref or funcref.*/ \
V(TableFill, 0xfc11, v_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
@@ -495,6 +528,10 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
+#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
+ /* AtomicFence does not target a particular linear memory. */ \
+ V(AtomicFence, 0xfe03, v_v)
+
// All opcodes.
#define FOREACH_OPCODE(V) \
FOREACH_CONTROL_OPCODE(V) \
@@ -510,6 +547,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
FOREACH_SIMD_MEM_OPCODE(V) \
FOREACH_ATOMIC_OPCODE(V) \
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
FOREACH_NUMERIC_OPCODE(V)
// All signatures.
@@ -553,13 +591,15 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
V(i_r, kWasmI32, kWasmAnyRef) \
- V(i_ai, kWasmI32, kWasmAnyFunc, kWasmI32)
+ V(i_ai, kWasmI32, kWasmFuncRef, kWasmI32)
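For readers decoding the shorthand names: letters before the underscore give the return type(s), letters after give the parameters, with i = i32, l = i64, f = f32, d = f64, s = s128, r = anyref, a = funcref and v = none. So the renamed {i_ai} reads as `i32 (funcref, i32)`, matching {TableGrow} above, and the new {s_l} and {s_d} entries below read as `s128 (i64)` and `s128 (f64)` for the i64x2/f64x2 splat opcodes.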
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
V(s_f, kWasmS128, kWasmF32) \
+ V(s_d, kWasmS128, kWasmF64) \
V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
V(s_i, kWasmS128, kWasmI32) \
+ V(s_l, kWasmS128, kWasmI64) \
V(s_si, kWasmS128, kWasmS128, kWasmI32) \
V(i_s, kWasmI32, kWasmS128) \
V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 4688bcf8e1..42eee037d5 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -18,28 +18,28 @@ namespace wasm {
namespace {
PRINTF_FORMAT(3, 0)
-void VPrintFToString(std::string& str, size_t str_offset, const char* format,
+void VPrintFToString(std::string* str, size_t str_offset, const char* format,
va_list args) {
- DCHECK_LE(str_offset, str.size());
+ DCHECK_LE(str_offset, str->size());
size_t len = str_offset + strlen(format);
// Allocate increasingly large buffers until the message fits.
for (;; len = base::bits::RoundUpToPowerOfTwo64(len + 1)) {
DCHECK_GE(kMaxInt, len);
- str.resize(len);
+ str->resize(len);
va_list args_copy;
va_copy(args_copy, args);
- int written = VSNPrintF(Vector<char>(&str.front() + str_offset,
+ int written = VSNPrintF(Vector<char>(&str->front() + str_offset,
static_cast<int>(len - str_offset)),
format, args_copy);
va_end(args_copy);
if (written < 0) continue; // not enough space.
- str.resize(str_offset + written);
+ str->resize(str_offset + written);
return;
}
}
PRINTF_FORMAT(3, 4)
-void PrintFToString(std::string& str, size_t str_offset, const char* format,
+void PrintFToString(std::string* str, size_t str_offset, const char* format,
...) {
va_list args;
va_start(args, format);
@@ -52,7 +52,7 @@ void PrintFToString(std::string& str, size_t str_offset, const char* format,
// static
std::string WasmError::FormatError(const char* format, va_list args) {
std::string result;
- VPrintFToString(result, 0, format, args);
+ VPrintFToString(&result, 0, format, args);
return result;
}
@@ -63,10 +63,10 @@ void ErrorThrower::Format(ErrorType type, const char* format, va_list args) {
size_t context_len = 0;
if (context_) {
- PrintFToString(error_msg_, 0, "%s: ", context_);
+ PrintFToString(&error_msg_, 0, "%s: ", context_);
context_len = error_msg_.size();
}
- VPrintFToString(error_msg_, context_len, format, args);
+ VPrintFToString(&error_msg_, context_len, format, args);
error_type_ = type;
}
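Switching the output string from a reference to a pointer follows the usual style rule that mutation should be visible at the call site (&result instead of a silent reference). The retry loop itself is the standard grow-until-it-fits vsnprintf pattern; below is a self-contained sketch using the plain C library (a hypothetical AppendFormatted helper, not V8's VSNPrintF wrapper, which signals truncation with a negative return instead of the C99 would-have-written length):

    #include <cstdarg>
    #include <cstdio>
    #include <cstring>
    #include <string>

    // Appends a printf-style message to *str at str_offset, growing the
    // buffer until the formatted text fits.
    void AppendFormatted(std::string* str, size_t str_offset,
                         const char* format, ...) {
      va_list args;
      va_start(args, format);
      size_t len = str_offset + strlen(format) + 1;  // initial guess
      for (;;) {
        str->resize(len);
        va_list args_copy;
        va_copy(args_copy, args);
        // C99 vsnprintf returns the length the full message would have had.
        int written = vsnprintf(&(*str)[str_offset], len - str_offset,
                                format, args_copy);
        va_end(args_copy);
        if (written >= 0 &&
            static_cast<size_t>(written) < len - str_offset) {
          str->resize(str_offset + written);  // trim to the actual length
          break;
        }
        len *= 2;  // message was truncated; retry with a larger buffer
      }
      va_end(args);
    }

A call like AppendFormatted(&msg, 0, "%s: ", context) followed by a second append at msg.size() mirrors how ErrorThrower::Format builds its message in two steps above.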
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 1cea08943b..a20b2f115a 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -645,6 +645,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
// Log the code within the generated module for profiling.
native_module->LogWasmCodes(isolate);
+  // Finish the Wasm script now and make it visible to the debugger.
+ isolate->debug()->OnAfterCompile(script);
return module_object;
}
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index a79ae02fe2..e17d34e36f 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -105,7 +105,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
os << WasmOpcodes::OpcodeName(opcode);
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
os << " (type " << imm.sig_index << ")";
} else if (imm.out_arity() > 0) {
os << " " << ValueTypes::TypeName(imm.out_type(0));
@@ -140,16 +140,18 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
while (iterator.has_next()) os << ' ' << iterator.next();
break;
}
- case kExprCallIndirect: {
+ case kExprCallIndirect:
+ case kExprReturnCallIndirect: {
CallIndirectImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
DCHECK_EQ(0, imm.table_index);
- os << "call_indirect " << imm.sig_index;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.sig_index;
break;
}
- case kExprCallFunction: {
+ case kExprCallFunction:
+ case kExprReturnCall: {
CallFunctionImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- os << "call " << imm.index;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
case kExprGetLocal:
@@ -170,6 +172,18 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
+ case kExprTableGet:
+ case kExprTableSet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprSelectWithType: {
+ SelectTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << ValueTypes::TypeName(imm.type);
+ break;
+ }
#define CASE_CONST(type, str, cast_type) \
case kExpr##type##Const: { \
Imm##type##Immediate<Decoder::kNoValidate> imm(&i, i.pc()); \
@@ -182,6 +196,12 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
CASE_CONST(F64, f64, double)
#undef CASE_CONST
+ case kExprRefFunc: {
+ FunctionIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+
#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
FOREACH_STORE_MEM_OPCODE(CASE_OPCODE) {
@@ -193,6 +213,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
}
FOREACH_SIMPLE_OPCODE(CASE_OPCODE)
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE_OPCODE)
case kExprUnreachable:
case kExprNop:
case kExprReturn:
@@ -200,19 +221,150 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprMemoryGrow:
case kExprDrop:
case kExprSelect:
+ case kExprRethrow:
+ case kExprRefNull:
os << WasmOpcodes::OpcodeName(opcode);
break;
+
+ case kNumericPrefix: {
+ WasmOpcode numeric_opcode = i.prefixed_opcode();
+ switch (numeric_opcode) {
+ case kExprI32SConvertSatF32:
+ case kExprI32UConvertSatF32:
+ case kExprI32SConvertSatF64:
+ case kExprI32UConvertSatF64:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
+ case kExprMemoryCopy:
+ case kExprMemoryFill:
+ os << WasmOpcodes::OpcodeName(opcode);
+ break;
+ case kExprMemoryInit: {
+ MemoryInitImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << imm.data_segment_index;
+ break;
+ }
+ case kExprDataDrop: {
+ DataDropImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprTableInit: {
+ TableInitImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << imm.elem_segment_index << ' ' << imm.table.index;
+ break;
+ }
+ case kExprElemDrop: {
+ ElemDropImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprTableCopy: {
+ TableCopyImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.table_src.index
+ << ' ' << imm.table_dst.index;
+ break;
+ }
+ case kExprTableGrow:
+ case kExprTableSize:
+ case kExprTableFill: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+
+ case kSimdPrefix: {
+ WasmOpcode simd_opcode = i.prefixed_opcode();
+ switch (simd_opcode) {
+ case kExprS128LoadMem:
+ case kExprS128StoreMem: {
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc(),
+ kMaxUInt32);
+ os << WasmOpcodes::OpcodeName(opcode) << " offset=" << imm.offset
+ << " align=" << (1ULL << imm.alignment);
+ break;
+ }
+
+ case kExprS8x16Shuffle: {
+ Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode);
+ for (uint8_t v : imm.shuffle) {
+            os << ' ' << static_cast<int>(v);  // lane index, not a char
+ }
+ break;
+ }
+
+ case kExprI8x16ExtractLane:
+ case kExprI16x8ExtractLane:
+ case kExprI32x4ExtractLane:
+ case kExprI64x2ExtractLane:
+ case kExprF32x4ExtractLane:
+ case kExprF64x2ExtractLane:
+ case kExprI8x16ReplaceLane:
+ case kExprI16x8ReplaceLane:
+ case kExprI32x4ReplaceLane:
+ case kExprI64x2ReplaceLane:
+ case kExprF32x4ReplaceLane:
+ case kExprF64x2ReplaceLane: {
+ SimdLaneImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.lane;
+ break;
+ }
+
+ case kExprI8x16Shl:
+ case kExprI8x16ShrS:
+ case kExprI8x16ShrU:
+ case kExprI16x8Shl:
+ case kExprI16x8ShrS:
+ case kExprI16x8ShrU:
+ case kExprI32x4Shl:
+ case kExprI32x4ShrS:
+ case kExprI32x4ShrU:
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU: {
+ SimdShiftImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.shift;
+ break;
+ }
+
+ FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(opcode);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+
case kAtomicPrefix: {
WasmOpcode atomic_opcode = i.prefixed_opcode();
switch (atomic_opcode) {
FOREACH_ATOMIC_OPCODE(CASE_OPCODE) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc(),
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1,
kMaxUInt32);
os << WasmOpcodes::OpcodeName(atomic_opcode)
<< " offset=" << imm.offset
<< " align=" << (1ULL << imm.alignment);
break;
}
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(atomic_opcode);
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -222,14 +374,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
      // The opcodes in this group are just printed by their internal names,
      // as they should never be shown to end-users.
- FOREACH_ASMJS_COMPAT_OPCODE(CASE_OPCODE)
- // TODO(wasm): Add correct printing for SIMD and atomic opcodes once
- // they are publicly available.
- FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_MASK_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_MEM_OPCODE(CASE_OPCODE)
- os << WasmOpcodes::OpcodeName(opcode);
+ FOREACH_ASMJS_COMPAT_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(opcode);
+ }
break;
#undef CASE_OPCODE
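The new kNumericPrefix and kSimdPrefix cases mirror the existing kAtomicPrefix handling: the printer fetches the byte after the prefix via i.prefixed_opcode() and dispatches on the combined value, and the extra + 1 passed to the immediate constructors accounts for the additional prefix byte in front of the immediate operands. The 0xfcNN/0xfdNN/0xfeNN constants in the opcode lists make the folding visible; a sketch with illustrative constants, assuming single-byte sub-opcodes as all entries here use:

    #include <cstdint>

    constexpr uint8_t kNumericPrefixByte = 0xfc;
    constexpr uint8_t kSimdPrefixByte = 0xfd;
    constexpr uint8_t kAtomicPrefixByte = 0xfe;

    // Fold a prefix byte and its sub-opcode into one 16-bit value, the
    // same shape as the prefixed opcode constants in the macro lists.
    constexpr uint16_t PrefixedOpcode(uint8_t prefix, uint8_t sub) {
      return static_cast<uint16_t>((prefix << 8) | sub);
    }

    static_assert(PrefixedOpcode(kAtomicPrefixByte, 0x03) == 0xfe03,
                  "AtomicFence: prefix 0xfe, sub-opcode 0x03");
    static_assert(PrefixedOpcode(kSimdPrefixByte, 0x87) == 0xfd87,
                  "I64x2Shl: prefix 0xfd, sub-opcode 0x87");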
diff --git a/deps/v8/src/wasm/wasm-text.h b/deps/v8/src/wasm/wasm-text.h
index 60957966ab..205df5e6fd 100644
--- a/deps/v8/src/wasm/wasm-text.h
+++ b/deps/v8/src/wasm/wasm-text.h
@@ -7,9 +7,10 @@
#include <cstdint>
#include <ostream>
-#include <tuple>
#include <vector>
+#include "src/common/globals.h"
+
namespace v8 {
namespace debug {
@@ -26,10 +27,10 @@ struct ModuleWireBytes;
// Generate disassembly according to official text format.
// Output disassembly to the given output stream, and optionally return an
// offset table of <byte offset, line, column> via the given pointer.
-void PrintWasmText(
- const WasmModule *module, const ModuleWireBytes &wire_bytes,
- uint32_t func_index, std::ostream &os,
- std::vector<debug::WasmDisassemblyOffsetTableEntry> *offset_table);
+V8_EXPORT_PRIVATE void PrintWasmText(
+ const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ uint32_t func_index, std::ostream& os,
+ std::vector<debug::WasmDisassemblyOffsetTableEntry>* offset_table);
} // namespace wasm
} // namespace internal
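V8_EXPORT_PRIVATE marks a symbol inside the v8 library as visible to other build targets (typically tests and fuzzers) without making it part of the public embedder API; the new src/common/globals.h include is what brings the macro into this header. A common shape for such an export macro, shown as a sketch with demo names rather than V8's exact definition:

    // DLL export/import on Windows, default ELF visibility elsewhere;
    // the real macro has additional cases for component vs. static builds.
    #if defined(_WIN32)
    #if defined(BUILDING_DEMO_SHARED)
    #define DEMO_EXPORT __declspec(dllexport)
    #else
    #define DEMO_EXPORT __declspec(dllimport)
    #endif
    #else
    #define DEMO_EXPORT __attribute__((visibility("default")))
    #endif

    DEMO_EXPORT void PrintSomething();  // callable from other targets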
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 23f1aed7f0..8de53b96cf 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_WASM_VALUE_H_
#define V8_WASM_WASM_VALUE_H_
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/handles/handles.h"
#include "src/utils/boxed-float.h"
#include "src/wasm/wasm-opcodes.h"
@@ -15,10 +15,12 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define FOREACH_SIMD_TYPE(V) \
- V(float, float4, f32x4, 4) \
- V(int32_t, int4, i32x4, 4) \
- V(int16_t, int8, i16x8, 8) \
+#define FOREACH_SIMD_TYPE(V) \
+ V(double, float2, f64x2, 2) \
+ V(float, float4, f32x4, 4) \
+ V(int64_t, int2, i64x2, 2) \
+ V(int32_t, int4, i32x4, 4) \
+ V(int16_t, int8, i16x8, 8) \
V(int8_t, int16, i8x16, 16)
#define DEFINE_SIMD_TYPE(cType, sType, name, kSize) \
@@ -35,12 +37,12 @@ class Simd128 {
val_[i] = 0;
}
}
-#define DEFINE_SIMD_TYPE_SPECIFIC_METHODS(cType, sType, name, size) \
- explicit Simd128(sType val) { \
- WriteUnalignedValue<sType>(reinterpret_cast<Address>(val_), val); \
- } \
- sType to_##name() { \
- return ReadUnalignedValue<sType>(reinterpret_cast<Address>(val_)); \
+#define DEFINE_SIMD_TYPE_SPECIFIC_METHODS(cType, sType, name, size) \
+ explicit Simd128(sType val) { \
+ base::WriteUnalignedValue<sType>(reinterpret_cast<Address>(val_), val); \
+ } \
+ sType to_##name() { \
+ return base::ReadUnalignedValue<sType>(reinterpret_cast<Address>(val_)); \
}
FOREACH_SIMD_TYPE(DEFINE_SIMD_TYPE_SPECIFIC_METHODS)
#undef DEFINE_SIMD_TYPE_SPECIFIC_METHODS
@@ -73,18 +75,20 @@ class WasmValue {
public:
WasmValue() : type_(kWasmStmt), bit_pattern_{} {}
-#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
- explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
- static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
- "size too big for WasmValue"); \
- WriteUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_), v); \
- } \
- ctype to_##name() const { \
- DCHECK_EQ(localtype, type_); \
- return to_##name##_unchecked(); \
- } \
- ctype to_##name##_unchecked() const { \
- return ReadUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_)); \
+#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
+ explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
+ static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
+ "size too big for WasmValue"); \
+ base::WriteUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_), \
+ v); \
+ } \
+ ctype to_##name() const { \
+ DCHECK_EQ(localtype, type_); \
+ return to_##name##_unchecked(); \
+ } \
+ ctype to_##name##_unchecked() const { \
+ return base::ReadUnalignedValue<ctype>( \
+ reinterpret_cast<Address>(bit_pattern_)); \
}
FOREACH_WASMVAL_TYPE(DEFINE_TYPE_SPECIFIC_METHODS)
#undef DEFINE_TYPE_SPECIFIC_METHODS
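Both Simd128 and WasmValue store their payload in a raw byte array (val_ / bit_pattern_) that carries no alignment guarantee for the type being read back, which is why the accessors go through base::ReadUnalignedValue and base::WriteUnalignedValue rather than a plain cast-and-dereference. A memcpy-based sketch with the same shape (illustrative, not the contents of src/base/memory.h):

    #include <cstdint>
    #include <cstring>

    using Address = uintptr_t;

    // memcpy is the portable way to access storage whose alignment is
    // unknown; reinterpret_cast-and-dereference would be undefined
    // behavior on a misaligned address.
    template <typename T>
    T ReadUnalignedValue(Address p) {
      T value;
      std::memcpy(&value, reinterpret_cast<const void*>(p), sizeof(T));
      return value;
    }

    template <typename T>
    void WriteUnalignedValue(Address p, T value) {
      std::memcpy(reinterpret_cast<void*>(p), &value, sizeof(T));
    }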