author     Refael Ackermann <refack@gmail.com>  2019-05-28 08:46:21 -0400
committer  Refael Ackermann <refack@gmail.com>  2019-06-01 09:55:12 -0400
commit     ed74896b1fae1c163b3906163f3bf46326618ddb (patch)
tree       7fb05c5a19808e0c5cd95837528e9005999cf540 /deps/v8/src/wasm
parent     2a850cd0664a4eee51f44d0bb8c2f7a3fe444154 (diff)
deps: update V8 to 7.5.288.22
PR-URL: https://github.com/nodejs/node/pull/27375
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--  deps/v8/src/wasm/DEPS | 7
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 30
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 25
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 43
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 11
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 72
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.h | 9
-rw-r--r--  deps/v8/src/wasm/baseline/mips/OWNERS | 4
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 11
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/OWNERS | 4
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 9
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/OWNERS | 4
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 9
-rw-r--r--  deps/v8/src/wasm/baseline/s390/OWNERS | 4
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 9
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 17
-rw-r--r--  deps/v8/src/wasm/c-api.cc | 3599
-rw-r--r--  deps/v8/src/wasm/compilation-environment.h | 15
-rw-r--r--  deps/v8/src/wasm/decoder.h | 8
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 88
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc | 3
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc | 128
-rw-r--r--  deps/v8/src/wasm/function-compiler.h | 39
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc | 41
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.h | 10
-rw-r--r--  deps/v8/src/wasm/js-to-wasm-wrapper-cache.h (renamed from deps/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h) | 6
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.cc | 34
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.h | 4
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 6
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 1513
-rw-r--r--  deps/v8/src/wasm/module-compiler.h | 15
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 164
-rw-r--r--  deps/v8/src/wasm/module-decoder.h | 15
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc | 427
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc | 3
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.h | 2
-rw-r--r--  deps/v8/src/wasm/value-type.h | 30
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc | 730
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 255
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h | 5
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 88
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc | 296
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h | 40
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 29
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.h | 61
-rw-r--r--  deps/v8/src/wasm/wasm-feature-flags.h | 45
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache-inl.h | 52
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache.cc | 46
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache.h | 49
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 707
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.h | 7
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 425
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc | 508
-rw-r--r--  deps/v8/src/wasm/wasm-memory.h | 162
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 6
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc | 5
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 38
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 29
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 452
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 307
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 2
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 4
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc | 59
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.h | 2
-rw-r--r--  deps/v8/src/wasm/wasm-text.cc | 13
-rw-r--r--  deps/v8/src/wasm/wasm-tier.h | 7
-rw-r--r--  deps/v8/src/wasm/wasm-value.h | 6
67 files changed, 8512 insertions, 2341 deletions
diff --git a/deps/v8/src/wasm/DEPS b/deps/v8/src/wasm/DEPS
new file mode 100644
index 0000000000..8024d9097a
--- /dev/null
+++ b/deps/v8/src/wasm/DEPS
@@ -0,0 +1,7 @@
+specific_include_rules = {
+ "c-api\.cc": [
+ "+include/libplatform/libplatform.h",
+ "+third_party/wasm-api/wasm.h",
+ "+third_party/wasm-api/wasm.hh",
+ ],
+}
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 26f63ea302..21ec7fdeff 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -139,6 +139,27 @@ inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
+template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
+ Condition),
+ void (Assembler::*op_with_carry)(Register, Register, const Operand&,
+ SBit, Condition)>
+inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t imm) {
+ UseScratchRegisterScope temps(assm);
+ Register scratch = dst.low_gp();
+ bool can_use_dst = dst.low_gp() != lhs.high_gp();
+ if (!can_use_dst) {
+ scratch = temps.Acquire();
+ }
+ (assm->*op)(scratch, lhs.low_gp(), Operand(imm), SetCC, al);
+ // Top half of the immediate sign extended, either 0 or -1.
+ (assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(),
+ Operand(imm < 0 ? -1 : 0), LeaveCC, al);
+ if (!can_use_dst) {
+ assm->mov(dst.low_gp(), scratch);
+ }
+}
+
template <void (TurboAssembler::*op)(Register, Register, Register, Register,
Register),
bool is_left_shift>
@@ -658,6 +679,10 @@ FP64_UNOP(f64_sqrt, vsqrt)
#undef FP64_UNOP
#undef FP64_BINOP
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ add(dst, lhs, Operand(imm));
+}
+
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
clz(dst, src);
return true;
@@ -790,6 +815,11 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::I64Binop<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::I64BinopI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::I64Binop<&Assembler::sub, &Assembler::sbc>(this, dst, lhs, rhs);
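Note (illustrative sketch, not part of the patch): the I64BinopI helper added above is instantiated with add/adc for emit_i64_add and adds a 32-bit immediate to an i64 register pair by pairing a flag-setting add on the low word with an add-with-carry on the high word, whose second operand is the sign extension of the immediate (0 or -1). The same decomposition in plain C++ rather than the V8 assembler API:

#include <cstdint>

// Mirrors the add (SetCC) / adc pairing used by I64BinopI: add the immediate to
// the low word, propagate the carry, and add the sign-extended top half.
uint64_t add_i64_imm(uint32_t lo, uint32_t hi, int32_t imm) {
  uint32_t new_lo = lo + static_cast<uint32_t>(imm);
  uint32_t carry = new_lo < lo ? 1u : 0u;          // carry out of the low-word add
  uint32_t imm_hi = imm < 0 ? 0xffffffffu : 0u;    // sign-extended top half: 0 or -1
  uint32_t new_hi = hi + imm_hi + carry;
  return (static_cast<uint64_t>(new_hi) << 32) | new_lo;
}
// e.g. add_i64_imm(0xffffffffu, 0, 1) == 0x100000000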
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 0c093f2dcd..0fe0237653 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -345,12 +345,20 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
CPURegister src = CPURegister::no_reg();
switch (value.type()) {
case kWasmI32:
- src = temps.AcquireW();
- Mov(src.W(), value.to_i32());
+ if (value.to_i32() == 0) {
+ src = wzr;
+ } else {
+ src = temps.AcquireW();
+ Mov(src.W(), value.to_i32());
+ }
break;
case kWasmI64:
- src = temps.AcquireX();
- Mov(src.X(), value.to_i64());
+ if (value.to_i64() == 0) {
+ src = xzr;
+ } else {
+ src = temps.AcquireX();
+ Mov(src.X(), value.to_i64());
+ }
break;
default:
// We do not track f32 and f64 constants, hence they are unreachable.
@@ -572,6 +580,15 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Msub(dst_w, scratch, rhs_w, lhs_w);
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ Add(dst.gp().X(), lhs.gp().X(), Immediate(imm));
+}
+
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ Add(dst.W(), lhs.W(), Immediate(imm));
+}
+
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 91e2139d44..8c5d8c918d 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -523,6 +523,14 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
}
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ if (lhs != dst) {
+ lea(dst, Operand(lhs, imm));
+ } else {
+ add(dst, Immediate(imm));
+ }
+}
+
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
if (dst != rhs) {
// Default path.
@@ -793,6 +801,36 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
}
+
+template <void (Assembler::*op)(Register, const Immediate&),
+ void (Assembler::*op_with_carry)(Register, int32_t)>
+inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t imm) {
+ // First, compute the low half of the result, potentially into a temporary dst
+ // register if {dst.low_gp()} equals any register we need to
+ // keep alive for computing the upper half.
+ LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
+ Register dst_low = keep_alive.has(dst.low_gp())
+ ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
+ : dst.low_gp();
+
+ if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
+ (assm->*op)(dst_low, Immediate(imm));
+
+ // Now compute the upper half, while keeping alive the previous result.
+ keep_alive = LiftoffRegList::ForRegs(dst_low);
+ Register dst_high = keep_alive.has(dst.high_gp())
+ ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
+ : dst.high_gp();
+
+ if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
+ // Top half of the immediate sign extended, either 0 or -1.
+ (assm->*op_with_carry)(dst_high, imm < 0 ? -1 : 0);
+
+ // If necessary, move result into the right registers.
+ LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
+ if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+}
} // namespace liftoff
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -800,6 +838,11 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::OpWithCarry<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::OpWithCarryI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::OpWithCarry<&Assembler::sub, &Assembler::sbb>(this, dst, lhs, rhs);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 7ac25bf252..a3e4e4ce07 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -389,6 +389,7 @@ class LiftoffAssembler : public TurboAssembler {
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_add(Register dst, Register lhs, int32_t imm);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
@@ -419,6 +420,8 @@ class LiftoffAssembler : public TurboAssembler {
// i64 binops.
inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -481,6 +484,14 @@ class LiftoffAssembler : public TurboAssembler {
}
}
+ inline void emit_ptrsize_add(Register dst, Register lhs, int32_t imm) {
+ if (kSystemPointerSize == 8) {
+ emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
+ } else {
+ emit_i32_add(dst, lhs, imm);
+ }
+ }
+
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
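Note (illustrative sketch, not part of the patch): the new emit_ptrsize_add overload above simply forwards to the i64 or i32 immediate add depending on kSystemPointerSize, so address and index arithmetic uses the platform's pointer width. The same dispatch in plain C++, with hypothetical names:

#include <cstdint>

void emit_i32_add_sketch(uint32_t& dst, uint32_t lhs, int32_t imm) { dst = lhs + imm; }
void emit_i64_add_sketch(uint64_t& dst, uint64_t lhs, int32_t imm) { dst = lhs + imm; }

// Pointer-size dispatch: the condition is a compile-time constant, so each build
// keeps exactly one of the two branches.
void emit_ptrsize_add_sketch(uintptr_t& dst, uintptr_t lhs, int32_t imm) {
  if constexpr (sizeof(void*) == 8) {
    uint64_t wide = lhs;
    emit_i64_add_sketch(wide, wide, imm);
    dst = static_cast<uintptr_t>(wide);
  } else {
    uint32_t narrow = static_cast<uint32_t>(lhs);
    emit_i32_add_sketch(narrow, narrow, imm);
    dst = narrow;
  }
}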
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 5ad9dc7315..d539fe481e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -785,6 +785,37 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION
}
+ template <ValueType src_type, ValueType result_type, typename EmitFn,
+ typename EmitFnImm>
+ void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
+ static constexpr RegClass src_rc = reg_class_for(src_type);
+ static constexpr RegClass result_rc = reg_class_for(result_type);
+
+ LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
+ // Check if the RHS is an immediate.
+ if (rhs_slot.loc() == LiftoffAssembler::VarState::kIntConst) {
+ __ cache_state()->stack_state.pop_back();
+ int32_t imm = rhs_slot.i32_const();
+
+ LiftoffRegister lhs = __ PopToRegister();
+ LiftoffRegister dst = src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {lhs})
+ : __ GetUnusedRegister(result_rc);
+
+ fnImm(dst, lhs, imm);
+ __ PushRegister(result_type, dst);
+ } else {
+ // The RHS was not an immediate.
+ LiftoffRegister rhs = __ PopToRegister();
+ LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister dst = src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {lhs, rhs})
+ : __ GetUnusedRegister(result_rc);
+ fn(dst, lhs, rhs);
+ __ PushRegister(result_type, dst);
+ }
+ }
+
template <ValueType src_type, ValueType result_type, typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type);
@@ -830,12 +861,30 @@ class LiftoffCompiler {
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
});
+#define CASE_I32_BINOPI(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOpImm<kWasmI32, kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
+ }, \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), imm); \
+ });
#define CASE_I64_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI64, kWasmI64>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst, lhs, rhs); \
});
+#define CASE_I64_BINOPI(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOpImm<kWasmI64, kWasmI64>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst, lhs, rhs); \
+ }, \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ __ emit_##fn(dst, lhs, imm); \
+ });
#define CASE_FLOAT_BINOP(opcode, type, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasm##type, kWasm##type>( \
@@ -891,7 +940,7 @@ class LiftoffCompiler {
GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref); \
});
switch (opcode) {
- CASE_I32_BINOP(I32Add, i32_add)
+ CASE_I32_BINOPI(I32Add, i32_add)
CASE_I32_BINOP(I32Sub, i32_sub)
CASE_I32_BINOP(I32Mul, i32_mul)
CASE_I32_BINOP(I32And, i32_and)
@@ -910,7 +959,7 @@ class LiftoffCompiler {
CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
- CASE_I64_BINOP(I64Add, i64_add)
+ CASE_I64_BINOPI(I64Add, i64_add)
CASE_I64_BINOP(I64Sub, i64_sub)
CASE_I64_BINOP(I64Mul, i64_mul)
CASE_I64_CMPOP(I64Eq, kEqual)
@@ -1060,7 +1109,9 @@ class LiftoffCompiler {
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_I32_BINOP
+#undef CASE_I32_BINOPI
#undef CASE_I64_BINOP
+#undef CASE_I64_BINOPI
#undef CASE_FLOAT_BINOP
#undef CASE_I32_CMPOP
#undef CASE_I64_CMPOP
@@ -1553,8 +1604,7 @@ class LiftoffCompiler {
if (index != old_index) __ Move(index, old_index, kWasmI32);
}
Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
- __ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
- __ emit_ptrsize_add(index, index, tmp);
+ __ emit_ptrsize_add(index, index, *offset);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
@@ -1736,6 +1786,9 @@ class LiftoffCompiler {
if (imm.sig->return_count() > 1) {
return unsupported(decoder, "multi-return");
}
+ if (imm.table_index != 0) {
+ return unsupported(decoder, "table index != 0");
+ }
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
"return")) {
@@ -1980,8 +2033,8 @@ class LiftoffCompiler {
} // namespace
WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
- CompilationEnv* env, const FunctionBody& func_body, Counters* counters,
- WasmFeatures* detected) {
+ AccountingAllocator* allocator, CompilationEnv* env,
+ const FunctionBody& func_body, Counters* counters, WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteLiftoffCompilation");
base::ElapsedTimer compile_timer;
@@ -1989,7 +2042,7 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
compile_timer.Start();
}
- Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
+ Zone zone(allocator, "LiftoffCompilationZone");
const WasmModule* module = env ? env->module : nullptr;
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
@@ -2004,12 +2057,12 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
LiftoffCompiler* compiler = &decoder.interface();
if (decoder.failed()) {
compiler->OnFirstError(&decoder);
- return WasmCompilationResult{decoder.error()};
+ return WasmCompilationResult{};
}
if (!compiler->ok()) {
// Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
- return WasmCompilationResult{WasmError{0, "Liftoff bailout"}};
+ return WasmCompilationResult{};
}
counters->liftoff_compiled_functions()->Increment();
@@ -2029,6 +2082,7 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
result.protected_instructions = compiler->GetProtectedInstructions();
result.frame_slot_count = compiler->GetTotalFrameSlotCount();
result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
+ result.result_tier = ExecutionTier::kLiftoff;
DCHECK(result.succeeded());
return result;
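Note (simplified sketch, not part of the patch): the EmitBinOpImm helper added to liftoff-compiler.cc above inspects the value-stack slot of the right-hand operand; if it is a known i32 constant (kIntConst), the constant is popped and the immediate-form emitter is invoked, otherwise both operands are popped into registers. A compilable toy model of that dispatch, with stand-in types:

#include <cstdint>
#include <cstdio>
#include <variant>
#include <vector>

struct Reg { int id; };                   // stand-in for LiftoffRegister
using Slot = std::variant<Reg, int32_t>;  // a register slot or an i32 constant slot

// Pops the operands of an add and picks the register or the immediate encoding,
// mirroring the two callbacks passed to EmitBinOpImm (dst == lhs for brevity).
void emit_add(std::vector<Slot>& stack) {
  Slot rhs = stack.back();
  stack.pop_back();
  Reg lhs = std::get<Reg>(stack.back());
  stack.pop_back();
  if (const int32_t* imm = std::get_if<int32_t>(&rhs)) {
    std::printf("add r%d, r%d, #%d\n", lhs.id, lhs.id, *imm);  // immediate form
  } else {
    std::printf("add r%d, r%d, r%d\n", lhs.id, lhs.id, std::get<Reg>(rhs).id);
  }
  stack.push_back(lhs);  // the result is a register again
}

int main() {
  std::vector<Slot> stack{Slot{Reg{1}}, Slot{int32_t{64}}};  // local.get 1; i32.const 64
  emit_add(stack);  // prints: add r1, r1, #64
}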
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index e1fb79138f..1ae0b8e83a 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -11,6 +11,7 @@
namespace v8 {
namespace internal {
+class AccountingAllocator;
class Counters;
namespace wasm {
@@ -22,16 +23,14 @@ struct WasmFeatures;
class LiftoffCompilationUnit final {
public:
- explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
- : wasm_unit_(wasm_unit) {}
+ LiftoffCompilationUnit() = default;
- WasmCompilationResult ExecuteCompilation(CompilationEnv*, const FunctionBody&,
+ WasmCompilationResult ExecuteCompilation(AccountingAllocator*,
+ CompilationEnv*, const FunctionBody&,
Counters*,
WasmFeatures* detected_features);
private:
- WasmCompilationUnit* const wasm_unit_;
-
DISALLOW_COPY_AND_ASSIGN(LiftoffCompilationUnit);
};
diff --git a/deps/v8/src/wasm/baseline/mips/OWNERS b/deps/v8/src/wasm/baseline/mips/OWNERS
index b455d9ef29..cab3679d65 100644
--- a/deps/v8/src/wasm/baseline/mips/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips/OWNERS
@@ -1,3 +1 @@
-arikalo@wavecomp.com
-prudic@wavecomp.com
-skovacevic@wavecomp.com
+xwafish@gmail.com
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 4fecffb97d..530118c526 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -585,6 +585,10 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
lw(reg, liftoff::GetHalfStackSlot(index, half));
}
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ Addu(dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
@@ -675,6 +679,13 @@ I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
#undef I32_SHIFTOP_I
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
+ lhs.high_gp(), imm,
+ kScratchReg, kScratchReg2);
+}
+
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
diff --git a/deps/v8/src/wasm/baseline/mips64/OWNERS b/deps/v8/src/wasm/baseline/mips64/OWNERS
index b455d9ef29..cab3679d65 100644
--- a/deps/v8/src/wasm/baseline/mips64/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips64/OWNERS
@@ -1,3 +1 @@
-arikalo@wavecomp.com
-prudic@wavecomp.com
-skovacevic@wavecomp.com
+xwafish@gmail.com
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 3a963cefd6..7bfa172def 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -500,6 +500,10 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ Addu(dst, lhs, Operand(imm));
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
@@ -590,6 +594,11 @@ I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
#undef I32_SHIFTOP_I
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ Daddu(dst.gp(), lhs.gp(), Operand(imm));
+}
+
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
diff --git a/deps/v8/src/wasm/baseline/ppc/OWNERS b/deps/v8/src/wasm/baseline/ppc/OWNERS
new file mode 100644
index 0000000000..85b6cb38f0
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/ppc/OWNERS
@@ -0,0 +1,4 @@
+jyan@ca.ibm.com
+joransiu@ca.ibm.com
+michael_dawson@ca.ibm.com
+miladfar@ca.ibm.com
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index d6c372e80f..b7b17afcfb 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -231,6 +231,15 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ BAILOUT("i64_add");
+}
+
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ BAILOUT("i32_add");
+}
+
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
BAILOUT("i32_shr");
}
diff --git a/deps/v8/src/wasm/baseline/s390/OWNERS b/deps/v8/src/wasm/baseline/s390/OWNERS
new file mode 100644
index 0000000000..85b6cb38f0
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/s390/OWNERS
@@ -0,0 +1,4 @@
+jyan@ca.ibm.com
+joransiu@ca.ibm.com
+michael_dawson@ca.ibm.com
+miladfar@ca.ibm.com
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 9680d9664f..1cb8e97d89 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -231,6 +231,15 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ BAILOUT("i64_add");
+}
+
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ BAILOUT("i32_add");
+}
+
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
BAILOUT("i32_shr");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 60924bfc1a..ccd352df7e 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -434,6 +434,14 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
}
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ if (lhs != dst) {
+ leal(dst, Operand(lhs, imm));
+ } else {
+ addl(dst, Immediate(imm));
+ }
+}
+
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
if (dst != rhs) {
// Default path.
@@ -704,6 +712,15 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ if (lhs.gp() != dst.gp()) {
+ leaq(dst.gp(), Operand(lhs.gp(), imm));
+ } else {
+ addq(dst.gp(), Immediate(imm));
+ }
+}
+
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
if (dst.gp() == rhs.gp()) {
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
new file mode 100644
index 0000000000..18ab23dcce
--- /dev/null
+++ b/deps/v8/src/wasm/c-api.cc
@@ -0,0 +1,3599 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This implementation is originally from
+// https://github.com/WebAssembly/wasm-c-api/:
+
+// Copyright 2019 Andreas Rossberg
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstring>
+#include <iostream>
+
+#include "third_party/wasm-api/wasm.h"
+#include "third_party/wasm-api/wasm.hh"
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+#include "src/api-inl.h"
+#include "src/wasm/leb-helper.h"
+#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-serialization.h"
+
+// BEGIN FILE wasm-bin.cc
+
+namespace wasm {
+namespace bin {
+
+////////////////////////////////////////////////////////////////////////////////
+// Encoding
+
+void encode_header(char*& ptr) {
+ std::memcpy(ptr,
+ "\x00"
+ "asm\x01\x00\x00\x00",
+ 8);
+ ptr += 8;
+}
+
+void encode_size32(char*& ptr, size_t n) {
+ assert(n <= 0xffffffff);
+ for (int i = 0; i < 5; ++i) {
+ *ptr++ = (n & 0x7f) | (i == 4 ? 0x00 : 0x80);
+ n = n >> 7;
+ }
+}
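// Illustration (editor's sketch, not part of the patch): encode_size32 always
// writes a fixed-width, 5-byte LEB128 with padded groups, so a size field can be
// back-patched later without shifting bytes. For example, n = 300 encodes as the
// bytes 0xAC 0x82 0x80 0x80 0x00: 0x2C with the continuation bit, then 0x02 with
// the continuation bit, then two zero padding groups, and a final 0x00 without it.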
+
+void encode_valtype(char*& ptr, const ValType* type) {
+ switch (type->kind()) {
+ case I32:
+ *ptr++ = 0x7f;
+ break;
+ case I64:
+ *ptr++ = 0x7e;
+ break;
+ case F32:
+ *ptr++ = 0x7d;
+ break;
+ case F64:
+ *ptr++ = 0x7c;
+ break;
+ case FUNCREF:
+ *ptr++ = 0x70;
+ break;
+ case ANYREF:
+ *ptr++ = 0x6f;
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+auto zero_size(const ValType* type) -> size_t {
+ switch (type->kind()) {
+ case I32:
+ return 1;
+ case I64:
+ return 1;
+ case F32:
+ return 4;
+ case F64:
+ return 8;
+ case FUNCREF:
+ return 0;
+ case ANYREF:
+ return 0;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void encode_const_zero(char*& ptr, const ValType* type) {
+ switch (type->kind()) {
+ case I32:
+ *ptr++ = 0x41;
+ break;
+ case I64:
+ *ptr++ = 0x42;
+ break;
+ case F32:
+ *ptr++ = 0x43;
+ break;
+ case F64:
+ *ptr++ = 0x44;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ for (size_t i = 0; i < zero_size(type); ++i) *ptr++ = 0;
+}
+
+auto wrapper(const FuncType* type) -> vec<byte_t> {
+ auto in_arity = type->params().size();
+ auto out_arity = type->results().size();
+ auto size = 39 + in_arity + out_arity;
+ auto binary = vec<byte_t>::make_uninitialized(size);
+ auto ptr = binary.get();
+
+ encode_header(ptr);
+
+ *ptr++ = i::wasm::kTypeSectionCode;
+ encode_size32(ptr, 12 + in_arity + out_arity); // size
+ *ptr++ = 1; // length
+ *ptr++ = i::wasm::kWasmFunctionTypeCode;
+ encode_size32(ptr, in_arity);
+ for (size_t i = 0; i < in_arity; ++i) {
+ encode_valtype(ptr, type->params()[i].get());
+ }
+ encode_size32(ptr, out_arity);
+ for (size_t i = 0; i < out_arity; ++i) {
+ encode_valtype(ptr, type->results()[i].get());
+ }
+
+ *ptr++ = i::wasm::kImportSectionCode;
+ *ptr++ = 5; // size
+ *ptr++ = 1; // length
+ *ptr++ = 0; // module length
+ *ptr++ = 0; // name length
+ *ptr++ = i::wasm::kExternalFunction;
+ *ptr++ = 0; // type index
+
+ *ptr++ = i::wasm::kExportSectionCode;
+ *ptr++ = 4; // size
+ *ptr++ = 1; // length
+ *ptr++ = 0; // name length
+ *ptr++ = i::wasm::kExternalFunction;
+ *ptr++ = 0; // func index
+
+ assert(ptr - binary.get() == static_cast<ptrdiff_t>(size));
+ return binary;
+}
+
+auto wrapper(const GlobalType* type) -> vec<byte_t> {
+ auto size = 25 + zero_size(type->content());
+ auto binary = vec<byte_t>::make_uninitialized(size);
+ auto ptr = binary.get();
+
+ encode_header(ptr);
+
+ *ptr++ = i::wasm::kGlobalSectionCode;
+ encode_size32(ptr, 5 + zero_size(type->content())); // size
+ *ptr++ = 1; // length
+ encode_valtype(ptr, type->content());
+ *ptr++ = (type->mutability() == VAR);
+ encode_const_zero(ptr, type->content());
+ *ptr++ = 0x0b; // end
+
+ *ptr++ = i::wasm::kExportSectionCode;
+ *ptr++ = 4; // size
+ *ptr++ = 1; // length
+ *ptr++ = 0; // name length
+ *ptr++ = i::wasm::kExternalGlobal;
+ *ptr++ = 0; // global index
+
+ assert(ptr - binary.get() == static_cast<ptrdiff_t>(size));
+ return binary;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Decoding
+
+// Numbers
+
+auto u32(const byte_t*& pos) -> uint32_t {
+ uint32_t n = 0;
+ uint32_t shift = 0;
+ byte_t b;
+ do {
+ b = *pos++;
+ n += (b & 0x7f) << shift;
+ shift += 7;
+ } while ((b & 0x80) != 0);
+ return n;
+}
+
+auto u64(const byte_t*& pos) -> uint64_t {
+ uint64_t n = 0;
+ uint64_t shift = 0;
+ byte_t b;
+ do {
+ b = *pos++;
+ n += (b & 0x7f) << shift;
+ shift += 7;
+ } while ((b & 0x80) != 0);
+ return n;
+}
+
+void u32_skip(const byte_t*& pos) { bin::u32(pos); }
+
+// Names
+
+auto name(const byte_t*& pos) -> Name {
+ auto size = bin::u32(pos);
+ auto start = pos;
+ auto name = Name::make_uninitialized(size);
+ std::memcpy(name.get(), start, size);
+ pos += size;
+ return name;
+}
+
+void name_skip(const byte_t*& pos) {
+ auto size = bin::u32(pos);
+ pos += size;
+}
+
+// Types
+
+auto valtype(const byte_t*& pos) -> own<wasm::ValType*> {
+ switch (*pos++) {
+ case i::wasm::kLocalI32:
+ return ValType::make(I32);
+ case i::wasm::kLocalI64:
+ return ValType::make(I64);
+ case i::wasm::kLocalF32:
+ return ValType::make(F32);
+ case i::wasm::kLocalF64:
+ return ValType::make(F64);
+ case i::wasm::kLocalAnyFunc:
+ return ValType::make(FUNCREF);
+ case i::wasm::kLocalAnyRef:
+ return ValType::make(ANYREF);
+ default:
+ // TODO(wasm+): support new value types
+ UNREACHABLE();
+ }
+ return {};
+}
+
+auto mutability(const byte_t*& pos) -> Mutability {
+ return *pos++ ? VAR : CONST;
+}
+
+auto limits(const byte_t*& pos) -> Limits {
+ auto tag = *pos++;
+ auto min = bin::u32(pos);
+ if ((tag & 0x01) == 0) {
+ return Limits(min);
+ } else {
+ auto max = bin::u32(pos);
+ return Limits(min, max);
+ }
+}
+
+auto stacktype(const byte_t*& pos) -> vec<ValType*> {
+ size_t size = bin::u32(pos);
+ auto v = vec<ValType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; ++i) v[i] = bin::valtype(pos);
+ return v;
+}
+
+auto functype(const byte_t*& pos) -> own<FuncType*> {
+ assert(*pos == i::wasm::kWasmFunctionTypeCode);
+ ++pos;
+ auto params = bin::stacktype(pos);
+ auto results = bin::stacktype(pos);
+ return FuncType::make(std::move(params), std::move(results));
+}
+
+auto globaltype(const byte_t*& pos) -> own<GlobalType*> {
+ auto content = bin::valtype(pos);
+ auto mutability = bin::mutability(pos);
+ return GlobalType::make(std::move(content), mutability);
+}
+
+auto tabletype(const byte_t*& pos) -> own<TableType*> {
+ auto elem = bin::valtype(pos);
+ auto limits = bin::limits(pos);
+ return TableType::make(std::move(elem), limits);
+}
+
+auto memorytype(const byte_t*& pos) -> own<MemoryType*> {
+ auto limits = bin::limits(pos);
+ return MemoryType::make(limits);
+}
+
+// Expressions
+
+void expr_skip(const byte_t*& pos) {
+ switch (*pos++) {
+ case i::wasm::kExprI32Const:
+ case i::wasm::kExprI64Const:
+ case i::wasm::kExprGetGlobal: {
+ bin::u32_skip(pos);
+ } break;
+ case i::wasm::kExprF32Const: {
+ pos += 4;
+ } break;
+ case i::wasm::kExprF64Const: {
+ pos += 8;
+ } break;
+ default: {
+ // TODO(wasm+): support new expression forms
+ UNREACHABLE();
+ }
+ }
+ ++pos; // end
+}
+
+// Sections
+
+auto section(const vec<byte_t>& binary, i::wasm::SectionCode sec)
+ -> const byte_t* {
+ const byte_t* end = binary.get() + binary.size();
+ const byte_t* pos = binary.get() + 8; // skip header
+ while (pos < end && *pos++ != sec) {
+ auto size = bin::u32(pos);
+ pos += size;
+ }
+ if (pos == end) return nullptr;
+ bin::u32_skip(pos);
+ return pos;
+}
+
+// Only for asserts/DCHECKs.
+auto section_end(const vec<byte_t>& binary, i::wasm::SectionCode sec)
+ -> const byte_t* {
+ const byte_t* end = binary.get() + binary.size();
+ const byte_t* pos = binary.get() + 8; // skip header
+ while (pos < end && *pos != sec) {
+ ++pos;
+ auto size = bin::u32(pos);
+ pos += size;
+ }
+ if (pos == end) return nullptr;
+ ++pos;
+ auto size = bin::u32(pos);
+ return pos + size;
+}
+
+// Type section
+
+auto types(const vec<byte_t>& binary) -> vec<FuncType*> {
+ auto pos = bin::section(binary, i::wasm::kTypeSectionCode);
+ if (pos == nullptr) return vec<FuncType*>::make();
+ size_t size = bin::u32(pos);
+ // TODO(wasm+): support new deftypes
+ auto v = vec<FuncType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; ++i) {
+ v[i] = bin::functype(pos);
+ }
+ assert(pos == bin::section_end(binary, i::wasm::kTypeSectionCode));
+ return v;
+}
+
+// Import section
+
+auto imports(const vec<byte_t>& binary, const vec<FuncType*>& types)
+ -> vec<ImportType*> {
+ auto pos = bin::section(binary, i::wasm::kImportSectionCode);
+ if (pos == nullptr) return vec<ImportType*>::make();
+ size_t size = bin::u32(pos);
+ auto v = vec<ImportType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; ++i) {
+ auto module = bin::name(pos);
+ auto name = bin::name(pos);
+ own<ExternType*> type;
+ switch (*pos++) {
+ case i::wasm::kExternalFunction:
+ type = types[bin::u32(pos)]->copy();
+ break;
+ case i::wasm::kExternalTable:
+ type = bin::tabletype(pos);
+ break;
+ case i::wasm::kExternalMemory:
+ type = bin::memorytype(pos);
+ break;
+ case i::wasm::kExternalGlobal:
+ type = bin::globaltype(pos);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ v[i] =
+ ImportType::make(std::move(module), std::move(name), std::move(type));
+ }
+ assert(pos == bin::section_end(binary, i::wasm::kImportSectionCode));
+ return v;
+}
+
+auto count(const vec<ImportType*>& imports, ExternKind kind) -> uint32_t {
+ uint32_t n = 0;
+ for (uint32_t i = 0; i < imports.size(); ++i) {
+ if (imports[i]->type()->kind() == kind) ++n;
+ }
+ return n;
+}
+
+// Function section
+
+auto funcs(const vec<byte_t>& binary, const vec<ImportType*>& imports,
+ const vec<FuncType*>& types) -> vec<FuncType*> {
+ auto pos = bin::section(binary, i::wasm::kFunctionSectionCode);
+ size_t size = pos != nullptr ? bin::u32(pos) : 0;
+ auto v =
+ vec<FuncType*>::make_uninitialized(size + count(imports, EXTERN_FUNC));
+ size_t j = 0;
+ for (uint32_t i = 0; i < imports.size(); ++i) {
+ auto et = imports[i]->type();
+ if (et->kind() == EXTERN_FUNC) {
+ v[j++] = et->func()->copy();
+ }
+ }
+ if (pos != nullptr) {
+ for (; j < v.size(); ++j) {
+ v[j] = types[bin::u32(pos)]->copy();
+ }
+ assert(pos == bin::section_end(binary, i::wasm::kFunctionSectionCode));
+ }
+ return v;
+}
+
+// Global section
+
+auto globals(const vec<byte_t>& binary, const vec<ImportType*>& imports)
+ -> vec<GlobalType*> {
+ auto pos = bin::section(binary, i::wasm::kGlobalSectionCode);
+ size_t size = pos != nullptr ? bin::u32(pos) : 0;
+ auto v = vec<GlobalType*>::make_uninitialized(size +
+ count(imports, EXTERN_GLOBAL));
+ size_t j = 0;
+ for (uint32_t i = 0; i < imports.size(); ++i) {
+ auto et = imports[i]->type();
+ if (et->kind() == EXTERN_GLOBAL) {
+ v[j++] = et->global()->copy();
+ }
+ }
+ if (pos != nullptr) {
+ for (; j < v.size(); ++j) {
+ v[j] = bin::globaltype(pos);
+ expr_skip(pos);
+ }
+ assert(pos == bin::section_end(binary, i::wasm::kGlobalSectionCode));
+ }
+ return v;
+}
+
+// Table section
+
+auto tables(const vec<byte_t>& binary, const vec<ImportType*>& imports)
+ -> vec<TableType*> {
+ auto pos = bin::section(binary, i::wasm::kTableSectionCode);
+ size_t size = pos != nullptr ? bin::u32(pos) : 0;
+ auto v =
+ vec<TableType*>::make_uninitialized(size + count(imports, EXTERN_TABLE));
+ size_t j = 0;
+ for (uint32_t i = 0; i < imports.size(); ++i) {
+ auto et = imports[i]->type();
+ if (et->kind() == EXTERN_TABLE) {
+ v[j++] = et->table()->copy();
+ }
+ }
+ if (pos != nullptr) {
+ for (; j < v.size(); ++j) {
+ v[j] = bin::tabletype(pos);
+ }
+ assert(pos == bin::section_end(binary, i::wasm::kTableSectionCode));
+ }
+ return v;
+}
+
+// Memory section
+
+auto memories(const vec<byte_t>& binary, const vec<ImportType*>& imports)
+ -> vec<MemoryType*> {
+ auto pos = bin::section(binary, i::wasm::kMemorySectionCode);
+ size_t size = pos != nullptr ? bin::u32(pos) : 0;
+ auto v = vec<MemoryType*>::make_uninitialized(size +
+ count(imports, EXTERN_MEMORY));
+ size_t j = 0;
+ for (uint32_t i = 0; i < imports.size(); ++i) {
+ auto et = imports[i]->type();
+ if (et->kind() == EXTERN_MEMORY) {
+ v[j++] = et->memory()->copy();
+ }
+ }
+ if (pos != nullptr) {
+ for (; j < v.size(); ++j) {
+ v[j] = bin::memorytype(pos);
+ }
+ assert(pos == bin::section_end(binary, i::wasm::kMemorySectionCode));
+ }
+ return v;
+}
+
+// Export section
+
+auto exports(const vec<byte_t>& binary, const vec<FuncType*>& funcs,
+ const vec<GlobalType*>& globals, const vec<TableType*>& tables,
+ const vec<MemoryType*>& memories) -> vec<ExportType*> {
+ auto pos = bin::section(binary, i::wasm::kExportSectionCode);
+ if (pos == nullptr) return vec<ExportType*>::make();
+ size_t size = bin::u32(pos);
+ auto exports = vec<ExportType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; ++i) {
+ auto name = bin::name(pos);
+ auto tag = *pos++;
+ auto index = bin::u32(pos);
+ own<ExternType*> type;
+ switch (tag) {
+ case i::wasm::kExternalFunction:
+ type = funcs[index]->copy();
+ break;
+ case i::wasm::kExternalTable:
+ type = tables[index]->copy();
+ break;
+ case i::wasm::kExternalMemory:
+ type = memories[index]->copy();
+ break;
+ case i::wasm::kExternalGlobal:
+ type = globals[index]->copy();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ exports[i] = ExportType::make(std::move(name), std::move(type));
+ }
+ assert(pos == bin::section_end(binary, i::wasm::kExportSectionCode));
+ return exports;
+}
+
+auto imports(const vec<byte_t>& binary) -> vec<ImportType*> {
+ return bin::imports(binary, bin::types(binary));
+}
+
+auto exports(const vec<byte_t>& binary) -> vec<ExportType*> {
+ auto types = bin::types(binary);
+ auto imports = bin::imports(binary, types);
+ auto funcs = bin::funcs(binary, imports, types);
+ auto globals = bin::globals(binary, imports);
+ auto tables = bin::tables(binary, imports);
+ auto memories = bin::memories(binary, imports);
+ return bin::exports(binary, funcs, globals, tables, memories);
+}
+
+} // namespace bin
+} // namespace wasm
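// Note (editorial, not part of the patch): bin::exports(binary) above chains the
// per-section decoders: types, then imports, then the function/global/table/memory
// index spaces (imported entries first, locally defined ones after), so that each
// export entry's index resolves against the merged vector for its kind.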
+
+// BEGIN FILE wasm-v8-lowlevel.cc
+
+namespace v8 {
+namespace wasm {
+
+// Objects
+
+auto object_isolate(const v8::Persistent<v8::Object>& obj) -> v8::Isolate* {
+ struct FakePersistent {
+ v8::Object* val;
+ };
+ auto v8_obj = reinterpret_cast<const FakePersistent*>(&obj)->val;
+ return v8_obj->GetIsolate();
+}
+
+template <class T>
+auto object_handle(T v8_obj) -> i::Handle<T> {
+ return handle(v8_obj, v8_obj->GetIsolate());
+}
+
+// Foreign pointers
+
+auto foreign_new(v8::Isolate* isolate, void* ptr) -> v8::Local<v8::Value> {
+ auto foreign = v8::FromCData(reinterpret_cast<i::Isolate*>(isolate),
+ reinterpret_cast<i::Address>(ptr));
+ return v8::Utils::ToLocal(foreign);
+}
+
+auto foreign_get(v8::Local<v8::Value> val) -> void* {
+ auto foreign = v8::Utils::OpenHandle(*val);
+ if (!foreign->IsForeign()) return nullptr;
+ auto addr = v8::ToCData<i::Address>(*foreign);
+ return reinterpret_cast<void*>(addr);
+}
+
+// Types
+
+auto v8_valtype_to_wasm(i::wasm::ValueType v8_valtype) -> ::wasm::ValKind {
+ switch (v8_valtype) {
+ case i::wasm::kWasmI32:
+ return ::wasm::I32;
+ case i::wasm::kWasmI64:
+ return ::wasm::I64;
+ case i::wasm::kWasmF32:
+ return ::wasm::F32;
+ case i::wasm::kWasmF64:
+ return ::wasm::F64;
+ default:
+ // TODO(wasm+): support new value types
+ UNREACHABLE();
+ }
+}
+
+auto func_type_param_arity(v8::Local<v8::Object> function) -> uint32_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(function);
+ auto v8_function = i::Handle<i::WasmExportedFunction>::cast(v8_object);
+ i::wasm::FunctionSig* sig = v8_function->instance()
+ ->module()
+ ->functions[v8_function->function_index()]
+ .sig;
+ return static_cast<uint32_t>(sig->parameter_count());
+}
+
+auto func_type_result_arity(v8::Local<v8::Object> function) -> uint32_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(function);
+ auto v8_function = i::Handle<i::WasmExportedFunction>::cast(v8_object);
+ i::wasm::FunctionSig* sig = v8_function->instance()
+ ->module()
+ ->functions[v8_function->function_index()]
+ .sig;
+ return static_cast<uint32_t>(sig->return_count());
+}
+
+auto func_type_param(v8::Local<v8::Object> function, size_t i)
+ -> ::wasm::ValKind {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(function);
+ auto v8_function = i::Handle<i::WasmExportedFunction>::cast(v8_object);
+ i::wasm::FunctionSig* sig = v8_function->instance()
+ ->module()
+ ->functions[v8_function->function_index()]
+ .sig;
+ return v8_valtype_to_wasm(sig->GetParam(i));
+}
+
+auto func_type_result(v8::Local<v8::Object> function, size_t i)
+ -> ::wasm::ValKind {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(function);
+ auto v8_function = i::Handle<i::WasmExportedFunction>::cast(v8_object);
+ i::wasm::FunctionSig* sig = v8_function->instance()
+ ->module()
+ ->functions[v8_function->function_index()]
+ .sig;
+ return v8_valtype_to_wasm(sig->GetReturn(i));
+}
+
+auto global_type_content(v8::Local<v8::Object> global) -> ::wasm::ValKind {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ return v8_valtype_to_wasm(v8_global->type());
+}
+
+auto global_type_mutable(v8::Local<v8::Object> global) -> bool {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ return v8_global->is_mutable();
+}
+
+auto table_type_min(v8::Local<v8::Object> table) -> uint32_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
+ auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
+ return v8_table->current_length();
+}
+
+auto table_type_max(v8::Local<v8::Object> table) -> uint32_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
+ auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
+ auto v8_max_obj = v8_table->maximum_length();
+ uint32_t max;
+ return v8_max_obj->ToUint32(&max) ? max : 0xffffffffu;
+}
+
+auto memory_size(v8::Local<v8::Object> memory) -> uint32_t;
+
+auto memory_type_min(v8::Local<v8::Object> memory) -> uint32_t {
+ return memory_size(memory);
+}
+
+auto memory_type_max(v8::Local<v8::Object> memory) -> uint32_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
+ auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
+ return v8_memory->has_maximum_pages() ? v8_memory->maximum_pages()
+ : 0xffffffffu;
+}
+
+// Modules
+
+auto module_binary_size(v8::Local<v8::Object> module) -> size_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(module);
+ auto v8_module = i::Handle<i::WasmModuleObject>::cast(v8_object);
+ return v8_module->native_module()->wire_bytes().size();
+}
+
+auto module_binary(v8::Local<v8::Object> module) -> const char* {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(module);
+ auto v8_module = i::Handle<i::WasmModuleObject>::cast(v8_object);
+ return reinterpret_cast<const char*>(
+ v8_module->native_module()->wire_bytes().start());
+}
+
+auto module_serialize_size(v8::Local<v8::Object> module) -> size_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(module);
+ auto v8_module = i::Handle<i::WasmModuleObject>::cast(v8_object);
+ i::wasm::WasmSerializer serializer(v8_module->native_module());
+ return serializer.GetSerializedNativeModuleSize();
+}
+
+auto module_serialize(v8::Local<v8::Object> module, char* buffer, size_t size)
+ -> bool {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(module);
+ auto v8_module = i::Handle<i::WasmModuleObject>::cast(v8_object);
+ i::wasm::WasmSerializer serializer(v8_module->native_module());
+ return serializer.SerializeNativeModule(
+ {reinterpret_cast<uint8_t*>(buffer), size});
+}
+
+auto module_deserialize(v8::Isolate* isolate, const char* binary,
+ size_t binary_size, const char* buffer,
+ size_t buffer_size) -> v8::MaybeLocal<v8::Object> {
+ auto v8_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ auto maybe_v8_module = i::wasm::DeserializeNativeModule(
+ v8_isolate, {reinterpret_cast<const uint8_t*>(buffer), buffer_size},
+ {reinterpret_cast<const uint8_t*>(binary), binary_size});
+ if (maybe_v8_module.is_null()) return v8::MaybeLocal<v8::Object>();
+ auto v8_module =
+ i::Handle<i::JSObject>::cast(maybe_v8_module.ToHandleChecked());
+ return v8::MaybeLocal<v8::Object>(v8::Utils::ToLocal(v8_module));
+}
+
+// Instances
+
+auto instance_module(v8::Local<v8::Object> instance) -> v8::Local<v8::Object> {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(instance);
+ auto v8_instance = i::Handle<i::WasmInstanceObject>::cast(v8_object);
+ auto v8_module =
+ object_handle(i::JSObject::cast(v8_instance->module_object()));
+ return v8::Utils::ToLocal(v8_module);
+}
+
+auto instance_exports(v8::Local<v8::Object> instance) -> v8::Local<v8::Object> {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(instance);
+ auto v8_instance = i::Handle<i::WasmInstanceObject>::cast(v8_object);
+ auto v8_exports = object_handle(v8_instance->exports_object());
+ return v8::Utils::ToLocal(v8_exports);
+}
+
+// Externals
+
+auto extern_kind(v8::Local<v8::Object> external) -> ::wasm::ExternKind {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(external);
+
+ if (i::WasmExportedFunction::IsWasmExportedFunction(*v8_object))
+ return ::wasm::EXTERN_FUNC;
+ if (v8_object->IsWasmGlobalObject()) return ::wasm::EXTERN_GLOBAL;
+ if (v8_object->IsWasmTableObject()) return ::wasm::EXTERN_TABLE;
+ if (v8_object->IsWasmMemoryObject()) return ::wasm::EXTERN_MEMORY;
+ UNREACHABLE();
+}
+
+// Functions
+
+auto func_instance(v8::Local<v8::Function> function) -> v8::Local<v8::Object> {
+ auto v8_function = v8::Utils::OpenHandle(*function);
+ auto v8_func = i::Handle<i::WasmExportedFunction>::cast(v8_function);
+ auto v8_instance = object_handle(i::JSObject::cast(v8_func->instance()));
+ return v8::Utils::ToLocal(v8_instance);
+}
+
+// Globals
+
+auto global_get_i32(v8::Local<v8::Object> global) -> int32_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ return v8_global->GetI32();
+}
+auto global_get_i64(v8::Local<v8::Object> global) -> int64_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ return v8_global->GetI64();
+}
+auto global_get_f32(v8::Local<v8::Object> global) -> float {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ return v8_global->GetF32();
+}
+auto global_get_f64(v8::Local<v8::Object> global) -> double {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ return v8_global->GetF64();
+}
+
+void global_set_i32(v8::Local<v8::Object> global, int32_t val) {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ v8_global->SetI32(val);
+}
+void global_set_i64(v8::Local<v8::Object> global, int64_t val) {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ v8_global->SetI64(val);
+}
+void global_set_f32(v8::Local<v8::Object> global, float val) {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ v8_global->SetF32(val);
+}
+void global_set_f64(v8::Local<v8::Object> global, double val) {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
+ auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
+ v8_global->SetF64(val);
+}
+
+// Tables
+
+auto table_get(v8::Local<v8::Object> table, size_t index)
+ -> v8::MaybeLocal<v8::Function> {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
+ auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
+ if (index > std::numeric_limits<int>::max()) return {};
+ i::Isolate* isolate = v8_table->GetIsolate();
+ i::MaybeHandle<i::Object> maybe_result =
+ i::WasmTableObject::Get(isolate, v8_table, static_cast<int>(index));
+ i::Handle<i::Object> result;
+ if (!maybe_result.ToHandle(&result)) {
+ // TODO(jkummerow): Clear pending exception?
+ return {};
+ }
+ if (!result->IsJSFunction()) return {};
+ return v8::MaybeLocal<v8::Function>(
+ v8::Utils::ToLocal(i::Handle<i::JSFunction>::cast(result)));
+}
+
+auto table_set(v8::Local<v8::Object> table, size_t index,
+ v8::MaybeLocal<v8::Function> maybe) -> bool {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
+ auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
+ i::Handle<i::Object> v8_function =
+ maybe.IsEmpty()
+ ? i::Handle<i::Object>::cast(
+ i::ReadOnlyRoots(v8_table->GetIsolate()).null_value_handle())
+ : i::Handle<i::Object>::cast(
+ v8::Utils::OpenHandle<v8::Function, i::JSReceiver>(
+ maybe.ToLocalChecked()));
+ if (index >= v8_table->current_length()) return false;
+
+ {
+ v8::TryCatch handler(table->GetIsolate());
+ i::WasmTableObject::Set(v8_table->GetIsolate(), v8_table,
+ static_cast<uint32_t>(index), v8_function);
+ if (handler.HasCaught()) return false;
+ }
+
+ return true;
+}
+
+auto table_size(v8::Local<v8::Object> table) -> size_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
+ auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
+ return v8_table->current_length();
+}
+
+auto table_grow(v8::Local<v8::Object> table, size_t delta,
+ v8::MaybeLocal<v8::Function> init) -> bool {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
+ auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
+ if (delta > 0xfffffffflu) return false;
+ auto old_size = v8_table->current_length();
+ // TODO(jkummerow): Overflow check.
+ auto new_size = old_size + static_cast<uint32_t>(delta);
+ // TODO(v8): This should happen in WasmTableObject::Grow.
+ if (new_size > table_type_max(table)) return false;
+
+ {
+ v8::TryCatch handler(table->GetIsolate());
+ v8_table->Grow(v8_table->GetIsolate(), static_cast<uint32_t>(delta));
+ if (handler.HasCaught()) return false;
+ }
+
+ // TODO(v8): This should happen in WasmTableObject::Grow.
+ if (new_size != old_size) {
+ auto isolate = v8_table->GetIsolate();
+ i::Handle<i::FixedArray> old_array(v8_table->elements(), isolate);
+ auto new_array =
+ isolate->factory()->NewFixedArray(static_cast<int>(new_size));
+ assert(static_cast<uint32_t>(old_array->length()) == old_size);
+ for (int i = 0; i < static_cast<int>(old_size); ++i)
+ new_array->set(i, old_array->get(i));
+ i::Handle<i::Object> val = isolate->factory()->null_value();
+ if (!init.IsEmpty())
+ val = v8::Utils::OpenHandle<v8::Function, i::JSReceiver>(
+ init.ToLocalChecked());
+ for (int i = old_size; i < static_cast<int>(new_size); ++i)
+ new_array->set(i, *val);
+ v8_table->set_elements(*new_array);
+ }
+
+ return true;
+}
+
+// Memory
+
+auto memory_data(v8::Local<v8::Object> memory) -> char* {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
+ auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
+ return reinterpret_cast<char*>(v8_memory->array_buffer()->backing_store());
+}
+
+auto memory_data_size(v8::Local<v8::Object> memory) -> size_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
+ auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
+ return v8_memory->array_buffer()->byte_length();
+}
+
+auto memory_size(v8::Local<v8::Object> memory) -> uint32_t {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
+ auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
+ return static_cast<uint32_t>(v8_memory->array_buffer()->byte_length() /
+ i::wasm::kWasmPageSize);
+}
+
+auto memory_grow(v8::Local<v8::Object> memory, uint32_t delta) -> bool {
+ auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
+ auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
+ auto old =
+ i::WasmMemoryObject::Grow(v8_memory->GetIsolate(), v8_memory, delta);
+ return old != -1;
+}
+
+} // namespace wasm
+} // namespace v8
+
+/// BEGIN FILE wasm-v8.cc
+
+namespace wasm {
+
+///////////////////////////////////////////////////////////////////////////////
+// Auxiliaries
+
+[[noreturn]] void WASM_UNIMPLEMENTED(const char* s) {
+ std::cerr << "Wasm API: " << s << " not supported yet!\n";
+ exit(1);
+}
+
+template <class T>
+void ignore(T) {}
+
+template <class C>
+struct implement;
+
+template <class C>
+auto impl(C* x) -> typename implement<C>::type* {
+ return reinterpret_cast<typename implement<C>::type*>(x);
+}
+
+template <class C>
+auto impl(const C* x) -> const typename implement<C>::type* {
+ return reinterpret_cast<const typename implement<C>::type*>(x);
+}
+
+template <class C>
+auto seal(typename implement<C>::type* x) -> C* {
+ return reinterpret_cast<C*>(x);
+}
+
+template <class C>
+auto seal(const typename implement<C>::type* x) -> const C* {
+ return reinterpret_cast<const C*>(x);
+}
+
+#ifdef DEBUG
+template <class T>
+void vec<T>::make_data() {}
+
+template <class T>
+void vec<T>::free_data() {}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Environment
+
+// Configuration
+
+struct ConfigImpl {
+ ConfigImpl() {}
+ ~ConfigImpl() {}
+};
+
+template <>
+struct implement<Config> {
+ using type = ConfigImpl;
+};
+
+Config::~Config() { impl(this)->~ConfigImpl(); }
+
+void Config::operator delete(void* p) { ::operator delete(p); }
+
+auto Config::make() -> own<Config*> {
+ return own<Config*>(seal<Config>(new (std::nothrow) ConfigImpl()));
+}
+
+// Engine
+
+struct EngineImpl {
+ static bool created;
+
+ std::unique_ptr<v8::Platform> platform;
+
+ EngineImpl() {
+ assert(!created);
+ created = true;
+ }
+
+ ~EngineImpl() {
+ v8::V8::Dispose();
+ v8::V8::ShutdownPlatform();
+ }
+};
+
+bool EngineImpl::created = false;
+
+template <>
+struct implement<Engine> {
+ using type = EngineImpl;
+};
+
+Engine::~Engine() { impl(this)->~EngineImpl(); }
+
+void Engine::operator delete(void* p) { ::operator delete(p); }
+
+auto Engine::make(own<Config*>&& config) -> own<Engine*> {
+ i::FLAG_expose_gc = true;
+ i::FLAG_experimental_wasm_bigint = true;
+ i::FLAG_experimental_wasm_mv = true;
+ auto engine = new (std::nothrow) EngineImpl;
+ if (!engine) return own<Engine*>();
+ engine->platform = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(engine->platform.get());
+ v8::V8::Initialize();
+ return make_own(seal<Engine>(engine));
+}
+
+// Stores
+
+enum v8_string_t {
+ V8_S_EMPTY,
+ V8_S_I32,
+ V8_S_I64,
+ V8_S_F32,
+ V8_S_F64,
+ V8_S_ANYREF,
+ V8_S_ANYFUNC,
+ V8_S_VALUE,
+ V8_S_MUTABLE,
+ V8_S_ELEMENT,
+ V8_S_MINIMUM,
+ V8_S_MAXIMUM,
+ V8_S_COUNT
+};
+
+enum v8_symbol_t { V8_Y_CALLBACK, V8_Y_ENV, V8_Y_COUNT };
+
+enum v8_function_t {
+ V8_F_WEAKMAP,
+ V8_F_WEAKMAP_PROTO,
+ V8_F_WEAKMAP_GET,
+ V8_F_WEAKMAP_SET,
+ V8_F_MODULE,
+ V8_F_GLOBAL,
+ V8_F_TABLE,
+ V8_F_MEMORY,
+ V8_F_INSTANCE,
+ V8_F_VALIDATE,
+ V8_F_COUNT,
+};
+
+class StoreImpl {
+ friend own<Store*> Store::make(Engine*);
+
+ v8::Isolate::CreateParams create_params_;
+ v8::Isolate* isolate_;
+ v8::Eternal<v8::Context> context_;
+ v8::Eternal<v8::String> strings_[V8_S_COUNT];
+ v8::Eternal<v8::Symbol> symbols_[V8_Y_COUNT];
+ v8::Eternal<v8::Function> functions_[V8_F_COUNT];
+ v8::Eternal<v8::Object> host_data_map_;
+ v8::Eternal<v8::Symbol> callback_symbol_;
+
+ public:
+ StoreImpl() {}
+
+ ~StoreImpl() {
+#ifdef DEBUG
+ reinterpret_cast<i::Isolate*>(isolate_)->heap()->PreciseCollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
+ v8::kGCCallbackFlagForced);
+#endif
+ context()->Exit();
+ isolate_->Exit();
+ isolate_->Dispose();
+ delete create_params_.array_buffer_allocator;
+ }
+
+ auto isolate() const -> v8::Isolate* { return isolate_; }
+
+ auto context() const -> v8::Local<v8::Context> {
+ return context_.Get(isolate_);
+ }
+
+ auto v8_string(v8_string_t i) const -> v8::Local<v8::String> {
+ return strings_[i].Get(isolate_);
+ }
+ auto v8_string(v8_symbol_t i) const -> v8::Local<v8::Symbol> {
+ return symbols_[i].Get(isolate_);
+ }
+ auto v8_function(v8_function_t i) const -> v8::Local<v8::Function> {
+ return functions_[i].Get(isolate_);
+ }
+
+ auto host_data_map() const -> v8::Local<v8::Object> {
+ return host_data_map_.Get(isolate_);
+ }
+
+ static auto get(v8::Isolate* isolate) -> StoreImpl* {
+ return static_cast<StoreImpl*>(isolate->GetData(0));
+ }
+};
+
+template <>
+struct implement<Store> {
+ using type = StoreImpl;
+};
+
+Store::~Store() { impl(this)->~StoreImpl(); }
+
+void Store::operator delete(void* p) { ::operator delete(p); }
+
+auto Store::make(Engine*) -> own<Store*> {
+ auto store = make_own(new (std::nothrow) StoreImpl());
+ if (!store) return own<Store*>();
+
+ // Create isolate.
+ store->create_params_.array_buffer_allocator =
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator();
+ auto isolate = v8::Isolate::New(store->create_params_);
+ if (!isolate) return own<Store*>();
+
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+
+ // Create context.
+ auto context = v8::Context::New(isolate);
+ if (context.IsEmpty()) return own<Store*>();
+ v8::Context::Scope context_scope(context);
+
+ store->isolate_ = isolate;
+ store->context_ = v8::Eternal<v8::Context>(isolate, context);
+
+ // Create strings.
+ static const char* const raw_strings[V8_S_COUNT] = {
+ "", "i32", "i64", "f32", "f64", "anyref",
+ "anyfunc", "value", "mutable", "element", "initial", "maximum",
+ };
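+    // Note: the enum name is V8_S_MINIMUM, but the corresponding JS API
+    // descriptor property is spelled "initial", hence the string above.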
+ for (int i = 0; i < V8_S_COUNT; ++i) {
+ auto maybe = v8::String::NewFromUtf8(isolate, raw_strings[i],
+ v8::NewStringType::kNormal);
+ if (maybe.IsEmpty()) return own<Store*>();
+ auto string = maybe.ToLocalChecked();
+ store->strings_[i] = v8::Eternal<v8::String>(isolate, string);
+ }
+
+ for (int i = 0; i < V8_Y_COUNT; ++i) {
+ auto symbol = v8::Symbol::New(isolate);
+ store->symbols_[i] = v8::Eternal<v8::Symbol>(isolate, symbol);
+ }
+
+ // Extract functions.
+ auto global = context->Global();
+ auto maybe_wasm_name = v8::String::NewFromUtf8(isolate, "WebAssembly",
+ v8::NewStringType::kNormal);
+ if (maybe_wasm_name.IsEmpty()) return own<Store*>();
+ auto wasm_name = maybe_wasm_name.ToLocalChecked();
+ auto maybe_wasm = global->Get(context, wasm_name);
+ if (maybe_wasm.IsEmpty()) return own<Store*>();
+ auto wasm = v8::Local<v8::Object>::Cast(maybe_wasm.ToLocalChecked());
+ v8::Local<v8::Object> weakmap;
+ v8::Local<v8::Object> weakmap_proto;
+
+ struct {
+ const char* name;
+ v8::Local<v8::Object>* carrier;
+ } raw_functions[V8_F_COUNT] = {
+ {"WeakMap", &global}, {"prototype", &weakmap},
+ {"get", &weakmap_proto}, {"set", &weakmap_proto},
+ {"Module", &wasm}, {"Global", &wasm},
+ {"Table", &wasm}, {"Memory", &wasm},
+ {"Instance", &wasm}, {"validate", &wasm},
+ };
+ for (int i = 0; i < V8_F_COUNT; ++i) {
+ auto maybe_name = v8::String::NewFromUtf8(isolate, raw_functions[i].name,
+ v8::NewStringType::kNormal);
+ if (maybe_name.IsEmpty()) return own<Store*>();
+ auto name = maybe_name.ToLocalChecked();
+ assert(!raw_functions[i].carrier->IsEmpty());
+ // TODO(wasm+): remove
+ if ((*raw_functions[i].carrier)->IsUndefined()) continue;
+ auto maybe_obj = (*raw_functions[i].carrier)->Get(context, name);
+ if (maybe_obj.IsEmpty()) return own<Store*>();
+ auto obj = v8::Local<v8::Object>::Cast(maybe_obj.ToLocalChecked());
+ if (i == V8_F_WEAKMAP_PROTO) {
+ assert(obj->IsObject());
+ weakmap_proto = obj;
+ } else {
+ assert(obj->IsFunction());
+ auto function = v8::Local<v8::Function>::Cast(obj);
+ store->functions_[i] = v8::Eternal<v8::Function>(isolate, function);
+ if (i == V8_F_WEAKMAP) weakmap = function;
+ }
+ }
+
+ // Create host data weak map.
+ v8::Local<v8::Value>* empty_args = nullptr;
+ auto maybe_weakmap =
+ store->v8_function(V8_F_WEAKMAP)->NewInstance(context, 0, empty_args);
+ if (maybe_weakmap.IsEmpty()) return own<Store*>();
+ auto map = v8::Local<v8::Object>::Cast(maybe_weakmap.ToLocalChecked());
+ assert(map->IsWeakMap());
+ store->host_data_map_ = v8::Eternal<v8::Object>(isolate, map);
+ }
+
+ store->isolate()->Enter();
+ store->context()->Enter();
+ isolate->SetData(0, store.get());
+
+ return make_own(seal<Store>(store.release()));
+}
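+
+// A minimal bootstrap sketch of the pieces above (illustrative only; error
+// handling omitted):
+//
+//   auto config = Config::make();
+//   auto engine = Engine::make(std::move(config));
+//   auto store = Store::make(engine.get());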
+
+///////////////////////////////////////////////////////////////////////////////
+// Type Representations
+
+// Value Types
+
+struct ValTypeImpl {
+ ValKind kind;
+
+ explicit ValTypeImpl(ValKind kind) : kind(kind) {}
+};
+
+template <>
+struct implement<ValType> {
+ using type = ValTypeImpl;
+};
+
+ValTypeImpl* valtypes[] = {
+ new ValTypeImpl(I32), new ValTypeImpl(I64), new ValTypeImpl(F32),
+ new ValTypeImpl(F64), new ValTypeImpl(ANYREF), new ValTypeImpl(FUNCREF),
+};
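+
+// The six ValTypeImpl instances above are immortal singletons: ValType::make()
+// returns the same object for a given kind, and ValType::operator delete is
+// accordingly a no-op.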
+
+ValType::~ValType() {}
+
+void ValType::operator delete(void*) {}
+
+auto ValType::make(ValKind k) -> own<ValType*> {
+ auto result = seal<ValType>(valtypes[k]);
+ return own<ValType*>(result);
+}
+
+auto ValType::copy() const -> own<ValType*> { return make(kind()); }
+
+auto ValType::kind() const -> ValKind { return impl(this)->kind; }
+
+// Extern Types
+
+struct ExternTypeImpl {
+ ExternKind kind;
+
+ explicit ExternTypeImpl(ExternKind kind) : kind(kind) {}
+ virtual ~ExternTypeImpl() {}
+};
+
+template <>
+struct implement<ExternType> {
+ using type = ExternTypeImpl;
+};
+
+ExternType::~ExternType() { impl(this)->~ExternTypeImpl(); }
+
+void ExternType::operator delete(void* p) { ::operator delete(p); }
+
+auto ExternType::copy() const -> own<ExternType*> {
+ switch (kind()) {
+ case EXTERN_FUNC:
+ return func()->copy();
+ case EXTERN_GLOBAL:
+ return global()->copy();
+ case EXTERN_TABLE:
+ return table()->copy();
+ case EXTERN_MEMORY:
+ return memory()->copy();
+ }
+}
+
+auto ExternType::kind() const -> ExternKind { return impl(this)->kind; }
+
+// Function Types
+
+struct FuncTypeImpl : ExternTypeImpl {
+ vec<ValType*> params;
+ vec<ValType*> results;
+
+ FuncTypeImpl(vec<ValType*>& params, vec<ValType*>& results)
+ : ExternTypeImpl(EXTERN_FUNC),
+ params(std::move(params)),
+ results(std::move(results)) {}
+
+ ~FuncTypeImpl() {}
+};
+
+template <>
+struct implement<FuncType> {
+ using type = FuncTypeImpl;
+};
+
+FuncType::~FuncType() {}
+
+auto FuncType::make(vec<ValType*>&& params, vec<ValType*>&& results)
+ -> own<FuncType*> {
+ return params && results
+ ? own<FuncType*>(seal<FuncType>(new (std::nothrow)
+ FuncTypeImpl(params, results)))
+ : own<FuncType*>();
+}
+
+auto FuncType::copy() const -> own<FuncType*> {
+ return make(params().copy(), results().copy());
+}
+
+auto FuncType::params() const -> const vec<ValType*>& {
+ return impl(this)->params;
+}
+
+auto FuncType::results() const -> const vec<ValType*>& {
+ return impl(this)->results;
+}
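+
+// Building a function type by hand, mirroring the vec<> usage elsewhere in
+// this file (illustrative sketch of an i32 -> i32 signature):
+//
+//   auto params = vec<ValType*>::make_uninitialized(1);
+//   params[0] = ValType::make(I32);
+//   auto results = vec<ValType*>::make_uninitialized(1);
+//   results[0] = ValType::make(I32);
+//   auto type = FuncType::make(std::move(params), std::move(results));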
+
+auto ExternType::func() -> FuncType* {
+ return kind() == EXTERN_FUNC
+ ? seal<FuncType>(static_cast<FuncTypeImpl*>(impl(this)))
+ : nullptr;
+}
+
+auto ExternType::func() const -> const FuncType* {
+ return kind() == EXTERN_FUNC
+ ? seal<FuncType>(static_cast<const FuncTypeImpl*>(impl(this)))
+ : nullptr;
+}
+
+// Global Types
+
+struct GlobalTypeImpl : ExternTypeImpl {
+ own<ValType*> content;
+ Mutability mutability;
+
+ GlobalTypeImpl(own<ValType*>& content, Mutability mutability)
+ : ExternTypeImpl(EXTERN_GLOBAL),
+ content(std::move(content)),
+ mutability(mutability) {}
+
+ ~GlobalTypeImpl() {}
+};
+
+template <>
+struct implement<GlobalType> {
+ using type = GlobalTypeImpl;
+};
+
+GlobalType::~GlobalType() {}
+
+auto GlobalType::make(own<ValType*>&& content, Mutability mutability)
+ -> own<GlobalType*> {
+ return content ? own<GlobalType*>(seal<GlobalType>(
+ new (std::nothrow) GlobalTypeImpl(content, mutability)))
+ : own<GlobalType*>();
+}
+
+auto GlobalType::copy() const -> own<GlobalType*> {
+ return make(content()->copy(), mutability());
+}
+
+auto GlobalType::content() const -> const ValType* {
+ return impl(this)->content.get();
+}
+
+auto GlobalType::mutability() const -> Mutability {
+ return impl(this)->mutability;
+}
+
+auto ExternType::global() -> GlobalType* {
+ return kind() == EXTERN_GLOBAL
+ ? seal<GlobalType>(static_cast<GlobalTypeImpl*>(impl(this)))
+ : nullptr;
+}
+
+auto ExternType::global() const -> const GlobalType* {
+ return kind() == EXTERN_GLOBAL
+ ? seal<GlobalType>(static_cast<const GlobalTypeImpl*>(impl(this)))
+ : nullptr;
+}
+
+// Table Types
+
+struct TableTypeImpl : ExternTypeImpl {
+ own<ValType*> element;
+ Limits limits;
+
+ TableTypeImpl(own<ValType*>& element, Limits limits)
+ : ExternTypeImpl(EXTERN_TABLE),
+ element(std::move(element)),
+ limits(limits) {}
+
+ ~TableTypeImpl() {}
+};
+
+template <>
+struct implement<TableType> {
+ using type = TableTypeImpl;
+};
+
+TableType::~TableType() {}
+
+auto TableType::make(own<ValType*>&& element, Limits limits)
+ -> own<TableType*> {
+ return element ? own<TableType*>(seal<TableType>(
+ new (std::nothrow) TableTypeImpl(element, limits)))
+ : own<TableType*>();
+}
+
+auto TableType::copy() const -> own<TableType*> {
+ return make(element()->copy(), limits());
+}
+
+auto TableType::element() const -> const ValType* {
+ return impl(this)->element.get();
+}
+
+auto TableType::limits() const -> const Limits& { return impl(this)->limits; }
+
+auto ExternType::table() -> TableType* {
+ return kind() == EXTERN_TABLE
+ ? seal<TableType>(static_cast<TableTypeImpl*>(impl(this)))
+ : nullptr;
+}
+
+auto ExternType::table() const -> const TableType* {
+ return kind() == EXTERN_TABLE
+ ? seal<TableType>(static_cast<const TableTypeImpl*>(impl(this)))
+ : nullptr;
+}
+
+// Memory Types
+
+struct MemoryTypeImpl : ExternTypeImpl {
+ Limits limits;
+
+ explicit MemoryTypeImpl(Limits limits)
+ : ExternTypeImpl(EXTERN_MEMORY), limits(limits) {}
+
+ ~MemoryTypeImpl() {}
+};
+
+template <>
+struct implement<MemoryType> {
+ using type = MemoryTypeImpl;
+};
+
+MemoryType::~MemoryType() {}
+
+auto MemoryType::make(Limits limits) -> own<MemoryType*> {
+ return own<MemoryType*>(
+ seal<MemoryType>(new (std::nothrow) MemoryTypeImpl(limits)));
+}
+
+auto MemoryType::copy() const -> own<MemoryType*> {
+ return MemoryType::make(limits());
+}
+
+auto MemoryType::limits() const -> const Limits& { return impl(this)->limits; }
+
+auto ExternType::memory() -> MemoryType* {
+ return kind() == EXTERN_MEMORY
+ ? seal<MemoryType>(static_cast<MemoryTypeImpl*>(impl(this)))
+ : nullptr;
+}
+
+auto ExternType::memory() const -> const MemoryType* {
+ return kind() == EXTERN_MEMORY
+ ? seal<MemoryType>(static_cast<const MemoryTypeImpl*>(impl(this)))
+ : nullptr;
+}
+
+// Import Types
+
+struct ImportTypeImpl {
+ Name module;
+ Name name;
+ own<ExternType*> type;
+
+ ImportTypeImpl(Name& module, Name& name, own<ExternType*>& type)
+ : module(std::move(module)),
+ name(std::move(name)),
+ type(std::move(type)) {}
+
+ ~ImportTypeImpl() {}
+};
+
+template <>
+struct implement<ImportType> {
+ using type = ImportTypeImpl;
+};
+
+ImportType::~ImportType() { impl(this)->~ImportTypeImpl(); }
+
+void ImportType::operator delete(void* p) { ::operator delete(p); }
+
+auto ImportType::make(Name&& module, Name&& name, own<ExternType*>&& type)
+ -> own<ImportType*> {
+ return module && name && type
+ ? own<ImportType*>(seal<ImportType>(
+ new (std::nothrow) ImportTypeImpl(module, name, type)))
+ : own<ImportType*>();
+}
+
+auto ImportType::copy() const -> own<ImportType*> {
+ return make(module().copy(), name().copy(), type()->copy());
+}
+
+auto ImportType::module() const -> const Name& { return impl(this)->module; }
+
+auto ImportType::name() const -> const Name& { return impl(this)->name; }
+
+auto ImportType::type() const -> const ExternType* {
+ return impl(this)->type.get();
+}
+
+// Export Types
+
+struct ExportTypeImpl {
+ Name name;
+ own<ExternType*> type;
+
+ ExportTypeImpl(Name& name, own<ExternType*>& type)
+ : name(std::move(name)), type(std::move(type)) {}
+
+ ~ExportTypeImpl() {}
+};
+
+template <>
+struct implement<ExportType> {
+ using type = ExportTypeImpl;
+};
+
+ExportType::~ExportType() { impl(this)->~ExportTypeImpl(); }
+
+void ExportType::operator delete(void* p) { ::operator delete(p); }
+
+auto ExportType::make(Name&& name, own<ExternType*>&& type)
+ -> own<ExportType*> {
+ return name && type ? own<ExportType*>(seal<ExportType>(
+ new (std::nothrow) ExportTypeImpl(name, type)))
+ : own<ExportType*>();
+}
+
+auto ExportType::copy() const -> own<ExportType*> {
+ return make(name().copy(), type()->copy());
+}
+
+auto ExportType::name() const -> const Name& { return impl(this)->name; }
+
+auto ExportType::type() const -> const ExternType* {
+ return impl(this)->type.get();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Conversions of types from and to V8 objects
+
+// Types
+
+auto valtype_to_v8(StoreImpl* store, const ValType* type)
+ -> v8::Local<v8::Value> {
+ v8_string_t string;
+ switch (type->kind()) {
+ case I32:
+ string = V8_S_I32;
+ break;
+ case I64:
+ string = V8_S_I64;
+ break;
+ case F32:
+ string = V8_S_F32;
+ break;
+ case F64:
+ string = V8_S_F64;
+ break;
+ case ANYREF:
+ string = V8_S_ANYREF;
+ break;
+ case FUNCREF:
+ string = V8_S_ANYFUNC;
+ break;
+ default:
+ // TODO(wasm+): support new value types
+ UNREACHABLE();
+ }
+ return store->v8_string(string);
+}
+
+auto mutability_to_v8(StoreImpl* store, Mutability mutability)
+ -> v8::Local<v8::Boolean> {
+ return v8::Boolean::New(store->isolate(), mutability == VAR);
+}
+
+void limits_to_v8(StoreImpl* store, Limits limits, v8::Local<v8::Object> desc) {
+ auto isolate = store->isolate();
+ auto context = store->context();
+ ignore(desc->DefineOwnProperty(
+ context, store->v8_string(V8_S_MINIMUM),
+ v8::Integer::NewFromUnsigned(isolate, limits.min)));
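+  // Limits() defaults the maximum to a sentinel value, so a "maximum"
+  // property is only emitted when an explicit upper bound was given.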
+ if (limits.max != Limits(0).max) {
+ ignore(desc->DefineOwnProperty(
+ context, store->v8_string(V8_S_MAXIMUM),
+ v8::Integer::NewFromUnsigned(isolate, limits.max)));
+ }
+}
+
+auto globaltype_to_v8(StoreImpl* store, const GlobalType* type)
+ -> v8::Local<v8::Object> {
+ auto isolate = store->isolate();
+ auto context = store->context();
+ auto desc = v8::Object::New(isolate);
+ ignore(desc->DefineOwnProperty(context, store->v8_string(V8_S_VALUE),
+ valtype_to_v8(store, type->content())));
+ ignore(desc->DefineOwnProperty(context, store->v8_string(V8_S_MUTABLE),
+ mutability_to_v8(store, type->mutability())));
+ return desc;
+}
+
+auto tabletype_to_v8(StoreImpl* store, const TableType* type)
+ -> v8::Local<v8::Object> {
+ auto isolate = store->isolate();
+ auto context = store->context();
+ auto desc = v8::Object::New(isolate);
+ ignore(desc->DefineOwnProperty(context, store->v8_string(V8_S_ELEMENT),
+ valtype_to_v8(store, type->element())));
+ limits_to_v8(store, type->limits(), desc);
+ return desc;
+}
+
+auto memorytype_to_v8(StoreImpl* store, const MemoryType* type)
+ -> v8::Local<v8::Object> {
+ auto isolate = store->isolate();
+ auto desc = v8::Object::New(isolate);
+ limits_to_v8(store, type->limits(), desc);
+ return desc;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Values
+
+// Values
+
+auto val_to_v8(StoreImpl* store, const Val& v) -> v8::Local<v8::Value> {
+ auto isolate = store->isolate();
+ switch (v.kind()) {
+ case I32:
+ return v8::Integer::NewFromUnsigned(isolate, v.i32());
+ case I64:
+ return v8::BigInt::New(isolate, v.i64());
+ case F32:
+ return v8::Number::New(isolate, v.f32());
+ case F64:
+ return v8::Number::New(isolate, v.f64());
+ case ANYREF:
+ case FUNCREF: {
+ if (v.ref() == nullptr) {
+ return v8::Null(isolate);
+ } else {
+ WASM_UNIMPLEMENTED("ref value");
+ }
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+auto v8_to_val(StoreImpl* store, v8::Local<v8::Value> value, const ValType* t)
+ -> own<Val> {
+ auto context = store->context();
+ switch (t->kind()) {
+ case I32:
+ return Val(value->Int32Value(context).ToChecked());
+ case I64: {
+ auto bigint = value->ToBigInt(context).ToLocalChecked();
+ return Val(bigint->Int64Value());
+ }
+ case F32: {
+ auto number = value->NumberValue(context).ToChecked();
+ return Val(static_cast<float32_t>(number));
+ }
+ case F64:
+ return Val(value->NumberValue(context).ToChecked());
+ case ANYREF:
+ case FUNCREF: {
+ if (value->IsNull()) {
+ return Val(nullptr);
+ } else {
+ WASM_UNIMPLEMENTED("ref value");
+ }
+ }
+ }
+}
+
+// References
+
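+// RefImpl backs every Ref-derived API object with a v8::Persistent handle to
+// the underlying V8 object. Host info attached via set_host_info() lives in
+// the store's host-data WeakMap, keyed by that V8 object, and is released
+// through a weak-callback finalizer.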
+template <class Ref>
+class RefImpl : public v8::Persistent<v8::Object> {
+ public:
+ RefImpl() = delete;
+
+ static auto make(StoreImpl* store, v8::Local<v8::Object> obj) -> own<Ref*> {
+ static_assert(sizeof(RefImpl) == sizeof(v8::Persistent<v8::Object>),
+ "incompatible object layout");
+ RefImpl* self =
+ static_cast<RefImpl*>(new (std::nothrow) v8::Persistent<v8::Object>());
+ if (!self) return nullptr;
+ self->Reset(store->isolate(), obj);
+ return make_own(seal<Ref>(self));
+ }
+
+ auto copy() const -> own<Ref*> {
+ v8::HandleScope handle_scope(isolate());
+ return make(store(), v8_object());
+ }
+
+ auto store() const -> StoreImpl* { return StoreImpl::get(isolate()); }
+
+ auto isolate() const -> v8::Isolate* {
+ return v8::wasm::object_isolate(*this);
+ }
+
+ auto v8_object() const -> v8::Local<v8::Object> { return Get(isolate()); }
+
+ auto get_host_info() const -> void* {
+ v8::HandleScope handle_scope(isolate());
+ auto store = this->store();
+
+ v8::Local<v8::Value> args[] = {v8_object()};
+ auto maybe_result =
+ store->v8_function(V8_F_WEAKMAP_GET)
+ ->Call(store->context(), store->host_data_map(), 1, args);
+ if (maybe_result.IsEmpty()) return nullptr;
+
+ auto data = v8::wasm::foreign_get(maybe_result.ToLocalChecked());
+ return reinterpret_cast<HostData*>(data)->info;
+ }
+
+ void set_host_info(void* info, void (*finalizer)(void*)) {
+ v8::HandleScope handle_scope(isolate());
+ auto store = this->store();
+
+    // V8 attaches finalizers to handles rather than to objects, and such
+    // handles cannot be reused after the finalizer has been invoked.
+    // Hence these handles are created separately instead of being taken
+    // from the pool.
+ auto data = new HostData(store->isolate(), v8_object(), info, finalizer);
+ data->handle.template SetWeak<HostData>(data, &v8_finalizer,
+ v8::WeakCallbackType::kParameter);
+ auto foreign = v8::wasm::foreign_new(store->isolate(), data);
+ v8::Local<v8::Value> args[] = {v8_object(), foreign};
+ auto maybe_result =
+ store->v8_function(V8_F_WEAKMAP_SET)
+ ->Call(store->context(), store->host_data_map(), 2, args);
+ if (maybe_result.IsEmpty()) return;
+ }
+
+ private:
+ struct HostData {
+ HostData(v8::Isolate* isolate, v8::Local<v8::Object> object, void* info,
+ void (*finalizer)(void*))
+ : handle(isolate, object), info(info), finalizer(finalizer) {}
+ v8::Persistent<v8::Object> handle;
+ void* info;
+ void (*finalizer)(void*);
+ };
+
+ static void v8_finalizer(const v8::WeakCallbackInfo<HostData>& info) {
+ auto data = info.GetParameter();
+ data->handle.Reset(); // Must reset weak handle before deleting it!
+ if (data->finalizer) (*data->finalizer)(data->info);
+ delete data;
+ }
+};
+
+template <>
+struct implement<Ref> {
+ using type = RefImpl<Ref>;
+};
+
+Ref::~Ref() {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ impl(this)->Reset();
+ delete impl(this);
+}
+
+void Ref::operator delete(void* p) {}
+
+auto Ref::copy() const -> own<Ref*> { return impl(this)->copy(); }
+
+auto Ref::get_host_info() const -> void* { return impl(this)->get_host_info(); }
+
+void Ref::set_host_info(void* info, void (*finalizer)(void*)) {
+ impl(this)->set_host_info(info, finalizer);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Objects
+
+// Traps
+
+template <>
+struct implement<Trap> {
+ using type = RefImpl<Trap>;
+};
+
+Trap::~Trap() {}
+
+auto Trap::copy() const -> own<Trap*> { return impl(this)->copy(); }
+
+auto Trap::make(Store* store_abs, const Message& message) -> own<Trap*> {
+ auto store = impl(store_abs);
+ v8::Isolate* isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+
+ auto maybe_string = v8::String::NewFromUtf8(isolate, message.get(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(message.size()));
+ if (maybe_string.IsEmpty()) return own<Trap*>();
+ auto exception = v8::Exception::Error(maybe_string.ToLocalChecked());
+ return RefImpl<Trap>::make(store, v8::Local<v8::Object>::Cast(exception));
+}
+
+auto Trap::message() const -> Message {
+ auto isolate = impl(this)->isolate();
+ v8::HandleScope handle_scope(isolate);
+
+ auto message = v8::Exception::CreateMessage(isolate, impl(this)->v8_object());
+ v8::String::Utf8Value string(isolate, message->Get());
+ return vec<byte_t>::make(std::string(*string));
+}
+
+// Foreign Objects
+
+template <>
+struct implement<Foreign> {
+ using type = RefImpl<Foreign>;
+};
+
+Foreign::~Foreign() {}
+
+auto Foreign::copy() const -> own<Foreign*> { return impl(this)->copy(); }
+
+auto Foreign::make(Store* store_abs) -> own<Foreign*> {
+ auto store = impl(store_abs);
+ auto isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+
+ auto obj = v8::Object::New(isolate);
+ return RefImpl<Foreign>::make(store, obj);
+}
+
+// Modules
+
+template <>
+struct implement<Module> {
+ using type = RefImpl<Module>;
+};
+
+Module::~Module() {}
+
+auto Module::copy() const -> own<Module*> { return impl(this)->copy(); }
+
+auto Module::validate(Store* store_abs, const vec<byte_t>& binary) -> bool {
+ auto store = impl(store_abs);
+ v8::Isolate* isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+
+ auto array_buffer = v8::ArrayBuffer::New(
+ isolate, const_cast<byte_t*>(binary.get()), binary.size());
+
+ v8::Local<v8::Value> args[] = {array_buffer};
+ auto result = store->v8_function(V8_F_VALIDATE)
+ ->Call(store->context(), v8::Undefined(isolate), 1, args);
+ if (result.IsEmpty()) return false;
+
+ return result.ToLocalChecked()->IsTrue();
+}
+
+auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module*> {
+ auto store = impl(store_abs);
+ auto isolate = store->isolate();
+ auto context = store->context();
+ v8::HandleScope handle_scope(isolate);
+
+ auto array_buffer = v8::ArrayBuffer::New(
+ isolate, const_cast<byte_t*>(binary.get()), binary.size());
+
+ v8::Local<v8::Value> args[] = {array_buffer};
+ auto maybe_obj =
+ store->v8_function(V8_F_MODULE)->NewInstance(context, 1, args);
+ if (maybe_obj.IsEmpty()) return nullptr;
+ return RefImpl<Module>::make(store, maybe_obj.ToLocalChecked());
+}
+
+auto Module::imports() const -> vec<ImportType*> {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto module = impl(this)->v8_object();
+ auto binary =
+ vec<byte_t>::adopt(v8::wasm::module_binary_size(module),
+ const_cast<byte_t*>(v8::wasm::module_binary(module)));
+ auto imports = wasm::bin::imports(binary);
+ binary.release();
+ return imports;
+}
+
+auto Module::exports() const -> vec<ExportType*> {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto module = impl(this)->v8_object();
+ auto binary =
+ vec<byte_t>::adopt(v8::wasm::module_binary_size(module),
+ const_cast<byte_t*>(v8::wasm::module_binary(module)));
+ auto exports = wasm::bin::exports(binary);
+ binary.release();
+ return exports;
+}
+
+auto Module::serialize() const -> vec<byte_t> {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto module = impl(this)->v8_object();
+ auto binary_size = v8::wasm::module_binary_size(module);
+ auto serial_size = v8::wasm::module_serialize_size(module);
+ auto size_size = i::wasm::LEBHelper::sizeof_u64v(binary_size);
+ auto buffer =
+ vec<byte_t>::make_uninitialized(size_size + binary_size + serial_size);
+ auto ptr = buffer.get();
+ i::wasm::LEBHelper::write_u64v(reinterpret_cast<uint8_t**>(&ptr),
+ binary_size);
+ std::memcpy(ptr, v8::wasm::module_binary(module), binary_size);
+ ptr += binary_size;
+ if (!v8::wasm::module_serialize(module, ptr, serial_size)) buffer.reset();
+ return std::move(buffer);
+}
+
+auto Module::deserialize(Store* store_abs, const vec<byte_t>& serialized)
+ -> own<Module*> {
+ auto store = impl(store_abs);
+ auto isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+ auto ptr = serialized.get();
+ auto binary_size = wasm::bin::u64(ptr);
+ auto size_size = ptr - serialized.get();
+ auto serial_size = serialized.size() - size_size - binary_size;
+ auto maybe_obj = v8::wasm::module_deserialize(isolate, ptr, binary_size,
+ ptr + binary_size, serial_size);
+ if (maybe_obj.IsEmpty()) return nullptr;
+ return RefImpl<Module>::make(store, maybe_obj.ToLocalChecked());
+}
+
+// TODO(v8): do better when V8 can do better.
+template <>
+struct implement<Shared<Module>> {
+ using type = vec<byte_t>;
+};
+
+template <>
+Shared<Module>::~Shared() {
+ impl(this)->~vec();
+}
+
+template <>
+void Shared<Module>::operator delete(void* p) {
+ ::operator delete(p);
+}
+
+auto Module::share() const -> own<Shared<Module>*> {
+ auto shared = seal<Shared<Module>>(new vec<byte_t>(serialize()));
+ return make_own(shared);
+}
+
+auto Module::obtain(Store* store, const Shared<Module>* shared)
+ -> own<Module*> {
+ return Module::deserialize(store, *impl(shared));
+}
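+
+// A sketch of the intended cross-store use of share/obtain (illustrative
+// only):
+//
+//   auto shared = module->share();               // serialized form
+//   // ... hand |shared| to code holding a different Store ...
+//   auto copy = Module::obtain(other_store, shared.get());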
+
+// Externals
+
+template <>
+struct implement<Extern> {
+ using type = RefImpl<Extern>;
+};
+
+Extern::~Extern() {}
+
+auto Extern::copy() const -> own<Extern*> { return impl(this)->copy(); }
+
+auto Extern::kind() const -> ExternKind {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ return v8::wasm::extern_kind(impl(this)->v8_object());
+}
+
+auto Extern::type() const -> own<ExternType*> {
+ switch (kind()) {
+ case EXTERN_FUNC:
+ return func()->type();
+ case EXTERN_GLOBAL:
+ return global()->type();
+ case EXTERN_TABLE:
+ return table()->type();
+ case EXTERN_MEMORY:
+ return memory()->type();
+ }
+}
+
+auto Extern::func() -> Func* {
+ return kind() == EXTERN_FUNC ? static_cast<Func*>(this) : nullptr;
+}
+
+auto Extern::global() -> Global* {
+ return kind() == EXTERN_GLOBAL ? static_cast<Global*>(this) : nullptr;
+}
+
+auto Extern::table() -> Table* {
+ return kind() == EXTERN_TABLE ? static_cast<Table*>(this) : nullptr;
+}
+
+auto Extern::memory() -> Memory* {
+ return kind() == EXTERN_MEMORY ? static_cast<Memory*>(this) : nullptr;
+}
+
+auto Extern::func() const -> const Func* {
+ return kind() == EXTERN_FUNC ? static_cast<const Func*>(this) : nullptr;
+}
+
+auto Extern::global() const -> const Global* {
+ return kind() == EXTERN_GLOBAL ? static_cast<const Global*>(this) : nullptr;
+}
+
+auto Extern::table() const -> const Table* {
+ return kind() == EXTERN_TABLE ? static_cast<const Table*>(this) : nullptr;
+}
+
+auto Extern::memory() const -> const Memory* {
+ return kind() == EXTERN_MEMORY ? static_cast<const Memory*>(this) : nullptr;
+}
+
+auto extern_to_v8(const Extern* ex) -> v8::Local<v8::Value> {
+ return impl(ex)->v8_object();
+}
+
+// Function Instances
+
+template <>
+struct implement<Func> {
+ using type = RefImpl<Func>;
+};
+
+Func::~Func() {}
+
+auto Func::copy() const -> own<Func*> { return impl(this)->copy(); }
+
+struct FuncData {
+ Store* store;
+ own<FuncType*> type;
+ enum Kind { kCallback, kCallbackWithEnv } kind;
+ union {
+ Func::callback callback;
+ Func::callback_with_env callback_with_env;
+ };
+ void (*finalizer)(void*);
+ void* env;
+
+ FuncData(Store* store, const FuncType* type, Kind kind)
+ : store(store),
+ type(type->copy()),
+ kind(kind),
+ finalizer(nullptr),
+ env(nullptr) {}
+
+ ~FuncData() {
+ if (finalizer) (*finalizer)(env);
+ }
+
+ static void v8_callback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void finalize_func_data(void* data);
+};
+
+namespace {
+
+auto make_func(Store* store_abs, FuncData* data) -> own<Func*> {
+ auto store = impl(store_abs);
+ auto isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+ auto context = store->context();
+
+ // Create V8 function
+ auto v8_data = v8::wasm::foreign_new(isolate, data);
+ auto function_template =
+ v8::FunctionTemplate::New(isolate, &FuncData::v8_callback, v8_data);
+ auto maybe_func_obj = function_template->GetFunction(context);
+ if (maybe_func_obj.IsEmpty()) return own<Func*>();
+ auto func_obj = maybe_func_obj.ToLocalChecked();
+
+ // Create wrapper instance
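+  // There is no direct way here to mint a WebAssembly function of a given
+  // type from a JS function, so a tiny module (presumably produced by
+  // wasm::bin::wrapper) that imports the callback and re-exports it is
+  // instantiated; the export then carries the desired wasm signature.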
+ auto binary = wasm::bin::wrapper(data->type.get());
+ auto module = Module::make(store_abs, binary);
+
+ auto imports_obj = v8::Object::New(isolate);
+ auto module_obj = v8::Object::New(isolate);
+ auto str = store->v8_string(V8_S_EMPTY);
+ ignore(imports_obj->DefineOwnProperty(context, str, module_obj));
+ ignore(module_obj->DefineOwnProperty(context, str, func_obj));
+
+ v8::Local<v8::Value> instantiate_args[] = {impl(module.get())->v8_object(),
+ imports_obj};
+ auto instance_obj = store->v8_function(V8_F_INSTANCE)
+ ->NewInstance(context, 2, instantiate_args)
+ .ToLocalChecked();
+ assert(!instance_obj.IsEmpty());
+ assert(instance_obj->IsObject());
+ auto exports_obj = v8::wasm::instance_exports(instance_obj);
+ assert(!exports_obj.IsEmpty());
+ assert(exports_obj->IsObject());
+ auto wrapped_func_obj = v8::Local<v8::Function>::Cast(
+ exports_obj->Get(context, str).ToLocalChecked());
+ assert(!wrapped_func_obj.IsEmpty());
+ assert(wrapped_func_obj->IsFunction());
+
+ auto func = RefImpl<Func>::make(store, wrapped_func_obj);
+ func->set_host_info(data, &FuncData::finalize_func_data);
+ return func;
+}
+
+auto func_type(v8::Local<v8::Object> v8_func) -> own<FuncType*> {
+ auto param_arity = v8::wasm::func_type_param_arity(v8_func);
+ auto result_arity = v8::wasm::func_type_result_arity(v8_func);
+ auto params = vec<ValType*>::make_uninitialized(param_arity);
+ auto results = vec<ValType*>::make_uninitialized(result_arity);
+
+ for (size_t i = 0; i < params.size(); ++i) {
+ auto kind = v8::wasm::func_type_param(v8_func, i);
+ params[i] = ValType::make(kind);
+ }
+ for (size_t i = 0; i < results.size(); ++i) {
+ auto kind = v8::wasm::func_type_result(v8_func, i);
+ results[i] = ValType::make(kind);
+ }
+
+ return FuncType::make(std::move(params), std::move(results));
+}
+
+} // namespace
+
+auto Func::make(Store* store, const FuncType* type, Func::callback callback)
+ -> own<Func*> {
+ auto data = new FuncData(store, type, FuncData::kCallback);
+ data->callback = callback;
+ return make_func(store, data);
+}
+
+auto Func::make(Store* store, const FuncType* type, callback_with_env callback,
+ void* env, void (*finalizer)(void*)) -> own<Func*> {
+ auto data = new FuncData(store, type, FuncData::kCallbackWithEnv);
+ data->callback_with_env = callback;
+ data->env = env;
+ data->finalizer = finalizer;
+ return make_func(store, data);
+}
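+
+// A minimal host-callback sketch (illustrative only), assuming the wasm.hh
+// callback signature own<Trap*>(const Val[], Val[]):
+//
+//   auto echo_i32(const Val args[], Val results[]) -> own<Trap*> {
+//     results[0] = Val(args[0].i32());  // pass the i32 argument through
+//     return nullptr;                   // no trap
+//   }
+//   ...
+//   auto func = Func::make(store, func_type.get(), echo_i32);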
+
+auto Func::type() const -> own<FuncType*> {
+ // return impl(this)->data->type->copy();
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ return func_type(impl(this)->v8_object());
+}
+
+auto Func::param_arity() const -> size_t {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ return v8::wasm::func_type_param_arity(impl(this)->v8_object());
+}
+
+auto Func::result_arity() const -> size_t {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ return v8::wasm::func_type_result_arity(impl(this)->v8_object());
+}
+
+auto Func::call(const Val args[], Val results[]) const -> own<Trap*> {
+ auto func = impl(this);
+ auto store = func->store();
+ auto isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+
+ auto context = store->context();
+ auto type = this->type();
+ auto& param_types = type->params();
+ auto& result_types = type->results();
+
+ // TODO(rossberg): cache v8_args array per thread.
+ auto v8_args = std::unique_ptr<v8::Local<v8::Value>[]>(
+ new (std::nothrow) v8::Local<v8::Value>[param_types.size()]);
+ for (size_t i = 0; i < param_types.size(); ++i) {
+ assert(args[i].kind() == param_types[i]->kind());
+ v8_args[i] = val_to_v8(store, args[i]);
+ }
+
+ v8::TryCatch handler(isolate);
+ auto v8_function = v8::Local<v8::Function>::Cast(func->v8_object());
+ auto maybe_val =
+ v8_function->Call(context, v8::Undefined(isolate),
+ static_cast<int>(param_types.size()), v8_args.get());
+
+ if (handler.HasCaught()) {
+ auto exception = handler.Exception();
+ if (!exception->IsObject()) {
+ auto maybe_string = exception->ToString(store->context());
+ auto string = maybe_string.IsEmpty() ? store->v8_string(V8_S_EMPTY)
+ : maybe_string.ToLocalChecked();
+ exception = v8::Exception::Error(string);
+ }
+ return RefImpl<Trap>::make(store, v8::Local<v8::Object>::Cast(exception));
+ }
+
+ auto val = maybe_val.ToLocalChecked();
+ if (result_types.size() == 0) {
+ assert(val->IsUndefined());
+ } else if (result_types.size() == 1) {
+ assert(!val->IsUndefined());
+ new (&results[0]) Val(v8_to_val(store, val, result_types[0]));
+ } else {
+ WASM_UNIMPLEMENTED("multiple results");
+ }
+ return nullptr;
+}
+
+void FuncData::v8_callback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ auto self = reinterpret_cast<FuncData*>(v8::wasm::foreign_get(info.Data()));
+ auto store = impl(self->store);
+ auto isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+
+ auto& param_types = self->type->params();
+ auto& result_types = self->type->results();
+
+ assert(param_types.size() == static_cast<size_t>(info.Length()));
+ int num_param_types = static_cast<int>(param_types.size());
+ int num_result_types = static_cast<int>(result_types.size());
+
+ // TODO(rossberg): cache params and result arrays per thread.
+ std::unique_ptr<Val[]> args(new Val[num_param_types]);
+ std::unique_ptr<Val[]> results(new Val[num_result_types]);
+ for (int i = 0; i < num_param_types; ++i) {
+ args[i] = v8_to_val(store, info[i], param_types[i]);
+ }
+
+ own<Trap*> trap;
+ if (self->kind == kCallbackWithEnv) {
+ trap = self->callback_with_env(self->env, args.get(), results.get());
+ } else {
+ trap = self->callback(args.get(), results.get());
+ }
+
+ if (trap) {
+ isolate->ThrowException(impl(trap.get())->v8_object());
+ return;
+ }
+
+ auto ret = info.GetReturnValue();
+ if (result_types.size() == 0) {
+ ret.SetUndefined();
+ } else if (result_types.size() == 1) {
+ assert(results[0].kind() == result_types[0]->kind());
+ ret.Set(val_to_v8(store, results[0]));
+ } else {
+ WASM_UNIMPLEMENTED("multiple results");
+ }
+}
+
+void FuncData::finalize_func_data(void* data) {
+ delete reinterpret_cast<FuncData*>(data);
+}
+
+// Global Instances
+
+template <>
+struct implement<Global> {
+ using type = RefImpl<Global>;
+};
+
+Global::~Global() {}
+
+auto Global::copy() const -> own<Global*> { return impl(this)->copy(); }
+
+auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
+ -> own<Global*> {
+ auto store = impl(store_abs);
+ auto isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+ auto context = store->context();
+
+ assert(type->content()->kind() == val.kind());
+
+ // Create wrapper instance
+ auto binary = wasm::bin::wrapper(type);
+ auto module = Module::make(store_abs, binary);
+
+ v8::Local<v8::Value> instantiate_args[] = {impl(module.get())->v8_object()};
+ auto instance_obj = store->v8_function(V8_F_INSTANCE)
+ ->NewInstance(context, 1, instantiate_args)
+ .ToLocalChecked();
+ auto exports_obj = v8::wasm::instance_exports(instance_obj);
+ auto obj = v8::Local<v8::Object>::Cast(
+ exports_obj->Get(context, store->v8_string(V8_S_EMPTY)).ToLocalChecked());
+ assert(!obj.IsEmpty() && obj->IsObject());
+
+ auto global = RefImpl<Global>::make(store, obj);
+ assert(global);
+ global->set(val);
+ return global;
+}
+
+auto Global::type() const -> own<GlobalType*> {
+ // return impl(this)->data->type->copy();
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto v8_global = impl(this)->v8_object();
+ auto kind = v8::wasm::global_type_content(v8_global);
+ auto mutability = v8::wasm::global_type_mutable(v8_global) ? VAR : CONST;
+ return GlobalType::make(ValType::make(kind), mutability);
+}
+
+auto Global::get() const -> Val {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto v8_global = impl(this)->v8_object();
+ switch (type()->content()->kind()) {
+ case I32:
+ return Val(v8::wasm::global_get_i32(v8_global));
+ case I64:
+ return Val(v8::wasm::global_get_i64(v8_global));
+ case F32:
+ return Val(v8::wasm::global_get_f32(v8_global));
+ case F64:
+ return Val(v8::wasm::global_get_f64(v8_global));
+ case ANYREF:
+ case FUNCREF:
+ WASM_UNIMPLEMENTED("globals of reference type");
+ default:
+ // TODO(wasm+): support new value types
+ UNREACHABLE();
+ }
+}
+
+void Global::set(const Val& val) {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto v8_global = impl(this)->v8_object();
+ switch (val.kind()) {
+ case I32:
+ return v8::wasm::global_set_i32(v8_global, val.i32());
+ case I64:
+ return v8::wasm::global_set_i64(v8_global, val.i64());
+ case F32:
+ return v8::wasm::global_set_f32(v8_global, val.f32());
+ case F64:
+ return v8::wasm::global_set_f64(v8_global, val.f64());
+ case ANYREF:
+ case FUNCREF:
+ WASM_UNIMPLEMENTED("globals of reference type");
+ default:
+ // TODO(wasm+): support new value types
+ UNREACHABLE();
+ }
+}
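+
+// Illustrative use of the Global API above (error handling omitted):
+//
+//   auto type = GlobalType::make(ValType::make(I32), VAR);
+//   auto global = Global::make(store, type.get(), Val(int32_t{7}));
+//   global->set(Val(int32_t{42}));
+//   auto current = global->get();  // current.i32() == 42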
+
+// Table Instances
+
+template <>
+struct implement<Table> {
+ using type = RefImpl<Table>;
+};
+
+Table::~Table() {}
+
+auto Table::copy() const -> own<Table*> { return impl(this)->copy(); }
+
+auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
+ -> own<Table*> {
+ auto store = impl(store_abs);
+ auto isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+ auto context = store->context();
+
+ v8::Local<v8::Value> init = v8::Null(isolate);
+ if (ref) init = impl(ref)->v8_object();
+ v8::Local<v8::Value> args[] = {tabletype_to_v8(store, type), init};
+ auto maybe_obj =
+ store->v8_function(V8_F_TABLE)->NewInstance(context, 2, args);
+ if (maybe_obj.IsEmpty()) return own<Table*>();
+ auto table = RefImpl<Table>::make(store, maybe_obj.ToLocalChecked());
+ // TODO(wasm+): pass reference initialiser as parameter
+ if (table && ref) {
+ auto size = type->limits().min;
+ auto obj = maybe_obj.ToLocalChecked();
+ auto maybe_func =
+ v8::MaybeLocal<v8::Function>(v8::Local<v8::Function>::Cast(init));
+ for (size_t i = 0; i < size; ++i) {
+ v8::wasm::table_set(obj, i, maybe_func);
+ }
+ }
+ return table;
+}
+
+auto Table::type() const -> own<TableType*> {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto v8_table = impl(this)->v8_object();
+ uint32_t min = v8::wasm::table_type_min(v8_table);
+ uint32_t max = v8::wasm::table_type_max(v8_table);
+ // TODO(wasm+): support new element types.
+ return TableType::make(ValType::make(FUNCREF), Limits(min, max));
+}
+
+auto Table::get(size_t index) const -> own<Ref*> {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto maybe = v8::wasm::table_get(impl(this)->v8_object(), index);
+ if (maybe.IsEmpty() || maybe.ToLocalChecked()->IsNull()) return own<Ref*>();
+ // TODO(wasm+): other references
+ auto obj = maybe.ToLocalChecked();
+ assert(obj->IsFunction());
+ return RefImpl<Func>::make(impl(this)->store(), obj);
+}
+
+auto Table::set(size_t index, const Ref* ref) -> bool {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ if (ref && !impl(ref)->v8_object()->IsFunction()) {
+ WASM_UNIMPLEMENTED("non-function table elements");
+ }
+ auto obj = ref ? v8::MaybeLocal<v8::Function>(
+ v8::Local<v8::Function>::Cast(impl(ref)->v8_object()))
+ : v8::MaybeLocal<v8::Function>();
+ return v8::wasm::table_set(impl(this)->v8_object(), index, obj);
+}
+
+auto Table::size() const -> size_t {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ // TODO(jkummerow): Having Table::size_t shadowing "std" size_t is ugly.
+ return static_cast<Table::size_t>(
+ v8::wasm::table_size(impl(this)->v8_object()));
+}
+
+auto Table::grow(size_t delta, const Ref* ref) -> bool {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto obj = ref ? v8::MaybeLocal<v8::Function>(
+ v8::Local<v8::Function>::Cast(impl(ref)->v8_object()))
+ : v8::MaybeLocal<v8::Function>();
+ return v8::wasm::table_grow(impl(this)->v8_object(), delta, obj);
+}
+
+// Memory Instances
+
+template <>
+struct implement<Memory> {
+ using type = RefImpl<Memory>;
+};
+
+Memory::~Memory() {}
+
+auto Memory::copy() const -> own<Memory*> { return impl(this)->copy(); }
+
+auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory*> {
+ auto store = impl(store_abs);
+ auto isolate = store->isolate();
+ v8::HandleScope handle_scope(isolate);
+ auto context = store->context();
+
+ v8::Local<v8::Value> args[] = {memorytype_to_v8(store, type)};
+ auto maybe_obj =
+ store->v8_function(V8_F_MEMORY)->NewInstance(context, 1, args);
+ if (maybe_obj.IsEmpty()) return own<Memory*>();
+ return RefImpl<Memory>::make(store, maybe_obj.ToLocalChecked());
+}
+
+auto Memory::type() const -> own<MemoryType*> {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ auto v8_memory = impl(this)->v8_object();
+ uint32_t min = v8::wasm::memory_type_min(v8_memory);
+ uint32_t max = v8::wasm::memory_type_max(v8_memory);
+ return MemoryType::make(Limits(min, max));
+}
+
+auto Memory::data() const -> byte_t* {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ return v8::wasm::memory_data(impl(this)->v8_object());
+}
+
+auto Memory::data_size() const -> size_t {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ return v8::wasm::memory_data_size(impl(this)->v8_object());
+}
+
+auto Memory::size() const -> pages_t {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ return v8::wasm::memory_size(impl(this)->v8_object());
+}
+
+auto Memory::grow(pages_t delta) -> bool {
+ v8::HandleScope handle_scope(impl(this)->isolate());
+ return v8::wasm::memory_grow(impl(this)->v8_object(), delta);
+}
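+
+// Illustrative use of the Memory API above:
+//
+//   auto memory = Memory::make(store, MemoryType::make(Limits(1, 4)).get());
+//   byte_t* base = memory->data();       // start of linear memory
+//   size_t bytes = memory->data_size();  // 1 page = 64 KiB initially
+//   memory->grow(1);                     // 2 pages afterwards, if it succeeds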
+
+// Module Instances
+
+template <>
+struct implement<Instance> {
+ using type = RefImpl<Instance>;
+};
+
+Instance::~Instance() {}
+
+auto Instance::copy() const -> own<Instance*> { return impl(this)->copy(); }
+
+auto Instance::make(Store* store_abs, const Module* module_abs,
+ const Extern* const imports[]) -> own<Instance*> {
+ auto store = impl(store_abs);
+ auto module = impl(module_abs);
+ auto isolate = store->isolate();
+ auto context = store->context();
+ v8::HandleScope handle_scope(isolate);
+
+ assert(module->v8_object()->GetIsolate() == isolate);
+
+ auto import_types = module_abs->imports();
+ auto imports_obj = v8::Object::New(isolate);
+ for (size_t i = 0; i < import_types.size(); ++i) {
+ auto type = import_types[i];
+ auto maybe_module = v8::String::NewFromOneByte(
+ isolate, reinterpret_cast<const uint8_t*>(type->module().get()),
+ v8::NewStringType::kNormal, static_cast<int>(type->module().size()));
+ if (maybe_module.IsEmpty()) return own<Instance*>();
+ auto module_str = maybe_module.ToLocalChecked();
+ auto maybe_name = v8::String::NewFromOneByte(
+ isolate, reinterpret_cast<const uint8_t*>(type->name().get()),
+ v8::NewStringType::kNormal, static_cast<int>(type->name().size()));
+ if (maybe_name.IsEmpty()) return own<Instance*>();
+ auto name_str = maybe_name.ToLocalChecked();
+
+ v8::Local<v8::Object> module_obj;
+ if (imports_obj->HasOwnProperty(context, module_str).ToChecked()) {
+ module_obj = v8::Local<v8::Object>::Cast(
+ imports_obj->Get(context, module_str).ToLocalChecked());
+ } else {
+ module_obj = v8::Object::New(isolate);
+ ignore(imports_obj->DefineOwnProperty(context, module_str, module_obj));
+ }
+
+ ignore(module_obj->DefineOwnProperty(context, name_str,
+ extern_to_v8(imports[i])));
+ }
+
+ v8::Local<v8::Value> instantiate_args[] = {module->v8_object(), imports_obj};
+ auto obj = store->v8_function(V8_F_INSTANCE)
+ ->NewInstance(context, 2, instantiate_args)
+ .ToLocalChecked();
+ return RefImpl<Instance>::make(store, obj);
+}
+
+auto Instance::exports() const -> vec<Extern*> {
+ auto instance = impl(this);
+ auto store = instance->store();
+ auto isolate = store->isolate();
+ auto context = store->context();
+ v8::HandleScope handle_scope(isolate);
+
+ auto module_obj = v8::wasm::instance_module(instance->v8_object());
+ auto exports_obj = v8::wasm::instance_exports(instance->v8_object());
+ assert(!module_obj.IsEmpty() && module_obj->IsObject());
+ assert(!exports_obj.IsEmpty() && exports_obj->IsObject());
+
+ auto module = RefImpl<Module>::make(store, module_obj);
+ auto export_types = module->exports();
+ auto exports = vec<Extern*>::make_uninitialized(export_types.size());
+ if (!exports) return vec<Extern*>::invalid();
+
+ for (size_t i = 0; i < export_types.size(); ++i) {
+ auto& name = export_types[i]->name();
+ auto maybe_name_obj =
+ v8::String::NewFromUtf8(isolate, name.get(), v8::NewStringType::kNormal,
+ static_cast<int>(name.size()));
+ if (maybe_name_obj.IsEmpty()) return vec<Extern*>::invalid();
+ auto name_obj = maybe_name_obj.ToLocalChecked();
+ auto obj = v8::Local<v8::Object>::Cast(
+ exports_obj->Get(context, name_obj).ToLocalChecked());
+
+ auto type = export_types[i]->type();
+ assert(type->kind() == v8::wasm::extern_kind(obj));
+ switch (type->kind()) {
+ case EXTERN_FUNC: {
+ exports[i].reset(RefImpl<Func>::make(store, obj));
+ } break;
+ case EXTERN_GLOBAL: {
+ exports[i].reset(RefImpl<Global>::make(store, obj));
+ } break;
+ case EXTERN_TABLE: {
+ exports[i].reset(RefImpl<Table>::make(store, obj));
+ } break;
+ case EXTERN_MEMORY: {
+ exports[i].reset(RefImpl<Memory>::make(store, obj));
+ } break;
+ }
+ }
+
+ return exports;
+}
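+
+// End-to-end sketch tying the pieces above together (illustrative only;
+// |binary| is assumed to hold the module bytes as a vec<byte_t>, and the
+// first export is assumed to be a no-argument, no-result function):
+//
+//   auto module = Module::make(store, binary);
+//   const Extern* imports[] = {func.get()};  // e.g. one imported Func
+//   auto instance = Instance::make(store, module.get(), imports);
+//   auto exports = instance->exports();
+//   auto run = exports[0]->func();
+//   if (run) run->call(nullptr, nullptr);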
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace wasm
+
+// BEGIN FILE wasm-c.cc
+
+extern "C" {
+
+///////////////////////////////////////////////////////////////////////////////
+// Auxiliaries
+
+// Backing implementation
+
+extern "C++" {
+
+template <class T>
+struct borrowed_vec {
+ wasm::vec<T> it;
+ explicit borrowed_vec(wasm::vec<T>&& v) : it(std::move(v)) {}
+ borrowed_vec(borrowed_vec<T>&& that) : it(std::move(that.it)) {}
+ ~borrowed_vec() { it.release(); }
+};
+
+} // extern "C++"
+
+#define WASM_DEFINE_OWN(name, Name) \
+ struct wasm_##name##_t : Name {}; \
+ \
+ void wasm_##name##_delete(wasm_##name##_t* x) { delete x; } \
+ \
+ extern "C++" inline auto hide(Name* x)->wasm_##name##_t* { \
+ return static_cast<wasm_##name##_t*>(x); \
+ } \
+ extern "C++" inline auto hide(const Name* x)->const wasm_##name##_t* { \
+ return static_cast<const wasm_##name##_t*>(x); \
+ } \
+ extern "C++" inline auto reveal(wasm_##name##_t* x)->Name* { return x; } \
+ extern "C++" inline auto reveal(const wasm_##name##_t* x)->const Name* { \
+ return x; \
+ } \
+ extern "C++" inline auto get(wasm::own<Name*>& x)->wasm_##name##_t* { \
+ return hide(x.get()); \
+ } \
+ extern "C++" inline auto get(const wasm::own<Name*>& x) \
+ ->const wasm_##name##_t* { \
+ return hide(x.get()); \
+ } \
+ extern "C++" inline auto release(wasm::own<Name*>&& x)->wasm_##name##_t* { \
+ return hide(x.release()); \
+ } \
+ extern "C++" inline auto adopt(wasm_##name##_t* x)->wasm::own<Name*> { \
+ return make_own(x); \
+ }
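+
+// WASM_DEFINE_OWN(name, Name) gives each C type a struct wasm_<name>_t that
+// derives from its C++ counterpart, a wasm_<name>_delete() function, and the
+// hide/reveal/get/release/adopt helpers converting between the two views.
+// E.g. WASM_DEFINE_OWN(config, wasm::Config) yields wasm_config_t and
+// wasm_config_delete().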
+
+// Vectors
+
+#define WASM_DEFINE_VEC_BASE(name, Name, ptr_or_none) \
+ extern "C++" inline auto hide(wasm::vec<Name ptr_or_none>& v) \
+ ->wasm_##name##_vec_t* { \
+ static_assert(sizeof(wasm_##name##_vec_t) == sizeof(wasm::vec<Name>), \
+ "C/C++ incompatibility"); \
+ return reinterpret_cast<wasm_##name##_vec_t*>(&v); \
+ } \
+ extern "C++" inline auto hide(const wasm::vec<Name ptr_or_none>& v) \
+ ->const wasm_##name##_vec_t* { \
+ static_assert(sizeof(wasm_##name##_vec_t) == sizeof(wasm::vec<Name>), \
+ "C/C++ incompatibility"); \
+ return reinterpret_cast<const wasm_##name##_vec_t*>(&v); \
+ } \
+ extern "C++" inline auto hide(Name ptr_or_none* v) \
+ ->wasm_##name##_t ptr_or_none* { \
+ static_assert( \
+ sizeof(wasm_##name##_t ptr_or_none) == sizeof(Name ptr_or_none), \
+ "C/C++ incompatibility"); \
+ return reinterpret_cast<wasm_##name##_t ptr_or_none*>(v); \
+ } \
+ extern "C++" inline auto hide(Name ptr_or_none const* v) \
+ ->wasm_##name##_t ptr_or_none const* { \
+ static_assert( \
+ sizeof(wasm_##name##_t ptr_or_none) == sizeof(Name ptr_or_none), \
+ "C/C++ incompatibility"); \
+ return reinterpret_cast<wasm_##name##_t ptr_or_none const*>(v); \
+ } \
+ extern "C++" inline auto reveal(wasm_##name##_t ptr_or_none* v) \
+ ->Name ptr_or_none* { \
+ static_assert( \
+ sizeof(wasm_##name##_t ptr_or_none) == sizeof(Name ptr_or_none), \
+ "C/C++ incompatibility"); \
+ return reinterpret_cast<Name ptr_or_none*>(v); \
+ } \
+ extern "C++" inline auto reveal(wasm_##name##_t ptr_or_none const* v) \
+ ->Name ptr_or_none const* { \
+ static_assert( \
+ sizeof(wasm_##name##_t ptr_or_none) == sizeof(Name ptr_or_none), \
+ "C/C++ incompatibility"); \
+ return reinterpret_cast<Name ptr_or_none const*>(v); \
+ } \
+ extern "C++" inline auto get(wasm::vec<Name ptr_or_none>& v) \
+ ->wasm_##name##_vec_t { \
+ wasm_##name##_vec_t v2 = {v.size(), hide(v.get())}; \
+ return v2; \
+ } \
+ extern "C++" inline auto get(const wasm::vec<Name ptr_or_none>& v) \
+ ->const wasm_##name##_vec_t { \
+ wasm_##name##_vec_t v2 = { \
+ v.size(), const_cast<wasm_##name##_t ptr_or_none*>(hide(v.get()))}; \
+ return v2; \
+ } \
+ extern "C++" inline auto release(wasm::vec<Name ptr_or_none>&& v) \
+ ->wasm_##name##_vec_t { \
+ wasm_##name##_vec_t v2 = {v.size(), hide(v.release())}; \
+ return v2; \
+ } \
+ extern "C++" inline auto adopt(wasm_##name##_vec_t* v) \
+ ->wasm::vec<Name ptr_or_none> { \
+ return wasm::vec<Name ptr_or_none>::adopt(v->size, reveal(v->data)); \
+ } \
+ extern "C++" inline auto borrow(const wasm_##name##_vec_t* v) \
+ ->borrowed_vec<Name ptr_or_none> { \
+ return borrowed_vec<Name ptr_or_none>( \
+ wasm::vec<Name ptr_or_none>::adopt(v->size, reveal(v->data))); \
+ } \
+ \
+ void wasm_##name##_vec_new_uninitialized(wasm_##name##_vec_t* out, \
+ size_t size) { \
+ *out = release(wasm::vec<Name ptr_or_none>::make_uninitialized(size)); \
+ } \
+ void wasm_##name##_vec_new_empty(wasm_##name##_vec_t* out) { \
+ wasm_##name##_vec_new_uninitialized(out, 0); \
+ } \
+ \
+ void wasm_##name##_vec_delete(wasm_##name##_vec_t* v) { adopt(v); }
+
+// Vectors with no ownership management of elements
+#define WASM_DEFINE_VEC_PLAIN(name, Name, ptr_or_none) \
+ WASM_DEFINE_VEC_BASE(name, Name, ptr_or_none) \
+ \
+ void wasm_##name##_vec_new(wasm_##name##_vec_t* out, size_t size, \
+ wasm_##name##_t ptr_or_none const data[]) { \
+ auto v2 = wasm::vec<Name ptr_or_none>::make_uninitialized(size); \
+ if (v2.size() != 0) { \
+ memcpy(v2.get(), data, size * sizeof(wasm_##name##_t ptr_or_none)); \
+ } \
+ *out = release(std::move(v2)); \
+ } \
+ \
+ void wasm_##name##_vec_copy(wasm_##name##_vec_t* out, \
+ wasm_##name##_vec_t* v) { \
+ wasm_##name##_vec_new(out, v->size, v->data); \
+ }
+
+// Vectors that own their elements
+#define WASM_DEFINE_VEC(name, Name, ptr_or_none) \
+ WASM_DEFINE_VEC_BASE(name, Name, ptr_or_none) \
+ \
+ void wasm_##name##_vec_new(wasm_##name##_vec_t* out, size_t size, \
+ wasm_##name##_t ptr_or_none const data[]) { \
+ auto v2 = wasm::vec<Name ptr_or_none>::make_uninitialized(size); \
+ for (size_t i = 0; i < v2.size(); ++i) { \
+ v2[i] = adopt(data[i]); \
+ } \
+ *out = release(std::move(v2)); \
+ } \
+ \
+ void wasm_##name##_vec_copy(wasm_##name##_vec_t* out, \
+ wasm_##name##_vec_t* v) { \
+ auto v2 = wasm::vec<Name ptr_or_none>::make_uninitialized(v->size); \
+ for (size_t i = 0; i < v2.size(); ++i) { \
+ v2[i] = adopt(wasm_##name##_copy(v->data[i])); \
+ } \
+ *out = release(std::move(v2)); \
+ }
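+
+// The two vector macros differ only in element ownership: the PLAIN form
+// copies elements bitwise (memcpy), while the owning form adopts each element
+// on construction and deep-copies via wasm_<name>_copy() in _vec_copy().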
+
+extern "C++" {
+template <class T>
+inline auto is_empty(T* p) -> bool {
+ return !p;
+}
+}
+
+// Byte vectors
+
+using byte = byte_t;
+WASM_DEFINE_VEC_PLAIN(byte, byte, )
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Environment
+
+// Configuration
+
+WASM_DEFINE_OWN(config, wasm::Config)
+
+wasm_config_t* wasm_config_new() { return release(wasm::Config::make()); }
+
+// Engine
+
+WASM_DEFINE_OWN(engine, wasm::Engine)
+
+wasm_engine_t* wasm_engine_new() { return release(wasm::Engine::make()); }
+
+wasm_engine_t* wasm_engine_new_with_config(wasm_config_t* config) {
+ return release(wasm::Engine::make(adopt(config)));
+}
+
+// Stores
+
+WASM_DEFINE_OWN(store, wasm::Store)
+
+wasm_store_t* wasm_store_new(wasm_engine_t* engine) {
+ return release(wasm::Store::make(engine));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Type Representations
+
+// Type attributes
+
+extern "C++" inline auto hide(wasm::Mutability mutability)
+ -> wasm_mutability_t {
+ return static_cast<wasm_mutability_t>(mutability);
+}
+
+extern "C++" inline auto reveal(wasm_mutability_t mutability)
+ -> wasm::Mutability {
+ return static_cast<wasm::Mutability>(mutability);
+}
+
+extern "C++" inline auto hide(const wasm::Limits& limits)
+ -> const wasm_limits_t* {
+ return reinterpret_cast<const wasm_limits_t*>(&limits);
+}
+
+extern "C++" inline auto reveal(wasm_limits_t limits) -> wasm::Limits {
+ return wasm::Limits(limits.min, limits.max);
+}
+
+extern "C++" inline auto hide(wasm::ValKind kind) -> wasm_valkind_t {
+ return static_cast<wasm_valkind_t>(kind);
+}
+
+extern "C++" inline auto reveal(wasm_valkind_t kind) -> wasm::ValKind {
+ return static_cast<wasm::ValKind>(kind);
+}
+
+extern "C++" inline auto hide(wasm::ExternKind kind) -> wasm_externkind_t {
+ return static_cast<wasm_externkind_t>(kind);
+}
+
+extern "C++" inline auto reveal(wasm_externkind_t kind) -> wasm::ExternKind {
+ return static_cast<wasm::ExternKind>(kind);
+}
+
+// Generic
+
+#define WASM_DEFINE_TYPE(name, Name) \
+ WASM_DEFINE_OWN(name, Name) \
+ WASM_DEFINE_VEC(name, Name, *) \
+ \
+ wasm_##name##_t* wasm_##name##_copy(wasm_##name##_t* t) { \
+ return release(t->copy()); \
+ }
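+
+// WASM_DEFINE_TYPE bundles the own and owning-vec definitions with a
+// wasm_<name>_copy() function; e.g. the valtype instantiation below provides
+// wasm_valtype_t, wasm_valtype_delete, wasm_valtype_copy, and the
+// wasm_valtype_vec_* helpers.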
+
+// Value Types
+
+WASM_DEFINE_TYPE(valtype, wasm::ValType)
+
+wasm_valtype_t* wasm_valtype_new(wasm_valkind_t k) {
+ return release(wasm::ValType::make(reveal(k)));
+}
+
+wasm_valkind_t wasm_valtype_kind(const wasm_valtype_t* t) {
+ return hide(t->kind());
+}
+
+// Function Types
+
+WASM_DEFINE_TYPE(functype, wasm::FuncType)
+
+wasm_functype_t* wasm_functype_new(wasm_valtype_vec_t* params,
+ wasm_valtype_vec_t* results) {
+ return release(wasm::FuncType::make(adopt(params), adopt(results)));
+}
+
+const wasm_valtype_vec_t* wasm_functype_params(const wasm_functype_t* ft) {
+ return hide(ft->params());
+}
+
+const wasm_valtype_vec_t* wasm_functype_results(const wasm_functype_t* ft) {
+ return hide(ft->results());
+}
+
+// Global Types
+
+WASM_DEFINE_TYPE(globaltype, wasm::GlobalType)
+
+wasm_globaltype_t* wasm_globaltype_new(wasm_valtype_t* content,
+ wasm_mutability_t mutability) {
+ return release(wasm::GlobalType::make(adopt(content), reveal(mutability)));
+}
+
+const wasm_valtype_t* wasm_globaltype_content(const wasm_globaltype_t* gt) {
+ return hide(gt->content());
+}
+
+wasm_mutability_t wasm_globaltype_mutability(const wasm_globaltype_t* gt) {
+ return hide(gt->mutability());
+}
+
+// Table Types
+
+WASM_DEFINE_TYPE(tabletype, wasm::TableType)
+
+wasm_tabletype_t* wasm_tabletype_new(wasm_valtype_t* element,
+ const wasm_limits_t* limits) {
+ return release(wasm::TableType::make(adopt(element), reveal(*limits)));
+}
+
+const wasm_valtype_t* wasm_tabletype_element(const wasm_tabletype_t* tt) {
+ return hide(tt->element());
+}
+
+const wasm_limits_t* wasm_tabletype_limits(const wasm_tabletype_t* tt) {
+ return hide(tt->limits());
+}
+
+// Memory Types
+
+WASM_DEFINE_TYPE(memorytype, wasm::MemoryType)
+
+wasm_memorytype_t* wasm_memorytype_new(const wasm_limits_t* limits) {
+ return release(wasm::MemoryType::make(reveal(*limits)));
+}
+
+const wasm_limits_t* wasm_memorytype_limits(const wasm_memorytype_t* mt) {
+ return hide(mt->limits());
+}
+
+// Extern Types
+
+WASM_DEFINE_TYPE(externtype, wasm::ExternType)
+
+wasm_externkind_t wasm_externtype_kind(const wasm_externtype_t* et) {
+ return hide(et->kind());
+}
+
+wasm_externtype_t* wasm_functype_as_externtype(wasm_functype_t* ft) {
+ return hide(static_cast<wasm::ExternType*>(ft));
+}
+wasm_externtype_t* wasm_globaltype_as_externtype(wasm_globaltype_t* gt) {
+ return hide(static_cast<wasm::ExternType*>(gt));
+}
+wasm_externtype_t* wasm_tabletype_as_externtype(wasm_tabletype_t* tt) {
+ return hide(static_cast<wasm::ExternType*>(tt));
+}
+wasm_externtype_t* wasm_memorytype_as_externtype(wasm_memorytype_t* mt) {
+ return hide(static_cast<wasm::ExternType*>(mt));
+}
+
+const wasm_externtype_t* wasm_functype_as_externtype_const(
+ const wasm_functype_t* ft) {
+ return hide(static_cast<const wasm::ExternType*>(ft));
+}
+const wasm_externtype_t* wasm_globaltype_as_externtype_const(
+ const wasm_globaltype_t* gt) {
+ return hide(static_cast<const wasm::ExternType*>(gt));
+}
+const wasm_externtype_t* wasm_tabletype_as_externtype_const(
+ const wasm_tabletype_t* tt) {
+ return hide(static_cast<const wasm::ExternType*>(tt));
+}
+const wasm_externtype_t* wasm_memorytype_as_externtype_const(
+ const wasm_memorytype_t* mt) {
+ return hide(static_cast<const wasm::ExternType*>(mt));
+}
+
+wasm_functype_t* wasm_externtype_as_functype(wasm_externtype_t* et) {
+ return et->kind() == wasm::EXTERN_FUNC
+ ? hide(static_cast<wasm::FuncType*>(reveal(et)))
+ : nullptr;
+}
+wasm_globaltype_t* wasm_externtype_as_globaltype(wasm_externtype_t* et) {
+ return et->kind() == wasm::EXTERN_GLOBAL
+ ? hide(static_cast<wasm::GlobalType*>(reveal(et)))
+ : nullptr;
+}
+wasm_tabletype_t* wasm_externtype_as_tabletype(wasm_externtype_t* et) {
+ return et->kind() == wasm::EXTERN_TABLE
+ ? hide(static_cast<wasm::TableType*>(reveal(et)))
+ : nullptr;
+}
+wasm_memorytype_t* wasm_externtype_as_memorytype(wasm_externtype_t* et) {
+ return et->kind() == wasm::EXTERN_MEMORY
+ ? hide(static_cast<wasm::MemoryType*>(reveal(et)))
+ : nullptr;
+}
+
+const wasm_functype_t* wasm_externtype_as_functype_const(
+ const wasm_externtype_t* et) {
+ return et->kind() == wasm::EXTERN_FUNC
+ ? hide(static_cast<const wasm::FuncType*>(reveal(et)))
+ : nullptr;
+}
+const wasm_globaltype_t* wasm_externtype_as_globaltype_const(
+ const wasm_externtype_t* et) {
+ return et->kind() == wasm::EXTERN_GLOBAL
+ ? hide(static_cast<const wasm::GlobalType*>(reveal(et)))
+ : nullptr;
+}
+const wasm_tabletype_t* wasm_externtype_as_tabletype_const(
+ const wasm_externtype_t* et) {
+ return et->kind() == wasm::EXTERN_TABLE
+ ? hide(static_cast<const wasm::TableType*>(reveal(et)))
+ : nullptr;
+}
+const wasm_memorytype_t* wasm_externtype_as_memorytype_const(
+ const wasm_externtype_t* et) {
+ return et->kind() == wasm::EXTERN_MEMORY
+ ? hide(static_cast<const wasm::MemoryType*>(reveal(et)))
+ : nullptr;
+}
+
+// Import Types
+
+WASM_DEFINE_TYPE(importtype, wasm::ImportType)
+
+wasm_importtype_t* wasm_importtype_new(wasm_name_t* module, wasm_name_t* name,
+ wasm_externtype_t* type) {
+ return release(
+ wasm::ImportType::make(adopt(module), adopt(name), adopt(type)));
+}
+
+const wasm_name_t* wasm_importtype_module(const wasm_importtype_t* it) {
+ return hide(it->module());
+}
+
+const wasm_name_t* wasm_importtype_name(const wasm_importtype_t* it) {
+ return hide(it->name());
+}
+
+const wasm_externtype_t* wasm_importtype_type(const wasm_importtype_t* it) {
+ return hide(it->type());
+}
+
+// Export Types
+
+WASM_DEFINE_TYPE(exporttype, wasm::ExportType)
+
+wasm_exporttype_t* wasm_exporttype_new(wasm_name_t* name,
+ wasm_externtype_t* type) {
+ return release(wasm::ExportType::make(adopt(name), adopt(type)));
+}
+
+const wasm_name_t* wasm_exporttype_name(const wasm_exporttype_t* et) {
+ return hide(et->name());
+}
+
+const wasm_externtype_t* wasm_exporttype_type(const wasm_exporttype_t* et) {
+ return hide(et->type());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Values
+
+// References
+
+#define WASM_DEFINE_REF_BASE(name, Name) \
+ WASM_DEFINE_OWN(name, Name) \
+ \
+ wasm_##name##_t* wasm_##name##_copy(const wasm_##name##_t* t) { \
+ return release(t->copy()); \
+ } \
+ \
+ void* wasm_##name##_get_host_info(const wasm_##name##_t* r) { \
+ return r->get_host_info(); \
+ } \
+ void wasm_##name##_set_host_info(wasm_##name##_t* r, void* info) { \
+ r->set_host_info(info); \
+ } \
+ void wasm_##name##_set_host_info_with_finalizer( \
+ wasm_##name##_t* r, void* info, void (*finalizer)(void*)) { \
+ r->set_host_info(info, finalizer); \
+ }
+
+#define WASM_DEFINE_REF(name, Name) \
+ WASM_DEFINE_REF_BASE(name, Name) \
+ \
+ wasm_ref_t* wasm_##name##_as_ref(wasm_##name##_t* r) { \
+ return hide(static_cast<wasm::Ref*>(reveal(r))); \
+ } \
+ wasm_##name##_t* wasm_ref_as_##name(wasm_ref_t* r) { \
+ return hide(static_cast<Name*>(reveal(r))); \
+ } \
+ \
+ const wasm_ref_t* wasm_##name##_as_ref_const(const wasm_##name##_t* r) { \
+ return hide(static_cast<const wasm::Ref*>(reveal(r))); \
+ } \
+ const wasm_##name##_t* wasm_ref_as_##name##_const(const wasm_ref_t* r) { \
+ return hide(static_cast<const Name*>(reveal(r))); \
+ }
+
+#define WASM_DEFINE_SHARABLE_REF(name, Name) \
+ WASM_DEFINE_REF(name, Name) \
+ WASM_DEFINE_OWN(shared_##name, wasm::Shared<Name>)
+
+WASM_DEFINE_REF_BASE(ref, wasm::Ref)
+
+// Values
+
+extern "C++" {
+
+inline auto is_empty(wasm_val_t v) -> bool {
+ return !is_ref(reveal(v.kind)) || !v.of.ref;
+}
+
+inline auto hide(wasm::Val v) -> wasm_val_t {
+ wasm_val_t v2 = {hide(v.kind()), {}};
+ switch (v.kind()) {
+ case wasm::I32:
+ v2.of.i32 = v.i32();
+ break;
+ case wasm::I64:
+ v2.of.i64 = v.i64();
+ break;
+ case wasm::F32:
+ v2.of.f32 = v.f32();
+ break;
+ case wasm::F64:
+ v2.of.f64 = v.f64();
+ break;
+ case wasm::ANYREF:
+ case wasm::FUNCREF:
+ v2.of.ref = hide(v.ref());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return v2;
+}
+
+inline auto release(wasm::Val v) -> wasm_val_t {
+ wasm_val_t v2 = {hide(v.kind()), {}};
+ switch (v.kind()) {
+ case wasm::I32:
+ v2.of.i32 = v.i32();
+ break;
+ case wasm::I64:
+ v2.of.i64 = v.i64();
+ break;
+ case wasm::F32:
+ v2.of.f32 = v.f32();
+ break;
+ case wasm::F64:
+ v2.of.f64 = v.f64();
+ break;
+ case wasm::ANYREF:
+ case wasm::FUNCREF:
+ v2.of.ref = release(v.release_ref());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return v2;
+}
+
+inline auto adopt(wasm_val_t v) -> wasm::Val {
+ switch (reveal(v.kind)) {
+ case wasm::I32:
+ return wasm::Val(v.of.i32);
+ case wasm::I64:
+ return wasm::Val(v.of.i64);
+ case wasm::F32:
+ return wasm::Val(v.of.f32);
+ case wasm::F64:
+ return wasm::Val(v.of.f64);
+ case wasm::ANYREF:
+ case wasm::FUNCREF:
+ return wasm::Val(adopt(v.of.ref));
+ default:
+ UNREACHABLE();
+ }
+}
+
+struct borrowed_val {
+ wasm::Val it;
+ explicit borrowed_val(wasm::Val&& v) : it(std::move(v)) {}
+ borrowed_val(borrowed_val&& that) : it(std::move(that.it)) {}
+ ~borrowed_val() {
+ if (it.is_ref()) it.release_ref();
+ }
+};
+
+inline auto borrow(const wasm_val_t* v) -> borrowed_val {
+ wasm::Val v2;
+ switch (reveal(v->kind)) {
+ case wasm::I32:
+ v2 = wasm::Val(v->of.i32);
+ break;
+ case wasm::I64:
+ v2 = wasm::Val(v->of.i64);
+ break;
+ case wasm::F32:
+ v2 = wasm::Val(v->of.f32);
+ break;
+ case wasm::F64:
+ v2 = wasm::Val(v->of.f64);
+ break;
+ case wasm::ANYREF:
+ case wasm::FUNCREF:
+ v2 = wasm::Val(adopt(v->of.ref));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return borrowed_val(std::move(v2));
+}
+
+} // extern "C++"
+
+WASM_DEFINE_VEC_BASE(val, wasm::Val, )
+
+void wasm_val_vec_new(wasm_val_vec_t* out, size_t size,
+ wasm_val_t const data[]) {
+ auto v2 = wasm::vec<wasm::Val>::make_uninitialized(size);
+ for (size_t i = 0; i < v2.size(); ++i) {
+ v2[i] = adopt(data[i]);
+ }
+ *out = release(std::move(v2));
+}
+
+void wasm_val_vec_copy(wasm_val_vec_t* out, wasm_val_vec_t* v) {
+ auto v2 = wasm::vec<wasm::Val>::make_uninitialized(v->size);
+ for (size_t i = 0; i < v2.size(); ++i) {
+ wasm_val_t val;
+ wasm_val_copy(&v->data[i], &val);
+ v2[i] = adopt(val);
+ }
+ *out = release(std::move(v2));
+}
+
+void wasm_val_delete(wasm_val_t* v) {
+ if (is_ref(reveal(v->kind))) adopt(v->of.ref);
+}
+
+void wasm_val_copy(wasm_val_t* out, const wasm_val_t* v) {
+ *out = *v;
+ if (is_ref(reveal(v->kind))) {
+ out->of.ref = release(v->of.ref->copy());
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Objects
+
+// Traps
+
+WASM_DEFINE_REF(trap, wasm::Trap)
+
+wasm_trap_t* wasm_trap_new(wasm_store_t* store, const wasm_message_t* message) {
+ auto message_ = borrow(message);
+ return release(wasm::Trap::make(store, message_.it));
+}
+
+void wasm_trap_message(const wasm_trap_t* trap, wasm_message_t* out) {
+ *out = release(reveal(trap)->message());
+}
+
+// Foreign Objects
+
+WASM_DEFINE_REF(foreign, wasm::Foreign)
+
+wasm_foreign_t* wasm_foreign_new(wasm_store_t* store) {
+ return release(wasm::Foreign::make(store));
+}
+
+// Modules
+
+WASM_DEFINE_SHARABLE_REF(module, wasm::Module)
+
+bool wasm_module_validate(wasm_store_t* store, const wasm_byte_vec_t* binary) {
+ auto binary_ = borrow(binary);
+ return wasm::Module::validate(store, binary_.it);
+}
+
+wasm_module_t* wasm_module_new(wasm_store_t* store,
+ const wasm_byte_vec_t* binary) {
+ auto binary_ = borrow(binary);
+ return release(wasm::Module::make(store, binary_.it));
+}
+
+void wasm_module_imports(const wasm_module_t* module,
+ wasm_importtype_vec_t* out) {
+ *out = release(reveal(module)->imports());
+}
+
+void wasm_module_exports(const wasm_module_t* module,
+ wasm_exporttype_vec_t* out) {
+ *out = release(reveal(module)->exports());
+}
+
+void wasm_module_serialize(const wasm_module_t* module, wasm_byte_vec_t* out) {
+ *out = release(reveal(module)->serialize());
+}
+
+wasm_module_t* wasm_module_deserialize(wasm_store_t* store,
+ const wasm_byte_vec_t* binary) {
+ auto binary_ = borrow(binary);
+ return release(wasm::Module::deserialize(store, binary_.it));
+}
+
+wasm_shared_module_t* wasm_module_share(const wasm_module_t* module) {
+ return release(reveal(module)->share());
+}
+
+wasm_module_t* wasm_module_obtain(wasm_store_t* store,
+ const wasm_shared_module_t* shared) {
+ return release(wasm::Module::obtain(store, shared));
+}
+
+// Function Instances
+
+WASM_DEFINE_REF(func, wasm::Func)
+
+extern "C++" {
+
+auto wasm_callback(void* env, const wasm::Val args[], wasm::Val results[])
+ -> wasm::own<wasm::Trap*> {
+ auto f = reinterpret_cast<wasm_func_callback_t>(env);
+ return adopt(f(hide(args), hide(results)));
+}
+
+struct wasm_callback_env_t {
+ wasm_func_callback_with_env_t callback;
+ void* env;
+ void (*finalizer)(void*);
+};
+
+auto wasm_callback_with_env(void* env, const wasm::Val args[],
+ wasm::Val results[]) -> wasm::own<wasm::Trap*> {
+ auto t = static_cast<wasm_callback_env_t*>(env);
+ return adopt(t->callback(t->env, hide(args), hide(results)));
+}
+
+void wasm_callback_env_finalizer(void* env) {
+ auto t = static_cast<wasm_callback_env_t*>(env);
+ if (t->finalizer) t->finalizer(t->env);
+ delete t;
+}
+
+} // extern "C++"
+
+wasm_func_t* wasm_func_new(wasm_store_t* store, const wasm_functype_t* type,
+ wasm_func_callback_t callback) {
+ return release(wasm::Func::make(store, type, wasm_callback,
+ reinterpret_cast<void*>(callback)));
+}
+
+wasm_func_t* wasm_func_new_with_env(wasm_store_t* store,
+ const wasm_functype_t* type,
+ wasm_func_callback_with_env_t callback,
+ void* env, void (*finalizer)(void*)) {
+ auto env2 = new wasm_callback_env_t{callback, env, finalizer};
+ return release(wasm::Func::make(store, type, wasm_callback_with_env, env2,
+ wasm_callback_env_finalizer));
+}
+
+wasm_functype_t* wasm_func_type(const wasm_func_t* func) {
+ return release(func->type());
+}
+
+size_t wasm_func_param_arity(const wasm_func_t* func) {
+ return func->param_arity();
+}
+
+size_t wasm_func_result_arity(const wasm_func_t* func) {
+ return func->result_arity();
+}
+
+wasm_trap_t* wasm_func_call(const wasm_func_t* func, const wasm_val_t args[],
+ wasm_val_t results[]) {
+ return release(func->call(reveal(args), reveal(results)));
+}
+
+// Global Instances
+
+WASM_DEFINE_REF(global, wasm::Global)
+
+wasm_global_t* wasm_global_new(wasm_store_t* store,
+ const wasm_globaltype_t* type,
+ const wasm_val_t* val) {
+ auto val_ = borrow(val);
+ return release(wasm::Global::make(store, type, val_.it));
+}
+
+wasm_globaltype_t* wasm_global_type(const wasm_global_t* global) {
+ return release(global->type());
+}
+
+void wasm_global_get(const wasm_global_t* global, wasm_val_t* out) {
+ *out = release(global->get());
+}
+
+void wasm_global_set(wasm_global_t* global, const wasm_val_t* val) {
+ auto val_ = borrow(val);
+ global->set(val_.it);
+}
+
+// Table Instances
+
+WASM_DEFINE_REF(table, wasm::Table)
+
+wasm_table_t* wasm_table_new(wasm_store_t* store, const wasm_tabletype_t* type,
+ wasm_ref_t* ref) {
+ return release(wasm::Table::make(store, type, ref));
+}
+
+wasm_tabletype_t* wasm_table_type(const wasm_table_t* table) {
+ return release(table->type());
+}
+
+wasm_ref_t* wasm_table_get(const wasm_table_t* table, wasm_table_size_t index) {
+ return release(table->get(index));
+}
+
+bool wasm_table_set(wasm_table_t* table, wasm_table_size_t index,
+ wasm_ref_t* ref) {
+ return table->set(index, ref);
+}
+
+wasm_table_size_t wasm_table_size(const wasm_table_t* table) {
+ return table->size();
+}
+
+bool wasm_table_grow(wasm_table_t* table, wasm_table_size_t delta,
+ wasm_ref_t* ref) {
+ return table->grow(delta, ref);
+}
+
+// Memory Instances
+
+WASM_DEFINE_REF(memory, wasm::Memory)
+
+wasm_memory_t* wasm_memory_new(wasm_store_t* store,
+ const wasm_memorytype_t* type) {
+ return release(wasm::Memory::make(store, type));
+}
+
+wasm_memorytype_t* wasm_memory_type(const wasm_memory_t* memory) {
+ return release(memory->type());
+}
+
+wasm_byte_t* wasm_memory_data(wasm_memory_t* memory) { return memory->data(); }
+
+size_t wasm_memory_data_size(const wasm_memory_t* memory) {
+ return memory->data_size();
+}
+
+wasm_memory_pages_t wasm_memory_size(const wasm_memory_t* memory) {
+ return memory->size();
+}
+
+bool wasm_memory_grow(wasm_memory_t* memory, wasm_memory_pages_t delta) {
+ return memory->grow(delta);
+}
+
+// Externals
+
+WASM_DEFINE_REF(extern, wasm::Extern)
+WASM_DEFINE_VEC(extern, wasm::Extern, *)
+
+wasm_externkind_t wasm_extern_kind(const wasm_extern_t* external) {
+ return hide(external->kind());
+}
+wasm_externtype_t* wasm_extern_type(const wasm_extern_t* external) {
+ return release(external->type());
+}
+
+wasm_extern_t* wasm_func_as_extern(wasm_func_t* func) {
+ return hide(static_cast<wasm::Extern*>(reveal(func)));
+}
+wasm_extern_t* wasm_global_as_extern(wasm_global_t* global) {
+ return hide(static_cast<wasm::Extern*>(reveal(global)));
+}
+wasm_extern_t* wasm_table_as_extern(wasm_table_t* table) {
+ return hide(static_cast<wasm::Extern*>(reveal(table)));
+}
+wasm_extern_t* wasm_memory_as_extern(wasm_memory_t* memory) {
+ return hide(static_cast<wasm::Extern*>(reveal(memory)));
+}
+
+const wasm_extern_t* wasm_func_as_extern_const(const wasm_func_t* func) {
+ return hide(static_cast<const wasm::Extern*>(reveal(func)));
+}
+const wasm_extern_t* wasm_global_as_extern_const(const wasm_global_t* global) {
+ return hide(static_cast<const wasm::Extern*>(reveal(global)));
+}
+const wasm_extern_t* wasm_table_as_extern_const(const wasm_table_t* table) {
+ return hide(static_cast<const wasm::Extern*>(reveal(table)));
+}
+const wasm_extern_t* wasm_memory_as_extern_const(const wasm_memory_t* memory) {
+ return hide(static_cast<const wasm::Extern*>(reveal(memory)));
+}
+
+wasm_func_t* wasm_extern_as_func(wasm_extern_t* external) {
+ return hide(external->func());
+}
+wasm_global_t* wasm_extern_as_global(wasm_extern_t* external) {
+ return hide(external->global());
+}
+wasm_table_t* wasm_extern_as_table(wasm_extern_t* external) {
+ return hide(external->table());
+}
+wasm_memory_t* wasm_extern_as_memory(wasm_extern_t* external) {
+ return hide(external->memory());
+}
+
+const wasm_func_t* wasm_extern_as_func_const(const wasm_extern_t* external) {
+ return hide(external->func());
+}
+const wasm_global_t* wasm_extern_as_global_const(
+ const wasm_extern_t* external) {
+ return hide(external->global());
+}
+const wasm_table_t* wasm_extern_as_table_const(const wasm_extern_t* external) {
+ return hide(external->table());
+}
+const wasm_memory_t* wasm_extern_as_memory_const(
+ const wasm_extern_t* external) {
+ return hide(external->memory());
+}
+
+// Module Instances
+
+WASM_DEFINE_REF(instance, wasm::Instance)
+
+wasm_instance_t* wasm_instance_new(wasm_store_t* store,
+ const wasm_module_t* module,
+ const wasm_extern_t* const imports[]) {
+ return release(wasm::Instance::make(
+ store, module, reinterpret_cast<const wasm::Extern* const*>(imports)));
+}
+
+void wasm_instance_exports(const wasm_instance_t* instance,
+ wasm_extern_vec_t* out) {
+ *out = release(instance->exports());
+}
+
+#undef WASM_DEFINE_OWN
+#undef WASM_DEFINE_VEC_BASE
+#undef WASM_DEFINE_VEC_PLAIN
+#undef WASM_DEFINE_VEC
+#undef WASM_DEFINE_TYPE
+#undef WASM_DEFINE_REF_BASE
+#undef WASM_DEFINE_REF
+#undef WASM_DEFINE_SHARABLE_REF
+
+} // extern "C"
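The functions above are thin shims that translate between the C-style wasm_* types and the C++ wasm::* API, following the hide/reveal and adopt/release ownership conventions defined earlier in the file. As a minimal sketch of how an embedder might drive these entry points, assuming the standard wasm.h C API header that this file implements (binary stands for an already-read module binary, error handling omitted):

#include "wasm.h"

void instantiate_sketch(const wasm_byte_vec_t* binary) {
  wasm_engine_t* engine = wasm_engine_new();
  wasm_store_t* store = wasm_store_new(engine);
  wasm_module_t* module = wasm_module_new(store, binary);

  // This sketch assumes a module without imports, so no externs are passed.
  wasm_instance_t* instance = wasm_instance_new(store, module, nullptr);

  wasm_extern_vec_t exports;
  wasm_instance_exports(instance, &exports);
  // ... look up and call exported functions via wasm_extern_as_func() ...

  // Everything returned by the *_new and *_exports calls is owned by the
  // caller and released with the corresponding *_delete function.
  wasm_extern_vec_delete(&exports);
  wasm_instance_delete(instance);
  wasm_module_delete(module);
  wasm_store_delete(store);
  wasm_engine_delete(engine);
}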
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index bbcbf9f25e..1df93c8296 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -109,25 +109,30 @@ class CompilationState {
void AbortCompilation();
- void SetError(uint32_t func_index, const WasmError& error);
+ void SetError();
void SetWireBytesStorage(std::shared_ptr<WireBytesStorage>);
- std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const;
+ V8_EXPORT_PRIVATE std::shared_ptr<WireBytesStorage> GetWireBytesStorage()
+ const;
void AddCallback(callback_t);
bool failed() const;
- void OnFinishedUnit(ExecutionTier, WasmCode*);
+ void OnFinishedUnit(WasmCode*);
+ void OnFinishedUnits(Vector<WasmCode*>);
private:
friend class NativeModule;
friend class WasmCompilationUnit;
CompilationState() = delete;
- static std::unique_ptr<CompilationState> New(NativeModule*,
- std::shared_ptr<Counters>);
+ // The CompilationState keeps a {std::weak_ptr} back to the {NativeModule}
+ // such that it can keep it alive (by regaining a {std::shared_ptr}) in
+ // certain scopes.
+ static std::unique_ptr<CompilationState> New(
+ const std::shared_ptr<NativeModule>&, std::shared_ptr<Counters>);
};
} // namespace wasm
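The new {New} signature reflects the ownership comment above: the compilation state keeps only a weak reference to its owning module and temporarily regains shared ownership where it actually needs the module to stay alive. A minimal sketch of that pattern, using stand-in types rather than the real V8 classes:

#include <memory>

struct NativeModuleStub {};  // stand-in for the real NativeModule

class CompilationStateSketch {
 public:
  explicit CompilationStateSketch(
      const std::shared_ptr<NativeModuleStub>& native_module)
      : native_module_(native_module) {}  // only a weak_ptr is stored

  void RunIfModuleAlive() {
    // Regain shared ownership for the duration of this scope; if the module
    // has already been destroyed, lock() returns nullptr and the work is
    // simply skipped.
    if (auto module = native_module_.lock()) {
      // ... safe to use *module here ...
    }
  }

 private:
  std::weak_ptr<NativeModuleStub> native_module_;
};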
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index f1071dc1b0..315f504761 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -59,8 +59,7 @@ class Decoder {
inline bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
- DCHECK_LE(pc, end_);
- if (V8_UNLIKELY(length > static_cast<uint32_t>(end_ - pc))) {
+ if (V8_UNLIKELY(pc > end_ || length > static_cast<uint32_t>(end_ - pc))) {
error(pc, msg);
return false;
}
@@ -336,14 +335,13 @@ class Decoder {
static_assert(byte_index < kMaxLength, "invalid template instantiation");
constexpr int shift = byte_index * 7;
constexpr bool is_last_byte = byte_index == kMaxLength - 1;
- DCHECK_LE(pc, end_);
- const bool at_end = validate && pc == end_;
+ const bool at_end = validate && pc >= end_;
byte b = 0;
if (!at_end) {
DCHECK_LT(pc, end_);
b = *pc;
TRACE_IF(trace, "%02x ", b);
- typedef typename std::make_unsigned<IntType>::type Unsigned;
+ using Unsigned = typename std::make_unsigned<IntType>::type;
result = result |
(static_cast<Unsigned>(static_cast<IntType>(b) & 0x7f) << shift);
}
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index f23fb81049..a5214513fc 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -61,7 +61,7 @@ struct WasmException;
(message)))
#define ATOMIC_OP_LIST(V) \
- V(AtomicWake, Uint32) \
+ V(AtomicNotify, Uint32) \
V(I32AtomicWait, Uint32) \
V(I64AtomicWait, Uint32) \
V(I32AtomicLoad, Uint32) \
@@ -297,17 +297,29 @@ struct BranchDepthImmediate {
};
template <Decoder::ValidateFlag validate>
+struct BranchOnExceptionImmediate {
+ BranchDepthImmediate<validate> depth;
+ ExceptionIndexImmediate<validate> index;
+ uint32_t length = 0;
+ inline BranchOnExceptionImmediate(Decoder* decoder, const byte* pc)
+ : depth(BranchDepthImmediate<validate>(decoder, pc)),
+ index(ExceptionIndexImmediate<validate>(decoder, pc + depth.length)) {
+ length = depth.length + index.length;
+ }
+};
+
+template <Decoder::ValidateFlag validate>
struct CallIndirectImmediate {
uint32_t table_index;
uint32_t sig_index;
FunctionSig* sig = nullptr;
uint32_t length = 0;
- inline CallIndirectImmediate(Decoder* decoder, const byte* pc) {
+ inline CallIndirectImmediate(const WasmFeatures enabled, Decoder* decoder,
+ const byte* pc) {
uint32_t len = 0;
sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
- if (!VALIDATE(decoder->ok())) return;
table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
- if (!VALIDATE(table_index == 0)) {
+ if (!VALIDATE(table_index == 0 || enabled.anyref)) {
decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
table_index);
}
@@ -416,7 +428,6 @@ struct MemoryAccessImmediate {
"actual alignment is %u",
max_alignment, alignment);
}
- if (!VALIDATE(decoder->ok())) return;
uint32_t offset_length;
offset = decoder->read_u32v<validate>(pc + 1 + alignment_length,
&offset_length, "offset");
@@ -454,7 +465,6 @@ struct Simd8x16ShuffleImmediate {
inline Simd8x16ShuffleImmediate(Decoder* decoder, const byte* pc) {
for (uint32_t i = 0; i < kSimd128Size; ++i) {
shuffle[i] = decoder->read_u8<validate>(pc + 2 + i, "shuffle");
- if (!VALIDATE(decoder->ok())) return;
}
}
};
@@ -469,7 +479,6 @@ struct MemoryInitImmediate {
uint32_t len = 0;
data_segment_index =
decoder->read_i32v<validate>(pc + 2, &len, "data segment index");
- if (!VALIDATE(decoder->ok())) return;
memory = MemoryIndexImmediate<validate>(decoder, pc + 1 + len);
length = len + memory.length;
}
@@ -493,10 +502,8 @@ struct MemoryCopyImmediate {
inline MemoryCopyImmediate(Decoder* decoder, const byte* pc) {
memory_src = MemoryIndexImmediate<validate>(decoder, pc + 1);
- if (!VALIDATE(decoder->ok())) return;
memory_dst =
MemoryIndexImmediate<validate>(decoder, pc + 1 + memory_src.length);
- if (!VALIDATE(decoder->ok())) return;
length = memory_src.length + memory_dst.length;
}
};
@@ -511,7 +518,6 @@ struct TableInitImmediate {
uint32_t len = 0;
elem_segment_index =
decoder->read_i32v<validate>(pc + 2, &len, "elem segment index");
- if (!VALIDATE(decoder->ok())) return;
table = TableIndexImmediate<validate>(decoder, pc + 1 + len);
length = len + table.length;
}
@@ -535,10 +541,8 @@ struct TableCopyImmediate {
inline TableCopyImmediate(Decoder* decoder, const byte* pc) {
table_src = TableIndexImmediate<validate>(decoder, pc + 1);
- if (!VALIDATE(decoder->ok())) return;
table_dst =
TableIndexImmediate<validate>(decoder, pc + 1 + table_src.length);
- if (!VALIDATE(decoder->ok())) return;
length = table_src.length + table_dst.length;
}
};
@@ -768,7 +772,7 @@ class WasmDecoder : public Decoder {
if (decoder->failed()) return false;
TRACE("local decls count: %u\n", entries);
- while (entries-- > 0 && VALIDATE(decoder->ok()) && decoder->more()) {
+ while (entries-- > 0 && decoder->more()) {
uint32_t count = decoder->consume_u32v("local count");
if (decoder->failed()) return false;
@@ -961,10 +965,16 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && !module_->tables.empty())) {
+ if (!VALIDATE(module_ != nullptr &&
+ imm.table_index < module_->tables.size())) {
error("function table has to exist to execute call_indirect");
return false;
}
+ if (!VALIDATE(module_ != nullptr &&
+ module_->tables[imm.table_index].type == kWasmAnyFunc)) {
+ error("table of call_indirect must be of type anyfunc");
+ return false;
+ }
if (!Complete(pc, imm)) {
errorf(pc + 1, "invalid signature index: #%u", imm.sig_index);
return false;
@@ -1068,7 +1078,7 @@ class WasmDecoder : public Decoder {
inline bool Complete(BlockTypeImmediate<validate>& imm) {
if (imm.type != kWasmVar) return true;
- if (!VALIDATE((module_ && imm.sig_index < module_->signatures.size()))) {
+ if (!VALIDATE(module_ && imm.sig_index < module_->signatures.size())) {
return false;
}
imm.sig = module_->signatures[imm.sig_index];
@@ -1187,7 +1197,7 @@ class WasmDecoder : public Decoder {
}
case kExprCallIndirect:
case kExprReturnCallIndirect: {
- CallIndirectImmediate<validate> imm(decoder, pc);
+ CallIndirectImmediate<validate> imm(kAllWasmFeatures, decoder, pc);
return 1 + imm.length;
}
@@ -1205,10 +1215,8 @@ class WasmDecoder : public Decoder {
}
case kExprBrOnExn: {
- BranchDepthImmediate<validate> imm_br(decoder, pc);
- if (!VALIDATE(decoder->ok())) return 1 + imm_br.length;
- ExceptionIndexImmediate<validate> imm_idx(decoder, pc + imm_br.length);
- return 1 + imm_br.length + imm_idx.length;
+ BranchOnExceptionImmediate<validate> imm(decoder, pc);
+ return 1 + imm.length;
}
case kExprSetLocal:
@@ -1245,7 +1253,6 @@ class WasmDecoder : public Decoder {
case kNumericPrefix: {
byte numeric_index =
decoder->read_u8<validate>(pc + 1, "numeric_index");
- if (!VALIDATE(decoder->ok())) return 2;
WasmOpcode opcode =
static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
switch (opcode) {
@@ -1293,7 +1300,6 @@ class WasmDecoder : public Decoder {
}
case kSimdPrefix: {
byte simd_index = decoder->read_u8<validate>(pc + 1, "simd_index");
- if (!VALIDATE(decoder->ok())) return 2;
WasmOpcode opcode =
static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
switch (opcode) {
@@ -1322,7 +1328,6 @@ class WasmDecoder : public Decoder {
}
case kAtomicPrefix: {
byte atomic_index = decoder->read_u8<validate>(pc + 1, "atomic_index");
- if (!VALIDATE(decoder->ok())) return 2;
WasmOpcode opcode =
static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
switch (opcode) {
@@ -1386,7 +1391,7 @@ class WasmDecoder : public Decoder {
return {imm.sig->parameter_count(), imm.sig->return_count()};
}
case kExprCallIndirect: {
- CallIndirectImmediate<validate> imm(this, pc);
+ CallIndirectImmediate<validate> imm(this->enabled_, this, pc);
CHECK(Complete(pc, imm));
// Indirect calls pop an additional argument for the table index.
return {imm.sig->parameter_count() + 1,
@@ -1742,14 +1747,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprBrOnExn: {
CHECK_PROTOTYPE_OPCODE(eh);
- BranchDepthImmediate<validate> imm_br(this, this->pc_);
- if (!this->Validate(this->pc_, imm_br, control_.size())) break;
- ExceptionIndexImmediate<validate> imm_idx(this,
- this->pc_ + imm_br.length);
- if (!this->Validate(this->pc_ + imm_br.length, imm_idx)) break;
- Control* c = control_at(imm_br.depth);
+ BranchOnExceptionImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm.depth, control_.size())) break;
+ if (!this->Validate(this->pc_ + imm.depth.length, imm.index)) break;
+ Control* c = control_at(imm.depth.depth);
auto exception = Pop(0, kWasmExceptRef);
- const WasmExceptionSig* sig = imm_idx.exception->sig;
+ const WasmExceptionSig* sig = imm.index.exception->sig;
size_t value_count = sig->parameter_count();
// TODO(mstarzinger): This operand stack mutation is an ugly hack to
// make both type checking here as well as environment merging in the
@@ -1759,11 +1762,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Vector<Value> values(stack_.data() + c->stack_depth, value_count);
if (!TypeCheckBranch(c)) break;
if (control_.back().reachable()) {
- CALL_INTERFACE(BrOnException, exception, imm_idx, imm_br.depth,
+ CALL_INTERFACE(BrOnException, exception, imm.index, imm.depth.depth,
values);
c->br_merge()->reached = true;
}
- len = 1 + imm_br.length + imm_idx.length;
+ len = 1 + imm.length;
for (size_t i = 0; i < value_count; ++i) Pop();
auto* pexception = Push(kWasmExceptRef);
*pexception = exception;
@@ -2160,7 +2163,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprCallIndirect: {
- CallIndirectImmediate<validate> imm(this, this->pc_);
+ CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
auto index = Pop(0, kWasmI32);
@@ -2189,7 +2192,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprReturnCallIndirect: {
CHECK_PROTOTYPE_OPCODE(return_call);
- CallIndirectImmediate<validate> imm(this, this->pc_);
+ CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
if (!this->CanReturnCall(imm.sig)) {
@@ -2696,17 +2699,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return stack_.data() + old_size;
}
- V8_INLINE bool IsSubType(ValueType expected, ValueType actual) {
- return (expected == actual) ||
- (expected == kWasmAnyRef && actual == kWasmNullRef) ||
- (expected == kWasmAnyRef && actual == kWasmAnyFunc) ||
- (expected == kWasmAnyFunc && actual == kWasmNullRef);
- }
-
V8_INLINE Value Pop(int index, ValueType expected) {
auto val = Pop();
- if (!VALIDATE(IsSubType(expected, val.type) || val.type == kWasmVar ||
- expected == kWasmVar)) {
+ if (!VALIDATE(ValueTypes::IsSubType(expected, val.type) ||
+ val.type == kWasmVar || expected == kWasmVar)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
SafeOpcodeNameAt(this->pc_), index,
ValueTypes::TypeName(expected), SafeOpcodeNameAt(val.pc),
@@ -2752,7 +2748,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
- if (IsSubType(old.type, val.type)) continue;
+ if (ValueTypes::IsSubType(old.type, val.type)) continue;
// If {val.type} is polymorphic, which results from unreachable, make
// it more specific by using the merge value's expected type.
// If it is not polymorphic, this is a type error.
@@ -2823,7 +2819,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < num_returns; ++i) {
auto& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
- if (IsSubType(expected_type, val.type)) continue;
+ if (ValueTypes::IsSubType(expected_type, val.type)) continue;
// If {val.type} is polymorphic, which results from unreachable,
// make it more specific by using the return's expected type.
// If it is not polymorphic, this is a type error.
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 27cbe10b7e..1e5cb86f49 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -234,7 +234,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
break;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ CallIndirectImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
+ i.pc());
os << " // sig #" << imm.sig_index;
if (decoder.Complete(i.pc(), imm)) {
os << ": " << *imm.sig;
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index deb4caee15..a74cb43e66 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -16,18 +16,6 @@ namespace wasm {
namespace {
-const char* GetExecutionTierAsString(ExecutionTier tier) {
- switch (tier) {
- case ExecutionTier::kBaseline:
- return "liftoff";
- case ExecutionTier::kOptimized:
- return "turbofan";
- case ExecutionTier::kInterpreter:
- return "interpreter";
- }
- UNREACHABLE();
-}
-
class WasmInstructionBufferImpl {
public:
class View : public AssemblerBuffer {
@@ -82,8 +70,8 @@ class WasmInstructionBufferImpl {
OwnedVector<uint8_t> buffer_ =
OwnedVector<uint8_t>::New(AssemblerBase::kMinimalBufferSize);
- // While the buffer is grown, we need to temporarily also keep the old
- // buffer alive.
+ // While the buffer is grown, we need to temporarily also keep the old buffer
+ // alive.
OwnedVector<uint8_t> old_buffer_;
};
@@ -117,17 +105,18 @@ std::unique_ptr<WasmInstructionBuffer> WasmInstructionBuffer::New() {
// static
ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier(
const WasmModule* module) {
- return FLAG_liftoff && module->origin == kWasmOrigin
- ? ExecutionTier::kBaseline
- : ExecutionTier::kOptimized;
+ // Liftoff does not support the special asm.js opcodes, thus always compile
+ // asm.js modules with TurboFan.
+ if (module->origin == kAsmJsOrigin) return ExecutionTier::kTurbofan;
+ if (FLAG_wasm_interpret_all) return ExecutionTier::kInterpreter;
+ return FLAG_liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
}
-WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine, int index,
- ExecutionTier tier)
- : wasm_engine_(wasm_engine), func_index_(index), requested_tier_(tier) {
+WasmCompilationUnit::WasmCompilationUnit(int index, ExecutionTier tier)
+ : func_index_(index), tier_(tier) {
if (V8_UNLIKELY(FLAG_wasm_tier_mask_for_testing) && index < 32 &&
(FLAG_wasm_tier_mask_for_testing & (1 << index))) {
- tier = ExecutionTier::kOptimized;
+ tier = ExecutionTier::kTurbofan;
}
SwitchTier(tier);
}
@@ -137,7 +126,7 @@ WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine, int index,
WasmCompilationUnit::~WasmCompilationUnit() = default;
WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
- CompilationEnv* env,
+ WasmEngine* wasm_engine, CompilationEnv* env,
const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
Counters* counters, WasmFeatures* detected) {
auto* func = &env->module->functions[func_index_];
@@ -152,29 +141,37 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
wasm_compile, function_time);
TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
+ // Exactly one compiler-specific unit must be set.
+ DCHECK_EQ(1, !!liftoff_unit_ + !!turbofan_unit_ + !!interpreter_unit_);
+
if (FLAG_trace_wasm_compiler) {
- PrintF("Compiling wasm function %d with %s\n\n", func_index_,
- GetExecutionTierAsString(executed_tier_));
+ const char* tier =
+ liftoff_unit_ ? "liftoff" : turbofan_unit_ ? "turbofan" : "interpreter";
+ PrintF("Compiling wasm function %d with %s\n\n", func_index_, tier);
}
WasmCompilationResult result;
- switch (executed_tier_) {
- case ExecutionTier::kBaseline:
- result =
- liftoff_unit_->ExecuteCompilation(env, func_body, counters, detected);
- if (result.succeeded()) break;
- // Otherwise, fall back to turbofan.
- SwitchTier(ExecutionTier::kOptimized);
+ if (liftoff_unit_) {
+ result = liftoff_unit_->ExecuteCompilation(wasm_engine->allocator(), env,
+ func_body, counters, detected);
+ if (!result.succeeded()) {
+ // If Liftoff failed, fall back to turbofan.
// TODO(wasm): We could actually stop or remove the tiering unit for this
// function to avoid compiling it twice with TurboFan.
- V8_FALLTHROUGH;
- case ExecutionTier::kOptimized:
- result = turbofan_unit_->ExecuteCompilation(env, func_body, counters,
- detected);
- break;
- case ExecutionTier::kInterpreter:
- UNREACHABLE(); // TODO(titzer): compile interpreter entry stub.
+ SwitchTier(ExecutionTier::kTurbofan);
+ DCHECK_NOT_NULL(turbofan_unit_);
+ }
+ }
+ if (turbofan_unit_) {
+ result = turbofan_unit_->ExecuteCompilation(wasm_engine, env, func_body,
+ counters, detected);
+ }
+ if (interpreter_unit_) {
+ result = interpreter_unit_->ExecuteCompilation(wasm_engine, env, func_body,
+ counters, detected);
}
+ result.func_index = func_index_;
+ result.requested_tier = tier_;
if (result.succeeded()) {
counters->wasm_generated_code_size()->Increment(
@@ -185,45 +182,32 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
return result;
}
-WasmCode* WasmCompilationUnit::Publish(WasmCompilationResult result,
- NativeModule* native_module) {
- if (!result.succeeded()) {
- native_module->compilation_state()->SetError(func_index_,
- std::move(result.error));
- return nullptr;
- }
-
- DCHECK(result.succeeded());
- WasmCode::Tier code_tier = executed_tier_ == ExecutionTier::kBaseline
- ? WasmCode::kLiftoff
- : WasmCode::kTurbofan;
- DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
-
- WasmCode* code = native_module->AddCode(
- func_index_, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions), WasmCode::kFunction, code_tier);
- return code;
-}
-
void WasmCompilationUnit::SwitchTier(ExecutionTier new_tier) {
// This method is being called in the constructor, where neither
- // {liftoff_unit_} nor {turbofan_unit_} are set, or to switch tier from
- // kLiftoff to kTurbofan, in which case {liftoff_unit_} is already set.
- executed_tier_ = new_tier;
+ // {liftoff_unit_} nor {turbofan_unit_} nor {interpreter_unit_} are set, or to
+ // switch tier from kLiftoff to kTurbofan, in which case {liftoff_unit_} is
+ // already set.
switch (new_tier) {
- case ExecutionTier::kBaseline:
+ case ExecutionTier::kLiftoff:
DCHECK(!turbofan_unit_);
DCHECK(!liftoff_unit_);
- liftoff_unit_.reset(new LiftoffCompilationUnit(this));
+ DCHECK(!interpreter_unit_);
+ liftoff_unit_.reset(new LiftoffCompilationUnit());
return;
- case ExecutionTier::kOptimized:
+ case ExecutionTier::kTurbofan:
DCHECK(!turbofan_unit_);
+ DCHECK(!interpreter_unit_);
liftoff_unit_.reset();
turbofan_unit_.reset(new compiler::TurbofanWasmCompilationUnit(this));
return;
case ExecutionTier::kInterpreter:
- UNREACHABLE(); // TODO(titzer): allow compiling interpreter entry stub.
+ DCHECK(!turbofan_unit_);
+ DCHECK(!liftoff_unit_);
+ DCHECK(!interpreter_unit_);
+ interpreter_unit_.reset(new compiler::InterpreterCompilationUnit(this));
+ return;
+ case ExecutionTier::kNone:
+ UNREACHABLE();
}
UNREACHABLE();
}
@@ -239,12 +223,18 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
- WasmCompilationUnit unit(isolate->wasm_engine(), function->func_index, tier);
+ WasmCompilationUnit unit(function->func_index, tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
- &env, native_module->compilation_state()->GetWireBytesStorage(),
+ isolate->wasm_engine(), &env,
+ native_module->compilation_state()->GetWireBytesStorage(),
isolate->counters(), detected);
- unit.Publish(std::move(result), native_module);
+ if (result.succeeded()) {
+ WasmCodeRefScope code_ref_scope;
+ native_module->AddCompiledCode(std::move(result));
+ } else {
+ native_module->compilation_state()->SetError();
+ }
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index c7d1d5e21d..ae577e8ee0 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -20,6 +20,7 @@ class AssemblerBuffer;
class Counters;
namespace compiler {
+class InterpreterCompilationUnit;
class Pipeline;
class TurbofanWasmCompilationUnit;
} // namespace compiler
@@ -50,12 +51,8 @@ struct WasmCompilationResult {
public:
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
- explicit WasmCompilationResult(WasmError error) : error(std::move(error)) {}
-
- bool succeeded() const {
- DCHECK_EQ(code_desc.buffer != nullptr, error.empty());
- return error.empty();
- }
+ bool succeeded() const { return code_desc.buffer != nullptr; }
+ bool failed() const { return !succeeded(); }
operator bool() const { return succeeded(); }
CodeDesc code_desc;
@@ -64,31 +61,24 @@ struct WasmCompilationResult {
uint32_t tagged_parameter_slots = 0;
OwnedVector<byte> source_positions;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions;
-
- WasmError error;
+ int func_index;
+ ExecutionTier requested_tier;
+ ExecutionTier result_tier;
};
-class WasmCompilationUnit final {
+class V8_EXPORT_PRIVATE WasmCompilationUnit final {
public:
static ExecutionTier GetDefaultExecutionTier(const WasmModule*);
- // If constructing from a background thread, pass in a Counters*, and ensure
- // that the Counters live at least as long as this compilation unit (which
- // typically means to hold a std::shared_ptr<Counters>).
- // If used exclusively from a foreground thread, Isolate::counters() may be
- // used by callers to pass Counters.
- WasmCompilationUnit(WasmEngine*, int index, ExecutionTier);
+ WasmCompilationUnit(int index, ExecutionTier);
~WasmCompilationUnit();
WasmCompilationResult ExecuteCompilation(
- CompilationEnv*, const std::shared_ptr<WireBytesStorage>&, Counters*,
- WasmFeatures* detected);
-
- WasmCode* Publish(WasmCompilationResult, NativeModule*);
+ WasmEngine*, CompilationEnv*, const std::shared_ptr<WireBytesStorage>&,
+ Counters*, WasmFeatures* detected);
- ExecutionTier requested_tier() const { return requested_tier_; }
- ExecutionTier executed_tier() const { return executed_tier_; }
+ ExecutionTier tier() const { return tier_; }
static void CompileWasmFunction(Isolate*, NativeModule*,
WasmFeatures* detected, const WasmFunction*,
@@ -97,16 +87,17 @@ class WasmCompilationUnit final {
private:
friend class LiftoffCompilationUnit;
friend class compiler::TurbofanWasmCompilationUnit;
+ friend class compiler::InterpreterCompilationUnit;
- WasmEngine* const wasm_engine_;
const int func_index_;
- ExecutionTier requested_tier_;
- ExecutionTier executed_tier_;
+ ExecutionTier tier_;
// LiftoffCompilationUnit, set if {tier_ == kLiftoff}.
std::unique_ptr<LiftoffCompilationUnit> liftoff_unit_;
// TurbofanWasmCompilationUnit, set if {tier_ == kTurbofan}.
std::unique_ptr<compiler::TurbofanWasmCompilationUnit> turbofan_unit_;
+ // InterpreterCompilationUnit, set if {tier_ == kInterpreter}.
+ std::unique_ptr<compiler::InterpreterCompilationUnit> interpreter_unit_;
void SwitchTier(ExecutionTier new_tier);
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index b64dd0351f..1f870598a9 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -402,25 +402,27 @@ class WasmGraphBuildingInterface {
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, nullptr, imm.sig, imm.index, args, returns);
+ DoCall(decoder, 0, nullptr, imm.sig, imm.index, args, returns);
}
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- UNIMPLEMENTED();
+ DoReturnCall(decoder, 0, nullptr, imm.sig, imm.index, args);
}
void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, index.node, imm.sig, imm.sig_index, args, returns);
+ DoCall(decoder, imm.table_index, index.node, imm.sig, imm.sig_index, args,
+ returns);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- UNIMPLEMENTED();
+ DoReturnCall(decoder, imm.table_index, index.node, imm.sig, imm.sig_index,
+ args);
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
@@ -850,8 +852,9 @@ class WasmGraphBuildingInterface {
return result;
}
- void DoCall(FullDecoder* decoder, TFNode* index_node, FunctionSig* sig,
- uint32_t index, const Value args[], Value returns[]) {
+ void DoCall(FullDecoder* decoder, uint32_t table_index, TFNode* index_node,
+ FunctionSig* sig, uint32_t sig_index, const Value args[],
+ Value returns[]) {
int param_count = static_cast<int>(sig->parameter_count());
TFNode** arg_nodes = builder_->Buffer(param_count + 1);
TFNode** return_nodes = nullptr;
@@ -860,9 +863,11 @@ class WasmGraphBuildingInterface {
arg_nodes[i + 1] = args[i].node;
}
if (index_node) {
- BUILD(CallIndirect, index, arg_nodes, &return_nodes, decoder->position());
+ BUILD(CallIndirect, table_index, sig_index, arg_nodes, &return_nodes,
+ decoder->position());
} else {
- BUILD(CallDirect, index, arg_nodes, &return_nodes, decoder->position());
+ BUILD(CallDirect, sig_index, arg_nodes, &return_nodes,
+ decoder->position());
}
int return_count = static_cast<int>(sig->return_count());
for (int i = 0; i < return_count; ++i) {
@@ -872,13 +877,29 @@ class WasmGraphBuildingInterface {
// reload mem_size and mem_start.
LoadContextIntoSsa(ssa_env_);
}
+
+ void DoReturnCall(FullDecoder* decoder, uint32_t table_index,
+ TFNode* index_node, FunctionSig* sig, uint32_t sig_index,
+ const Value args[]) {
+ int arg_count = static_cast<int>(sig->parameter_count());
+ TFNode** arg_nodes = builder_->Buffer(arg_count + 1);
+ arg_nodes[0] = index_node;
+ for (int i = 0; i < arg_count; ++i) {
+ arg_nodes[i + 1] = args[i].node;
+ }
+ if (index_node) {
+ BUILD(ReturnCallIndirect, table_index, sig_index, arg_nodes,
+ decoder->position());
+ } else {
+ BUILD(ReturnCall, sig_index, arg_nodes, decoder->position());
+ }
+ }
};
} // namespace
DecodeResult BuildTFGraph(AccountingAllocator* allocator,
- const WasmFeatures& enabled,
- const wasm::WasmModule* module,
+ const WasmFeatures& enabled, const WasmModule* module,
compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
compiler::NodeOriginTable* node_origins) {
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
index 53885fef38..165953b7d1 100644
--- a/deps/v8/src/wasm/graph-builder-interface.h
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -23,11 +23,11 @@ struct FunctionBody;
struct WasmModule;
struct WasmFeatures;
-DecodeResult BuildTFGraph(AccountingAllocator* allocator,
- const WasmFeatures& enabled, const WasmModule* module,
- compiler::WasmGraphBuilder* builder,
- WasmFeatures* detected, const FunctionBody& body,
- compiler::NodeOriginTable* node_origins);
+V8_EXPORT_PRIVATE DecodeResult
+BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
+ const WasmModule* module, compiler::WasmGraphBuilder* builder,
+ WasmFeatures* detected, const FunctionBody& body,
+ compiler::NodeOriginTable* node_origins);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h b/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
index c50183d33e..ff5fb8de72 100644
--- a/deps/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h
+++ b/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
-#define V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
+#ifndef V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
+#define V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
@@ -38,4 +38,4 @@ class JSToWasmWrapperCache {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
+#endif // V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index ec230ff742..988c22d6fc 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -16,28 +16,9 @@ namespace wasm {
#if V8_TARGET_ARCH_X64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- // TODO(clemensh): Try more efficient sequences.
- // Alternative 1:
- // [header]: mov r10, [lazy_compile_target]
- // jmp r10
- // [slot 0]: push [0]
- // jmp [header] // pc-relative --> slot size: 10 bytes
- //
- // Alternative 2:
- // [header]: lea r10, [rip - [header]]
- // shr r10, 3 // compute index from offset
- // push r10
- // mov r10, [lazy_compile_target]
- // jmp r10
- // [slot 0]: call [header]
- // ret // -> slot size: 5 bytes
-
// Use a push, because mov to an extended register takes 6 bytes.
- pushq(Immediate(func_index)); // max 5 bytes
- movq(kScratchRegister, uint64_t{lazy_compile_target}); // max 10 bytes
- jmp(kScratchRegister); // 3 bytes
-
- PatchConstPool(); // force patching entries for partial const pool
+ pushq(Immediate(func_index)); // max 5 bytes
+ EmitJumpSlot(lazy_compile_target); // always 5 bytes
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -45,8 +26,12 @@ void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
- movq(kScratchRegister, static_cast<uint64_t>(target));
- jmp(kScratchRegister);
+ // On x64, all code is allocated within a single code section, so we can use
+ // relative jumps.
+ static_assert(kMaxWasmCodeMemory <= size_t{2} * GB, "can use relative jump");
+ intptr_t displacement = static_cast<intptr_t>(
+ reinterpret_cast<byte*>(target) - pc_ - kNearJmpInstrSize);
+ near_jmp(displacement, RelocInfo::NONE);
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -125,6 +110,9 @@ void JumpTableAssembler::EmitJumpSlot(Address target) {
// TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
// patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
// sure concurrent patching is still supported.
+ DCHECK(TurboAssembler::IsNearCallOffset(
+ (reinterpret_cast<byte*>(target) - pc_) / kInstrSize));
+
Jump(target, RelocInfo::NONE);
}
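With the switch from an absolute indirect jump (movq of the target, max 10 bytes, plus a 3-byte register jmp) to a pc-relative near jump (5 bytes), an x64 lazy-compile slot shrinks from 18 to 10 bytes, which is why kJumpTableSlotSize drops accordingly in the header diff below. The rel32 displacement is measured from the end of the jmp instruction, hence the kNearJmpInstrSize subtraction in EmitJumpSlot; a sketch of the same computation with plain integers (kNearJmpSize here mirrors that constant):

#include <cstdint>

constexpr int kNearJmpSize = 5;  // 0xE9 opcode + 4-byte displacement

int32_t NearJmpDisplacement(uintptr_t jmp_start, uintptr_t target) {
  // rel32 is relative to the address of the *next* instruction, i.e. the end
  // of the 5-byte jmp, so the instruction size is subtracted from the delta.
  return static_cast<int32_t>(target - (jmp_start + kNearJmpSize));
}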
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 548639a1ba..f3d4f954bf 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -27,7 +27,7 @@ namespace wasm {
//
// The above illustrates jump table lines {Li} containing slots {Si} with each
// line containing {n} slots and some padding {x} for alignment purposes.
-class JumpTableAssembler : public MacroAssembler {
+class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
public:
// Translate an offset into the continuous jump table to a jump table index.
static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
@@ -115,7 +115,7 @@ class JumpTableAssembler : public MacroAssembler {
// boundaries. The jump table line size has been chosen to satisfy this.
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 18;
+ static constexpr int kJumpTableSlotSize = 10;
static constexpr int kJumpTableStubSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableLineSize = 64;
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 3f8e8b3db5..4dc5b80dbc 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -37,15 +37,17 @@ void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
}
const char* eng = "?";
switch (tier) {
- case ExecutionTier::kOptimized:
+ case ExecutionTier::kTurbofan:
eng = "turbofan";
break;
- case ExecutionTier::kBaseline:
+ case ExecutionTier::kLiftoff:
eng = "liftoff";
break;
case ExecutionTier::kInterpreter:
eng = "interpreter";
break;
+ case ExecutionTier::kNone:
+ UNREACHABLE();
}
printf("%-11s func:%6d+0x%-6x%s %08x val: %s\n", eng, func_index, position,
info->is_store ? " store to" : "load from", info->address,
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index c39ca2406c..568f9eb0f8 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -4,10 +4,14 @@
#include "src/wasm/module-compiler.h"
+#include <algorithm>
+
#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/base/enum-set.h"
#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
@@ -18,12 +22,12 @@
#include "src/task-utils.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/js-to-wasm-wrapper-cache-inl.h"
+#include "src/wasm/js-to-wasm-wrapper-cache.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-import-wrapper-cache-inl.h"
+#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
@@ -61,22 +65,23 @@ enum class CompileMode : uint8_t { kRegular, kTiering };
// on background compile jobs.
class BackgroundCompileToken {
public:
- explicit BackgroundCompileToken(NativeModule* native_module)
+ explicit BackgroundCompileToken(
+ const std::shared_ptr<NativeModule>& native_module)
: native_module_(native_module) {}
void Cancel() {
base::SharedMutexGuard<base::kExclusive> mutex_guard(&mutex_);
- native_module_ = nullptr;
+ native_module_.reset();
}
private:
friend class BackgroundCompileScope;
base::SharedMutex mutex_;
- NativeModule* native_module_;
+ std::weak_ptr<NativeModule> native_module_;
- NativeModule* StartScope() {
+ std::shared_ptr<NativeModule> StartScope() {
mutex_.LockShared();
- return native_module_;
+ return native_module_.lock();
}
void ExitScope() { mutex_.UnlockShared(); }
@@ -99,25 +104,197 @@ class BackgroundCompileScope {
NativeModule* native_module() {
DCHECK(!cancelled());
- return native_module_;
+ return native_module_.get();
}
inline CompilationStateImpl* compilation_state();
private:
BackgroundCompileToken* const token_;
- NativeModule* const native_module_;
+ // Keep the native module alive while in this scope.
+ std::shared_ptr<NativeModule> const native_module_;
+};
+
+enum CompileBaselineOnly : bool {
+ kBaselineOnly = true,
+ kBaselineOrTopTier = false
+};
+
+// A set of work-stealing queues (vectors of units). Each background compile
+// task owns one of the queues and steals from all others once its own queue
+// runs empty.
+class CompilationUnitQueues {
+ public:
+ explicit CompilationUnitQueues(int max_tasks) : queues_(max_tasks) {
+ DCHECK_LT(0, max_tasks);
+ for (int task_id = 0; task_id < max_tasks; ++task_id) {
+ queues_[task_id].next_steal_task_id_ = next_task_id(task_id);
+ }
+ for (auto& atomic_counter : num_units_) {
+ std::atomic_init(&atomic_counter, size_t{0});
+ }
+ }
+
+ std::unique_ptr<WasmCompilationUnit> GetNextUnit(
+ int task_id, CompileBaselineOnly baseline_only) {
+ DCHECK_LE(0, task_id);
+ DCHECK_GT(queues_.size(), task_id);
+
+ // As long as any lower-tier units are outstanding we need to steal them
+ // before executing own higher-tier units.
+ int max_tier = baseline_only ? kBaseline : kTopTier;
+ for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
+ Queue* queue = &queues_[task_id];
+ // First, check whether our own queue has a unit of the wanted tier. If
+ // so, return it, otherwise get the task id to steal from.
+ int steal_task_id;
+ {
+ base::MutexGuard mutex_guard(&queue->mutex_);
+ if (!queue->units_[tier].empty()) {
+ auto unit = std::move(queue->units_[tier].back());
+ queue->units_[tier].pop_back();
+ DecrementUnitCount(tier);
+ return unit;
+ }
+ steal_task_id = queue->next_steal_task_id_;
+ }
+
+ // Try to steal from all other queues. If none of this succeeds, the outer
+ // loop increases the tier and retries.
+ size_t steal_trials = queues_.size();
+ for (; steal_trials > 0;
+ --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
+ if (steal_task_id == task_id) continue;
+ if (auto unit = StealUnitsAndGetFirst(task_id, steal_task_id, tier)) {
+ DecrementUnitCount(tier);
+ return unit;
+ }
+ }
+ }
+ return {};
+ }
+
+ void AddUnits(Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
+ Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units) {
+ DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
+ // Add to the individual queues in a round-robin fashion. No special care is
+ // taken to balance them; they will be balanced by work stealing.
+ int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
+ while (!next_queue_to_add.compare_exchange_weak(
+ queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
+ // Retry with updated {queue_to_add}.
+ }
+
+ Queue* queue = &queues_[queue_to_add];
+ base::MutexGuard guard(&queue->mutex_);
+ if (!baseline_units.empty()) {
+ queue->units_[kBaseline].insert(
+ queue->units_[kBaseline].end(),
+ std::make_move_iterator(baseline_units.begin()),
+ std::make_move_iterator(baseline_units.end()));
+ num_units_[kBaseline].fetch_add(baseline_units.size(),
+ std::memory_order_relaxed);
+ }
+ if (!top_tier_units.empty()) {
+ queue->units_[kTopTier].insert(
+ queue->units_[kTopTier].end(),
+ std::make_move_iterator(top_tier_units.begin()),
+ std::make_move_iterator(top_tier_units.end()));
+ num_units_[kTopTier].fetch_add(top_tier_units.size(),
+ std::memory_order_relaxed);
+ }
+ }
+
+  // Get the current total number of units in all queues. This is only a
+  // momentary snapshot; it's not guaranteed that {GetNextUnit} returns a unit
+  // if this method returns non-zero.
+ size_t GetTotalSize() const {
+ size_t total = 0;
+ for (auto& atomic_counter : num_units_) {
+ total += atomic_counter.load(std::memory_order_relaxed);
+ }
+ return total;
+ }
+
+ private:
+ // Store tier in int so we can easily loop over it:
+ static constexpr int kBaseline = 0;
+ static constexpr int kTopTier = 1;
+ static constexpr int kNumTiers = kTopTier + 1;
+
+ struct Queue {
+ base::Mutex mutex_;
+
+ // Protected by {mutex_}:
+ std::vector<std::unique_ptr<WasmCompilationUnit>> units_[kNumTiers];
+ int next_steal_task_id_;
+ // End of fields protected by {mutex_}.
+ };
+
+ std::vector<Queue> queues_;
+
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<int> next_queue_to_add{0};
+
+ int next_task_id(int task_id) const {
+ int next = task_id + 1;
+ return next == static_cast<int>(queues_.size()) ? 0 : next;
+ }
+
+ int GetLowestTierWithUnits() const {
+ for (int tier = 0; tier < kNumTiers; ++tier) {
+ if (num_units_[tier].load(std::memory_order_relaxed) > 0) return tier;
+ }
+ return kNumTiers;
+ }
+
+ void DecrementUnitCount(int tier) {
+ size_t old_units_count = num_units_[tier].fetch_sub(1);
+ DCHECK_LE(1, old_units_count);
+ USE(old_units_count);
+ }
+
+ // Steal units of {wanted_tier} from {steal_from_task_id} to {task_id}. Return
+ // first stolen unit (rest put in queue of {task_id}), or {nullptr} if
+ // {steal_from_task_id} had no units of {wanted_tier}.
+ std::unique_ptr<WasmCompilationUnit> StealUnitsAndGetFirst(
+ int task_id, int steal_from_task_id, int wanted_tier) {
+ DCHECK_NE(task_id, steal_from_task_id);
+ std::vector<std::unique_ptr<WasmCompilationUnit>> stolen;
+ {
+ Queue* steal_queue = &queues_[steal_from_task_id];
+ base::MutexGuard guard(&steal_queue->mutex_);
+ if (steal_queue->units_[wanted_tier].empty()) return {};
+ auto* steal_from_vector = &steal_queue->units_[wanted_tier];
+ size_t remaining = steal_from_vector->size() / 2;
+ stolen.assign(
+ std::make_move_iterator(steal_from_vector->begin()) + remaining,
+ std::make_move_iterator(steal_from_vector->end()));
+ steal_from_vector->resize(remaining);
+ }
+ DCHECK(!stolen.empty());
+ auto returned_unit = std::move(stolen.back());
+ stolen.pop_back();
+ Queue* queue = &queues_[task_id];
+ base::MutexGuard guard(&queue->mutex_);
+ auto* target_queue = &queue->units_[wanted_tier];
+ target_queue->insert(target_queue->end(),
+ std::make_move_iterator(stolen.begin()),
+ std::make_move_iterator(stolen.end()));
+ queue->next_steal_task_id_ = next_task_id(steal_from_task_id);
+ return returned_unit;
+ }
};
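For illustration, here is a self-contained sketch of the "steal half" policy that {StealUnitsAndGetFirst} implements. Locking and the per-tier bookkeeping are omitted, and the Unit alias is a placeholder for std::unique_ptr<WasmCompilationUnit>.

#include <iterator>
#include <memory>
#include <vector>

using Unit = std::unique_ptr<int>;  // placeholder element type

// Moves the upper half of {victim} into {thief} and returns one stolen unit,
// or nullptr if {victim} is empty. The real code holds the victim's mutex
// while splitting and the thief's mutex while inserting.
Unit StealHalf(std::vector<Unit>* victim, std::vector<Unit>* thief) {
  if (victim->empty()) return nullptr;
  auto remaining = victim->size() / 2;
  std::vector<Unit> stolen(
      std::make_move_iterator(victim->begin() + remaining),
      std::make_move_iterator(victim->end()));
  victim->resize(remaining);
  Unit first = std::move(stolen.back());
  stolen.pop_back();
  thief->insert(thief->end(), std::make_move_iterator(stolen.begin()),
                std::make_move_iterator(stolen.end()));
  return first;
}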
// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
// compilation of functions.
-// It's public interface {CompilationState} lives in compilation-environment.h.
+// Its public interface {CompilationState} lives in compilation-environment.h.
class CompilationStateImpl {
public:
- CompilationStateImpl(NativeModule*, std::shared_ptr<Counters> async_counters);
- ~CompilationStateImpl();
+ CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
+ std::shared_ptr<Counters> async_counters);
// Cancel all background compilation and wait for all tasks to finish. Call
// this before destructing this object.
@@ -126,7 +303,7 @@ class CompilationStateImpl {
  // Set the number of compilation units expected to be executed. Needs to be
// set before {AddCompilationUnits} is run, which triggers background
// compilation.
- void SetNumberOfFunctionsToCompile(int num_functions);
+ void SetNumberOfFunctionsToCompile(int num_functions, int num_lazy_functions);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@@ -135,55 +312,36 @@ class CompilationStateImpl {
// Inserts new functions to compile and kicks off compilation.
void AddCompilationUnits(
- std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
- std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units);
- std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit();
-
- void OnFinishedUnit(ExecutionTier, WasmCode*);
-
- void ReportDetectedFeatures(const WasmFeatures& detected);
- void OnBackgroundTaskStopped(const WasmFeatures& detected);
- void PublishDetectedFeatures(Isolate* isolate, const WasmFeatures& detected);
- void RestartBackgroundCompileTask();
+ Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
+ Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units);
+ void AddTopTierCompilationUnit(std::unique_ptr<WasmCompilationUnit>);
+ std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit(
+ int task_id, CompileBaselineOnly baseline_only);
+
+ void OnFinishedUnit(WasmCode*);
+ void OnFinishedUnits(Vector<WasmCode*>);
+
+ void OnBackgroundTaskStopped(int task_id, const WasmFeatures& detected);
+ void UpdateDetectedFeatures(const WasmFeatures& detected);
+ void PublishDetectedFeatures(Isolate*);
void RestartBackgroundTasks();
- void SetError(uint32_t func_index, const WasmError& error);
+ void SetError();
bool failed() const {
- return compile_error_.load(std::memory_order_relaxed) != nullptr;
+ return compile_failed_.load(std::memory_order_relaxed);
}
bool baseline_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
- return outstanding_baseline_units_ == 0 ||
- (compile_mode_ == CompileMode::kTiering &&
- outstanding_tiering_units_ == 0);
+ DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+ return outstanding_baseline_functions_ == 0;
}
CompileMode compile_mode() const { return compile_mode_; }
+ Counters* counters() const { return async_counters_.get(); }
WasmFeatures* detected_features() { return &detected_features_; }
- // Call {GetCompileError} from foreground threads only, since we access
- // NativeModule::wire_bytes, which is set from the foreground thread once the
- // stream has finished.
- WasmError GetCompileError() {
- CompilationError* error = compile_error_.load(std::memory_order_acquire);
- DCHECK_NOT_NULL(error);
- std::ostringstream error_msg;
- error_msg << "Compiling wasm function \"";
- wasm::ModuleWireBytes wire_bytes(native_module_->wire_bytes());
- wasm::WireBytesRef name_ref = native_module_->module()->LookupFunctionName(
- wire_bytes, error->func_index);
- if (name_ref.is_set()) {
- wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
- error_msg.write(name.start(), name.length());
- } else {
- error_msg << "wasm-function[" << error->func_index << "]";
- }
- error_msg << "\" failed: " << error->error.message();
- return WasmError{error->error.offset(), error_msg.str()};
- }
-
void SetWireBytesStorage(
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
base::MutexGuard guard(&mutex_);
@@ -196,23 +354,24 @@ class CompilationStateImpl {
return wire_bytes_storage_;
}
- private:
- struct CompilationError {
- uint32_t const func_index;
- WasmError const error;
- CompilationError(uint32_t func_index, WasmError error)
- : func_index(func_index), error(std::move(error)) {}
- };
+ const std::shared_ptr<BackgroundCompileToken>& background_compile_token()
+ const {
+ return background_compile_token_;
+ }
+ private:
NativeModule* const native_module_;
const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
const CompileMode compile_mode_;
const std::shared_ptr<Counters> async_counters_;
- // Compilation error, atomically updated, but at most once (nullptr -> error).
- // Uses acquire-release semantics (acquire on load, release on update).
- // For checking whether an error is set, relaxed semantics can be used.
- std::atomic<CompilationError*> compile_error_{nullptr};
+ // Compilation error, atomically updated. This flag can be updated and read
+ // using relaxed semantics.
+ std::atomic<bool> compile_failed_{false};
+
+ const int max_background_tasks_ = 0;
+
+ CompilationUnitQueues compilation_unit_queues_;
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
@@ -221,10 +380,8 @@ class CompilationStateImpl {
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
- std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_compilation_units_;
- std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_compilation_units_;
-
- int num_background_tasks_ = 0;
+ // Set of unused task ids; <= {max_background_tasks_} many.
+ std::vector<int> available_task_ids_;
// Features detected to be used in this module. Features can be detected
// as a module is being compiled.
@@ -249,13 +406,12 @@ class CompilationStateImpl {
// Callback functions to be called on compilation events.
std::vector<CompilationState::callback_t> callbacks_;
- int outstanding_baseline_units_ = 0;
- int outstanding_tiering_units_ = 0;
+ int outstanding_baseline_functions_ = 0;
+ int outstanding_top_tier_functions_ = 0;
+ std::vector<ExecutionTier> highest_execution_tier_;
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
-
- const int max_background_tasks_ = 0;
};
CompilationStateImpl* Impl(CompilationState* compilation_state) {
@@ -284,9 +440,7 @@ CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
void CompilationState::AbortCompilation() { Impl(this)->AbortCompilation(); }
-void CompilationState::SetError(uint32_t func_index, const WasmError& error) {
- Impl(this)->SetError(func_index, error);
-}
+void CompilationState::SetError() { Impl(this)->SetError(); }
void CompilationState::SetWireBytesStorage(
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
@@ -304,13 +458,18 @@ void CompilationState::AddCallback(CompilationState::callback_t callback) {
bool CompilationState::failed() const { return Impl(this)->failed(); }
-void CompilationState::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
- Impl(this)->OnFinishedUnit(tier, code);
+void CompilationState::OnFinishedUnit(WasmCode* code) {
+ Impl(this)->OnFinishedUnit(code);
+}
+
+void CompilationState::OnFinishedUnits(Vector<WasmCode*> code_vector) {
+ Impl(this)->OnFinishedUnits(code_vector);
}
// static
std::unique_ptr<CompilationState> CompilationState::New(
- NativeModule* native_module, std::shared_ptr<Counters> async_counters) {
+ const std::shared_ptr<NativeModule>& native_module,
+ std::shared_ptr<Counters> async_counters) {
return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
new CompilationStateImpl(native_module, std::move(async_counters))));
}
@@ -318,103 +477,123 @@ std::unique_ptr<CompilationState> CompilationState::New(
// End of PIMPL implementation of {CompilationState}.
//////////////////////////////////////////////////////
-WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
- int func_index) {
- base::ElapsedTimer compilation_timer;
- DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
-
- compilation_timer.Start();
-
- TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
-
- const uint8_t* module_start = native_module->wire_bytes().start();
-
- const WasmFunction* func = &native_module->module()->functions[func_index];
- FunctionBody func_body{func->sig, func->code.offset(),
- module_start + func->code.offset(),
- module_start + func->code.end_offset()};
-
- ExecutionTier tier =
- WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
- WasmCompilationUnit unit(isolate->wasm_engine(), func_index, tier);
- CompilationEnv env = native_module->CreateCompilationEnv();
- WasmCompilationResult result = unit.ExecuteCompilation(
- &env, native_module->compilation_state()->GetWireBytesStorage(),
- isolate->counters(),
- Impl(native_module->compilation_state())->detected_features());
- WasmCode* code = unit.Publish(std::move(result), native_module);
-
- // During lazy compilation, we should never get compilation errors. The module
- // was verified before starting execution with lazy compilation.
- // This might be OOM, but then we cannot continue execution anyway.
- // TODO(clemensh): According to the spec, we can actually skip validation at
- // module creation time, and return a function that always traps here.
- CHECK(!native_module->compilation_state()->failed());
-
- if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
-
- int64_t func_size =
- static_cast<int64_t>(func->code.end_offset() - func->code.offset());
- int64_t compilation_time = compilation_timer.Elapsed().InMicroseconds();
-
- auto counters = isolate->counters();
- counters->wasm_lazily_compiled_functions()->Increment();
+namespace {
- counters->wasm_lazy_compilation_throughput()->AddSample(
- compilation_time != 0 ? static_cast<int>(func_size / compilation_time)
- : 0);
+ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint,
+ ExecutionTier default_tier) {
+ switch (hint) {
+ case WasmCompilationHintTier::kDefault:
+ return default_tier;
+ case WasmCompilationHintTier::kInterpreter:
+ return ExecutionTier::kInterpreter;
+ case WasmCompilationHintTier::kBaseline:
+ return ExecutionTier::kLiftoff;
+ case WasmCompilationHintTier::kOptimized:
+ return ExecutionTier::kTurbofan;
+ }
+ UNREACHABLE();
+}
- return code;
+const WasmCompilationHint* GetCompilationHint(const WasmModule* module,
+ uint32_t func_index) {
+ DCHECK_LE(module->num_imported_functions, func_index);
+ uint32_t hint_index = func_index - module->num_imported_functions;
+ const std::vector<WasmCompilationHint>& compilation_hints =
+ module->compilation_hints;
+ if (hint_index < compilation_hints.size()) {
+ return &compilation_hints[hint_index];
+ }
+ return nullptr;
}
-Address CompileLazy(Isolate* isolate, NativeModule* native_module,
- uint32_t func_index) {
- HistogramTimerScope lazy_time_scope(
- isolate->counters()->wasm_lazy_compilation_time());
+bool IsLazyCompilation(const WasmModule* module,
+ const WasmFeatures& enabled_features,
+ uint32_t func_index) {
+ if (enabled_features.compilation_hints) {
+ const WasmCompilationHint* hint = GetCompilationHint(module, func_index);
+ return hint != nullptr &&
+ hint->strategy == WasmCompilationHintStrategy::kLazy;
+ }
+ return false;
+}
- DCHECK(!native_module->lazy_compile_frozen());
+bool IsLazyCompilation(const WasmModule* module,
+ const NativeModule* native_module,
+ const WasmFeatures& enabled_features,
+ uint32_t func_index) {
+ if (native_module->lazy_compilation()) return true;
+ return IsLazyCompilation(module, enabled_features, func_index);
+}
- NativeModuleModificationScope native_module_modification_scope(native_module);
+struct ExecutionTierPair {
+ ExecutionTier baseline_tier;
+ ExecutionTier top_tier;
+};
- WasmCode* result = LazyCompileFunction(isolate, native_module, func_index);
- DCHECK_NOT_NULL(result);
- DCHECK_EQ(func_index, result->index());
+ExecutionTierPair GetRequestedExecutionTiers(
+ const WasmModule* module, CompileMode compile_mode,
+ const WasmFeatures& enabled_features, uint32_t func_index) {
+ ExecutionTierPair result;
+ switch (compile_mode) {
+ case CompileMode::kRegular:
+ result.baseline_tier =
+ WasmCompilationUnit::GetDefaultExecutionTier(module);
+ result.top_tier = result.baseline_tier;
+ return result;
+ case CompileMode::kTiering:
+
+ // Default tiering behaviour.
+ result.baseline_tier = ExecutionTier::kLiftoff;
+ result.top_tier = ExecutionTier::kTurbofan;
+
+ // Check if compilation hints override default tiering behaviour.
+ if (enabled_features.compilation_hints) {
+ const WasmCompilationHint* hint =
+ GetCompilationHint(module, func_index);
+ if (hint != nullptr) {
+ result.baseline_tier = ApplyHintToExecutionTier(hint->baseline_tier,
+ result.baseline_tier);
+ result.top_tier =
+ ApplyHintToExecutionTier(hint->top_tier, result.top_tier);
+ }
+ }
- return result->instruction_start();
+ // Correct top tier if necessary.
+ static_assert(ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+ if (result.baseline_tier > result.top_tier) {
+ result.top_tier = result.baseline_tier;
+ }
+ return result;
+ }
+ UNREACHABLE();
}
-namespace {
-
// The {CompilationUnitBuilder} builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
// {CompilationStateImpl} when {Commit} is called.
class CompilationUnitBuilder {
public:
- explicit CompilationUnitBuilder(NativeModule* native_module,
- WasmEngine* wasm_engine)
+ explicit CompilationUnitBuilder(NativeModule* native_module)
: native_module_(native_module),
- wasm_engine_(wasm_engine),
default_tier_(WasmCompilationUnit::GetDefaultExecutionTier(
native_module->module())) {}
- void AddUnit(uint32_t func_index) {
- switch (compilation_state()->compile_mode()) {
- case CompileMode::kTiering:
- tiering_units_.emplace_back(
- CreateUnit(func_index, ExecutionTier::kOptimized));
- baseline_units_.emplace_back(
- CreateUnit(func_index, ExecutionTier::kBaseline));
- return;
- case CompileMode::kRegular:
- baseline_units_.emplace_back(CreateUnit(func_index, default_tier_));
- return;
+ void AddUnits(uint32_t func_index) {
+ ExecutionTierPair tiers = GetRequestedExecutionTiers(
+ native_module_->module(), compilation_state()->compile_mode(),
+ native_module_->enabled_features(), func_index);
+ baseline_units_.emplace_back(CreateUnit(func_index, tiers.baseline_tier));
+ if (tiers.baseline_tier != tiers.top_tier) {
+ tiering_units_.emplace_back(CreateUnit(func_index, tiers.top_tier));
}
- UNREACHABLE();
}
bool Commit() {
if (baseline_units_.empty() && tiering_units_.empty()) return false;
- compilation_state()->AddCompilationUnits(baseline_units_, tiering_units_);
+ compilation_state()->AddCompilationUnits(VectorOf(baseline_units_),
+ VectorOf(tiering_units_));
Clear();
return true;
}
@@ -427,8 +606,7 @@ class CompilationUnitBuilder {
private:
std::unique_ptr<WasmCompilationUnit> CreateUnit(uint32_t func_index,
ExecutionTier tier) {
- return base::make_unique<WasmCompilationUnit>(wasm_engine_, func_index,
- tier);
+ return base::make_unique<WasmCompilationUnit>(func_index, tier);
}
CompilationStateImpl* compilation_state() const {
@@ -436,170 +614,293 @@ class CompilationUnitBuilder {
}
NativeModule* const native_module_;
- WasmEngine* const wasm_engine_;
const ExecutionTier default_tier_;
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
};
-bool compile_lazy(const WasmModule* module) {
- return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
+} // namespace
+
+void CompileLazy(Isolate* isolate, NativeModule* native_module,
+ uint32_t func_index) {
+ Counters* counters = isolate->counters();
+ HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
+
+ DCHECK(!native_module->lazy_compile_frozen());
+
+ base::ElapsedTimer compilation_timer;
+
+ NativeModuleModificationScope native_module_modification_scope(native_module);
+
+ DCHECK(!native_module->HasCode(static_cast<uint32_t>(func_index)));
+
+ compilation_timer.Start();
+
+ TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
+
+ const uint8_t* module_start = native_module->wire_bytes().start();
+
+ const WasmFunction* func = &native_module->module()->functions[func_index];
+ FunctionBody func_body{func->sig, func->code.offset(),
+ module_start + func->code.offset(),
+ module_start + func->code.end_offset()};
+
+ CompilationStateImpl* compilation_state =
+ Impl(native_module->compilation_state());
+ ExecutionTierPair tiers = GetRequestedExecutionTiers(
+ native_module->module(), compilation_state->compile_mode(),
+ native_module->enabled_features(), func_index);
+
+ WasmCompilationUnit baseline_unit(func_index, tiers.baseline_tier);
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmCompilationResult result = baseline_unit.ExecuteCompilation(
+ isolate->wasm_engine(), &env, compilation_state->GetWireBytesStorage(),
+ isolate->counters(), compilation_state->detected_features());
+ WasmCodeRefScope code_ref_scope;
+ WasmCode* code = native_module->AddCompiledCode(std::move(result));
+
+ if (tiers.baseline_tier < tiers.top_tier) {
+ auto tiering_unit =
+ base::make_unique<WasmCompilationUnit>(func_index, tiers.top_tier);
+ compilation_state->AddTopTierCompilationUnit(std::move(tiering_unit));
+ }
+
+ // During lazy compilation, we should never get compilation errors. The module
+ // was verified before starting execution with lazy compilation.
+ // This might be OOM, but then we cannot continue execution anyway.
+ // TODO(clemensh): According to the spec, we can actually skip validation at
+ // module creation time, and return a function that always traps here.
+ CHECK(!compilation_state->failed());
+
+ // The code we just produced should be the one that was requested.
+ DCHECK_EQ(func_index, code->index());
+
+ if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
+
+ double func_kb = 1e-3 * func->code.length();
+ double compilation_seconds = compilation_timer.Elapsed().InSecondsF();
+
+ counters->wasm_lazily_compiled_functions()->Increment();
+
+ int throughput_sample = static_cast<int>(func_kb / compilation_seconds);
+ counters->wasm_lazy_compilation_throughput()->AddSample(throughput_sample);
}
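A worked example of the throughput sample computed at the end of {CompileLazy}: the function size in KB divided by the compile time in seconds, truncated to an int (the function name below is hypothetical).

int LazyCompileThroughputSample(int func_body_bytes,
                                double compilation_seconds) {
  // E.g. a 4096-byte body compiled in 0.5 ms: 4.096 KB / 0.0005 s = 8192 KB/s.
  double func_kb = 1e-3 * func_body_bytes;
  return static_cast<int>(func_kb / compilation_seconds);
}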
+namespace {
+
void RecordStats(const Code code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
-double MonotonicallyIncreasingTimeInMs() {
- return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
- base::Time::kMillisecondsPerSecond;
-}
+constexpr int kMainThreadTaskId = -1;
+
+// Run by the main thread and background tasks to take part in compilation.
+// Returns whether any units were executed.
+bool ExecuteCompilationUnits(
+ const std::shared_ptr<BackgroundCompileToken>& token, Counters* counters,
+ int task_id, CompileBaselineOnly baseline_only) {
+ TRACE_COMPILE("Compiling (task %d)...\n", task_id);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ExecuteCompilationUnits");
+
+ const bool is_foreground = task_id == kMainThreadTaskId;
+ // The main thread uses task id 0, which might collide with one of the
+ // background tasks. This is fine, as it will only cause some contention on
+  // the one queue, but otherwise works fine.
+ if (is_foreground) task_id = 0;
+
+ Platform* platform = V8::GetCurrentPlatform();
+  // Deadline is 50ms from now.
+ static constexpr double kBackgroundCompileTimeLimit =
+ 50.0 / base::Time::kMillisecondsPerSecond;
+ const double deadline =
+ platform->MonotonicallyIncreasingTime() + kBackgroundCompileTimeLimit;
+
+ // These fields are initialized in a {BackgroundCompileScope} before
+ // starting compilation.
+ base::Optional<CompilationEnv> env;
+ std::shared_ptr<WireBytesStorage> wire_bytes;
+ std::shared_ptr<const WasmModule> module;
+ WasmEngine* wasm_engine = nullptr;
+ std::unique_ptr<WasmCompilationUnit> unit;
+ WasmFeatures detected_features = kNoWasmFeatures;
+
+ auto stop = [is_foreground, task_id,
+ &detected_features](BackgroundCompileScope& compile_scope) {
+ if (is_foreground) {
+ compile_scope.compilation_state()->UpdateDetectedFeatures(
+ detected_features);
+ } else {
+ compile_scope.compilation_state()->OnBackgroundTaskStopped(
+ task_id, detected_features);
+ }
+ };
-// Run by each compilation task and by the main thread (i.e. in both
-// foreground and background threads).
-bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
- NativeModule* native_module,
- CompilationStateImpl* compilation_state,
- WasmFeatures* detected,
- Counters* counters) {
- DisallowHeapAccess no_heap_access;
+ // Preparation (synchronized): Initialize the fields above and get the first
+ // compilation unit.
+ {
+ BackgroundCompileScope compile_scope(token);
+ if (compile_scope.cancelled()) return false;
+ env.emplace(compile_scope.native_module()->CreateCompilationEnv());
+ wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
+ module = compile_scope.native_module()->shared_module();
+ wasm_engine = compile_scope.native_module()->engine();
+ unit = compile_scope.compilation_state()->GetNextCompilationUnit(
+ task_id, baseline_only);
+ if (unit == nullptr) {
+ stop(compile_scope);
+ return false;
+ }
+ }
- std::unique_ptr<WasmCompilationUnit> unit =
- compilation_state->GetNextCompilationUnit();
- if (unit == nullptr) return false;
+ std::vector<WasmCompilationResult> results_to_publish;
+
+ auto publish_results = [&results_to_publish](
+ BackgroundCompileScope* compile_scope) {
+ if (results_to_publish.empty()) return;
+ WasmCodeRefScope code_ref_scope;
+ std::vector<WasmCode*> code_vector =
+ compile_scope->native_module()->AddCompiledCode(
+ VectorOf(results_to_publish));
+ compile_scope->compilation_state()->OnFinishedUnits(VectorOf(code_vector));
+ results_to_publish.clear();
+ };
- WasmCompilationResult result = unit->ExecuteCompilation(
- env, compilation_state->GetWireBytesStorage(), counters, detected);
+ bool compilation_failed = false;
+ while (true) {
+ // (asynchronous): Execute the compilation.
+ WasmCompilationResult result = unit->ExecuteCompilation(
+ wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
+ results_to_publish.emplace_back(std::move(result));
- WasmCode* code = unit->Publish(std::move(result), native_module);
- compilation_state->OnFinishedUnit(unit->requested_tier(), code);
+ // (synchronized): Publish the compilation result and get the next unit.
+ {
+ BackgroundCompileScope compile_scope(token);
+ if (compile_scope.cancelled()) return true;
+ if (!results_to_publish.back().succeeded()) {
+ // Compile error.
+ compile_scope.compilation_state()->SetError();
+ stop(compile_scope);
+ compilation_failed = true;
+ break;
+ }
+ // Publish TurboFan units immediately to reduce peak memory consumption.
+ if (result.requested_tier == ExecutionTier::kTurbofan) {
+ publish_results(&compile_scope);
+ }
- return true;
-}
+ // Get next unit.
+ if (deadline < platform->MonotonicallyIncreasingTime()) {
+ unit = nullptr;
+ } else {
+ unit = compile_scope.compilation_state()->GetNextCompilationUnit(
+ task_id, baseline_only);
+ }
-void InitializeCompilationUnits(NativeModule* native_module,
- WasmEngine* wasm_engine) {
- ModuleWireBytes wire_bytes(native_module->wire_bytes());
- const WasmModule* module = native_module->module();
- CompilationUnitBuilder builder(native_module, wasm_engine);
- uint32_t start = module->num_imported_functions;
- uint32_t end = start + module->num_declared_functions;
- for (uint32_t i = start; i < end; ++i) {
- builder.AddUnit(i);
+ if (unit == nullptr) {
+ publish_results(&compile_scope);
+ stop(compile_scope);
+ return true;
+ }
+ }
}
- builder.Commit();
+ // We only get here if compilation failed. Other exits return directly.
+ DCHECK(compilation_failed);
+ USE(compilation_failed);
+ token->Cancel();
+ return true;
}
-void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
- // Data structures for the parallel compilation.
-
- //-----------------------------------------------------------------------
- // For parallel compilation:
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units} within the
- // {compilation_state}. By adding units to the {compilation_state}, new
- // {BackgroundCompileTasks} instances are spawned which run on
- // the background threads.
- // 2) The background threads and the main thread pick one compilation unit at
- // a time and execute the parallel phase of the compilation unit.
-
- // Turn on the {CanonicalHandleScope} so that the background threads can
- // use the node cache.
- CanonicalHandleScope canonical(isolate);
-
- CompilationStateImpl* compilation_state =
- Impl(native_module->compilation_state());
- DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
- int num_wasm_functions =
- static_cast<int>(native_module->module()->num_declared_functions);
- compilation_state->SetNumberOfFunctionsToCompile(num_wasm_functions);
-
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units} within the
- // {compilation_state}. By adding units to the {compilation_state}, new
- // {BackgroundCompileTask} instances are spawned which run on
- // background threads.
- InitializeCompilationUnits(native_module, isolate->wasm_engine());
-
- // 2) The background threads and the main thread pick one compilation unit at
- // a time and execute the parallel phase of the compilation unit.
- WasmFeatures detected_features;
- CompilationEnv env = native_module->CreateCompilationEnv();
- // TODO(wasm): This might already execute TurboFan units on the main thread,
- // while waiting for baseline compilation to finish. This can introduce
- // additional delay.
- // TODO(wasm): This is a busy-wait loop once all units have started executing
- // in background threads. Replace by a semaphore / barrier.
- while (!compilation_state->failed() &&
- !compilation_state->baseline_compilation_finished()) {
- FetchAndExecuteCompilationUnit(&env, native_module, compilation_state,
- &detected_features, isolate->counters());
+DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index,
+ Vector<const uint8_t> code,
+ Counters* counters,
+ AccountingAllocator* allocator,
+ WasmFeatures enabled_features) {
+ const WasmFunction* func = &module->functions[func_index];
+ FunctionBody body{func->sig, func->code.offset(), code.start(), code.end()};
+ DecodeResult result;
+ {
+ auto time_counter = SELECT_WASM_COUNTER(counters, module->origin,
+ wasm_decode, function_time);
+ TimedHistogramScope wasm_decode_function_time_scope(time_counter);
+ WasmFeatures detected;
+ result =
+ VerifyWasmCode(allocator, enabled_features, module, &detected, body);
}
-
- // Publish features from the foreground and background tasks.
- compilation_state->PublishDetectedFeatures(isolate, detected_features);
+ return result;
}
-void CompileSequentially(Isolate* isolate, NativeModule* native_module,
- ErrorThrower* thrower) {
+enum class OnlyLazyFunctions : bool { kNo = false, kYes = true };
+
+void ValidateSequentially(
+ const WasmModule* module, NativeModule* native_module, Counters* counters,
+ AccountingAllocator* allocator, ErrorThrower* thrower,
+    OnlyLazyFunctions only_lazy_functions = OnlyLazyFunctions::kNo) {
DCHECK(!thrower->error());
+ uint32_t start = module->num_imported_functions;
+ uint32_t end = start + module->num_declared_functions;
+ auto enabled_features = native_module->enabled_features();
+ for (uint32_t func_index = start; func_index < end; func_index++) {
+ // Skip non-lazy functions if requested.
+ if (only_lazy_functions == OnlyLazyFunctions::kYes &&
+ !IsLazyCompilation(module, native_module, enabled_features,
+ func_index)) {
+ continue;
+ }
+ ModuleWireBytes wire_bytes{native_module->wire_bytes()};
+ const WasmFunction* func = &module->functions[func_index];
+ Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
+ DecodeResult result = ValidateSingleFunction(
+ module, func_index, code, counters, allocator, enabled_features);
- ModuleWireBytes wire_bytes(native_module->wire_bytes());
- const WasmModule* module = native_module->module();
- WasmFeatures detected = kNoWasmFeatures;
- auto* comp_state = Impl(native_module->compilation_state());
- ExecutionTier tier =
- WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
- for (const WasmFunction& func : module->functions) {
- if (func.imported) continue; // Imports are compiled at instantiation time.
-
- // Compile the function.
- WasmCompilationUnit::CompileWasmFunction(isolate, native_module, &detected,
- &func, tier);
- if (comp_state->failed()) {
- thrower->CompileFailed(comp_state->GetCompileError());
- break;
+ if (result.failed()) {
+ WasmName name = wire_bytes.GetNameOrNull(func, module);
+ if (name.start() == nullptr) {
+ thrower->CompileError(
+ "Compiling function #%d failed: %s @+%u", func->func_index,
+ result.error().message().c_str(), result.error().offset());
+ } else {
+ TruncatedUserString<> name(wire_bytes.GetNameOrNull(func, module));
+ thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u",
+ func->func_index, name.length(), name.start(),
+ result.error().message().c_str(),
+ result.error().offset());
+ }
}
}
- UpdateFeatureUseCounts(isolate, detected);
}
-void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
- ErrorThrower* thrower) {
- DCHECK(!thrower->error());
+void InitializeCompilationUnits(NativeModule* native_module) {
+ // Set number of functions that must be compiled to consider the module fully
+ // compiled.
+ auto wasm_module = native_module->module();
+ int num_functions = wasm_module->num_declared_functions;
+ DCHECK_IMPLIES(!native_module->enabled_features().compilation_hints,
+ wasm_module->num_lazy_compilation_hints == 0);
+ int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
+ CompilationStateImpl* compilation_state =
+ Impl(native_module->compilation_state());
+ compilation_state->SetNumberOfFunctionsToCompile(num_functions,
+ num_lazy_functions);
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const WasmModule* module = native_module->module();
+ CompilationUnitBuilder builder(native_module);
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
- for (uint32_t i = start; i < end; ++i) {
- const WasmFunction& func = module->functions[i];
-
- const byte* base = wire_bytes.start();
- FunctionBody body{func.sig, func.code.offset(), base + func.code.offset(),
- base + func.code.end_offset()};
- DecodeResult result;
- {
- auto time_counter = SELECT_WASM_COUNTER(
- isolate->counters(), module->origin, wasm_decode, function_time);
-
- TimedHistogramScope wasm_decode_function_time_scope(time_counter);
- WasmFeatures detected;
- result = VerifyWasmCode(isolate->allocator(),
- native_module->enabled_features(), module,
- &detected, body);
- }
- if (result.failed()) {
- TruncatedUserString<> name(wire_bytes.GetNameOrNull(&func, module));
- thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i,
- name.length(), name.start(),
- result.error().message().c_str(),
- result.error().offset());
- break;
+ for (uint32_t func_index = start; func_index < end; func_index++) {
+ if (IsLazyCompilation(module, native_module,
+ native_module->enabled_features(), func_index)) {
+ native_module->UseLazyStub(func_index);
+ } else {
+ builder.AddUnits(func_index);
}
}
+ builder.Commit();
+}
+
+bool NeedsDeterministicCompile() {
+ return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1;
}
void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
@@ -607,7 +908,8 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
- if (compile_lazy(wasm_module)) {
+ if (FLAG_wasm_lazy_compilation ||
+ (FLAG_asm_wasm_lazy_compilation && wasm_module->origin == kAsmJsOrigin)) {
if (wasm_module->origin == kWasmOrigin) {
// Validate wasm modules for lazy compilation. Don't validate asm.js
// modules, they are valid by construction (otherwise a CHECK will fail
@@ -615,29 +917,71 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
// TODO(clemensh): According to the spec, we can actually skip validation
// at module creation time, and return a function that always traps at
// (lazy) compilation time.
- ValidateSequentially(isolate, native_module, thrower);
+ ValidateSequentially(wasm_module, native_module, isolate->counters(),
+ isolate->allocator(), thrower);
+ // On error: Return and leave the module in an unexecutable state.
if (thrower->error()) return;
}
+ native_module->set_lazy_compilation(true);
+ native_module->UseLazyStubs();
+ return;
+ }
- native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
- } else {
- size_t funcs_to_compile =
- wasm_module->functions.size() - wasm_module->num_imported_functions;
- bool compile_parallel =
- !FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks > 0 &&
- funcs_to_compile > 1 &&
- V8::GetCurrentPlatform()->NumberOfWorkerThreads() > 0;
-
- if (compile_parallel) {
- CompileInParallel(isolate, native_module);
- } else {
- CompileSequentially(isolate, native_module, thrower);
- }
- auto* compilation_state = Impl(native_module->compilation_state());
- if (compilation_state->failed()) {
- thrower->CompileFailed(compilation_state->GetCompileError());
+ if (native_module->enabled_features().compilation_hints) {
+ ValidateSequentially(wasm_module, native_module, isolate->counters(),
+ isolate->allocator(), thrower,
+ OnlyLazyFunctions::kYes);
+ // On error: Return and leave the module in an unexecutable state.
+ if (thrower->error()) return;
+ }
+
+ // Turn on the {CanonicalHandleScope} so that the background threads can
+ // use the node cache.
+ CanonicalHandleScope canonical(isolate);
+
+ auto* compilation_state = Impl(native_module->compilation_state());
+ DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
+
+ // Install a callback to notify us once background compilation finished, or
+ // compilation failed.
+ auto baseline_finished_semaphore = std::make_shared<base::Semaphore>(0);
+ // The callback captures a shared ptr to the semaphore.
+ compilation_state->AddCallback(
+ [baseline_finished_semaphore](CompilationEvent event) {
+ if (event == CompilationEvent::kFinishedBaselineCompilation ||
+ event == CompilationEvent::kFailedCompilation) {
+ baseline_finished_semaphore->Signal();
+ }
+ });
+
+ // Initialize the compilation units and kick off background compile tasks.
+ InitializeCompilationUnits(native_module);
+
+ // If tiering is disabled, the main thread can execute any unit (all of them
+ // are part of initial compilation). Otherwise, just execute baseline units.
+ bool is_tiering = compilation_state->compile_mode() == CompileMode::kTiering;
+ auto baseline_only = is_tiering ? kBaselineOnly : kBaselineOrTopTier;
+  // The main thread contributes to the compilation, except if we need
+ // deterministic compilation; in that case, the single background task will
+ // execute all compilation.
+ if (!NeedsDeterministicCompile()) {
+ while (ExecuteCompilationUnits(
+ compilation_state->background_compile_token(), isolate->counters(),
+ kMainThreadTaskId, baseline_only)) {
+ // Continue executing compilation units.
}
}
+
+ // Now wait until baseline compilation finished.
+ baseline_finished_semaphore->Wait();
+
+ compilation_state->PublishDetectedFeatures(isolate);
+
+ if (compilation_state->failed()) {
+ ValidateSequentially(wasm_module, native_module, isolate->counters(),
+ isolate->allocator(), thrower);
+ CHECK(thrower->error());
+ }
}
// The runnable task that performs compilations in the background.
@@ -645,96 +989,27 @@ class BackgroundCompileTask : public CancelableTask {
public:
explicit BackgroundCompileTask(CancelableTaskManager* manager,
std::shared_ptr<BackgroundCompileToken> token,
- std::shared_ptr<Counters> async_counters)
+ std::shared_ptr<Counters> async_counters,
+ int task_id)
: CancelableTask(manager),
token_(std::move(token)),
- async_counters_(std::move(async_counters)) {}
+ async_counters_(std::move(async_counters)),
+ task_id_(task_id) {}
void RunInternal() override {
- TRACE_COMPILE("(3b) Compiling...\n");
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "BackgroundCompileTask::RunInternal");
-
- double deadline = MonotonicallyIncreasingTimeInMs() + 50.0;
-
- // These fields are initialized in a {BackgroundCompileScope} before
- // starting compilation.
- base::Optional<CompilationEnv> env;
- std::shared_ptr<WireBytesStorage> wire_bytes;
- std::shared_ptr<const WasmModule> module;
- std::unique_ptr<WasmCompilationUnit> unit;
- WasmFeatures detected_features = kNoWasmFeatures;
-
- // Preparation (synchronized): Initialize the fields above and get the first
- // compilation unit.
- {
- BackgroundCompileScope compile_scope(token_);
- if (compile_scope.cancelled()) return;
- env.emplace(compile_scope.native_module()->CreateCompilationEnv());
- wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
- module = compile_scope.native_module()->shared_module();
- unit = compile_scope.compilation_state()->GetNextCompilationUnit();
- if (unit == nullptr) {
- compile_scope.compilation_state()->OnBackgroundTaskStopped(
- detected_features);
- return;
- }
- }
-
- bool compilation_failed = false;
- while (true) {
- // (asynchronous): Execute the compilation.
-
- WasmCompilationResult result = unit->ExecuteCompilation(
- &env.value(), wire_bytes, async_counters_.get(), &detected_features);
-
- // (synchronized): Publish the compilation result and get the next unit.
- {
- BackgroundCompileScope compile_scope(token_);
- if (compile_scope.cancelled()) return;
- WasmCode* code =
- unit->Publish(std::move(result), compile_scope.native_module());
- if (code == nullptr) {
- // Compile error.
- compile_scope.compilation_state()->OnBackgroundTaskStopped(
- detected_features);
- compilation_failed = true;
- break;
- }
-
- // Successfully finished one unit.
- compile_scope.compilation_state()->OnFinishedUnit(
- unit->requested_tier(), code);
- if (deadline < MonotonicallyIncreasingTimeInMs()) {
- compile_scope.compilation_state()->ReportDetectedFeatures(
- detected_features);
- compile_scope.compilation_state()->RestartBackgroundCompileTask();
- return;
- }
-
- // Get next unit.
- unit = compile_scope.compilation_state()->GetNextCompilationUnit();
- if (unit == nullptr) {
- compile_scope.compilation_state()->OnBackgroundTaskStopped(
- detected_features);
- return;
- }
- }
- }
- // We only get here if compilation failed. Other exits return directly.
- DCHECK(compilation_failed);
- USE(compilation_failed);
- token_->Cancel();
+ ExecuteCompilationUnits(token_, async_counters_.get(), task_id_,
+ kBaselineOrTopTier);
}
private:
- std::shared_ptr<BackgroundCompileToken> token_;
- std::shared_ptr<Counters> async_counters_;
+ const std::shared_ptr<BackgroundCompileToken> token_;
+ const std::shared_ptr<Counters> async_counters_;
+ const int task_id_;
};
} // namespace
-std::unique_ptr<NativeModule> CompileToNativeModule(
+std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out) {
@@ -768,8 +1043,8 @@ std::unique_ptr<NativeModule> CompileToNativeModule(
if (thrower->error()) return {};
// Compile JS->wasm wrappers for exported functions.
- *export_wrappers_out =
- isolate->factory()->NewFixedArray(export_wrapper_size, TENURED);
+ *export_wrappers_out = isolate->factory()->NewFixedArray(
+ export_wrapper_size, AllocationType::kOld);
CompileJsToWasmWrappers(isolate, native_module->module(),
*export_wrappers_out);
@@ -779,14 +1054,6 @@ std::unique_ptr<NativeModule> CompileToNativeModule(
return native_module;
}
-void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
- ErrorThrower* thrower,
- const WasmModule* wasm_module,
- NativeModule* native_module) {
- native_module->DisableTrapHandler();
- CompileNativeModule(isolate, thrower, wasm_module, native_module);
-}
-
AsyncCompileJob::AsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
@@ -936,7 +1203,8 @@ void AsyncCompileJob::FinishCompile() {
if (script->type() == Script::TYPE_WASM &&
module_object_->module()->source_map_url.size() != 0) {
MaybeHandle<String> src_map_str = isolate_->factory()->NewStringFromUtf8(
- CStrVector(module_object_->module()->source_map_url.c_str()), TENURED);
+ CStrVector(module_object_->module()->source_map_url.c_str()),
+ AllocationType::kOld);
script->set_source_mapping_url(*src_map_str.ToHandleChecked());
}
isolate_->debug()->OnAfterCompile(script);
@@ -944,8 +1212,7 @@ void AsyncCompileJob::FinishCompile() {
// We can only update the feature counts once the entire compile is done.
auto compilation_state =
Impl(module_object_->native_module()->compilation_state());
- compilation_state->PublishDetectedFeatures(
- isolate_, *compilation_state->detected_features());
+ compilation_state->PublishDetectedFeatures(isolate_);
// TODO(bbudge) Allow deserialization without wrapper compilation, so we can
// just compile wrappers here.
@@ -956,7 +1223,7 @@ void AsyncCompileJob::FinishCompile() {
FinishModule();
}
-void AsyncCompileJob::AsyncCompileFailed(const WasmError& error) {
+void AsyncCompileJob::DecodeFailed(const WasmError& error) {
ErrorThrower thrower(isolate_, "WebAssembly.compile()");
thrower.CompileFailed(error);
// {job} keeps the {this} pointer alive.
@@ -965,6 +1232,17 @@ void AsyncCompileJob::AsyncCompileFailed(const WasmError& error) {
resolver_->OnCompilationFailed(thrower.Reify());
}
+void AsyncCompileJob::AsyncCompileFailed() {
+ ErrorThrower thrower(isolate_, "WebAssembly.compile()");
+ ValidateSequentially(native_module_->module(), native_module_.get(),
+ isolate_->counters(), isolate_->allocator(), &thrower);
+ DCHECK(thrower.error());
+ // {job} keeps the {this} pointer alive.
+ std::shared_ptr<AsyncCompileJob> job =
+ isolate_->wasm_engine()->RemoveCompileJob(this);
+ resolver_->OnCompilationFailed(thrower.Reify());
+}
+
void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
resolver_->OnCompilationSucceeded(result);
}
@@ -984,12 +1262,14 @@ class AsyncCompileJob::CompilationStateCallback {
break;
case CompilationEvent::kFinishedTopTierCompilation:
DCHECK_EQ(CompilationEvent::kFinishedBaselineCompilation, last_event_);
- // This callback should not react to top tier finished callbacks, since
- // the job might already be gone then.
+ // At this point, the job will already be gone, thus do not access it
+ // here.
break;
case CompilationEvent::kFailedCompilation: {
DCHECK(!last_event_.has_value());
- job_->DoSync<CompileFailed>();
+ if (job_->DecrementAndCheckFinisherCount()) {
+ job_->DoSync<CompileFailed>();
+ }
break;
}
default:
@@ -1150,6 +1430,30 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
job->enabled_features_, job->wire_bytes_.start(),
job->wire_bytes_.end(), false, kWasmOrigin, counters_,
job->isolate()->wasm_engine()->allocator());
+
+ // Validate lazy functions here.
+ auto enabled_features = job->enabled_features_;
+ if (enabled_features.compilation_hints && result.ok()) {
+ const WasmModule* module = result.value().get();
+ auto allocator = job->isolate()->wasm_engine()->allocator();
+ int start = module->num_imported_functions;
+ int end = start + module->num_declared_functions;
+
+ for (int func_index = start; func_index < end; func_index++) {
+ const WasmFunction* func = &module->functions[func_index];
+ Vector<const uint8_t> code = job->wire_bytes_.GetFunctionBytes(func);
+
+ if (IsLazyCompilation(module, enabled_features, func_index)) {
+ DecodeResult function_result =
+ ValidateSingleFunction(module, func_index, code, counters_,
+ allocator, enabled_features);
+ if (function_result.failed()) {
+ result = ModuleResult(function_result.error());
+ break;
+ }
+ }
+ }
+ }
}
if (result.failed()) {
// Decoding failure; reject the promise and clean up.
@@ -1176,8 +1480,8 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(1b) Decoding failed.\n");
- // {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job->AsyncCompileFailed(error_);
+ // {job_} is deleted in DecodeFailed, therefore the {return}.
+ return job->DecodeFailed(error_);
}
};
@@ -1203,15 +1507,6 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
job->CreateNativeModule(module_);
- size_t num_functions =
- module_->functions.size() - module_->num_imported_functions;
-
- if (num_functions == 0) {
- // Degenerate case of an empty module.
- job->FinishCompile();
- return;
- }
-
CompilationStateImpl* compilation_state =
Impl(job->native_module_->compilation_state());
compilation_state->AddCallback(CompilationStateCallback{job});
@@ -1221,11 +1516,8 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// InitializeCompilationUnits always returns 0 for streaming compilation,
// then DoAsync would do the same as NextStep already.
- compilation_state->SetNumberOfFunctionsToCompile(
- module_->num_declared_functions);
// Add compilation units and kick off compilation.
- InitializeCompilationUnits(job->native_module_.get(),
- job->isolate()->wasm_engine());
+ InitializeCompilationUnits(job->native_module_.get());
}
}
};
@@ -1237,13 +1529,34 @@ class AsyncCompileJob::CompileFailed : public CompileStep {
private:
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(3a) Compilation failed\n");
+ DCHECK(job->native_module_->compilation_state()->failed());
- WasmError error =
- Impl(job->native_module_->compilation_state())->GetCompileError();
// {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job->AsyncCompileFailed(error);
+ return job->AsyncCompileFailed();
+ }
+};
+
+namespace {
+class SampleTopTierCodeSizeCallback {
+ public:
+ explicit SampleTopTierCodeSizeCallback(
+ std::weak_ptr<NativeModule> native_module)
+ : native_module_(std::move(native_module)) {}
+
+ void operator()(CompilationEvent event) {
+ // This callback is registered after baseline compilation finished, so the
+ // only possible event to follow is {kFinishedTopTierCompilation}.
+ DCHECK_EQ(CompilationEvent::kFinishedTopTierCompilation, event);
+ if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
+ native_module->engine()->SampleTopTierCodeSizeInAllIsolates(
+ native_module);
+ }
}
+
+ private:
+ std::weak_ptr<NativeModule> native_module_;
};
+} // namespace
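The callback above illustrates the lifetime rule noted below: because it captures only a weak_ptr and upgrades it just for the duration of the call, registering it never keeps the NativeModule alive. A minimal standalone sketch (placeholder names, not V8 APIs):

#include <memory>

struct Module {};

class WeakCallback {
 public:
  explicit WeakCallback(std::weak_ptr<Module> module)
      : module_(std::move(module)) {}

  void operator()() {
    // If the module has already been destroyed, this is a no-op.
    if (std::shared_ptr<Module> module = module_.lock()) {
      (void)module;  // placeholder for the real sampling call
    }
  }

 private:
  std::weak_ptr<Module> module_;
};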
//==========================================================================
// Step 3b (sync): Compilation finished.
@@ -1252,6 +1565,15 @@ class AsyncCompileJob::CompileFinished : public CompileStep {
private:
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(3b) Compilation finished\n");
+ DCHECK(!job->native_module_->compilation_state()->failed());
+ // Sample the generated code size when baseline compilation finished.
+ job->native_module_->SampleCodeSize(job->isolate_->counters(),
+ NativeModule::kAfterBaseline);
+ // Also, set a callback to sample the code size after top-tier compilation
+ // finished. This callback will *not* keep the NativeModule alive.
+ job->native_module_->compilation_state()->AddCallback(
+ SampleTopTierCodeSizeCallback{job->native_module_});
+ // Then finalize and publish the generated module.
job->FinishCompile();
}
};
@@ -1361,17 +1683,24 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
// task.
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false);
- job_->native_module_->compilation_state()->SetWireBytesStorage(
- std::move(wire_bytes_storage));
-
auto* compilation_state = Impl(job_->native_module_->compilation_state());
- compilation_state->SetNumberOfFunctionsToCompile(functions_count);
+ compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
+
+ // Set number of functions that must be compiled to consider the module fully
+ // compiled.
+ auto wasm_module = job_->native_module_->module();
+ int num_functions = wasm_module->num_declared_functions;
+ DCHECK_IMPLIES(!job_->native_module_->enabled_features().compilation_hints,
+ wasm_module->num_lazy_compilation_hints == 0);
+ int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
+ compilation_state->SetNumberOfFunctionsToCompile(num_functions,
+ num_lazy_functions);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.store(2);
- compilation_unit_builder_.reset(new CompilationUnitBuilder(
- job_->native_module_.get(), job_->isolate()->wasm_engine()));
+ compilation_unit_builder_.reset(
+ new CompilationUnitBuilder(job_->native_module_.get()));
return true;
}
@@ -1383,11 +1712,33 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
decoder_.DecodeFunctionBody(
num_functions_, static_cast<uint32_t>(bytes.length()), offset, false);
- int index = num_functions_ + decoder_.module()->num_imported_functions;
- compilation_unit_builder_->AddUnit(index);
+ NativeModule* native_module = job_->native_module_.get();
+ const WasmModule* module = native_module->module();
+ auto enabled_features = native_module->enabled_features();
+ uint32_t func_index =
+ num_functions_ + decoder_.module()->num_imported_functions;
+
+ if (IsLazyCompilation(module, native_module, enabled_features, func_index)) {
+ Counters* counters = Impl(native_module->compilation_state())->counters();
+ AccountingAllocator* allocator = native_module->engine()->allocator();
+
+ // The native module does not own the wire bytes until {SetWireBytes} is
+ // called in {OnFinishedStream}. Validation must use {bytes} parameter.
+ DecodeResult result = ValidateSingleFunction(
+ module, func_index, bytes, counters, allocator, enabled_features);
+
+ if (result.failed()) {
+ FinishAsyncCompileJobWithError(result.error());
+ return false;
+ }
+
+ native_module->UseLazyStub(func_index);
+ } else {
+ compilation_unit_builder_->AddUnits(func_index);
+ }
+
++num_functions_;
- // This method always succeeds. The return value is necessary to comply with
- // the StreamingProcessor interface.
+
return true;
}
@@ -1425,7 +1776,11 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
job_->native_module_->SetWireBytes(std::move(bytes));
if (needs_finish) {
- job_->FinishCompile();
+ if (job_->native_module_->compilation_state()->failed()) {
+ job_->AsyncCompileFailed();
+ } else {
+ job_->FinishCompile();
+ }
}
}
@@ -1461,9 +1816,20 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
return true;
}
+namespace {
+int GetMaxBackgroundTasks() {
+ if (NeedsDeterministicCompile()) return 1;
+ int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
+ int num_compile_tasks =
+ std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
+ return std::max(1, num_compile_tasks);
+}
+} // namespace
+
CompilationStateImpl::CompilationStateImpl(
- NativeModule* native_module, std::shared_ptr<Counters> async_counters)
- : native_module_(native_module),
+ const std::shared_ptr<NativeModule>& native_module,
+ std::shared_ptr<Counters> async_counters)
+ : native_module_(native_module.get()),
background_compile_token_(
std::make_shared<BackgroundCompileToken>(native_module)),
compile_mode_(FLAG_wasm_tier_up &&
@@ -1471,13 +1837,14 @@ CompilationStateImpl::CompilationStateImpl(
? CompileMode::kTiering
: CompileMode::kRegular),
async_counters_(std::move(async_counters)),
- max_background_tasks_(std::max(
- 1, std::min(FLAG_wasm_num_compilation_tasks,
- V8::GetCurrentPlatform()->NumberOfWorkerThreads()))) {}
-
-CompilationStateImpl::~CompilationStateImpl() {
- CompilationError* error = compile_error_.load(std::memory_order_acquire);
- if (error != nullptr) delete error;
+ max_background_tasks_(GetMaxBackgroundTasks()),
+ compilation_unit_queues_(max_background_tasks_),
+ available_task_ids_(max_background_tasks_) {
+ for (int i = 0; i < max_background_tasks_; ++i) {
+ // Ids are popped on task creation, so reverse this list. This ensures that
+ // the first background task gets id 0.
+ available_task_ids_[i] = max_background_tasks_ - 1 - i;
+ }
}
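A small self-contained sketch (illustration only) of the id-pool initialization above: ids are stored in reverse so that popping from the back hands id 0 to the first background task.

#include <cassert>
#include <vector>

int main() {
  const int max_background_tasks = 3;
  std::vector<int> available_task_ids(max_background_tasks);
  for (int i = 0; i < max_background_tasks; ++i) {
    available_task_ids[i] = max_background_tasks - 1 - i;  // {2, 1, 0}
  }
  // Ids are popped from the back on task creation.
  int first_task_id = available_task_ids.back();
  available_task_ids.pop_back();
  assert(first_task_id == 0);
  return 0;
}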
void CompilationStateImpl::AbortCompilation() {
@@ -1487,13 +1854,26 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
-void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
+void CompilationStateImpl::SetNumberOfFunctionsToCompile(
+ int num_functions, int num_lazy_functions) {
DCHECK(!failed());
base::MutexGuard guard(&callbacks_mutex_);
- outstanding_baseline_units_ = num_functions;
- if (compile_mode_ == CompileMode::kTiering) {
- outstanding_tiering_units_ = num_functions;
+ int num_functions_to_compile = num_functions - num_lazy_functions;
+ outstanding_baseline_functions_ = num_functions_to_compile;
+ outstanding_top_tier_functions_ = num_functions_to_compile;
+ highest_execution_tier_.assign(num_functions, ExecutionTier::kNone);
+
+ // Degenerate case of an empty module. Trigger callbacks immediately.
+ if (num_functions_to_compile == 0) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedBaselineCompilation);
+ }
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ // Clear the callbacks because no more events will be delivered.
+ callbacks_.clear();
}
}
@@ -1503,171 +1883,185 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
}
void CompilationStateImpl::AddCompilationUnits(
- std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
- std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units) {
- {
- base::MutexGuard guard(&mutex_);
-
- if (compile_mode_ == CompileMode::kTiering) {
- DCHECK_EQ(baseline_units.size(), tiering_units.size());
- DCHECK_EQ(tiering_units.back()->requested_tier(),
- ExecutionTier::kOptimized);
- tiering_compilation_units_.insert(
- tiering_compilation_units_.end(),
- std::make_move_iterator(tiering_units.begin()),
- std::make_move_iterator(tiering_units.end()));
- } else {
- DCHECK(tiering_compilation_units_.empty());
- }
-
- baseline_compilation_units_.insert(
- baseline_compilation_units_.end(),
- std::make_move_iterator(baseline_units.begin()),
- std::make_move_iterator(baseline_units.end()));
- }
+ Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
+ Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units) {
+ compilation_unit_queues_.AddUnits(baseline_units, top_tier_units);
RestartBackgroundTasks();
}
-std::unique_ptr<WasmCompilationUnit>
-CompilationStateImpl::GetNextCompilationUnit() {
- base::MutexGuard guard(&mutex_);
-
- std::vector<std::unique_ptr<WasmCompilationUnit>>& units =
- baseline_compilation_units_.empty() ? tiering_compilation_units_
- : baseline_compilation_units_;
+void CompilationStateImpl::AddTopTierCompilationUnit(
+ std::unique_ptr<WasmCompilationUnit> unit) {
+ AddCompilationUnits({}, {&unit, 1});
+}
- if (!units.empty()) {
- std::unique_ptr<WasmCompilationUnit> unit = std::move(units.back());
- units.pop_back();
- return unit;
- }
+std::unique_ptr<WasmCompilationUnit>
+CompilationStateImpl::GetNextCompilationUnit(
+ int task_id, CompileBaselineOnly baseline_only) {
+ return compilation_unit_queues_.GetNextUnit(task_id, baseline_only);
+}
- return std::unique_ptr<WasmCompilationUnit>();
+void CompilationStateImpl::OnFinishedUnit(WasmCode* code) {
+ OnFinishedUnits({&code, 1});
}
-void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
- // This mutex guarantees that events happen in the right order.
+void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
base::MutexGuard guard(&callbacks_mutex_);
- // If we are *not* compiling in tiering mode, then all units are counted as
- // baseline units.
- bool is_tiering_mode = compile_mode_ == CompileMode::kTiering;
- bool is_tiering_unit = is_tiering_mode && tier == ExecutionTier::kOptimized;
-
- // Sanity check: If we are not in tiering mode, there cannot be outstanding
- // tiering units.
- DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
-
- bool baseline_finished = false;
- bool tiering_finished = false;
- if (is_tiering_unit) {
- DCHECK_LT(0, outstanding_tiering_units_);
- --outstanding_tiering_units_;
- tiering_finished = outstanding_tiering_units_ == 0;
- // If baseline compilation has not finished yet, then also trigger
- // {kFinishedBaselineCompilation}.
- baseline_finished = tiering_finished && outstanding_baseline_units_ > 0;
- } else {
- DCHECK_LT(0, outstanding_baseline_units_);
- --outstanding_baseline_units_;
- // If we are in tiering mode and tiering finished before, then do not
- // trigger baseline finished.
- baseline_finished = outstanding_baseline_units_ == 0 &&
- (!is_tiering_mode || outstanding_tiering_units_ > 0);
- // If we are not tiering, then we also trigger the "top tier finished"
- // event when baseline compilation is finished.
- tiering_finished = baseline_finished && !is_tiering_mode;
- }
-
- if (baseline_finished) {
- for (auto& callback : callbacks_)
- callback(CompilationEvent::kFinishedBaselineCompilation);
- }
- if (tiering_finished) {
- for (auto& callback : callbacks_)
- callback(CompilationEvent::kFinishedTopTierCompilation);
- // Clear the callbacks because no more events will be delivered.
- callbacks_.clear();
- }
+ // Assume an order of execution tiers that represents the quality of their
+ // generated code.
+ static_assert(ExecutionTier::kNone < ExecutionTier::kInterpreter &&
+ ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+
+ auto module = native_module_->module();
+ auto enabled_features = native_module_->enabled_features();
+ for (WasmCode* code : code_vector) {
+ DCHECK_NOT_NULL(code);
+ DCHECK_NE(code->tier(), ExecutionTier::kNone);
+ native_module_->engine()->LogCode(code);
+
+ // Skip lazily compiled code; it does not count towards the completion of
+ // baseline or top tier compilation.
+ int func_index = code->index();
+ if (IsLazyCompilation(module, native_module_, enabled_features,
+ func_index)) {
+ continue;
+ }
- if (code != nullptr) native_module_->engine()->LogCode(code);
+ // Determine whether we are reaching baseline or top tier with the given
+ // code.
+ uint32_t slot_index = code->index() - module->num_imported_functions;
+ ExecutionTierPair requested_tiers = GetRequestedExecutionTiers(
+ module, compile_mode(), enabled_features, func_index);
+ DCHECK_EQ(highest_execution_tier_.size(), module->num_declared_functions);
+ ExecutionTier prior_tier = highest_execution_tier_[slot_index];
+ bool had_reached_baseline = prior_tier >= requested_tiers.baseline_tier;
+ bool had_reached_top_tier = prior_tier >= requested_tiers.top_tier;
+ DCHECK_IMPLIES(had_reached_baseline, prior_tier > ExecutionTier::kNone);
+ bool reaches_baseline = !had_reached_baseline;
+ bool reaches_top_tier =
+ !had_reached_top_tier && code->tier() >= requested_tiers.top_tier;
+ DCHECK_IMPLIES(reaches_baseline,
+ code->tier() >= requested_tiers.baseline_tier);
+ DCHECK_IMPLIES(reaches_top_tier, had_reached_baseline || reaches_baseline);
+
+ // Remember compilation state before update.
+ bool had_completed_baseline_compilation =
+ outstanding_baseline_functions_ == 0;
+ bool had_completed_top_tier_compilation =
+ outstanding_top_tier_functions_ == 0;
+
+ // Update compilation state.
+ if (code->tier() > prior_tier) {
+ highest_execution_tier_[slot_index] = code->tier();
+ }
+ if (reaches_baseline) outstanding_baseline_functions_--;
+ if (reaches_top_tier) outstanding_top_tier_functions_--;
+ DCHECK_LE(0, outstanding_baseline_functions_);
+ DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+
+ // Conclude if we are completing baseline or top tier compilation.
+ bool completes_baseline_compilation = !had_completed_baseline_compilation &&
+ outstanding_baseline_functions_ == 0;
+ bool completes_top_tier_compilation = !had_completed_top_tier_compilation &&
+ outstanding_top_tier_functions_ == 0;
+ DCHECK_IMPLIES(
+ completes_top_tier_compilation,
+ had_completed_baseline_compilation || completes_baseline_compilation);
+
+ // Trigger callbacks.
+ if (completes_baseline_compilation) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedBaselineCompilation);
+ }
+ }
+ if (completes_top_tier_compilation) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ // Clear the callbacks because no more events will be delivered.
+ callbacks_.clear();
+ }
+ }
}
-void CompilationStateImpl::RestartBackgroundCompileTask() {
- auto task =
- native_module_->engine()->NewBackgroundCompileTask<BackgroundCompileTask>(
- background_compile_token_, async_counters_);
-
- if (baseline_compilation_finished()) {
- V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
- std::move(task));
- } else {
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+void CompilationStateImpl::OnBackgroundTaskStopped(
+ int task_id, const WasmFeatures& detected) {
+ {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(0, std::count(available_task_ids_.begin(),
+ available_task_ids_.end(), task_id));
+ DCHECK_GT(max_background_tasks_, available_task_ids_.size());
+ available_task_ids_.push_back(task_id);
+ UnionFeaturesInto(&detected_features_, detected);
}
-}
-void CompilationStateImpl::ReportDetectedFeatures(
- const WasmFeatures& detected) {
- base::MutexGuard guard(&mutex_);
- UnionFeaturesInto(&detected_features_, detected);
+ // The background task could have stopped while we were adding new units, or
+ // because it reached its deadline. In both cases we need to restart tasks to
+ // avoid a potential deadlock.
+ RestartBackgroundTasks();
}
-void CompilationStateImpl::OnBackgroundTaskStopped(
+void CompilationStateImpl::UpdateDetectedFeatures(
const WasmFeatures& detected) {
base::MutexGuard guard(&mutex_);
- DCHECK_LE(1, num_background_tasks_);
- --num_background_tasks_;
UnionFeaturesInto(&detected_features_, detected);
}
-void CompilationStateImpl::PublishDetectedFeatures(
- Isolate* isolate, const WasmFeatures& detected) {
+void CompilationStateImpl::PublishDetectedFeatures(Isolate* isolate) {
// Notifying the isolate of the feature counts must take place under
// the mutex, because even if we have finished baseline compilation,
// tiering compilations may still occur in the background.
base::MutexGuard guard(&mutex_);
- UnionFeaturesInto(&detected_features_, detected);
UpdateFeatureUseCounts(isolate, detected_features_);
}
void CompilationStateImpl::RestartBackgroundTasks() {
- int num_restart;
+ // Create new tasks, but only spawn them after releasing the mutex, because
+ // some platforms (e.g. the predictable platform) might execute tasks right
+ // away.
+ std::vector<std::unique_ptr<Task>> new_tasks;
{
base::MutexGuard guard(&mutex_);
+ // Explicit fast path (quite common): If no more task ids are available
+ // (i.e. {max_background_tasks_} tasks are already running), spawn nothing.
+ if (available_task_ids_.empty()) return;
// No need to restart tasks if compilation already failed.
if (failed()) return;
- DCHECK_LE(num_background_tasks_, max_background_tasks_);
- if (num_background_tasks_ == max_background_tasks_) return;
- size_t num_compilation_units =
- baseline_compilation_units_.size() + tiering_compilation_units_.size();
- num_restart = max_background_tasks_ - num_background_tasks_;
- DCHECK_LE(0, num_restart);
- if (num_compilation_units < static_cast<size_t>(num_restart)) {
- num_restart = static_cast<int>(num_compilation_units);
+ size_t max_num_restart = compilation_unit_queues_.GetTotalSize();
+
+ while (!available_task_ids_.empty() && max_num_restart-- > 0) {
+ int task_id = available_task_ids_.back();
+ available_task_ids_.pop_back();
+ new_tasks.emplace_back(
+ native_module_->engine()
+ ->NewBackgroundCompileTask<BackgroundCompileTask>(
+ background_compile_token_, async_counters_, task_id));
}
- num_background_tasks_ += num_restart;
}
- for (; num_restart > 0; --num_restart) {
- RestartBackgroundCompileTask();
+ if (baseline_compilation_finished()) {
+ for (auto& task : new_tasks) {
+ V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
+ std::move(task));
+ }
+ } else {
+ for (auto& task : new_tasks) {
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+ }
}
}
-void CompilationStateImpl::SetError(uint32_t func_index,
- const WasmError& error) {
- DCHECK(error.has_error());
- std::unique_ptr<CompilationError> compile_error =
- base::make_unique<CompilationError>(func_index, error);
- CompilationError* expected = nullptr;
- bool set = compile_error_.compare_exchange_strong(
- expected, compile_error.get(), std::memory_order_acq_rel);
- // Ignore all but the first error. If the previous value is not nullptr, just
- // return (and free the allocated error).
- if (!set) return;
- // If set successfully, give up ownership.
- compile_error.release();
+void CompilationStateImpl::SetError() {
+ bool expected = false;
+ if (!compile_failed_.compare_exchange_strong(expected, true,
+ std::memory_order_relaxed)) {
+ return; // Already failed before.
+ }
+
base::MutexGuard callbacks_guard(&callbacks_mutex_);
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFailedCompilation);
@@ -1714,12 +2108,13 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
DCHECK(name_chars >= 0 && name_chars < kBufferSize);
MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars), TENURED);
+ VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ AllocationType::kOld);
script->set_name(*name_str.ToHandleChecked());
if (source_map_url.size() != 0) {
MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
- CStrVector(source_map_url.c_str()), TENURED);
+ CStrVector(source_map_url.c_str()), AllocationType::kOld);
script->set_source_mapping_url(*src_map_str.ToHandleChecked());
}
return script;
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 04f0bd2042..cf5098f613 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -37,16 +37,11 @@ class NativeModule;
class WasmCode;
struct WasmModule;
-std::unique_ptr<NativeModule> CompileToNativeModule(
+std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out);
-void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
- ErrorThrower* thrower,
- const WasmModule* wasm_module,
- NativeModule* native_module);
-
V8_EXPORT_PRIVATE
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers);
@@ -56,8 +51,7 @@ V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
const std::string& source_map_url);
// Triggered by the WasmCompileLazy builtin.
-// Returns the instruction start of the compiled code object.
-Address CompileLazy(Isolate*, NativeModule*, uint32_t func_index);
+void CompileLazy(Isolate*, NativeModule*, uint32_t func_index);
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
@@ -110,7 +104,8 @@ class AsyncCompileJob {
void FinishCompile();
- void AsyncCompileFailed(const WasmError&);
+ void DecodeFailed(const WasmError&);
+ void AsyncCompileFailed();
void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
@@ -155,7 +150,7 @@ class AsyncCompileJob {
// Copy of the module wire bytes, moved into the {native_module_} on its
// creation.
std::unique_ptr<byte[]> bytes_copy_;
- // Reference to the wire bytes (hold in {bytes_copy_} or as part of
+ // Reference to the wire bytes (held in {bytes_copy_} or as part of
// {native_module_}).
ModuleWireBytes wire_bytes_;
Handle<Context> native_context_;
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index c60eeba44f..f27cdd59ab 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -30,6 +30,7 @@ namespace {
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
+constexpr char kCompilationHintsString[] = "compilationHints";
template <size_t N>
constexpr size_t num_chars(const char (&)[N]) {
@@ -88,6 +89,8 @@ const char* SectionName(SectionCode code) {
return kNameString;
case kSourceMappingURLSectionCode:
return kSourceMappingURLString;
+ case kCompilationHintsSectionCode:
+ return kCompilationHintsString;
default:
return "<unknown>";
}
@@ -117,8 +120,8 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
return kWasmF32;
case WasmInitExpr::kF64Const:
return kWasmF64;
- case WasmInitExpr::kAnyRefConst:
- return kWasmAnyRef;
+ case WasmInitExpr::kRefNullConst:
+ return kWasmNullRef;
default:
UNREACHABLE();
}
@@ -223,7 +226,8 @@ class WasmSectionIterator {
}
if (section_code == kUnknownSectionCode) {
- // Check for the known "name" or "sourceMappingURL" section.
+ // Check for the known "name", "sourceMappingURL", or "compilationHints"
+ // section.
section_code =
ModuleDecoder::IdentifyUnknownSection(decoder_, section_end_);
// As a side effect, the above function will forward the decoder to after
@@ -386,14 +390,20 @@ class ModuleDecoderImpl : public Decoder {
kExportSectionCode))
return;
break;
- case kSourceMappingURLSectionCode:
- // sourceMappingURL is a custom section and currently can occur anywhere
- // in the module. In case of multiple sourceMappingURL sections, all
- // except the first occurrence are ignored.
case kNameSectionCode:
// TODO(titzer): report out of place name section as a warning.
// Be lenient with placement of name section. All except first
// occurrence are ignored.
+ case kSourceMappingURLSectionCode:
+ // sourceMappingURL is a custom section and currently can occur anywhere
+ // in the module. In case of multiple sourceMappingURL sections, all
+ // except the first occurrence are ignored.
+ case kCompilationHintsSectionCode:
+ // TODO(frgossen): report out of place compilation hints section as a
+ // warning.
+ // Be lenient with placement of compilation hints section. All except
+ // first occurrence after function section and before code section are
+ // ignored.
break;
default:
next_ordered_section_ = section_code + 1;
@@ -442,6 +452,15 @@ class ModuleDecoderImpl : public Decoder {
case kSourceMappingURLSectionCode:
DecodeSourceMappingURLSection();
break;
+ case kCompilationHintsSectionCode:
+ if (enabled_features_.compilation_hints) {
+ DecodeCompilationHintsSection();
+ } else {
+ // Ignore this section if the feature is disabled. It is an optional
+ // custom section anyway.
+ consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
+ break;
case kDataCountSectionCode:
if (enabled_features_.bulk_memory) {
DecodeDataCountSection();
@@ -506,7 +525,7 @@ class ModuleDecoderImpl : public Decoder {
static_cast<ImportExportKindCode>(consume_u8("import kind"));
switch (import->kind) {
case kExternalFunction: {
- // ===== Imported function =======================================
+ // ===== Imported function ===========================================
import->index = static_cast<uint32_t>(module_->functions.size());
module_->num_imported_functions++;
module_->functions.push_back({nullptr, // sig
@@ -521,7 +540,7 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExternalTable: {
- // ===== Imported table ==========================================
+ // ===== Imported table ==============================================
if (!AddTable(module_.get())) break;
import->index = static_cast<uint32_t>(module_->tables.size());
module_->num_imported_tables++;
@@ -544,7 +563,7 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExternalMemory: {
- // ===== Imported memory =========================================
+ // ===== Imported memory =============================================
if (!AddMemory(module_.get())) break;
uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
consume_resizable_limits(
@@ -554,7 +573,7 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExternalGlobal: {
- // ===== Imported global =========================================
+ // ===== Imported global =============================================
import->index = static_cast<uint32_t>(module_->globals.size());
module_->globals.push_back(
{kWasmStmt, false, WasmInitExpr(), {0}, true, false});
@@ -567,7 +586,7 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExternalException: {
- // ===== Imported exception ======================================
+ // ===== Imported exception ==========================================
if (!enabled_features_.eh) {
errorf(pos, "unknown import kind 0x%02x", import->kind);
break;
@@ -919,7 +938,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeNameSection() {
// TODO(titzer): find a way to report name errors as warnings.
- // ignore all but the first occurrence of name section.
+ // Ignore all but the first occurrence of name section.
if (!has_seen_unordered_section(kNameSectionCode)) {
set_seen_unordered_section(kNameSectionCode);
// Use an inner decoder so that errors don't fail the outer decoder.
@@ -961,6 +980,97 @@ class ModuleDecoderImpl : public Decoder {
consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
+ void DecodeCompilationHintsSection() {
+ TRACE("DecodeCompilationHints module+%d\n", static_cast<int>(pc_ - start_));
+
+ // TODO(frgossen): Find a way to report compilation hint errors as warnings.
+ // All except first occurrence after function section and before code
+ // section are ignored.
+ const bool before_function_section =
+ next_ordered_section_ <= kFunctionSectionCode;
+ const bool after_code_section = next_ordered_section_ > kCodeSectionCode;
+ if (before_function_section || after_code_section ||
+ has_seen_unordered_section(kCompilationHintsSectionCode)) {
+ return;
+ }
+ set_seen_unordered_section(kCompilationHintsSectionCode);
+
+ // TODO(frgossen): Propagate errors to the outer decoder in the experimental
+ // phase.
+ // We should use an inner decoder later and propagate its errors as
+ // warnings.
+ Decoder& decoder = *this;
+ // Decoder decoder(start_, pc_, end_, buffer_offset_);
+
+ // Ensure exactly one compilation hint per function.
+ uint32_t hint_count = decoder.consume_u32v("compilation hint count");
+ if (hint_count != module_->num_declared_functions) {
+ decoder.errorf(decoder.pc(), "Expected %u compilation hints (%u found)",
+ module_->num_declared_functions, hint_count);
+ }
+
+ // Decode sequence of compilation hints.
+ if (decoder.ok()) {
+ module_->compilation_hints.reserve(hint_count);
+ module_->num_lazy_compilation_hints = 0;
+ }
+ for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) {
+ TRACE("DecodeCompilationHints[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ // Compilation hints are encoded in one byte each.
+ // +-------+----------+---------------+------------------+
+ // | 2 bit | 2 bit | 2 bit | 2 bit |
+ // | ... | Top tier | Baseline tier | Lazy compilation |
+ // +-------+----------+---------------+------------------+
+ uint8_t hint_byte = decoder.consume_u8("compilation hint");
+ if (!decoder.ok()) break;
+
+ // Decode compilation hint.
+ WasmCompilationHint hint;
+ hint.strategy =
+ static_cast<WasmCompilationHintStrategy>(hint_byte & 0x03);
+ hint.baseline_tier =
+ static_cast<WasmCompilationHintTier>(hint_byte >> 2 & 0x3);
+ hint.top_tier =
+ static_cast<WasmCompilationHintTier>(hint_byte >> 4 & 0x3);
+
+ // Check strategy.
+ if (hint.strategy > WasmCompilationHintStrategy::kEager) {
+ decoder.errorf(decoder.pc(),
+ "Invalid compilation hint %#x (unknown strategy)",
+ hint_byte);
+ }
+
+ // Ensure that the top tier never downgrades a compilation result.
+ // If baseline and top tier are the same, compilation will be invoked only
+ // once.
+ if (hint.top_tier < hint.baseline_tier &&
+ hint.top_tier != WasmCompilationHintTier::kDefault) {
+ decoder.errorf(decoder.pc(),
+ "Invalid compilation hint %#x (forbidden downgrade)",
+ hint_byte);
+ }
+
+ // Happily accept compilation hint.
+ if (decoder.ok()) {
+ if (hint.strategy == WasmCompilationHintStrategy::kLazy) {
+ module_->num_lazy_compilation_hints++;
+ }
+ module_->compilation_hints.push_back(std::move(hint));
+ }
+ }
+
+ // If section was invalid reset compilation hints.
+ if (decoder.failed()) {
+ module_->compilation_hints.clear();
+ module_->num_lazy_compilation_hints = 0;
+ }
+
+ // TODO(frgossen): Skip the whole compilation hints section in the outer
+ // decoder if the inner decoder was used.
+ // consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
+
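A standalone sketch (illustration only; the sample byte is hypothetical, and the real decoder maps the extracted values onto enum types) of the bit extraction used for a single compilation hint byte above.

#include <cstdint>
#include <cstdio>

int main() {
  // Per the layout comment above: bits 0-1 strategy, 2-3 baseline tier,
  // 4-5 top tier, 6-7 currently unused.
  uint8_t hint_byte = 0x25;                        // 0b00'10'01'01
  uint8_t strategy = hint_byte & 0x03;             // 1
  uint8_t baseline_tier = (hint_byte >> 2) & 0x3;  // 1
  uint8_t top_tier = (hint_byte >> 4) & 0x3;       // 2 (>= baseline, so valid)
  printf("strategy=%d baseline=%d top=%d\n", strategy, baseline_tier, top_tier);
  return 0;
}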
void DecodeDataCountSection() {
module_->num_declared_data_segments =
consume_count("data segments count", kV8MaxWasmDataSegments);
@@ -1101,9 +1211,9 @@ class ModuleDecoderImpl : public Decoder {
Counters* counters_ = nullptr;
// The type section is the first section in a module.
uint8_t next_ordered_section_ = kFirstSectionInModule;
- // We store next_ordered_section_ as uint8_t instead of SectionCode so that we
- // can increment it. This static_assert should make sure that SectionCode does
- // not get bigger than uint8_t accidentially.
+ // We store next_ordered_section_ as uint8_t instead of SectionCode so that
+ // we can increment it. This static_assert should make sure that SectionCode
+ // does not get bigger than uint8_t accidentally.
static_assert(sizeof(ModuleDecoderImpl::next_ordered_section_) ==
sizeof(SectionCode),
"type mismatch");
@@ -1169,7 +1279,7 @@ class ModuleDecoderImpl : public Decoder {
ValueTypes::TypeName(module->globals[other_index].type));
}
} else {
- if (global->type != TypeOf(module, global->init)) {
+ if (!ValueTypes::IsSubType(global->type, TypeOf(module, global->init))) {
errorf(pos, "type error in global initialization, expected %s, got %s",
ValueTypes::TypeName(global->type),
ValueTypes::TypeName(TypeOf(module, global->init)));
@@ -1185,7 +1295,7 @@ class ModuleDecoderImpl : public Decoder {
for (WasmGlobal& global : module->globals) {
if (global.mutability && global.imported) {
global.index = num_imported_mutable_globals++;
- } else if (global.type == ValueType::kWasmAnyRef) {
+ } else if (ValueTypes::IsReferenceType(global.type)) {
global.offset = tagged_offset;
// All entries in the tagged_globals_buffer have size 1.
tagged_offset++;
@@ -1434,8 +1544,8 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExprRefNull: {
- if (enabled_features_.anyref) {
- expr.kind = WasmInitExpr::kAnyRefConst;
+ if (enabled_features_.anyref || enabled_features_.eh) {
+ expr.kind = WasmInitExpr::kRefNullConst;
len = 0;
break;
}
@@ -1491,6 +1601,9 @@ class ModuleDecoderImpl : public Decoder {
case kLocalAnyRef:
if (enabled_features_.anyref) return kWasmAnyRef;
break;
+ case kLocalExceptRef:
+ if (enabled_features_.eh) return kWasmExceptRef;
+ break;
default:
break;
}
@@ -1575,9 +1688,9 @@ class ModuleDecoderImpl : public Decoder {
flags = consume_u32v("flags");
if (failed()) return;
} else {
- // Without the bulk memory proposal, we should still read the table index.
- // This is the same as reading the `ActiveWithIndex` flag with the bulk
- // memory proposal.
+ // Without the bulk memory proposal, we should still read the table
+ // index. This is the same as reading the `ActiveWithIndex` flag with
+ // the bulk memory proposal.
flags = SegmentFlags::kActiveWithIndex;
}
@@ -1743,6 +1856,11 @@ SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
kSourceMappingURLString,
num_chars(kSourceMappingURLString)) == 0) {
return kSourceMappingURLSectionCode;
+ } else if (string.length() == num_chars(kCompilationHintsString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kCompilationHintsString,
+ num_chars(kCompilationHintsString)) == 0) {
+ return kCompilationHintsSectionCode;
}
return kUnknownSectionCode;
}
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 95c449640c..48b4129eb3 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -27,18 +27,18 @@ inline bool IsValidSectionCode(uint8_t byte) {
const char* SectionName(SectionCode code);
-typedef Result<std::shared_ptr<WasmModule>> ModuleResult;
-typedef Result<std::unique_ptr<WasmFunction>> FunctionResult;
-typedef std::vector<std::pair<int, int>> FunctionOffsets;
-typedef Result<FunctionOffsets> FunctionOffsetsResult;
+using ModuleResult = Result<std::shared_ptr<WasmModule>>;
+using FunctionResult = Result<std::unique_ptr<WasmFunction>>;
+using FunctionOffsets = std::vector<std::pair<int, int>>;
+using FunctionOffsetsResult = Result<FunctionOffsets>;
struct AsmJsOffsetEntry {
int byte_offset;
int source_position_call;
int source_position_number_conversion;
};
-typedef std::vector<std::vector<AsmJsOffsetEntry>> AsmJsOffsets;
-typedef Result<AsmJsOffsets> AsmJsOffsetsResult;
+using AsmJsOffsets = std::vector<std::vector<AsmJsOffsetEntry>>;
+using AsmJsOffsetsResult = Result<AsmJsOffsets>;
struct LocalName {
int local_index;
@@ -138,8 +138,7 @@ class ModuleDecoder {
bool ok();
// Translates the unknown section that decoder is pointing to to an extended
- // SectionCode if the unknown section is known to decoder. Currently this only
- // handles the name section.
+ // SectionCode if the unknown section is known to the decoder.
// The decoder is expected to point after the section length and just before
// the identifier string of the unknown section.
// If a SectionCode other than kUnknownSectionCode is returned, the decoder
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index d6a9bc3dfa..4dc61a91bf 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -6,12 +6,13 @@
#include "src/asmjs/asm-js.h"
#include "src/conversions-inl.h"
-#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
+#include "src/counters.h"
#include "src/property-descriptor.h"
+#include "src/tracing/trace-event.h"
#include "src/utils.h"
-#include "src/wasm/js-to-wasm-wrapper-cache-inl.h"
#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-import-wrapper-cache-inl.h"
+#include "src/wasm/wasm-external-refs.h"
+#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -47,13 +48,6 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
UNREACHABLE();
}
}
-
-// Represents the initialized state of a table.
-struct TableInstance {
- Handle<WasmTableObject> table_object; // WebAssembly.Table instance
- Handle<FixedArray> js_functions; // JSFunctions exported
- size_t table_size;
-};
} // namespace
// A helper class to simplify instantiating a module from a module object.
@@ -87,17 +81,10 @@ class InstanceBuilder {
MaybeHandle<JSArrayBuffer> memory_;
Handle<JSArrayBuffer> untagged_globals_;
Handle<FixedArray> tagged_globals_;
- std::vector<TableInstance> table_instances_;
std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
Handle<WasmExportedFunction> start_function_;
- JSToWasmWrapperCache js_to_wasm_cache_;
std::vector<SanitizedImport> sanitized_imports_;
- UseTrapHandler use_trap_handler() const {
- return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
- : kNoTrapHandler;
- }
-
// Helper routines to print out errors with imports.
#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
void Report##TYPE(const char* error, uint32_t index, \
@@ -249,19 +236,16 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
SanitizeImports();
if (thrower_->error()) return {};
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
// From here on, we expect the build pipeline to run without exiting to JS.
DisallowJavascriptExecution no_js(isolate_);
// Record build time into correct bucket, then build instance.
TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
isolate_->counters(), module_->origin, wasm_instantiate, module_time));
+ NativeModule* native_module = module_object_->native_module();
//--------------------------------------------------------------------------
// Allocate the memory array buffer.
//--------------------------------------------------------------------------
- // We allocate the memory buffer before cloning or reusing the compiled module
- // so we will know whether we need to recompile with bounds checks.
uint32_t initial_pages = module_->initial_pages;
auto initial_pages_counter = SELECT_WASM_COUNTER(
isolate_->counters(), module_->origin, wasm, min_mem_pages_count);
@@ -282,10 +266,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
memory->set_is_detachable(false);
- DCHECK_IMPLIES(use_trap_handler(), module_->origin == kAsmJsOrigin ||
- memory->is_wasm_memory() ||
- memory->backing_store() == nullptr);
- } else if (initial_pages > 0 || use_trap_handler()) {
+ DCHECK_IMPLIES(native_module->use_trap_handler(),
+ module_->origin == kAsmJsOrigin ||
+ memory->is_wasm_memory() ||
+ memory->backing_store() == nullptr);
+ } else if (initial_pages > 0 || native_module->use_trap_handler()) {
// We need to unconditionally create a guard region if using trap handlers,
// even when the size is zero to prevent null-dereference issues
// (e.g. https://crbug.com/769637).
@@ -299,38 +284,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Recompile module if using trap handlers but could not get guarded memory
- //--------------------------------------------------------------------------
- if (module_->origin == kWasmOrigin && use_trap_handler()) {
- // Make sure the memory has suitable guard regions.
- WasmMemoryTracker* const memory_tracker =
- isolate_->wasm_engine()->memory_tracker();
-
- if (!memory_tracker->HasFullGuardRegions(
- memory_.ToHandleChecked()->backing_store())) {
- if (!FLAG_wasm_trap_handler_fallback) {
- thrower_->LinkError(
- "Provided memory is lacking guard regions but fallback was "
- "disabled.");
- return {};
- }
-
- TRACE("Recompiling module without bounds checks\n");
- ErrorThrower thrower(isolate_, "recompile");
- auto native_module = module_object_->native_module();
- CompileNativeModuleWithExplicitBoundsChecks(isolate_, &thrower, module_,
- native_module);
- if (thrower.error()) {
- return {};
- }
- DCHECK(!native_module->use_trap_handler());
- }
- }
-
- //--------------------------------------------------------------------------
// Create the WebAssembly.Instance object.
//--------------------------------------------------------------------------
- NativeModule* native_module = module_object_->native_module();
TRACE("New module instantiation for %p\n", native_module);
Handle<WasmInstanceObject> instance =
WasmInstanceObject::New(isolate_, module_object_);
@@ -347,8 +302,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
thrower_->RangeError("Out of memory: wasm globals");
return {};
}
- untagged_globals_ =
- isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+ untagged_globals_ = isolate_->factory()->NewJSArrayBuffer(
+ SharedFlag::kNotShared, AllocationType::kOld);
constexpr bool is_external = false;
constexpr bool is_wasm_memory = false;
JSArrayBuffer::Setup(untagged_globals_, isolate_, is_external,
@@ -378,7 +333,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// more than required if multiple globals are imported from the same
// module.
Handle<FixedArray> buffers_array = isolate_->factory()->NewFixedArray(
- module_->num_imported_mutable_globals, TENURED);
+ module_->num_imported_mutable_globals, AllocationType::kOld);
instance->set_imported_mutable_globals_buffers(*buffers_array);
}
@@ -387,8 +342,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
int exceptions_count = static_cast<int>(module_->exceptions.size());
if (exceptions_count > 0) {
- Handle<FixedArray> exception_table =
- isolate_->factory()->NewFixedArray(exceptions_count, TENURED);
+ Handle<FixedArray> exception_table = isolate_->factory()->NewFixedArray(
+ exceptions_count, AllocationType::kOld);
instance->set_exceptions_table(*exception_table);
exception_wrappers_.resize(exceptions_count);
}
@@ -401,13 +356,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
for (int i = module_->num_imported_tables; i < table_count; i++) {
const WasmTable& table = module_->tables[i];
Handle<WasmTableObject> table_obj = WasmTableObject::New(
- isolate_, table.initial_size, table.maximum_size, nullptr);
+ isolate_, table.type, table.initial_size, table.has_maximum_size,
+ table.maximum_size, nullptr);
tables->set(i, *table_obj);
}
instance->set_tables(*tables);
- table_instances_.resize(table_count);
-
//--------------------------------------------------------------------------
// Process the imports for the module.
//--------------------------------------------------------------------------
@@ -457,29 +411,39 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
}
- //--------------------------------------------------------------------------
- // Check that indirect function table segments are within bounds.
- //--------------------------------------------------------------------------
- for (const WasmElemSegment& elem_segment : module_->elem_segments) {
- if (!elem_segment.active) continue;
- DCHECK(elem_segment.table_index < table_instances_.size());
- uint32_t base = EvalUint32InitExpr(instance, elem_segment.offset);
- size_t table_size = table_instances_[elem_segment.table_index].table_size;
- if (!IsInBounds(base, elem_segment.entries.size(), table_size)) {
- thrower_->LinkError("table initializer is out of bounds");
- return {};
+ // The bulk memory proposal changes the MVP behavior here; the segments are
+ // written as if `memory.init` and `table.init` are executed directly, and
+ // not bounds checked ahead of time.
+ if (!enabled_.bulk_memory) {
+ //--------------------------------------------------------------------------
+ // Check that indirect function table segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (const WasmElemSegment& elem_segment : module_->elem_segments) {
+ if (!elem_segment.active) continue;
+ DCHECK_LT(elem_segment.table_index, table_count);
+ uint32_t base = EvalUint32InitExpr(instance, elem_segment.offset);
+ // Because of imported tables, {table_size} has to come from the table
+ // object itself.
+ auto table_object = handle(WasmTableObject::cast(instance->tables()->get(
+ elem_segment.table_index)),
+ isolate_);
+ size_t table_size = table_object->elements()->length();
+ if (!IsInBounds(base, elem_segment.entries.size(), table_size)) {
+ thrower_->LinkError("table initializer is out of bounds");
+ return {};
+ }
}
- }
- //--------------------------------------------------------------------------
- // Check that memory segments are within bounds.
- //--------------------------------------------------------------------------
- for (const WasmDataSegment& seg : module_->data_segments) {
- if (!seg.active) continue;
- uint32_t base = EvalUint32InitExpr(instance, seg.dest_addr);
- if (!IsInBounds(base, seg.source.length(), instance->memory_size())) {
- thrower_->LinkError("data segment is out of bounds");
- return {};
+ //--------------------------------------------------------------------------
+ // Check that memory segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (const WasmDataSegment& seg : module_->data_segments) {
+ if (!seg.active) continue;
+ uint32_t base = EvalUint32InitExpr(instance, seg.dest_addr);
+ if (!IsInBounds(base, seg.source.length(), instance->memory_size())) {
+ thrower_->LinkError("data segment is out of bounds");
+ return {};
+ }
}
}
@@ -494,6 +458,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
if (table_count > 0) {
LoadTableSegments(instance);
+ if (thrower_->error()) return {};
}
//--------------------------------------------------------------------------
@@ -501,6 +466,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
if (module_->data_segments.size() > 0) {
LoadDataSegments(instance);
+ if (thrower_->error()) return {};
}
//--------------------------------------------------------------------------
@@ -509,26 +475,15 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Set all breakpoints that were set on the shared module.
WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance);
- if (FLAG_wasm_interpret_all && module_->origin == kWasmOrigin) {
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- std::vector<int> func_indexes;
- for (int func_index = num_imported_functions,
- num_wasm_functions = static_cast<int>(module_->functions.size());
- func_index < num_wasm_functions; ++func_index) {
- func_indexes.push_back(func_index);
- }
- WasmDebugInfo::RedirectToInterpreter(debug_info, VectorOf(func_indexes));
- }
-
//--------------------------------------------------------------------------
// Create a wrapper for the start function.
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
int start_index = module_->start_function_index;
auto& function = module_->functions[start_index];
- Handle<Code> wrapper_code = js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
- isolate_, function.sig, function.imported);
+ Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
+ isolate_, function.sig, function.imported)
+ .ToHandleChecked();
// TODO(clemensh): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
@@ -568,7 +523,6 @@ MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
// We pre-validated in the js-api layer that the ffi object is present, and
// a JSObject, if the module has imports.
DCHECK(!ffi_.is_null());
-
// Look up the module first.
MaybeHandle<Object> result = Object::GetPropertyOrElement(
isolate_, ffi_.ToHandleChecked(), module_name);
@@ -636,16 +590,35 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
Vector<const uint8_t> wire_bytes =
module_object_->native_module()->wire_bytes();
for (const WasmDataSegment& segment : module_->data_segments) {
- uint32_t source_size = segment.source.length();
- // Segments of size == 0 are just nops.
- if (source_size == 0) continue;
- // Passive segments are not copied during instantiation.
- if (!segment.active) continue;
- uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
- DCHECK(IsInBounds(dest_offset, source_size, instance->memory_size()));
- byte* dest = instance->memory_start() + dest_offset;
- const byte* src = wire_bytes.start() + segment.source.offset();
- memcpy(dest, src, source_size);
+ uint32_t size = segment.source.length();
+
+ if (enabled_.bulk_memory) {
+ // Passive segments are not copied during instantiation.
+ if (!segment.active) continue;
+
+ uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
+ bool ok = ClampToBounds(dest_offset, &size,
+ static_cast<uint32_t>(instance->memory_size()));
+ Address dest_addr =
+ reinterpret_cast<Address>(instance->memory_start()) + dest_offset;
+ Address src_addr = reinterpret_cast<Address>(wire_bytes.start()) +
+ segment.source.offset();
+ memory_copy_wrapper(dest_addr, src_addr, size);
+ if (!ok) {
+ thrower_->LinkError("data segment is out of bounds");
+ return;
+ }
+ } else {
+ DCHECK(segment.active);
+ // Segments of size == 0 are just nops.
+ if (size == 0) continue;
+
+ uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
+ DCHECK(IsInBounds(dest_offset, size, instance->memory_size()));
+ byte* dest = instance->memory_start() + dest_offset;
+ const byte* src = wire_bytes.start() + segment.source.offset();
+ memcpy(dest, src, size);
+ }
}
}
@@ -846,13 +819,11 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
return false;
}
const WasmTable& table = module_->tables[table_index];
- TableInstance& table_instance = table_instances_[table_index];
- table_instance.table_object = Handle<WasmTableObject>::cast(value);
- instance->set_table_object(*table_instance.table_object);
- table_instance.js_functions =
- Handle<FixedArray>(table_instance.table_object->elements(), isolate_);
- int imported_table_size = table_instance.js_functions->length();
+ instance->tables()->set(table_index, *value);
+ auto table_object = Handle<WasmTableObject>::cast(value);
+
+ int imported_table_size = table_object->elements().length();
if (imported_table_size < static_cast<int>(table.initial_size)) {
thrower_->LinkError("table import %d is smaller than initial %d, got %u",
import_index, table.initial_size, imported_table_size);
@@ -860,8 +831,12 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
}
if (table.has_maximum_size) {
- int64_t imported_maximum_size =
- table_instance.table_object->maximum_length()->Number();
+ if (table_object->maximum_length()->IsUndefined(isolate_)) {
+ thrower_->LinkError("table import %d has no maximum length, expected %d",
+ import_index, table.maximum_size);
+ return false;
+ }
+ int64_t imported_maximum_size = table_object->maximum_length()->Number();
if (imported_maximum_size < 0) {
thrower_->LinkError("table import %d has no maximum length, expected %d",
import_index, table.maximum_size);
@@ -880,31 +855,37 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
if (!instance->has_indirect_function_table()) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
instance, imported_table_size);
- table_instances_[table_index].table_size = imported_table_size;
}
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
for (int i = 0; i < imported_table_size; ++i) {
- Handle<Object> val(table_instance.js_functions->get(i), isolate_);
- // TODO(mtrofin): this is the same logic as WasmTableObject::Set:
- // insert in the local table a wrapper from the other module, and add
- // a reference to the owning instance of the other module.
- if (!val->IsJSFunction()) continue;
- if (!WasmExportedFunction::IsWasmExportedFunction(*val)) {
+ bool is_valid;
+ bool is_null;
+ MaybeHandle<WasmInstanceObject> maybe_target_instance;
+ int function_index;
+ WasmTableObject::GetFunctionTableEntry(isolate_, table_object, i, &is_valid,
+ &is_null, &maybe_target_instance,
+ &function_index);
+ if (!is_valid) {
thrower_->LinkError("table import %d[%d] is not a wasm function",
import_index, i);
return false;
}
- auto target_func = Handle<WasmExportedFunction>::cast(val);
+ if (is_null) continue;
+
Handle<WasmInstanceObject> target_instance =
- handle(target_func->instance(), isolate_);
+ maybe_target_instance.ToHandleChecked();
+ FunctionSig* sig = target_instance->module_object()
+ ->module()
+ ->functions[function_index]
+ .sig;
+
// Look up the signature's canonical id. If there is no canonical
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
- FunctionSig* sig = target_func->sig();
IndirectFunctionTableEntry(instance, i)
.Set(module_->signature_map.Find(*sig), target_instance,
- target_func->function_index());
+ function_index);
}
return true;
}
@@ -980,7 +961,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
DCHECK_LT(global.index, module_->num_imported_mutable_globals);
Handle<Object> buffer;
Address address_or_offset;
- if (global.type == kWasmAnyRef) {
+ if (ValueTypes::IsReferenceType(global.type)) {
static_assert(sizeof(global_object->offset()) <= sizeof(Address),
"The offset into the globals buffer does not fit into "
"the imported_mutable_globals array");
@@ -1058,7 +1039,18 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
return false;
}
- if (global.type == ValueType::kWasmAnyRef) {
+ if (ValueTypes::IsReferenceType(global.type)) {
+ // There shouldn't be any null-ref globals.
+ DCHECK_NE(ValueType::kWasmNullRef, global.type);
+ if (global.type == ValueType::kWasmAnyFunc) {
+ if (!value->IsNull(isolate_) &&
+ !WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ ReportLinkError(
+ "imported anyfunc global must be null or an exported function",
+ import_index, module_name, import_name);
+ return false;
+ }
+ }
WriteGlobalAnyRef(global, value);
return true;
}
@@ -1191,8 +1183,8 @@ void InstanceBuilder::InitGlobals() {
WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
global.init.val.f64_const);
break;
- case WasmInitExpr::kAnyRefConst:
- DCHECK(enabled_.anyref);
+ case WasmInitExpr::kRefNullConst:
+ DCHECK(enabled_.anyref || enabled_.eh);
if (global.imported) break; // We already initialized imported globals.
tagged_globals_->set(global.offset,
@@ -1200,25 +1192,21 @@ void InstanceBuilder::InitGlobals() {
SKIP_WRITE_BARRIER);
break;
case WasmInitExpr::kGlobalIndex: {
- if (global.type == ValueType::kWasmAnyRef) {
- DCHECK(enabled_.anyref);
- int other_offset =
- module_->globals[global.init.val.global_index].offset;
-
- tagged_globals_->set(global.offset,
- tagged_globals_->get(other_offset),
- SKIP_WRITE_BARRIER);
- }
// Initialize with another global.
uint32_t new_offset = global.offset;
uint32_t old_offset =
module_->globals[global.init.val.global_index].offset;
TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
- size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
- ? sizeof(double)
- : sizeof(int32_t);
- memcpy(raw_buffer_ptr(untagged_globals_, new_offset),
- raw_buffer_ptr(untagged_globals_, old_offset), size);
+ if (ValueTypes::IsReferenceType(global.type)) {
+ DCHECK(enabled_.anyref || enabled_.eh);
+ tagged_globals_->set(new_offset, tagged_globals_->get(old_offset));
+ } else {
+ size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
+ ? sizeof(double)
+ : sizeof(int32_t);
+ memcpy(raw_buffer_ptr(untagged_globals_, new_offset),
+ raw_buffer_ptr(untagged_globals_, old_offset), size);
+ }
break;
}
case WasmInitExpr::kNone:
@@ -1257,11 +1245,8 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages,
bool InstanceBuilder::NeedsWrappers() const {
if (module_->num_exported_functions > 0) return true;
- for (auto& table_instance : table_instances_) {
- if (!table_instance.js_functions.is_null()) return true;
- }
for (auto& table : module_->tables) {
- if (table.exported) return true;
+ if (table.type == kWasmAnyFunc) return true;
}
return false;
}
@@ -1365,18 +1350,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
break;
}
case kExternalTable: {
- // Export a table as a WebAssembly.Table object.
- TableInstance& table_instance = table_instances_[exp.index];
- const WasmTable& table = module_->tables[exp.index];
- if (table_instance.table_object.is_null()) {
- uint32_t maximum = table.has_maximum_size ? table.maximum_size
- : FLAG_wasm_max_table_size;
- table_instance.table_object =
- WasmTableObject::New(isolate_, table.initial_size, maximum,
- &table_instance.js_functions);
- }
- instance->set_table_object(*table_instance.table_object);
- desc.set_value(table_instance.table_object);
+ desc.set_value(handle(instance->tables()->get(exp.index), isolate_));
break;
}
case kExternalMemory: {
@@ -1397,7 +1371,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
if (global.mutability && global.imported) {
Handle<FixedArray> buffers_array(
instance->imported_mutable_globals_buffers(), isolate_);
- if (global.type == kWasmAnyRef) {
+ if (ValueTypes::IsReferenceType(global.type)) {
tagged_buffer = buffers_array->GetValueChecked<FixedArray>(
isolate_, global.index);
// For anyref globals we store the relative offset in the
@@ -1420,7 +1394,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
offset = static_cast<uint32_t>(global_addr - backing_store);
}
} else {
- if (global.type == kWasmAnyRef) {
+ if (ValueTypes::IsReferenceType(global.type)) {
tagged_buffer = handle(instance->tagged_globals_buffer(), isolate_);
} else {
untagged_buffer =
@@ -1486,33 +1460,31 @@ void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
table.type == kWasmAnyFunc) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
instance, table.initial_size);
- table_instances_[index].table_size = table.initial_size;
}
}
}
bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
- const TableInstance& table_instance,
- JSToWasmWrapperCache* js_to_wasm_cache,
+ Handle<WasmTableObject> table_object,
const WasmElemSegment& elem_segment, uint32_t dst,
uint32_t src, size_t count) {
// TODO(wasm): Move this functionality into wasm-objects, since it is used
// for both instantiation and in the implementation of the table.init
// instruction.
- if (!IsInBounds(dst, count, table_instance.table_size)) return false;
- if (!IsInBounds(src, count, elem_segment.entries.size())) return false;
+ bool ok =
+ ClampToBounds<size_t>(dst, &count, table_object->elements()->length());
+ // Use & instead of && so the clamp is not short-circuited.
+ ok &= ClampToBounds<size_t>(src, &count, elem_segment.entries.size());
const WasmModule* module = instance->module();
- for (uint32_t i = 0; i < count; ++i) {
+ for (size_t i = 0; i < count; ++i) {
uint32_t func_index = elem_segment.entries[src + i];
int entry_index = static_cast<int>(dst + i);
if (func_index == WasmElemSegment::kNullIndex) {
IndirectFunctionTableEntry(instance, entry_index).clear();
- if (!table_instance.table_object.is_null()) {
- WasmTableObject::Set(isolate, table_instance.table_object, entry_index,
- Handle<JSFunction>::null());
- }
+ WasmTableObject::Set(isolate, table_object, entry_index,
+ isolate->factory()->null_value());
continue;
}
@@ -1523,49 +1495,25 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
IndirectFunctionTableEntry(instance, entry_index)
.Set(sig_id, instance, func_index);
- if (!table_instance.table_object.is_null()) {
- // Update the table object's other dispatch tables.
- MaybeHandle<WasmExportedFunction> wasm_exported_function =
- WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
- func_index);
- if (wasm_exported_function.is_null()) {
- // No JSFunction entry yet exists for this function. Create one.
- // TODO(titzer): We compile JS->wasm wrappers for functions are
- // not exported but are in an exported table. This should be done
- // at module compile time and cached instead.
-
- Handle<Code> wrapper_code =
- js_to_wasm_cache->GetOrCompileJSToWasmWrapper(
- isolate, function->sig, function->imported);
- MaybeHandle<String> func_name;
- if (module->origin == kAsmJsOrigin) {
- // For modules arising from asm.js, honor the names section.
- auto module_object =
- Handle<WasmModuleObject>(instance->module_object(), isolate);
- WireBytesRef func_name_ref = module->LookupFunctionName(
- ModuleWireBytes(module_object->native_module()->wire_bytes()),
- func_index);
- func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate, module_object, func_name_ref)
- .ToHandleChecked();
- }
- wasm_exported_function = WasmExportedFunction::New(
- isolate, instance, func_name, func_index,
- static_cast<int>(function->sig->parameter_count()), wrapper_code);
- WasmInstanceObject::SetWasmExportedFunction(
- isolate, instance, func_index,
- wasm_exported_function.ToHandleChecked());
- }
- table_instance.js_functions->set(
- entry_index, *wasm_exported_function.ToHandleChecked());
- // UpdateDispatchTables() updates all other dispatch tables, since
- // we have not yet added the dispatch table we are currently building.
- WasmTableObject::UpdateDispatchTables(
- isolate, table_instance.table_object, entry_index, function->sig,
- instance, func_index);
+ // Update the table object's other dispatch tables.
+ MaybeHandle<WasmExportedFunction> wasm_exported_function =
+ WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
+ func_index);
+ if (wasm_exported_function.is_null()) {
+ // No JSFunction entry yet exists for this function. Create a {Tuple2}
+ // holding the information to lazily allocate one.
+ WasmTableObject::SetFunctionTablePlaceholder(
+ isolate, table_object, entry_index, instance, func_index);
+ } else {
+ table_object->elements()->set(entry_index,
+ *wasm_exported_function.ToHandleChecked());
}
+ // UpdateDispatchTables() updates all other dispatch tables, since
+ // we have not yet added the dispatch table we are currently building.
+ WasmTableObject::UpdateDispatchTables(isolate, table_object, entry_index,
+ function->sig, instance, func_index);
}
- return true;
+ return ok;
}
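For readers following the bounds handling above: the two ClampToBounds calls shrink {count} against the destination table and the source segment, and the results are combined with '&' so the second clamp still runs even when the first one fails. A minimal standalone sketch of that pattern (the real ClampToBounds helper lives in V8's utilities; its exact signature here is an assumption for illustration):

#include <cstddef>

// Assumed contract: returns false if [start, start + *count) does not fit
// into [0, max), and shrinks *count so that the range does fit.
static bool ClampToBoundsSketch(size_t start, size_t* count, size_t max) {
  if (start > max) {
    *count = 0;
    return false;
  }
  size_t avail = max - start;
  if (*count > avail) {
    *count = avail;
    return false;  // clamped, i.e. the original range was out of bounds
  }
  return true;
}

static bool ClampBoth(size_t dst, size_t src, size_t count,
                      size_t table_size, size_t segment_size) {
  bool ok = ClampToBoundsSketch(dst, &count, table_size);
  ok &= ClampToBoundsSketch(src, &count, segment_size);  // must not short-circuit
  return ok;  // {count} now holds the number of entries that can be copied safely
}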
void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
@@ -1578,19 +1526,33 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
size_t count = elem_segment.entries.size();
bool success = LoadElemSegmentImpl(
- isolate_, instance, table_instances_[elem_segment.table_index],
- &js_to_wasm_cache_, elem_segment, dst, src, count);
- CHECK(success);
+ isolate_, instance,
+ handle(WasmTableObject::cast(
+ instance->tables()->get(elem_segment.table_index)),
+ isolate_),
+ elem_segment, dst, src, count);
+ if (enabled_.bulk_memory) {
+ if (!success) {
+ thrower_->LinkError("table initializer is out of bounds");
+ // Break out instead of returning; we don't want to continue to
+ // initialize any further element segments, but still need to add
+ // dispatch tables below.
+ break;
+ }
+ } else {
+ CHECK(success);
+ }
}
int table_count = static_cast<int>(module_->tables.size());
for (int index = 0; index < table_count; ++index) {
- TableInstance& table_instance = table_instances_[index];
+ if (module_->tables[index].type == kWasmAnyFunc) {
+ auto table_object = handle(
+ WasmTableObject::cast(instance->tables()->get(index)), isolate_);
- // Add the new dispatch table at the end to avoid redundant lookups.
- if (!table_instance.table_object.is_null()) {
- WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
- instance, index);
+ // Add the new dispatch table at the end to avoid redundant lookups.
+ WasmTableObject::AddDispatchTable(isolate_, table_object, instance,
+ index);
}
}
}
@@ -1609,21 +1571,12 @@ void InstanceBuilder::InitializeExceptions(
bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
- JSToWasmWrapperCache js_to_wasm_cache;
-
- Handle<WasmTableObject> table_object;
- Handle<FixedArray> js_functions;
- if (instance->has_table_object()) {
- table_object = Handle<WasmTableObject>(instance->table_object(), isolate);
- js_functions = Handle<FixedArray>(table_object->elements(), isolate);
- }
-
- TableInstance table_instance = {table_object, js_functions,
- instance->indirect_function_table_size()};
-
auto& elem_segment = instance->module()->elem_segments[segment_index];
- return LoadElemSegmentImpl(isolate, instance, table_instance,
- &js_to_wasm_cache, elem_segment, dst, src, count);
+ return LoadElemSegmentImpl(
+ isolate, instance,
+ handle(WasmTableObject::cast(instance->tables()->get(table_index)),
+ isolate),
+ elem_segment, dst, src, count);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 4472eb82e7..6c0403fcb4 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -319,7 +319,8 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
TRACE_STREAMING("ReadBytes of a VarInt\n");
memcpy(remaining_buf.start(), &bytes.first(), new_bytes);
buf.Truncate(offset() + new_bytes);
- Decoder decoder(buf, streaming->module_offset());
+ Decoder decoder(buf,
+ streaming->module_offset() - static_cast<uint32_t>(offset()));
value_ = decoder.consume_u32v(field_name_);
// The number of bytes we actually needed to read.
DCHECK_GT(decoder.pc(), buffer().start());
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 0d469a96b3..4df6b1d32f 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -256,7 +256,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
uint32_t module_offset() const { return module_offset_; }
- bool deserializing() const { return !compiled_module_bytes_.is_empty(); }
+ bool deserializing() const { return !compiled_module_bytes_.empty(); }
std::unique_ptr<StreamingProcessor> processor_;
std::unique_ptr<DecodingState> state_;
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 5cb24e7911..b40a337ca0 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -178,6 +178,27 @@ class StoreType {
// A collection of ValueType-related static methods.
class V8_EXPORT_PRIVATE ValueTypes {
public:
+ static inline bool IsSubType(ValueType expected, ValueType actual) {
+ return (expected == actual) ||
+ (expected == kWasmAnyRef && actual == kWasmNullRef) ||
+ (expected == kWasmAnyRef && actual == kWasmAnyFunc) ||
+ (expected == kWasmAnyRef && actual == kWasmExceptRef) ||
+ (expected == kWasmAnyFunc && actual == kWasmNullRef) ||
+ // TODO(mstarzinger): For now we treat "null_ref" as a sub-type of
+ // "except_ref", which is correct but might change. See here:
+ // https://github.com/WebAssembly/exception-handling/issues/55
+ (expected == kWasmExceptRef && actual == kWasmNullRef);
+ }
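A self-contained sketch of the reference-type lattice encoded by {IsSubType} above, restricted to the reference types and using a local enum so the example compiles on its own (the names only mirror value-type.h):

enum class RefType { kAnyRef, kAnyFunc, kExceptRef, kNullRef };

// anyref sits at the top; nullref is below anyfunc and (for now) exceptref.
static bool IsSubTypeSketch(RefType expected, RefType actual) {
  if (expected == actual) return true;
  if (expected == RefType::kAnyRef) return true;
  return actual == RefType::kNullRef &&
         (expected == RefType::kAnyFunc || expected == RefType::kExceptRef);
}
// e.g. IsSubTypeSketch(RefType::kAnyRef, RefType::kAnyFunc)    -> true
//      IsSubTypeSketch(RefType::kAnyFunc, RefType::kExceptRef) -> false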
+
+ static inline bool IsReferenceType(ValueType type) {
+ // This function assumes at the moment that it is never called with
+ // {kWasmNullRef}. If this assumption is wrong, it should be added to the
+ // result calculation below.
+ DCHECK_NE(type, kWasmNullRef);
+ return type == kWasmAnyRef || type == kWasmAnyFunc ||
+ type == kWasmExceptRef;
+ }
+
static byte MemSize(MachineType type) {
return 1 << i::ElementSizeLog2Of(type.representation());
}
@@ -192,6 +213,10 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 8;
case kWasmS128:
return 16;
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef:
+ return kSystemPointerSize;
default:
UNREACHABLE();
}
@@ -228,6 +253,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
return kLocalS128;
case kWasmAnyRef:
return kLocalAnyRef;
+ case kWasmAnyFunc:
+ return kLocalAnyFunc;
case kWasmExceptRef:
return kLocalExceptRef;
case kWasmStmt:
@@ -247,8 +274,9 @@ class V8_EXPORT_PRIVATE ValueTypes {
return MachineType::Float32();
case kWasmF64:
return MachineType::Float64();
- case kWasmAnyFunc:
case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef:
return MachineType::TaggedPointer();
case kWasmS128:
return MachineType::Simd128();
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 0f9da37fa7..c874aa0f69 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -10,6 +10,7 @@
#include "src/base/adapters.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/counters.h"
#include "src/disassembler.h"
#include "src/globals.h"
#include "src/log.h"
@@ -22,11 +23,15 @@
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
-#include "src/wasm/wasm-import-wrapper-cache-inl.h"
+#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
+#if defined(V8_OS_WIN_X64)
+#include "src/unwinding-info-win64.h"
+#endif
+
#define TRACE_HEAP(...) \
do { \
if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
@@ -36,6 +41,8 @@ namespace v8 {
namespace internal {
namespace wasm {
+using trap_handler::ProtectedInstructionData;
+
void DisjointAllocationPool::Merge(base::AddressRegion region) {
auto dest_it = regions_.begin();
auto dest_end = regions_.end();
@@ -102,10 +109,12 @@ Address WasmCode::constant_pool() const {
}
Address WasmCode::code_comments() const {
- if (code_comments_offset_ < unpadded_binary_size_) {
- return instruction_start() + code_comments_offset_;
- }
- return kNullAddress;
+ return instruction_start() + code_comments_offset_;
+}
+
+uint32_t WasmCode::code_comments_size() const {
+ DCHECK_GE(unpadded_binary_size_, code_comments_offset_);
+ return static_cast<uint32_t>(unpadded_binary_size_ - code_comments_offset_);
}
size_t WasmCode::trap_handler_index() const {
@@ -120,6 +129,7 @@ void WasmCode::set_trap_handler_index(size_t value) {
void WasmCode::RegisterTrapHandlerData() {
DCHECK(!HasTrapHandlerIndex());
if (kind() != WasmCode::kFunction) return;
+ if (protected_instructions_.empty()) return;
Address base = instruction_start();
@@ -152,7 +162,7 @@ void WasmCode::LogCode(Isolate* isolate) const {
WireBytesRef name_ref =
native_module()->module()->LookupFunctionName(wire_bytes, index());
WasmName name_vec = wire_bytes.GetNameOrNull(name_ref);
- if (!name_vec.is_empty()) {
+ if (!name_vec.empty()) {
HandleScope scope(isolate);
MaybeHandle<String> maybe_name = isolate->factory()->NewStringFromUtf8(
Vector<const char>::cast(name_vec));
@@ -175,7 +185,7 @@ void WasmCode::LogCode(Isolate* isolate) const {
generated_name));
}
- if (!source_positions().is_empty()) {
+ if (!source_positions().empty()) {
LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
source_positions()));
}
@@ -282,7 +292,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
}
- if (!protected_instructions_.is_empty()) {
+ if (!protected_instructions_.empty()) {
os << "Protected instructions:\n pc offset land pad\n";
for (auto& data : protected_instructions()) {
os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
@@ -291,7 +301,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
}
- if (!source_positions().is_empty()) {
+ if (!source_positions().empty()) {
os << "Source positions:\n pc offset position\n";
for (SourcePositionTableIterator it(source_positions()); !it.done();
it.Advance()) {
@@ -331,10 +341,8 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
}
os << "\n";
- if (code_comments_offset() < unpadded_binary_size_) {
- Address code_comments = reinterpret_cast<Address>(instructions().start() +
- code_comments_offset());
- PrintCodeCommentsSection(os, code_comments);
+ if (code_comments_size() > 0) {
+ PrintCodeCommentsSection(os, code_comments(), code_comments_size());
}
#endif // ENABLE_DISASSEMBLER
}
@@ -345,8 +353,6 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
return "wasm function";
case WasmCode::kWasmToJsWrapper:
return "wasm-to-js";
- case WasmCode::kLazyStub:
- return "lazy-compile";
case WasmCode::kRuntimeStub:
return "runtime-stub";
case WasmCode::kInterpreterEntry:
@@ -365,14 +371,49 @@ WasmCode::~WasmCode() {
}
}
+V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
+ if (native_module_->engine()->AddPotentiallyDeadCode(this)) {
+ // The code just became potentially dead. The ref count we wanted to
+ // decrement is now transferred to the set of potentially dead code, and
+ // will be decremented when the next GC is run.
+ return false;
+ }
+ // If we reach here, the code was already potentially dead. Decrement the ref
+ // count, and return true if it drops to zero.
+ int old_count = ref_count_.load(std::memory_order_relaxed);
+ while (true) {
+ DCHECK_LE(1, old_count);
+ if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
+ std::memory_order_relaxed)) {
+ return old_count == 1;
+ }
+ }
+}
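The decrement above follows the usual compare-and-swap loop, so concurrent IncRef/DecRef calls never lose an update, and only the transition from 1 to 0 reports the code as dead. A stripped-down sketch of the same pattern on a bare atomic counter:

#include <atomic>

// Returns true when this call released the last reference.
static bool DecRefSketch(std::atomic<int>& ref_count) {
  int old_count = ref_count.load(std::memory_order_relaxed);
  while (true) {
    // On failure, compare_exchange_weak reloads {old_count} with the current
    // value, so the next iteration retries against fresh data.
    if (ref_count.compare_exchange_weak(old_count, old_count - 1,
                                        std::memory_order_relaxed)) {
      return old_count == 1;
    }
  }
}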
+
+// static
+void WasmCode::DecrementRefCount(Vector<WasmCode*> code_vec) {
+ // Decrement the ref counter of all given code objects. Keep the ones whose
+ // ref count drops to zero.
+ std::unordered_map<NativeModule*, std::vector<WasmCode*>> dead_code;
+ for (WasmCode* code : code_vec) {
+ if (code->DecRef()) dead_code[code->native_module()].push_back(code);
+ }
+
+ // For each native module, free all its code objects at once.
+ for (auto& dead_code_entry : dead_code) {
+ NativeModule* native_module = dead_code_entry.first;
+ Vector<WasmCode*> code_vec = VectorOf(dead_code_entry.second);
+ native_module->FreeCode(code_vec);
+ }
+}
+
NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
bool can_request_more, VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
- std::shared_ptr<Counters> async_counters)
+ std::shared_ptr<Counters> async_counters,
+ std::shared_ptr<NativeModule>* shared_this)
: enabled_features_(enabled),
module_(std::move(module)),
- compilation_state_(
- CompilationState::New(this, std::move(async_counters))),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
new WasmImportWrapperCache(this))),
free_code_space_(code_space.region()),
@@ -380,24 +421,43 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
can_request_more_memory_(can_request_more),
use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
: kNoTrapHandler) {
+  // We receive a pointer to an empty {std::shared_ptr}, and install ourselves

+ // there.
+ DCHECK_NOT_NULL(shared_this);
+ DCHECK_NULL(*shared_this);
+ shared_this->reset(this);
+ compilation_state_ =
+ CompilationState::New(*shared_this, std::move(async_counters));
DCHECK_NOT_NULL(module_);
owned_code_space_.emplace_back(std::move(code_space));
owned_code_.reserve(num_functions());
+#if defined(V8_OS_WIN_X64)
+ // On some platforms, specifically Win64, we need to reserve some pages at
+ // the beginning of an executable space.
+ // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
+ // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
+ // for details.
+ if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+ FLAG_win64_unwinding_info) {
+ AllocateForCode(Heap::GetCodeRangeReservedAreaSize());
+ }
+#endif
+
uint32_t num_wasm_functions = module_->num_declared_functions;
if (num_wasm_functions > 0) {
- code_table_.reset(new WasmCode*[num_wasm_functions]);
- memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
+ code_table_.reset(new WasmCode* [num_wasm_functions] {});
+ WasmCodeRefScope code_ref_scope;
jump_table_ = CreateEmptyJumpTable(
JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
}
}
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
+ WasmCodeRefScope code_ref_scope;
DCHECK_LE(num_functions(), max_functions);
- WasmCode** new_table = new WasmCode*[max_functions];
- memset(new_table, 0, max_functions * sizeof(*new_table));
+ WasmCode** new_table = new WasmCode* [max_functions] {};
if (module_->num_declared_functions > 0) {
memcpy(new_table, code_table_.get(),
module_->num_declared_functions * sizeof(*new_table));
@@ -414,8 +474,11 @@ void NativeModule::LogWasmCodes(Isolate* isolate) {
// TODO(titzer): we skip the logging of the import wrappers
// here, but they should be included somehow.
- for (WasmCode* code : code_table()) {
- if (code != nullptr) code->LogCode(isolate);
+ int start = module()->num_imported_functions;
+ int end = start + module()->num_declared_functions;
+ WasmCodeRefScope code_ref_scope;
+ for (int func_index = start; func_index < end; ++func_index) {
+ if (WasmCode* code = GetCode(func_index)) code->LogCode(isolate);
}
}
@@ -424,58 +487,29 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
enabled_features_};
}
-WasmCode* NativeModule::AddOwnedCode(
- uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
- uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
- size_t handler_table_offset, size_t constant_pool_offset,
- size_t code_comments_offset, size_t unpadded_binary_size,
- OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
- OwnedVector<const byte> reloc_info,
- OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
- WasmCode::Tier tier) {
- CHECK(!FLAG_jitless); // TODO(jgruber): Support wasm in jitless mode.
-
- WasmCode* code;
- {
- // Both allocation and insertion in owned_code_ happen in the same critical
- // section, thus ensuring owned_code_'s elements are rarely if ever moved.
- base::MutexGuard lock(&allocation_mutex_);
- Vector<byte> executable_buffer = AllocateForCode(instructions.size());
- // Ownership will be transferred to {owned_code_} below.
- code = new WasmCode(
- this, index, executable_buffer, stack_slots, tagged_parameter_slots,
- safepoint_table_offset, handler_table_offset, constant_pool_offset,
- code_comments_offset, unpadded_binary_size,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_position_table), kind, tier);
-
- owned_code_.emplace_back(code);
- }
- memcpy(reinterpret_cast<void*>(code->instruction_start()),
- instructions.start(), instructions.size());
-
- return code;
-}
-
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::kFunction);
- return ret;
+ return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
}
-void NativeModule::SetLazyBuiltin(Handle<Code> code) {
- uint32_t num_wasm_functions = module_->num_declared_functions;
- if (num_wasm_functions == 0) return;
- WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
- // Fill the jump table with jumps to the lazy compile stub.
- Address lazy_compile_target = lazy_builtin->instruction_start();
- for (uint32_t i = 0; i < num_wasm_functions; ++i) {
- JumpTableAssembler::EmitLazyCompileJumpSlot(
- jump_table_->instruction_start(), i,
- i + module_->num_imported_functions, lazy_compile_target,
- WasmCode::kNoFlushICache);
+void NativeModule::UseLazyStubs() {
+ uint32_t start = module_->num_imported_functions;
+ uint32_t end = start + module_->num_declared_functions;
+ for (uint32_t func_index = start; func_index < end; func_index++) {
+ UseLazyStub(func_index);
}
- FlushInstructionCache(jump_table_->instructions().start(),
- jump_table_->instructions().size());
+}
+
+void NativeModule::UseLazyStub(uint32_t func_index) {
+ DCHECK_LE(module_->num_imported_functions, func_index);
+ DCHECK_LT(func_index,
+ module_->num_imported_functions + module_->num_declared_functions);
+
+ // Add jump table entry for jump to the lazy compile stub.
+ uint32_t slot_index = func_index - module_->num_imported_functions;
+ DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
+ JumpTableAssembler::EmitLazyCompileJumpSlot(
+ jump_table_->instruction_start(), slot_index, func_index,
+ runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
}
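Since the jump table only has slots for declared (non-imported) functions, the slot index above is simply the module-wide function index rebased by the number of imports. A tiny illustration with made-up numbers:

#include <cstdint>

// Assume 3 imported and 5 declared functions: lazy stubs are valid for
// function indices [3, 8), which map to jump table slots [0, 5).
constexpr uint32_t kNumImportedSketch = 3;
static uint32_t SlotForFunction(uint32_t func_index) {
  return func_index - kNumImportedSketch;  // e.g. 3 -> 0, 7 -> 4
}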
// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
@@ -483,6 +517,7 @@ void NativeModule::SetLazyBuiltin(Handle<Code> code) {
void NativeModule::SetRuntimeStubs(Isolate* isolate) {
DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]); // Only called once.
#ifdef V8_EMBEDDED_BUILTINS
+ WasmCodeRefScope code_ref_scope;
WasmCode* jump_table =
CreateEmptyJumpTable(JumpTableAssembler::SizeForNumberOfStubSlots(
WasmCode::kRuntimeStubCount));
@@ -509,11 +544,13 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
runtime_stub_table_ = jump_table;
#else // V8_EMBEDDED_BUILTINS
HandleScope scope(isolate);
+ WasmCodeRefScope code_ref_scope;
USE(runtime_stub_table_); // Actually unused, but avoids ifdef's in header.
-#define COPY_BUILTIN(Name) \
- runtime_stub_entries_[WasmCode::k##Name] = \
- AddAnonymousCode(isolate->builtins()->builtin_handle(Builtins::k##Name), \
- WasmCode::kRuntimeStub, #Name) \
+#define COPY_BUILTIN(Name) \
+ runtime_stub_entries_[WasmCode::k##Name] = \
+ AddAndPublishAnonymousCode( \
+ isolate->builtins()->builtin_handle(Builtins::k##Name), \
+ WasmCode::kRuntimeStub, #Name) \
->instruction_start();
#define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP)
@@ -523,15 +560,17 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
DCHECK_NE(kNullAddress, runtime_stub_entries_[0]);
}
-WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
- const char* name) {
+WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
+ WasmCode::Kind kind,
+ const char* name) {
// For off-heap builtins, we create a copy of the off-heap instruction stream
// instead of the on-heap code object containing the trampoline. Ensure that
// we do not apply the on-heap reloc info to the off-heap instructions.
const size_t relocation_size =
code->is_off_heap_trampoline() ? 0 : code->relocation_size();
- OwnedVector<byte> reloc_info = OwnedVector<byte>::New(relocation_size);
+ OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
+ reloc_info = OwnedVector<byte>::New(relocation_size);
memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
}
Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
@@ -545,39 +584,34 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
Vector<const byte> instructions(
reinterpret_cast<byte*>(code->InstructionStart()),
static_cast<size_t>(code->InstructionSize()));
- const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+ const uint32_t stack_slots = static_cast<uint32_t>(
+ code->has_safepoint_info() ? code->stack_slots() : 0);
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
// Code objects contain real offsets but WasmCode expects an offset of 0 to
// mean 'empty'.
- const int safepoint_table_offset =
- code->has_safepoint_table() ? code->safepoint_table_offset() : 0;
- const int handler_table_offset =
- code->has_handler_table() ? code->handler_table_offset() : 0;
-
- WasmCode* ret =
- AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
- instructions, // instructions
- stack_slots, // stack_slots
- 0, // tagged_parameter_slots
- safepoint_table_offset, // safepoint_table_offset
- handler_table_offset, // handler_table_offset
- code->constant_pool_offset(), // constant_pool_offset
- code->code_comments_offset(), // code_comments_offset
- instructions.size(), // unpadded_binary_size
- {}, // protected_instructions
- std::move(reloc_info), // reloc_info
- std::move(source_pos), // source positions
- kind, // kind
- WasmCode::kOther); // tier
+ const size_t safepoint_table_offset = static_cast<size_t>(
+ code->has_safepoint_table() ? code->safepoint_table_offset() : 0);
+ const size_t handler_table_offset = static_cast<size_t>(
+ code->has_handler_table() ? code->handler_table_offset() : 0);
+ const size_t constant_pool_offset =
+ static_cast<size_t>(code->constant_pool_offset());
+ const size_t code_comments_offset =
+ static_cast<size_t>(code->code_comments_offset());
+
+ Vector<uint8_t> dst_code_bytes = AllocateForCode(instructions.size());
+ memcpy(dst_code_bytes.begin(), instructions.start(), instructions.size());
// Apply the relocation delta by iterating over the RelocInfo.
- intptr_t delta = ret->instruction_start() - code->InstructionStart();
+ intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
+ code->InstructionStart();
int mode_mask = RelocInfo::kApplyMask |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
+ Address constant_pool_start =
+ reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset;
RelocIterator orig_it(*code, mode_mask);
- for (RelocIterator it(ret->instructions(), ret->reloc_info(),
- ret->constant_pool(), mode_mask);
+ for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
+ constant_pool_start, mode_mask);
!it.done(); it.next(), orig_it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsWasmStubCall(mode)) {
@@ -591,23 +625,53 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
}
}
- // Flush the i-cache here instead of in AddOwnedCode, to include the changes
- // made while iterating over the RelocInfo above.
- FlushInstructionCache(ret->instructions().start(),
- ret->instructions().size());
- ret->MaybePrint(name);
- ret->Validate();
- return ret;
+ // Flush the i-cache after relocation.
+ FlushInstructionCache(dst_code_bytes.start(), dst_code_bytes.size());
+
+ DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry);
+ std::unique_ptr<WasmCode> new_code{new WasmCode{
+ this, // native_module
+ WasmCode::kAnonymousFuncIndex, // index
+ dst_code_bytes, // instructions
+ stack_slots, // stack_slots
+ 0, // tagged_parameter_slots
+ safepoint_table_offset, // safepoint_table_offset
+ handler_table_offset, // handler_table_offset
+ constant_pool_offset, // constant_pool_offset
+ code_comments_offset, // code_comments_offset
+ instructions.size(), // unpadded_binary_size
+ OwnedVector<ProtectedInstructionData>{}, // protected_instructions
+ std::move(reloc_info), // reloc_info
+ std::move(source_pos), // source positions
+ kind, // kind
+ ExecutionTier::kNone}}; // tier
+ new_code->MaybePrint(name);
+ new_code->Validate();
+
+ return PublishCode(std::move(new_code));
}
-WasmCode* NativeModule::AddCode(
+std::unique_ptr<WasmCode> NativeModule::AddCode(
uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
uint32_t tagged_parameter_slots,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
- OwnedVector<const byte> source_pos_table, WasmCode::Kind kind,
- WasmCode::Tier tier) {
- OwnedVector<byte> reloc_info = OwnedVector<byte>::New(desc.reloc_size);
+ OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier) {
+ return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
+ std::move(protected_instructions),
+ std::move(source_position_table), kind, tier,
+ AllocateForCode(desc.instr_size));
+}
+
+std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
+ uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
+ uint32_t tagged_parameter_slots,
+ OwnedVector<ProtectedInstructionData> protected_instructions,
+ OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier, Vector<uint8_t> dst_code_bytes) {
+ OwnedVector<byte> reloc_info;
if (desc.reloc_size > 0) {
+ reloc_info = OwnedVector<byte>::New(desc.reloc_size);
memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
desc.reloc_size);
}
@@ -615,25 +679,28 @@ WasmCode* NativeModule::AddCode(
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
// CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
// 'empty'.
- const int safepoint_table_offset =
- desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
- const int handler_table_offset =
- desc.handler_table_size == 0 ? 0 : desc.handler_table_offset;
-
- WasmCode* code = AddOwnedCode(
- index, {desc.buffer, static_cast<size_t>(desc.instr_size)}, stack_slots,
- tagged_parameter_slots, safepoint_table_offset, handler_table_offset,
- desc.constant_pool_offset, desc.code_comments_offset, desc.instr_size,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_pos_table), kind, tier);
+ const size_t safepoint_table_offset = static_cast<size_t>(
+ desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset);
+ const size_t handler_table_offset = static_cast<size_t>(
+ desc.handler_table_size == 0 ? 0 : desc.handler_table_offset);
+ const size_t constant_pool_offset =
+ static_cast<size_t>(desc.constant_pool_offset);
+ const size_t code_comments_offset =
+ static_cast<size_t>(desc.code_comments_offset);
+ const size_t instr_size = static_cast<size_t>(desc.instr_size);
+
+ memcpy(dst_code_bytes.begin(), desc.buffer,
+ static_cast<size_t>(desc.instr_size));
// Apply the relocation delta by iterating over the RelocInfo.
- intptr_t delta = code->instructions().start() - desc.buffer;
+ intptr_t delta = dst_code_bytes.begin() - desc.buffer;
int mode_mask = RelocInfo::kApplyMask |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
- for (RelocIterator it(code->instructions(), code->reloc_info(),
- code->constant_pool(), mode_mask);
+ Address constant_pool_start =
+ reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset;
+ for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
+ constant_pool_start, mode_mask);
!it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsWasmCall(mode)) {
@@ -651,25 +718,94 @@ WasmCode* NativeModule::AddCode(
}
}
- // Flush the i-cache here instead of in AddOwnedCode, to include the changes
- // made while iterating over the RelocInfo above.
- FlushInstructionCache(code->instructions().start(),
- code->instructions().size());
+ std::unique_ptr<WasmCode> code{new WasmCode{
+ this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
+ safepoint_table_offset, handler_table_offset, constant_pool_offset,
+ code_comments_offset, instr_size, std::move(protected_instructions),
+ std::move(reloc_info), std::move(source_position_table), kind, tier}};
code->MaybePrint();
code->Validate();
- if (!code->protected_instructions_.is_empty()) {
- code->RegisterTrapHandlerData();
- }
+ code->RegisterTrapHandlerData();
+
+ // Flush the i-cache for the region holding the relocated code.
+ // Do this last, as this seems to trigger an LTO bug that clobbers a register
+ // on arm, see https://crbug.com/952759#c6.
+ FlushInstructionCache(dst_code_bytes.start(), dst_code_bytes.size());
+ return code;
+}
+
+WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
base::MutexGuard lock(&allocation_mutex_);
- // Skip publishing code if there is an active redirection to the interpreter
- // for the given function index, in order to preserve the redirection.
- if (!code->IsAnonymous() && !has_interpreter_redirection(code->index())) {
- InstallCode(code);
+ return PublishCodeLocked(std::move(code));
+}
+
+namespace {
+WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
+ switch (tier) {
+ case ExecutionTier::kInterpreter:
+ return WasmCode::Kind::kInterpreterEntry;
+ case ExecutionTier::kLiftoff:
+ case ExecutionTier::kTurbofan:
+ return WasmCode::Kind::kFunction;
+ case ExecutionTier::kNone:
+ UNREACHABLE();
}
+}
+} // namespace
- return code;
+WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
+  // The caller must hold the {allocation_mutex_}; the TryLock() below is expected to fail.
+ DCHECK(!allocation_mutex_.TryLock());
+
+ if (!code->IsAnonymous()) {
+ DCHECK_LT(code->index(), num_functions());
+ DCHECK_LE(module_->num_imported_functions, code->index());
+
+ // Assume an order of execution tiers that represents the quality of their
+ // generated code.
+ static_assert(ExecutionTier::kNone < ExecutionTier::kInterpreter &&
+ ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+
+ // Update code table but avoid to fall back to less optimized code. We use
+ // the new code if it was compiled with a higher tier.
+ uint32_t slot_idx = code->index() - module_->num_imported_functions;
+ WasmCode* prior_code = code_table_[slot_idx];
+ bool update_code_table = !prior_code || prior_code->tier() < code->tier();
+ if (update_code_table) {
+ code_table_[slot_idx] = code.get();
+ if (prior_code) {
+ WasmCodeRefScope::AddRef(prior_code);
+ // The code is added to the current {WasmCodeRefScope}, hence the ref
+ // count cannot drop to zero here.
+ CHECK(!prior_code->DecRef());
+ }
+ }
+
+ // Populate optimized code to the jump table unless there is an active
+ // redirection to the interpreter that should be preserved.
+ bool update_jump_table =
+ update_code_table && !has_interpreter_redirection(code->index());
+
+ // Ensure that interpreter entries always populate to the jump table.
+ if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
+ SetInterpreterRedirection(code->index());
+ update_jump_table = true;
+ }
+
+ if (update_jump_table) {
+ JumpTableAssembler::PatchJumpTableSlot(
+ jump_table_->instruction_start(), slot_idx, code->instruction_start(),
+ WasmCode::kFlushICache);
+ }
+ }
+ WasmCodeRefScope::AddRef(code.get());
+ WasmCode* result = code.get();
+ owned_code_.emplace_back(std::move(code));
+ return result;
}
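Publication only replaces an installed entry when the new code comes from a strictly higher tier, and the jump table is left untouched while an interpreter redirection is active (except for interpreter entries themselves, which always win). A condensed sketch of that decision logic, assuming the tier ordering asserted above:

enum class Tier { kNone, kInterpreter, kLiftoff, kTurbofan };

struct PublishDecision {
  bool update_code_table;
  bool update_jump_table;
};

static PublishDecision DecidePublish(bool has_prior, Tier prior, Tier incoming,
                                     bool is_interpreter_entry,
                                     bool has_interpreter_redirection) {
  PublishDecision d;
  d.update_code_table = !has_prior || prior < incoming;
  d.update_jump_table = d.update_code_table && !has_interpreter_redirection;
  if (is_interpreter_entry) d.update_jump_table = true;  // always redirect
  return d;
}
// e.g. prior = kTurbofan, incoming = kLiftoff -> neither table is updated.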
WasmCode* NativeModule::AddDeserializedCode(
@@ -677,84 +813,81 @@ WasmCode* NativeModule::AddDeserializedCode(
uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
size_t handler_table_offset, size_t constant_pool_offset,
size_t code_comments_offset, size_t unpadded_binary_size,
- OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
+ OwnedVector<ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
- OwnedVector<const byte> source_position_table, WasmCode::Tier tier) {
- WasmCode* code = AddOwnedCode(
- index, instructions, stack_slots, tagged_parameter_slots,
+ OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier) {
+ Vector<uint8_t> dst_code_bytes = AllocateForCode(instructions.size());
+ memcpy(dst_code_bytes.begin(), instructions.start(), instructions.size());
+
+ std::unique_ptr<WasmCode> code{new WasmCode{
+ this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, unpadded_binary_size,
std::move(protected_instructions), std::move(reloc_info),
- std::move(source_position_table), WasmCode::kFunction, tier);
+ std::move(source_position_table), kind, tier}};
+
+ code->RegisterTrapHandlerData();
- if (!code->protected_instructions_.is_empty()) {
- code->RegisterTrapHandlerData();
- }
- base::MutexGuard lock(&allocation_mutex_);
- InstallCode(code);
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
- return code;
-}
-void NativeModule::PublishInterpreterEntry(WasmCode* code,
- uint32_t func_index) {
- code->index_ = func_index;
- base::MutexGuard lock(&allocation_mutex_);
- InstallCode(code);
- SetInterpreterRedirection(func_index);
+ return PublishCode(std::move(code));
}
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
base::MutexGuard lock(&allocation_mutex_);
- std::vector<WasmCode*> result;
- result.reserve(code_table().size());
- for (WasmCode* code : code_table()) result.push_back(code);
- return result;
+ WasmCode** start = code_table_.get();
+ WasmCode** end = start + module_->num_declared_functions;
+ return std::vector<WasmCode*>{start, end};
+}
+
+WasmCode* NativeModule::GetCode(uint32_t index) const {
+ base::MutexGuard guard(&allocation_mutex_);
+ DCHECK_LT(index, num_functions());
+ DCHECK_LE(module_->num_imported_functions, index);
+ WasmCode* code = code_table_[index - module_->num_imported_functions];
+ WasmCodeRefScope::AddRef(code);
+ return code;
+}
+
+bool NativeModule::HasCode(uint32_t index) const {
+ base::MutexGuard guard(&allocation_mutex_);
+ DCHECK_LT(index, num_functions());
+ DCHECK_LE(module_->num_imported_functions, index);
+ return code_table_[index - module_->num_imported_functions] != nullptr;
}
WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
- OwnedVector<byte> instructions = OwnedVector<byte>::New(jump_table_size);
- memset(instructions.start(), 0, instructions.size());
- return AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
- instructions.as_vector(), // instructions
- 0, // stack_slots
- 0, // tagged_parameter_slots
- 0, // safepoint_table_offset
- 0, // handler_table_offset
- instructions.size(), // constant_pool_offset
- instructions.size(), // code_comments_offset
- instructions.size(), // unpadded_binary_size
- {}, // protected_instructions
- {}, // reloc_info
- {}, // source_pos
- WasmCode::kJumpTable, // kind
- WasmCode::kOther); // tier
-}
-
-void NativeModule::InstallCode(WasmCode* code) {
- DCHECK_LT(code->index(), num_functions());
- DCHECK_LE(module_->num_imported_functions, code->index());
-
- // Update code table, except for interpreter entries.
- if (code->kind() != WasmCode::kInterpreterEntry) {
- code_table_[code->index() - module_->num_imported_functions] = code;
- }
-
- // Patch jump table.
- uint32_t slot_idx = code->index() - module_->num_imported_functions;
- JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
- slot_idx, code->instruction_start(),
- WasmCode::kFlushICache);
+ Vector<uint8_t> code_space = AllocateForCode(jump_table_size);
+ ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
+ std::unique_ptr<WasmCode> code{new WasmCode{
+ this, // native_module
+ WasmCode::kAnonymousFuncIndex, // index
+ code_space, // instructions
+ 0, // stack_slots
+ 0, // tagged_parameter_slots
+ 0, // safepoint_table_offset
+ 0, // handler_table_offset
+ jump_table_size, // constant_pool_offset
+ jump_table_size, // code_comments_offset
+ jump_table_size, // unpadded_binary_size
+ OwnedVector<ProtectedInstructionData>{}, // protected_instructions
+ OwnedVector<const uint8_t>{}, // reloc_info
+ OwnedVector<const uint8_t>{}, // source_pos
+ WasmCode::kJumpTable, // kind
+ ExecutionTier::kNone}}; // tier
+ return PublishCode(std::move(code));
}
Vector<byte> NativeModule::AllocateForCode(size_t size) {
+ base::MutexGuard lock(&allocation_mutex_);
DCHECK_LT(0, size);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// This happens under a lock assumed by the caller.
- size = RoundUp(size, kCodeAlignment);
+ size = RoundUp<kCodeAlignment>(size);
base::AddressRegion code_space = free_code_space_.Allocate(size);
if (code_space.is_empty()) {
if (!can_request_more_memory_) {
@@ -829,6 +962,8 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
}
DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
allocated_code_space_.Merge(code_space);
+ generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
+
TRACE_HEAP("Code alloc for %p: %" PRIxPTR ",+%zu\n", this, code_space.begin(),
size);
return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
@@ -854,7 +989,7 @@ void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
auto shared_wire_bytes =
std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
wire_bytes_ = shared_wire_bytes;
- if (!shared_wire_bytes->is_empty()) {
+ if (!shared_wire_bytes->empty()) {
compilation_state_->SetWireBytesStorage(
std::make_shared<NativeModuleWireBytesStorage>(
std::move(shared_wire_bytes)));
@@ -886,7 +1021,10 @@ WasmCode* NativeModule::Lookup(Address pc) const {
--iter;
WasmCode* candidate = iter->get();
DCHECK_NOT_NULL(candidate);
- if (candidate->contains(pc)) return candidate;
+ if (candidate->contains(pc)) {
+ WasmCodeRefScope::AddRef(candidate);
+ return candidate;
+ }
}
if (owned_code_sorted_portion_ == owned_code_.size()) return nullptr;
std::sort(owned_code_.begin(), owned_code_.end(),
@@ -921,20 +1059,6 @@ uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
return module_->num_imported_functions + slot_idx;
}
-void NativeModule::DisableTrapHandler() {
- // Switch {use_trap_handler_} from true to false.
- DCHECK(use_trap_handler_);
- use_trap_handler_ = kNoTrapHandler;
-
- // Clear the code table (just to increase the chances to hit an error if we
- // forget to re-add all code).
- uint32_t num_wasm_functions = module_->num_declared_functions;
- memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
-
- // TODO(clemensh): Actually free the owned code, such that the memory can be
- // recycled.
-}
-
const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const {
#define RETURN_NAME(Name) \
if (runtime_stub_entries_[WasmCode::k##Name] == runtime_stub_entry) { \
@@ -953,13 +1077,18 @@ NativeModule::~NativeModule() {
// NativeModule or freeing anything.
compilation_state_->AbortCompilation();
engine_->FreeNativeModule(this);
+ // Free the import wrapper cache before releasing the {WasmCode} objects in
+ // {owned_code_}. The destructor of {WasmImportWrapperCache} still needs to
+ // decrease reference counts on the {WasmCode} objects.
+ import_wrapper_cache_.reset();
}
WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed)
: memory_tracker_(memory_tracker),
- remaining_uncommitted_code_space_(max_committed),
- critical_uncommitted_code_space_(max_committed / 2) {
+ max_committed_code_space_(max_committed),
+ total_committed_code_space_(0),
+ critical_committed_code_space_(max_committed / 2) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}
@@ -968,14 +1097,14 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
if (FLAG_perf_prof) return true;
DCHECK(IsAligned(start, AllocatePageSize()));
DCHECK(IsAligned(size, AllocatePageSize()));
- // Reserve the size. Use CAS loop to avoid underflow on
- // {remaining_uncommitted_}. Temporary underflow would allow concurrent
- // threads to over-commit.
- size_t old_value = remaining_uncommitted_code_space_.load();
+ // Reserve the size. Use CAS loop to avoid overflow on
+ // {total_committed_code_space_}.
+ size_t old_value = total_committed_code_space_.load();
while (true) {
- if (old_value < size) return false;
- if (remaining_uncommitted_code_space_.compare_exchange_weak(
- old_value, old_value - size)) {
+ DCHECK_GE(max_committed_code_space_, old_value);
+ if (size > max_committed_code_space_ - old_value) return false;
+ if (total_committed_code_space_.compare_exchange_weak(old_value,
+ old_value + size)) {
break;
}
}
@@ -991,10 +1120,10 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
if (!ret) {
// Highly unlikely.
- remaining_uncommitted_code_space_.fetch_add(size);
+ total_committed_code_space_.fetch_sub(size);
return false;
}
- return ret;
+ return true;
}
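Commit() now accounts for committed code space globally: the CAS loop reserves {size} bytes against the configured maximum before touching page permissions, and the fetch_sub above rolls the reservation back if SetPermissions fails. A standalone sketch of just the reservation step:

#include <atomic>
#include <cstddef>

static bool TryReserveCommitted(std::atomic<size_t>& total_committed,
                                size_t max_committed, size_t size) {
  size_t old_value = total_committed.load();
  while (true) {
    // Invariant: old_value <= max_committed, so the subtraction cannot wrap.
    if (size > max_committed - old_value) return false;  // would exceed the max
    if (total_committed.compare_exchange_weak(old_value, old_value + size)) {
      return true;  // reserved; caller must fetch_sub(size) on a failed commit
    }
  }
}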
void WasmCodeManager::AssignRanges(Address start, Address end,
@@ -1007,10 +1136,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
DCHECK_GT(size, 0);
size = RoundUp(size, page_allocator->AllocatePageSize());
- if (!memory_tracker_->ReserveAddressSpace(size,
- WasmMemoryTracker::kHardLimit)) {
- return {};
- }
+ if (!memory_tracker_->ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
VirtualMemory mem(page_allocator, size, hint,
@@ -1032,8 +1158,10 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
}
void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
- remaining_uncommitted_code_space_.store(limit);
- critical_uncommitted_code_space_.store(limit / 2);
+ // This has to be set before committing any memory.
+ DCHECK_EQ(0, total_committed_code_space_.load());
+ max_committed_code_space_ = limit;
+ critical_committed_code_space_.store(limit / 2);
}
// static
@@ -1070,17 +1198,19 @@ size_t WasmCodeManager::EstimateNativeModuleNonCodeSize(
return wasm_module_estimate + native_module_estimate;
}
-std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
+std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
WasmEngine* engine, Isolate* isolate, const WasmFeatures& enabled,
size_t code_size_estimate, bool can_request_more,
std::shared_ptr<const WasmModule> module) {
DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
- if (remaining_uncommitted_code_space_.load() <
- critical_uncommitted_code_space_.load()) {
+ if (total_committed_code_space_.load() >
+ critical_committed_code_space_.load()) {
(reinterpret_cast<v8::Isolate*>(isolate))
->MemoryPressureNotification(MemoryPressureLevel::kCritical);
- critical_uncommitted_code_space_.store(
- remaining_uncommitted_code_space_.load() / 2);
+ size_t committed = total_committed_code_space_.load();
+ DCHECK_GE(max_committed_code_space_, committed);
+ critical_committed_code_space_.store(
+ committed + (max_committed_code_space_ - committed) / 2);
}
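The re-armed threshold sits halfway between the current commitment and the maximum, so each pressure notification moves the trigger closer to the limit: with, say, a 1024 MB maximum and 600 MB committed, the next notification fires once committed space exceeds 600 + (1024 - 600) / 2 = 812 MB.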
// If the code must be contiguous, reserve enough address space up front.
@@ -1106,11 +1236,22 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Address start = code_space.address();
size_t size = code_space.size();
Address end = code_space.end();
- std::unique_ptr<NativeModule> ret(
- new NativeModule(engine, enabled, can_request_more, std::move(code_space),
- std::move(module), isolate->async_counters()));
+ std::shared_ptr<NativeModule> ret;
+ new NativeModule(engine, enabled, can_request_more, std::move(code_space),
+ std::move(module), isolate->async_counters(), &ret);
+ // The constructor initialized the shared_ptr.
+ DCHECK_NOT_NULL(ret);
TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
size);
+
+#if defined(V8_OS_WIN_X64)
+ if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+ FLAG_win64_unwinding_info) {
+ win64_unwindinfo::RegisterNonABICompliantCodeRange(
+ reinterpret_cast<void*>(start), size);
+ }
+#endif
+
base::MutexGuard lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
return ret;
@@ -1165,6 +1306,77 @@ bool NativeModule::SetExecutable(bool executable) {
return true;
}
+void NativeModule::SampleCodeSize(
+ Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
+ size_t code_size = sampling_time == kSampling
+ ? committed_code_space()
+ : generated_code_size_.load(std::memory_order_relaxed);
+ int code_size_mb = static_cast<int>(code_size / MB);
+ Histogram* histogram = nullptr;
+ switch (sampling_time) {
+ case kAfterBaseline:
+ histogram = counters->wasm_module_code_size_mb_after_baseline();
+ break;
+ case kAfterTopTier:
+ histogram = counters->wasm_module_code_size_mb_after_top_tier();
+ break;
+ case kSampling:
+ histogram = counters->wasm_module_code_size_mb();
+ break;
+ }
+ histogram->AddSample(code_size_mb);
+}
+
+WasmCode* NativeModule::AddCompiledCode(WasmCompilationResult result) {
+ return AddCompiledCode({&result, 1})[0];
+}
+
+std::vector<WasmCode*> NativeModule::AddCompiledCode(
+ Vector<WasmCompilationResult> results) {
+ DCHECK(!results.empty());
+ // First, allocate code space for all the results.
+ size_t total_code_space = 0;
+ for (auto& result : results) {
+ DCHECK(result.succeeded());
+ total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
+ }
+ Vector<byte> code_space = AllocateForCode(total_code_space);
+
+ std::vector<std::unique_ptr<WasmCode>> generated_code;
+ generated_code.reserve(results.size());
+
+ // Now copy the generated code into the code space and relocate it.
+ for (auto& result : results) {
+ DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
+ size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
+ Vector<byte> this_code_space = code_space.SubVector(0, code_size);
+ code_space += code_size;
+ generated_code.emplace_back(AddCodeWithCodeSpace(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
+ std::move(result.source_positions),
+ GetCodeKindForExecutionTier(result.result_tier), result.result_tier,
+ this_code_space));
+ }
+ DCHECK_EQ(0, code_space.size());
+
+ // Under the {allocation_mutex_}, publish the code. The published code is put
+ // into the top-most surrounding {WasmCodeRefScope} by {PublishCodeLocked}.
+ std::vector<WasmCode*> code_vector;
+ code_vector.reserve(results.size());
+ {
+ base::MutexGuard lock(&allocation_mutex_);
+ for (auto& result : generated_code)
+ code_vector.push_back(PublishCodeLocked(std::move(result)));
+ }
+
+ return code_vector;
+}
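Batched publication sizes one contiguous allocation for all results and then hands each result a kCodeAlignment-rounded slice of it. A rough sketch of that layout computation over plain sizes (the alignment value and RoundUp helper are simplified stand-ins):

#include <cstddef>
#include <vector>

constexpr size_t kAlignSketch = 32;  // stand-in for kCodeAlignment
static constexpr size_t RoundUpSketch(size_t n) {
  return (n + kAlignSketch - 1) & ~(kAlignSketch - 1);
}

// Returns the byte offset of each result inside the single allocation.
static std::vector<size_t> LayOutCode(const std::vector<size_t>& instr_sizes) {
  std::vector<size_t> offsets;
  size_t offset = 0;
  for (size_t size : instr_sizes) {
    offsets.push_back(offset);      // this result's slice starts here
    offset += RoundUpSketch(size);  // advance past the rounded-up slice
  }
  // {offset} is the total to allocate, mirroring DCHECK_EQ(0, code_space.size()).
  return offsets;
}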
+
+void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
+ // TODO(clemensh): Implement.
+}
+
void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
base::MutexGuard lock(&native_modules_mutex_);
TRACE_HEAP("Freeing NativeModule %p\n", native_module);
@@ -1172,6 +1384,15 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
DCHECK(code_space.IsReserved());
TRACE_HEAP("VMem Release: %" PRIxPTR ":%" PRIxPTR " (%zu)\n",
code_space.address(), code_space.end(), code_space.size());
+
+#if defined(V8_OS_WIN_X64)
+ if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+ FLAG_win64_unwinding_info) {
+ win64_unwindinfo::UnregisterNonABICompliantCodeRange(
+ reinterpret_cast<void*>(code_space.address()));
+ }
+#endif
+
lookup_map_.erase(code_space.address());
memory_tracker_->ReleaseReservation(code_space.size());
code_space.Free();
@@ -1181,9 +1402,9 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
size_t code_size = native_module->committed_code_space_.load();
DCHECK(IsAligned(code_size, AllocatePageSize()));
- remaining_uncommitted_code_space_.fetch_add(code_size);
- // Remaining code space cannot grow bigger than maximum code space size.
- DCHECK_LE(remaining_uncommitted_code_space_.load(), kMaxWasmCodeMemory);
+ size_t old_committed = total_committed_code_space_.fetch_sub(code_size);
+ DCHECK_LE(code_size, old_committed);
+ USE(old_committed);
}
NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
@@ -1206,10 +1427,6 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
return candidate ? candidate->Lookup(pc) : nullptr;
}
-size_t WasmCodeManager::remaining_uncommitted_code_space() const {
- return remaining_uncommitted_code_space_.load();
-}
-
// TODO(v8:7424): Code protection scopes are not yet supported with shared code
// enabled and need to be revisited to work with --wasm-shared-code as well.
NativeModuleModificationScope::NativeModuleModificationScope(
@@ -1230,6 +1447,33 @@ NativeModuleModificationScope::~NativeModuleModificationScope() {
}
}
+namespace {
+thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
+} // namespace
+
+WasmCodeRefScope::WasmCodeRefScope()
+ : previous_scope_(current_code_refs_scope) {
+ current_code_refs_scope = this;
+}
+
+WasmCodeRefScope::~WasmCodeRefScope() {
+ DCHECK_EQ(this, current_code_refs_scope);
+ current_code_refs_scope = previous_scope_;
+ std::vector<WasmCode*> code_ptrs;
+ code_ptrs.reserve(code_ptrs_.size());
+ code_ptrs.assign(code_ptrs_.begin(), code_ptrs_.end());
+ WasmCode::DecrementRefCount(VectorOf(code_ptrs));
+}
+
+// static
+void WasmCodeRefScope::AddRef(WasmCode* code) {
+ WasmCodeRefScope* current_scope = current_code_refs_scope;
+ DCHECK_NOT_NULL(current_scope);
+ auto entry = current_scope->code_ptrs_.insert(code);
+ // If we added a new entry, increment the ref counter.
+ if (entry.second) code->IncRef();
+}
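The scopes form a per-thread stack via the thread_local pointer above: AddRef records a code object in the innermost scope, and the scope's destructor drops all of those references in one batch. A hedged usage sketch of the intended pattern (the calls shown are only illustrative, mirroring call sites such as LogWasmCodes):

// {
//   WasmCodeRefScope code_ref_scope;           // becomes the innermost scope
//   WasmCode* code = native_module->GetCode(func_index);
//   // GetCode() called WasmCodeRefScope::AddRef(code), so {code} stays alive
//   // at least until the end of this block.
//   code->LogCode(isolate);
// }  // ~WasmCodeRefScope() decrements every reference collected in the scope.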
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 272dab0b03..e689644430 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -5,13 +5,16 @@
#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_
-#include <functional>
+#include <atomic>
#include <list>
#include <map>
-#include <unordered_map>
+#include <memory>
#include <unordered_set>
+#include <utility>
+#include <vector>
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
@@ -19,6 +22,7 @@
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-tier.h"
namespace v8 {
namespace internal {
@@ -31,6 +35,7 @@ namespace wasm {
class NativeModule;
class WasmCodeManager;
+struct WasmCompilationResult;
class WasmEngine;
class WasmMemoryTracker;
class WasmImportWrapperCache;
@@ -74,7 +79,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
enum Kind {
kFunction,
kWasmToJsWrapper,
- kLazyStub,
kRuntimeStub,
kInterpreterEntry,
kJumpTable
@@ -91,11 +95,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
kRuntimeStubCount
};
- // kOther is used if we have WasmCode that is neither
- // liftoff- nor turbofan-compiled, i.e. if Kind is
- // not a kFunction.
- enum Tier : int8_t { kLiftoff, kTurbofan, kOther };
-
Vector<byte> instructions() const { return instructions_; }
Address instruction_start() const {
return reinterpret_cast<Address>(instructions_.start());
@@ -113,9 +112,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
Kind kind() const { return kind_; }
NativeModule* native_module() const { return native_module_; }
- Tier tier() const { return tier_; }
+ ExecutionTier tier() const { return tier_; }
Address constant_pool() const;
Address code_comments() const;
+ uint32_t code_comments_size() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
size_t handler_table_offset() const { return handler_table_offset_; }
@@ -123,7 +123,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
size_t unpadded_binary_size() const { return unpadded_binary_size_; }
uint32_t stack_slots() const { return stack_slots_; }
uint32_t tagged_parameter_slots() const { return tagged_parameter_slots_; }
- bool is_liftoff() const { return tier_ == kLiftoff; }
+ bool is_liftoff() const { return tier_ == ExecutionTier::kLiftoff; }
bool contains(Address pc) const {
return reinterpret_cast<Address>(instructions_.start()) <= pc &&
pc < reinterpret_cast<Address>(instructions_.end());
@@ -145,6 +145,31 @@ class V8_EXPORT_PRIVATE WasmCode final {
~WasmCode();
+ void IncRef() {
+ int old_val = ref_count_.fetch_add(1, std::memory_order_relaxed);
+ DCHECK_LE(1, old_val);
+ DCHECK_GT(kMaxInt, old_val);
+ USE(old_val);
+ }
+
+ // Decrement the ref count. Returns whether this code becomes dead and needs
+ // to be freed.
+ V8_WARN_UNUSED_RESULT bool DecRef() {
+ int old_count = ref_count_.load(std::memory_order_relaxed);
+ while (true) {
+ DCHECK_LE(1, old_count);
+ if (V8_UNLIKELY(old_count == 1)) return DecRefOnPotentiallyDeadCode();
+ if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
+ std::memory_order_relaxed)) {
+ return false;
+ }
+ }
+ }
+
+ // Decrement the ref count on a set of {WasmCode} objects, potentially
+ // belonging to different {NativeModule}s. Dead code will be deleted.
+ static void DecrementRefCount(Vector<WasmCode*>);
+
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
@@ -161,7 +186,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
- OwnedVector<const byte> source_position_table, Kind kind, Tier tier)
+ OwnedVector<const byte> source_position_table, Kind kind,
+ ExecutionTier tier)
: instructions_(instructions),
reloc_info_(std::move(reloc_info)),
source_position_table_(std::move(source_position_table)),
@@ -193,6 +219,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
// trap_handler_index.
void RegisterTrapHandlerData();
+ // Slow path for {DecRef}: The code becomes potentially dead.
+ // Returns whether this code becomes dead and needs to be freed.
+ bool DecRefOnPotentiallyDeadCode();
+
Vector<byte> instructions_;
OwnedVector<const byte> reloc_info_;
OwnedVector<const byte> source_position_table_;
@@ -213,7 +243,19 @@ class V8_EXPORT_PRIVATE WasmCode final {
size_t unpadded_binary_size_ = 0;
intptr_t trap_handler_index_ = -1;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
- Tier tier_;
+ ExecutionTier tier_;
+
+ // WasmCode is ref counted. Counters are held by:
+ // 1) The jump table.
+ // 2) Function tables.
+ // 3) {WasmCodeRefScope}s.
+ // 4) The set of potentially dead code in the {WasmEngine}.
+ // If a decrement of (1) or (2) would drop the ref count to 0, that code
+ // becomes a candidate for garbage collection. At that point, we add
+ // ref counts for (4) *before* decrementing the counter to ensure the code
+ // stays alive as long as it's being used. Once the ref count drops to zero,
+ // the code object is deleted and the memory for the machine code is freed.
+ std::atomic<int> ref_count_{1};
DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
@@ -231,14 +273,21 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
// code below, i.e. it can be called concurrently from background threads.
- // {AddCode} also makes the code available to the system by entering it into
- // the code table and patching the jump table.
- WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
- uint32_t tagged_parameter_slots,
- OwnedVector<trap_handler::ProtectedInstructionData>
- protected_instructions,
- OwnedVector<const byte> source_position_table,
- WasmCode::Kind kind, WasmCode::Tier tier);
+ // The returned code still needs to be published via {PublishCode}.
+ std::unique_ptr<WasmCode> AddCode(
+ uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
+ uint32_t tagged_parameter_slots,
+ OwnedVector<trap_handler::ProtectedInstructionData>
+ protected_instructions,
+ OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier);
+
+ // {PublishCode} makes the code available to the system by entering it into
+ // the code table and patching the jump table. It returns a raw pointer to the
+ // given {WasmCode} object.
+ WasmCode* PublishCode(std::unique_ptr<WasmCode>);
+ // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
+ WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
@@ -248,37 +297,30 @@ class V8_EXPORT_PRIVATE NativeModule final {
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
- OwnedVector<const byte> source_position_table, WasmCode::Tier tier);
+ OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier);
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
- // When starting lazy compilation, provide the WasmLazyCompile builtin by
- // calling SetLazyBuiltin. It will be copied into this NativeModule and the
- // jump table will be populated with that copy.
- void SetLazyBuiltin(Handle<Code> code);
+ // Use this to set up lazy compilation for the entire module ({UseLazyStubs})
+ // or for individual functions ({UseLazyStub}). It will use the existing
+ // {WasmCode::kWasmCompileLazy} runtime stub and populate the jump table with
+ // trampolines to that runtime stub.
+ void UseLazyStubs();
+ void UseLazyStub(uint32_t func_index);
// Initializes all runtime stubs by setting up entry addresses in the runtime
// stub table. It must be called exactly once per native module before adding
// other WasmCode so that runtime stub ids can be resolved during relocation.
void SetRuntimeStubs(Isolate* isolate);
- // Switch a function to an interpreter entry wrapper. When adding interpreter
- // wrappers, we do not insert them in the code_table, however, we let them
- // self-identify as the {index} function.
- void PublishInterpreterEntry(WasmCode* code, uint32_t index);
-
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
- WasmCode* code(uint32_t index) const {
- DCHECK_LT(index, num_functions());
- DCHECK_LE(module_->num_imported_functions, index);
- return code_table_[index - module_->num_imported_functions];
- }
-
- bool has_code(uint32_t index) const { return code(index) != nullptr; }
+ WasmCode* GetCode(uint32_t index) const;
+ bool HasCode(uint32_t index) const;
Address runtime_stub_entry(WasmCode::RuntimeStubId index) const {
DCHECK_LT(index, WasmCode::kRuntimeStubCount);
@@ -300,14 +342,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
return jump_table_->contains(address);
}
- // Transition this module from code relying on trap handlers (i.e. without
- // explicit memory bounds checks) to code that does not require trap handlers
- // (i.e. code with explicit bounds checks).
- // This method must only be called if {use_trap_handler()} is true (it will be
- // false afterwards). All code in this {NativeModule} needs to be re-added
- // after calling this method.
- void DisableTrapHandler();
-
// Returns the target to call for the given function (returns a jump table
// slot within {jump_table_}).
Address GetCallTargetForFunction(uint32_t func_index) const;
@@ -340,6 +374,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
+ void set_lazy_compilation(bool lazy) { lazy_compilation_ = lazy; }
+ bool lazy_compilation() const { return lazy_compilation_; }
Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; }
@@ -360,46 +396,46 @@ class V8_EXPORT_PRIVATE NativeModule final {
const char* GetRuntimeStubName(Address runtime_stub_entry) const;
+ // Sample the current code size of this module to the given counters.
+ enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
+ void SampleCodeSize(Counters*, CodeSamplingTime) const;
+
+ WasmCode* AddCompiledCode(WasmCompilationResult);
+ std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>);
+
+ // Free a set of functions of this module. Uncommits whole pages if possible.
+ // The given vector must be ordered by the instruction start address, and all
+ // {WasmCode} objects must not be used any more.
+ void FreeCode(Vector<WasmCode* const>);
+
private:
friend class WasmCode;
friend class WasmCodeManager;
friend class NativeModuleModificationScope;
+ // Private constructor, called via {WasmCodeManager::NewNativeModule()}.
NativeModule(WasmEngine* engine, const WasmFeatures& enabled_features,
bool can_request_more, VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
- std::shared_ptr<Counters> async_counters);
+ std::shared_ptr<Counters> async_counters,
+ std::shared_ptr<NativeModule>* shared_this);
- WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
- const char* name = nullptr);
+ std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
+ uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
+ uint32_t tagged_parameter_slots,
+ OwnedVector<trap_handler::ProtectedInstructionData>
+ protected_instructions,
+ OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier, Vector<uint8_t> code_space);
+
+ // Add and publish anonymous code.
+ WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind,
+ const char* name = nullptr);
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
Vector<byte> AllocateForCode(size_t size);
- // Primitive for adding code to the native module. All code added to a native
- // module is owned by that module. Various callers get to decide on how the
- // code is obtained (CodeDesc vs, as a point in time, Code), the kind,
- // whether it has an index or is anonymous, etc.
- WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
- uint32_t stack_slots, uint32_t tagged_parameter_slots,
- size_t safepoint_table_offset,
- size_t handler_table_offset,
- size_t constant_pool_offset,
- size_t code_comments_offset,
- size_t unpadded_binary_size,
- OwnedVector<trap_handler::ProtectedInstructionData>,
- OwnedVector<const byte> reloc_info,
- OwnedVector<const byte> source_position_table,
- WasmCode::Kind, WasmCode::Tier);
-
WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
- // Hold the {allocation_mutex_} when calling this method.
- void InstallCode(WasmCode* code);
-
- Vector<WasmCode*> code_table() const {
- return {code_table_.get(), module_->num_declared_functions};
- }
-
// Hold the {mutex_} when calling this method.
bool has_interpreter_redirection(uint32_t func_index) {
DCHECK_LT(func_index, num_functions());
@@ -417,7 +453,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
if (!interpreter_redirections_) {
interpreter_redirections_.reset(
new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
- kBitsPerByte]);
+ kBitsPerByte]{});
}
uint32_t bitset_idx = func_index - module_->num_imported_functions;
uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
@@ -484,11 +520,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmEngine* const engine_;
std::atomic<size_t> committed_code_space_{0};
+ std::atomic<size_t> generated_code_size_{0};
int modification_scope_depth_ = 0;
bool can_request_more_memory_;
UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
+ bool lazy_compilation_ = false;
DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
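A hedged sketch of the two-step add/publish flow declared above; {result} is assumed to be a {WasmCompilationResult}, and {kind}/{tier} the desired code kind and execution tier (the call shape mirrors the interpreter-entry use in wasm-debug.cc later in this diff):

  std::unique_ptr<wasm::WasmCode> code = native_module->AddCode(
      func_index, result.code_desc, result.frame_slot_count,
      result.tagged_parameter_slots, std::move(result.protected_instructions),
      std::move(result.source_positions), kind, tier);
  // Publishing enters the code into the code table and patches the jump table;
  // the returned raw pointer remains owned by the NativeModule.
  wasm::WasmCode* published = native_module->PublishCode(std::move(code));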
@@ -498,9 +536,18 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed);
+#ifdef DEBUG
+ ~WasmCodeManager() {
+ // No more committed code space.
+ DCHECK_EQ(0, total_committed_code_space_.load());
+ }
+#endif
+
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
- size_t remaining_uncommitted_code_space() const;
+ size_t committed_code_space() const {
+ return total_committed_code_space_.load();
+ }
void SetMaxCommittedMemoryForTesting(size_t limit);
@@ -511,7 +558,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
friend class NativeModule;
friend class WasmEngine;
- std::unique_ptr<NativeModule> NewNativeModule(
+ std::shared_ptr<NativeModule> NewNativeModule(
WasmEngine* engine, Isolate* isolate,
const WasmFeatures& enabled_features, size_t code_size_estimate,
bool can_request_more, std::shared_ptr<const WasmModule> module);
@@ -528,12 +575,16 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void AssignRanges(Address start, Address end, NativeModule*);
WasmMemoryTracker* const memory_tracker_;
- std::atomic<size_t> remaining_uncommitted_code_space_;
- // If the remaining uncommitted code space falls below
- // {critical_uncommitted_code_space_}, then we trigger a GC before creating
- // the next module. This value is initialized to 50% of the available code
- // space on creation and after each GC.
- std::atomic<size_t> critical_uncommitted_code_space_;
+
+ size_t max_committed_code_space_;
+
+ std::atomic<size_t> total_committed_code_space_;
+ // If the committed code space exceeds {critical_committed_code_space_}, then
+ // we trigger a GC before creating the next module. This value is set to the
+ // currently committed space plus 50% of the available code space on creation
+ // and updated after each GC.
+ std::atomic<size_t> critical_committed_code_space_;
+
mutable base::Mutex native_modules_mutex_;
//////////////////////////////////////////////////////////////////////////////
@@ -566,6 +617,50 @@ class NativeModuleModificationScope final {
NativeModule* native_module_;
};
+// {WasmCodeRefScope}s form a perfect stack. New {WasmCode} pointers generated
+// by e.g. creating new code or looking up code by its address are added to the
+// top-most {WasmCodeRefScope}.
+class V8_EXPORT_PRIVATE WasmCodeRefScope {
+ public:
+ WasmCodeRefScope();
+ ~WasmCodeRefScope();
+
+ // Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
+ // there is no current scope.
+ static void AddRef(WasmCode*);
+
+ private:
+ WasmCodeRefScope* const previous_scope_;
+ std::unordered_set<WasmCode*> code_ptrs_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmCodeRefScope);
+};
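A minimal usage sketch of the scope above, assuming a {WasmCodeManager*} named {code_manager} and a code address {pc} are in scope (per the comment, lookups register the returned pointer in the innermost scope):

  {
    wasm::WasmCodeRefScope code_ref_scope;
    wasm::WasmCode* code = code_manager->LookupCode(pc);
    if (code != nullptr) {
      Address target = code->instruction_start();
      // ... use {code} / {target} while the scope is alive ...
      USE(target);
    }
  }  // References taken by the scope are dropped here.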
+
+// Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
+// ref-counted pointer to a {WasmCode} object.
+class GlobalWasmCodeRef {
+ public:
+ explicit GlobalWasmCodeRef(WasmCode* code,
+ std::shared_ptr<NativeModule> native_module)
+ : code_(code), native_module_(std::move(native_module)) {
+ code_->IncRef();
+ }
+
+ ~GlobalWasmCodeRef() {
+ if (code_->DecRef()) code_->native_module()->FreeCode(VectorOf(&code_, 1));
+ }
+
+ // Get a pointer to the contained {WasmCode} object. This is only guaranteed
+ // to exist as long as this {GlobalWasmCodeRef} exists.
+ WasmCode* code() const { return code_; }
+
+ private:
+ WasmCode* const code_;
+ // Also keep the {NativeModule} alive.
+ const std::shared_ptr<NativeModule> native_module_;
+ DISALLOW_COPY_AND_ASSIGN(GlobalWasmCodeRef);
+};
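And a sketch of the global-handle-like counterpart, assuming {code} and its owning {native_module} (a std::shared_ptr&lt;NativeModule&gt;) are in scope:

  auto global_ref =
      base::make_unique<GlobalWasmCodeRef>(code, native_module);  // Takes a ref.
  Address entry = global_ref->code()->instruction_start();
  // Both the code object and its NativeModule stay alive until {global_ref}
  // is destroyed; the destructor drops the ref and frees the code if it died.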
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index ba42cef4f5..bd24471bc3 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -63,7 +63,7 @@ enum SectionCode : int8_t {
kTypeSectionCode = 1, // Function signature declarations
kImportSectionCode = 2, // Import declarations
kFunctionSectionCode = 3, // Function declarations
- kTableSectionCode = 4, // Indirect function table and other tables
+ kTableSectionCode = 4, // Indirect function table and others
kMemorySectionCode = 5, // Memory attributes
kGlobalSectionCode = 6, // Global declarations
kExportSectionCode = 7, // Exports
@@ -79,10 +79,11 @@ enum SectionCode : int8_t {
// to be consistent.
kNameSectionCode, // Name section (encoded as a string)
kSourceMappingURLSectionCode, // Source Map URL section
+ kCompilationHintsSectionCode, // Compilation hints section
// Helper values
kFirstSectionInModule = kTypeSectionCode,
- kLastKnownModuleSection = kSourceMappingURLSectionCode,
+ kLastKnownModuleSection = kCompilationHintsSectionCode,
kFirstUnorderedSection = kDataCountSectionCode,
};
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 8420e63eb6..9775e47d71 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -167,37 +167,20 @@ class InterpreterHandle {
// was not handled inside this activation. In the latter case, a pending
// exception will have been set on the isolate.
bool Execute(Handle<WasmInstanceObject> instance_object,
- Address frame_pointer, uint32_t func_index, Address arg_buffer) {
+ Address frame_pointer, uint32_t func_index,
+ Vector<WasmValue> argument_values,
+ Vector<WasmValue> return_values) {
DCHECK_GE(module()->functions.size(), func_index);
FunctionSig* sig = module()->functions[func_index].sig;
- DCHECK_GE(kMaxInt, sig->parameter_count());
- int num_params = static_cast<int>(sig->parameter_count());
- ScopedVector<WasmValue> wasm_args(num_params);
- Address arg_buf_ptr = arg_buffer;
- for (int i = 0; i < num_params; ++i) {
- uint32_t param_size = static_cast<uint32_t>(
- ValueTypes::ElementSizeInBytes(sig->GetParam(i)));
-#define CASE_ARG_TYPE(type, ctype) \
- case type: \
- DCHECK_EQ(param_size, sizeof(ctype)); \
- wasm_args[i] = WasmValue(ReadUnalignedValue<ctype>(arg_buf_ptr)); \
- break;
- switch (sig->GetParam(i)) {
- CASE_ARG_TYPE(kWasmI32, uint32_t)
- CASE_ARG_TYPE(kWasmI64, uint64_t)
- CASE_ARG_TYPE(kWasmF32, float)
- CASE_ARG_TYPE(kWasmF64, double)
-#undef CASE_ARG_TYPE
- default:
- UNREACHABLE();
- }
- arg_buf_ptr += param_size;
- }
+ DCHECK_EQ(sig->parameter_count(), argument_values.size());
+ DCHECK_EQ(sig->return_count(), return_values.size());
uint32_t activation_id = StartActivation(frame_pointer);
+ WasmCodeRefScope code_ref_scope;
WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
- thread->InitFrame(&module()->functions[func_index], wasm_args.start());
+ thread->InitFrame(&module()->functions[func_index],
+ argument_values.start());
bool finished = false;
while (!finished) {
// TODO(clemensh): Add occasional StackChecks.
@@ -237,27 +220,12 @@ class InterpreterHandle {
}
}
- // Copy back the return value
+ // Copy back the return value.
DCHECK_GE(kV8MaxWasmFunctionReturns, sig->return_count());
// TODO(wasm): Handle multi-value returns.
DCHECK_EQ(1, kV8MaxWasmFunctionReturns);
if (sig->return_count()) {
- WasmValue ret_val = thread->GetReturnValue(0);
-#define CASE_RET_TYPE(type, ctype) \
- case type: \
- DCHECK_EQ(ValueTypes::ElementSizeInBytes(sig->GetReturn(0)), \
- sizeof(ctype)); \
- WriteUnalignedValue<ctype>(arg_buffer, ret_val.to<ctype>()); \
- break;
- switch (sig->GetReturn(0)) {
- CASE_RET_TYPE(kWasmI32, uint32_t)
- CASE_RET_TYPE(kWasmI64, uint64_t)
- CASE_RET_TYPE(kWasmF32, float)
- CASE_RET_TYPE(kWasmF64, double)
-#undef CASE_RET_TYPE
- default:
- UNREACHABLE();
- }
+ return_values[0] = thread->GetReturnValue(0);
}
FinishActivation(frame_pointer, activation_id);
@@ -526,7 +494,7 @@ Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
DCHECK(!instance->has_debug_info());
Factory* factory = instance->GetIsolate()->factory();
Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(
- factory->NewStruct(WASM_DEBUG_INFO_TYPE, TENURED));
+ factory->NewStruct(WASM_DEBUG_INFO_TYPE, AllocationType::kOld));
debug_info->set_wasm_instance(*instance);
debug_info->set_interpreted_functions(*factory->empty_fixed_array());
instance->set_debug_info(*debug_info);
@@ -545,9 +513,7 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
auto interp_handle = Managed<wasm::InterpreterHandle>::Allocate(
isolate, interpreter_size, isolate, debug_info);
debug_info->set_interpreter_handle(*interp_handle);
- auto ret = interp_handle->raw()->interpreter();
- ret->SetCallIndirectTestMode();
- return ret;
+ return interp_handle->raw()->interpreter();
}
void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
@@ -580,12 +546,20 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
DCHECK_GT(module->functions.size(), func_index);
if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
- wasm::WasmCode* wasm_new_code = compiler::CompileWasmInterpreterEntry(
- isolate->wasm_engine(), native_module, func_index,
+ wasm::WasmCodeRefScope code_ref_scope;
+ wasm::WasmCompilationResult result = compiler::CompileWasmInterpreterEntry(
+ isolate->wasm_engine(), native_module->enabled_features(), func_index,
module->functions[func_index].sig);
- native_module->PublishInterpreterEntry(wasm_new_code, func_index);
- Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
- wasm_new_code->instruction_start(), TENURED);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
+ std::move(result.source_positions), wasm::WasmCode::kInterpreterEntry,
+ wasm::ExecutionTier::kInterpreter);
+ Address instruction_start = wasm_code->instruction_start();
+ native_module->PublishCode(std::move(wasm_code));
+
+ Handle<Foreign> foreign_holder =
+ isolate->factory()->NewForeign(instruction_start, AllocationType::kOld);
interpreted_functions->set(func_index, *foreign_holder);
}
}
@@ -598,12 +572,14 @@ void WasmDebugInfo::PrepareStep(StepAction step_action) {
bool WasmDebugInfo::RunInterpreter(Isolate* isolate,
Handle<WasmDebugInfo> debug_info,
Address frame_pointer, int func_index,
- Address arg_buffer) {
+ Vector<wasm::WasmValue> argument_values,
+ Vector<wasm::WasmValue> return_values) {
DCHECK_LE(0, func_index);
auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
return handle->Execute(instance, frame_pointer,
- static_cast<uint32_t>(func_index), arg_buffer);
+ static_cast<uint32_t>(func_index), argument_values,
+ return_values);
}
std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
@@ -644,7 +620,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
DCHECK_EQ(debug_info->has_c_wasm_entries(),
debug_info->has_c_wasm_entry_map());
if (!debug_info->has_c_wasm_entries()) {
- auto entries = isolate->factory()->NewFixedArray(4, TENURED);
+ auto entries = isolate->factory()->NewFixedArray(4, AllocationType::kOld);
debug_info->set_c_wasm_entries(*entries);
size_t map_size = 0; // size estimate not so important here.
auto managed_map = Managed<wasm::SignatureMap>::Allocate(isolate, map_size);
@@ -657,7 +633,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
index = static_cast<int32_t>(map->FindOrInsert(*sig));
if (index == entries->length()) {
entries = isolate->factory()->CopyFixedArrayAndGrow(
- entries, entries->length(), TENURED);
+ entries, entries->length(), AllocationType::kOld);
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index)->IsUndefined(isolate));
@@ -665,7 +641,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
Handle<WasmExportedFunctionData> function_data =
Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct(
- WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED));
+ WASM_EXPORTED_FUNCTION_DATA_TYPE, AllocationType::kOld));
function_data->set_wrapper_code(*new_entry_code);
function_data->set_instance(debug_info->wasm_instance());
function_data->set_jump_table_offset(-1);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 8a21252ddf..48bd96f254 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -23,10 +23,17 @@ namespace internal {
namespace wasm {
namespace {
+// A task to log a set of {WasmCode} objects in an isolate. It does not own
+// any data itself; since the task is owned by the platform, its lifetime is
+// not bound to the wasm engine.
class LogCodesTask : public Task {
public:
- LogCodesTask(base::Mutex* mutex, LogCodesTask** task_slot, Isolate* isolate)
- : mutex_(mutex), task_slot_(task_slot), isolate_(isolate) {
+ LogCodesTask(base::Mutex* mutex, LogCodesTask** task_slot, Isolate* isolate,
+ WasmEngine* engine)
+ : mutex_(mutex),
+ task_slot_(task_slot),
+ isolate_(isolate),
+ engine_(engine) {
DCHECK_NOT_NULL(task_slot);
DCHECK_NOT_NULL(isolate);
}
@@ -37,17 +44,10 @@ class LogCodesTask : public Task {
if (!cancelled()) DeregisterTask();
}
- // Hold the {mutex_} when calling this method.
- void AddCode(WasmCode* code) { code_to_log_.push_back(code); }
-
void Run() override {
if (cancelled()) return;
DeregisterTask();
- // If by now we should not log code any more, do not log it.
- if (!WasmCode::ShouldBeLogged(isolate_)) return;
- for (WasmCode* code : code_to_log_) {
- code->LogCode(isolate_);
- }
+ engine_->LogOutstandingCodesForIsolate(isolate_);
}
void Cancel() {
@@ -78,10 +78,41 @@ class LogCodesTask : public Task {
// cleared by this task before execution or on task destruction.
LogCodesTask** task_slot_;
Isolate* isolate_;
- std::vector<WasmCode*> code_to_log_;
+ WasmEngine* const engine_;
};
+
+class WasmGCForegroundTask : public Task {
+ public:
+ explicit WasmGCForegroundTask(Isolate* isolate) : isolate_(isolate) {
+ DCHECK_NOT_NULL(isolate);
+ }
+
+ void Run() final {
+ if (isolate_ == nullptr) return; // cancelled.
+ WasmEngine* engine = isolate_->wasm_engine();
+ // If the foreground task is executing, there is no wasm code active. Just
+ // report an empty set of live wasm code.
+ engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
+ }
+
+ void Cancel() { isolate_ = nullptr; }
+
+ private:
+ Isolate* isolate_;
+};
+
} // namespace
+struct WasmEngine::CurrentGCInfo {
+ // Set of isolates that did not scan their stack yet for used WasmCode, and
+ // their scheduled foreground task.
+ std::unordered_map<Isolate*, WasmGCForegroundTask*> outstanding_isolates;
+
+ // Set of dead code. Filled with all potentially dead code on initialization.
+ // Code that is still in-use is removed by the individual isolates.
+ std::unordered_set<WasmCode*> dead_code;
+};
+
struct WasmEngine::IsolateInfo {
explicit IsolateInfo(Isolate* isolate)
: log_codes(WasmCode::ShouldBeLogged(isolate)) {
@@ -90,6 +121,14 @@ struct WasmEngine::IsolateInfo {
foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
}
+#ifdef DEBUG
+ ~IsolateInfo() {
+ // Before destructing, the {WasmEngine} must have cleared outstanding code
+ // to log.
+ DCHECK_EQ(0, code_to_log.size());
+ }
+#endif
+
// All native modules that are being used by this Isolate (currently only
// grows, never shrinks).
std::set<NativeModule*> native_modules;
@@ -100,10 +139,23 @@ struct WasmEngine::IsolateInfo {
// The currently scheduled LogCodesTask.
LogCodesTask* log_codes_task = nullptr;
+ // The vector of code objects that still need to be logged in this isolate.
+ std::vector<WasmCode*> code_to_log;
+
// The foreground task runner of the isolate (can be called from background).
std::shared_ptr<v8::TaskRunner> foreground_task_runner;
};
+struct WasmEngine::NativeModuleInfo {
+ // Set of isolates using this NativeModule.
+ std::unordered_set<Isolate*> isolates;
+
+ // Set of potentially dead code. The ref-count of these code objects was
+ // incremented for each Isolate that might still execute the code, and is
+ // decremented on {RemoveIsolate} or on a GC.
+ std::unordered_set<WasmCode*> potentially_dead_code;
+};
+
WasmEngine::WasmEngine()
: code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {}
@@ -115,7 +167,7 @@ WasmEngine::~WasmEngine() {
// All Isolates have been deregistered.
DCHECK(isolates_.empty());
// All NativeModules did die.
- DCHECK(isolates_per_native_module_.empty());
+ DCHECK(native_modules_.empty());
}
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
@@ -135,12 +187,17 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
ModuleResult result =
DecodeWasmModule(kAsmjsWasmFeatures, bytes.start(), bytes.end(), false,
kAsmJsOrigin, isolate->counters(), allocator());
- CHECK(!result.failed());
+ if (result.failed()) {
+ // This happens once in a while when we have missed some limit check
+ // in the asm parser. Output an error message to aid diagnosis, then crash.
+ std::cout << result.error().message();
+ UNREACHABLE();
+ }
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToNativeModule}.
Handle<FixedArray> export_wrappers;
- std::unique_ptr<NativeModule> native_module =
+ std::shared_ptr<NativeModule> native_module =
CompileToNativeModule(isolate, kAsmjsWasmFeatures, thrower,
std::move(result).value(), bytes, &export_wrappers);
if (!native_module) return {};
@@ -188,7 +245,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToModuleObject}.
Handle<FixedArray> export_wrappers;
- std::unique_ptr<NativeModule> native_module =
+ std::shared_ptr<NativeModule> native_module =
CompileToNativeModule(isolate, enabled, thrower,
std::move(result).value(), bytes, &export_wrappers);
if (!native_module) return {};
@@ -250,7 +307,6 @@ void WasmEngine::AsyncInstantiate(
// We have to move the exception to the promise chain.
Handle<Object> exception(isolate->pending_exception(), isolate);
isolate->clear_pending_exception();
- DCHECK(*isolate->external_caught_exception_address());
*isolate->external_caught_exception_address() = false;
resolver->OnInstantiationFailed(exception);
thrower.Reset();
@@ -346,8 +402,8 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
base::MutexGuard lock(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
isolates_[isolate]->native_modules.insert(native_module);
- DCHECK_EQ(1, isolates_per_native_module_.count(native_module));
- isolates_per_native_module_[native_module].insert(isolate);
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ native_modules_[native_module]->isolates.insert(isolate);
}
return module_object;
}
@@ -438,14 +494,12 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
auto callback = [](v8::Isolate* v8_isolate, v8::GCType type,
v8::GCCallbackFlags flags, void* data) {
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ Counters* counters = isolate->counters();
WasmEngine* engine = isolate->wasm_engine();
base::MutexGuard lock(&engine->mutex_);
DCHECK_EQ(1, engine->isolates_.count(isolate));
- for (NativeModule* native_module :
- engine->isolates_[isolate]->native_modules) {
- int code_size =
- static_cast<int>(native_module->committed_code_space() / MB);
- isolate->counters()->wasm_module_code_size_mb()->AddSample(code_size);
+ for (auto* native_module : engine->isolates_[isolate]->native_modules) {
+ native_module->SampleCodeSize(counters, NativeModule::kSampling);
}
};
isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
@@ -456,29 +510,48 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
auto it = isolates_.find(isolate);
DCHECK_NE(isolates_.end(), it);
- for (NativeModule* native_module : it->second->native_modules) {
- DCHECK_EQ(1, isolates_per_native_module_[native_module].count(isolate));
- isolates_per_native_module_[native_module].erase(isolate);
- }
- if (auto* task = it->second->log_codes_task) task->Cancel();
+ std::unique_ptr<IsolateInfo> info = std::move(it->second);
isolates_.erase(it);
+ for (NativeModule* native_module : info->native_modules) {
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
+ auto* info = native_modules_[native_module].get();
+ info->isolates.erase(isolate);
+ if (current_gc_info_) {
+ auto it = current_gc_info_->outstanding_isolates.find(isolate);
+ if (it != current_gc_info_->outstanding_isolates.end()) {
+ if (auto* gc_task = it->second) gc_task->Cancel();
+ current_gc_info_->outstanding_isolates.erase(it);
+ }
+ for (WasmCode* code : info->potentially_dead_code) {
+ current_gc_info_->dead_code.erase(code);
+ }
+ }
+ }
+ if (auto* task = info->log_codes_task) task->Cancel();
+ if (!info->code_to_log.empty()) {
+ WasmCode::DecrementRefCount(VectorOf(info->code_to_log));
+ info->code_to_log.clear();
+ }
}
void WasmEngine::LogCode(WasmCode* code) {
base::MutexGuard guard(&mutex_);
NativeModule* native_module = code->native_module();
- DCHECK_EQ(1, isolates_per_native_module_.count(native_module));
- for (Isolate* isolate : isolates_per_native_module_[native_module]) {
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ for (Isolate* isolate : native_modules_[native_module]->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
IsolateInfo* info = isolates_[isolate].get();
if (info->log_codes == false) continue;
if (info->log_codes_task == nullptr) {
auto new_task = base::make_unique<LogCodesTask>(
- &mutex_, &info->log_codes_task, isolate);
+ &mutex_, &info->log_codes_task, isolate, this);
info->log_codes_task = new_task.get();
info->foreground_task_runner->PostTask(std::move(new_task));
+ isolate->stack_guard()->RequestLogWasmCode();
}
- info->log_codes_task->AddCode(code);
+ info->code_to_log.push_back(code);
+ code->IncRef();
}
}
@@ -489,15 +562,36 @@ void WasmEngine::EnableCodeLogging(Isolate* isolate) {
it->second->log_codes = true;
}
-std::unique_ptr<NativeModule> WasmEngine::NewNativeModule(
+void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
+ // If by now we should not log code any more, do not log it.
+ if (!WasmCode::ShouldBeLogged(isolate)) return;
+
+ // Under the mutex, get the vector of wasm code to log. Then log and decrement
+ // the ref count without holding the mutex.
+ std::vector<WasmCode*> code_to_log;
+ {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ code_to_log.swap(isolates_[isolate]->code_to_log);
+ }
+ if (code_to_log.empty()) return;
+ for (WasmCode* code : code_to_log) {
+ code->LogCode(isolate);
+ }
+ WasmCode::DecrementRefCount(VectorOf(code_to_log));
+}
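The function above uses a "swap under lock, process outside the lock" idiom; the same pattern in isolation (the names {pending_}, {Item}, and {Process} are illustrative only):

  std::vector<Item> to_process;
  {
    base::MutexGuard guard(&mutex_);  // Held only for the O(1) swap.
    to_process.swap(pending_);        // Leaves {pending_} empty.
  }
  for (Item& item : to_process) Process(item);  // Runs without the lock.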
+
+std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
bool can_request_more, std::shared_ptr<const WasmModule> module) {
- std::unique_ptr<NativeModule> native_module =
+ std::shared_ptr<NativeModule> native_module =
code_manager_.NewNativeModule(this, isolate, enabled, code_size_estimate,
can_request_more, std::move(module));
base::MutexGuard lock(&mutex_);
- isolates_per_native_module_[native_module.get()].insert(isolate);
- DCHECK_EQ(1, isolates_.count(isolate));
+ auto pair = native_modules_.insert(std::make_pair(
+ native_module.get(), base::make_unique<NativeModuleInfo>()));
+ DCHECK(pair.second); // inserted new entry.
+ pair.first->second.get()->isolates.insert(isolate);
isolates_[isolate]->native_modules.insert(native_module.get());
return native_module;
}
@@ -505,19 +599,137 @@ std::unique_ptr<NativeModule> WasmEngine::NewNativeModule(
void WasmEngine::FreeNativeModule(NativeModule* native_module) {
{
base::MutexGuard guard(&mutex_);
- auto it = isolates_per_native_module_.find(native_module);
- DCHECK_NE(isolates_per_native_module_.end(), it);
- for (Isolate* isolate : it->second) {
+ auto it = native_modules_.find(native_module);
+ DCHECK_NE(native_modules_.end(), it);
+ for (Isolate* isolate : it->second->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
- DCHECK_EQ(1, isolates_[isolate]->native_modules.count(native_module));
- isolates_[isolate]->native_modules.erase(native_module);
+ IsolateInfo* info = isolates_[isolate].get();
+ DCHECK_EQ(1, info->native_modules.count(native_module));
+ info->native_modules.erase(native_module);
+ // If there are {WasmCode} objects of the deleted {NativeModule}
+ // outstanding to be logged in this isolate, remove them. Decrementing the
+ // ref count is not needed, since the {NativeModule} dies anyway.
+ size_t remaining = info->code_to_log.size();
+ if (remaining > 0) {
+ for (size_t i = 0; i < remaining; ++i) {
+ while (i < remaining &&
+ info->code_to_log[i]->native_module() == native_module) {
+ // Move the last remaining item to this slot (this can be the same
+ // as {i}, which is OK).
+ info->code_to_log[i] = info->code_to_log[--remaining];
+ }
+ }
+ info->code_to_log.resize(remaining);
+ }
}
- isolates_per_native_module_.erase(it);
+ native_modules_.erase(it);
}
code_manager_.FreeNativeModule(native_module);
}
namespace {
+class SampleTopTierCodeSizeTask : public CancelableTask {
+ public:
+ SampleTopTierCodeSizeTask(Isolate* isolate,
+ std::weak_ptr<NativeModule> native_module)
+ : CancelableTask(isolate),
+ isolate_(isolate),
+ native_module_(std::move(native_module)) {}
+
+ void RunInternal() override {
+ if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
+ native_module->SampleCodeSize(isolate_->counters(),
+ NativeModule::kAfterTopTier);
+ }
+ }
+
+ private:
+ Isolate* const isolate_;
+ const std::weak_ptr<NativeModule> native_module_;
+};
+} // namespace
+
+void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
+ const std::shared_ptr<NativeModule>& native_module) {
+ base::MutexGuard lock(&mutex_);
+ DCHECK_EQ(1, native_modules_.count(native_module.get()));
+ for (Isolate* isolate : native_modules_[native_module.get()]->isolates) {
+ DCHECK_EQ(1, isolates_.count(isolate));
+ IsolateInfo* info = isolates_[isolate].get();
+ info->foreground_task_runner->PostTask(
+ base::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
+ }
+}
+
+void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
+ Vector<WasmCode*> live_code) {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_NOT_NULL(current_gc_info_);
+ auto outstanding_isolate_it =
+ current_gc_info_->outstanding_isolates.find(isolate);
+ DCHECK_NE(current_gc_info_->outstanding_isolates.end(),
+ outstanding_isolate_it);
+ auto* fg_task = outstanding_isolate_it->second;
+ if (fg_task) fg_task->Cancel();
+ current_gc_info_->outstanding_isolates.erase(outstanding_isolate_it);
+ for (WasmCode* code : live_code) current_gc_info_->dead_code.erase(code);
+
+ if (current_gc_info_->outstanding_isolates.empty()) {
+ std::unordered_map<NativeModule*, std::vector<WasmCode*>>
+ dead_code_per_native_module;
+ for (WasmCode* code : current_gc_info_->dead_code) {
+ dead_code_per_native_module[code->native_module()].push_back(code);
+ }
+ for (auto& entry : dead_code_per_native_module) {
+ entry.first->FreeCode(VectorOf(entry.second));
+ }
+ current_gc_info_.reset();
+ }
+}
+
+bool WasmEngine::AddPotentiallyDeadCode(WasmCode* code) {
+ base::MutexGuard guard(&mutex_);
+ auto it = native_modules_.find(code->native_module());
+ DCHECK_NE(native_modules_.end(), it);
+ auto added = it->second->potentially_dead_code.insert(code);
+ if (!added.second) return false; // An entry already existed.
+ new_potentially_dead_code_size_ += code->instructions().size();
+ // Trigger a GC if more than 1 MiB plus 10% of the committed code has become
+ // potentially dead since the last GC.
+ size_t dead_code_limit = 1 * MB + code_manager_.committed_code_space() / 10;
+ if (FLAG_wasm_code_gc && new_potentially_dead_code_size_ > dead_code_limit &&
+ !current_gc_info_) {
+ TriggerGC();
+ }
+ return true;
+}
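As a quick sanity check of the threshold above (numbers chosen only for illustration; {MB} is V8's megabyte constant used elsewhere in this diff):

  // With 120 MB of committed code space, the limit evaluates to
  //   1 * MB + (120 * MB) / 10 == 13 MB,
  // so, with --wasm-code-gc enabled, a GC is triggered once more than 13 MB
  // of code has become potentially dead since the last GC.
  size_t committed = size_t{120} * MB;
  size_t dead_code_limit = 1 * MB + committed / 10;  // 13 MB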
+
+void WasmEngine::TriggerGC() {
+ DCHECK_NULL(current_gc_info_);
+ DCHECK(FLAG_wasm_code_gc);
+ current_gc_info_.reset(new CurrentGCInfo());
+ // Add all potentially dead code to this GC, and trigger a GC task in each
+ // isolate.
+ // TODO(clemensh): Also trigger a stack check interrupt.
+ for (auto& entry : native_modules_) {
+ NativeModuleInfo* info = entry.second.get();
+ if (info->potentially_dead_code.empty()) continue;
+ for (auto* isolate : native_modules_[entry.first]->isolates) {
+ auto& gc_task = current_gc_info_->outstanding_isolates[isolate];
+ if (!gc_task) {
+ auto new_task = base::make_unique<WasmGCForegroundTask>(isolate);
+ gc_task = new_task.get();
+ DCHECK_EQ(1, isolates_.count(isolate));
+ isolates_[isolate]->foreground_task_runner->PostTask(
+ std::move(new_task));
+ }
+ }
+ for (WasmCode* code : info->potentially_dead_code) {
+ current_gc_info_->dead_code.insert(code);
+ }
+ }
+}
+
+namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
GetSharedWasmEngine)
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 01c353e3c9..c990005090 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -163,19 +163,38 @@ class V8_EXPORT_PRIVATE WasmEngine {
// {AddIsolate}.
void EnableCodeLogging(Isolate*);
+ // This is called from the foreground thread of the Isolate to log all
+ // outstanding code objects (added via {LogCode}).
+ void LogOutstandingCodesForIsolate(Isolate*);
+
// Create a new NativeModule. The caller is responsible for its
// lifetime. The native module will be given some memory for code,
// which will be page size aligned. The size of the initial memory
// is determined with a heuristic based on the total size of wasm
// code. The native module may later request more memory.
// TODO(titzer): isolate is only required here for CompilationState.
- std::unique_ptr<NativeModule> NewNativeModule(
+ std::shared_ptr<NativeModule> NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled_features,
size_t code_size_estimate, bool can_request_more,
std::shared_ptr<const WasmModule> module);
void FreeNativeModule(NativeModule*);
+ // Sample the code size of the given {NativeModule} in all isolates that have
+ // access to it. Call this after top-tier compilation has finished.
+ // This will spawn foreground tasks that do *not* keep the NativeModule alive.
+ void SampleTopTierCodeSizeInAllIsolates(const std::shared_ptr<NativeModule>&);
+
+ // Called by each Isolate to report its live code for a GC cycle.
+ void ReportLiveCodeForGC(Isolate*, Vector<WasmCode*> live_code);
+
+ // Add potentially dead code. The occurrence in the set of potentially dead
+ // code counts as a reference, and is decremented on the next GC.
+ // Returns {true} if the code was added to the set of potentially dead code,
+ // {false} if an entry already exists. The ref count is *unchanged* in any
+ // case.
+ V8_WARN_UNUSED_RESULT bool AddPotentiallyDeadCode(WasmCode*);
+
// Call on process start and exit.
static void InitializeOncePerProcess();
static void GlobalTearDown();
@@ -185,7 +204,9 @@ class V8_EXPORT_PRIVATE WasmEngine {
static std::shared_ptr<WasmEngine> GetWasmEngine();
private:
+ struct CurrentGCInfo;
struct IsolateInfo;
+ struct NativeModuleInfo;
AsyncCompileJob* CreateAsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
@@ -193,6 +214,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
Handle<Context> context,
std::shared_ptr<CompilationResultResolver> resolver);
+ void TriggerGC();
+
WasmMemoryTracker memory_tracker_;
WasmCodeManager code_manager_;
AccountingAllocator allocator_;
@@ -219,10 +242,17 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Set of isolates which use this WasmEngine.
std::unordered_map<Isolate*, std::unique_ptr<IsolateInfo>> isolates_;
- // Maps each NativeModule to the set of Isolates that have access to that
- // NativeModule. The isolate sets currently only grow, they never shrink.
- std::unordered_map<NativeModule*, std::unordered_set<Isolate*>>
- isolates_per_native_module_;
+ // Set of native modules managed by this engine.
+ std::unordered_map<NativeModule*, std::unique_ptr<NativeModuleInfo>>
+ native_modules_;
+
+ // Size of code that became dead since the last GC. If this exceeds a certain
+ // threshold, a new GC is triggered.
+ size_t new_potentially_dead_code_size_ = 0;
+
+ // If an engine-wide GC is currently running, this pointer stores information
+ // about that.
+ std::unique_ptr<CurrentGCInfo> current_gc_info_;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 0dcd3edf70..83f060cb9a 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -250,11 +250,36 @@ void float64_pow_wrapper(Address data) {
}
void memory_copy_wrapper(Address dst, Address src, uint32_t size) {
- MemMove(reinterpret_cast<void*>(dst), reinterpret_cast<void*>(src), size);
+ // Use explicit forward and backward copy to match the required semantics for
+ // the memory.copy instruction. It is assumed that the caller of this
+ // function has already performed bounds checks, so {src + size} and
+ // {dst + size} should not overflow.
+ DCHECK(src + size >= src && dst + size >= dst);
+ uint8_t* dst8 = reinterpret_cast<uint8_t*>(dst);
+ uint8_t* src8 = reinterpret_cast<uint8_t*>(src);
+ if (src < dst && src + size > dst && dst + size > src) {
+ dst8 += size - 1;
+ src8 += size - 1;
+ for (; size > 0; size--) {
+ *dst8-- = *src8--;
+ }
+ } else {
+ for (; size > 0; size--) {
+ *dst8++ = *src8++;
+ }
+ }
}
void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size) {
- memset(reinterpret_cast<void*>(dst), value, size);
+ // Use an explicit forward copy to match the required semantics for the
+ // memory.fill instruction. It is assumed that the caller of this function
+ // has already performed bounds checks, so {dst + size} should not overflow.
+ DCHECK(dst + size >= dst);
+ uint8_t* dst8 = reinterpret_cast<uint8_t*>(dst);
+ uint8_t value8 = static_cast<uint8_t>(value);
+ for (; size > 0; size--) {
+ *dst8++ = value8;
+ }
}
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 64a6653277..1db608bf99 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -13,69 +13,70 @@ namespace v8 {
namespace internal {
namespace wasm {
-void f32_trunc_wrapper(Address data);
+V8_EXPORT_PRIVATE void f32_trunc_wrapper(Address data);
-void f32_floor_wrapper(Address data);
+V8_EXPORT_PRIVATE void f32_floor_wrapper(Address data);
-void f32_ceil_wrapper(Address data);
+V8_EXPORT_PRIVATE void f32_ceil_wrapper(Address data);
-void f32_nearest_int_wrapper(Address data);
+V8_EXPORT_PRIVATE void f32_nearest_int_wrapper(Address data);
-void f64_trunc_wrapper(Address data);
+V8_EXPORT_PRIVATE void f64_trunc_wrapper(Address data);
-void f64_floor_wrapper(Address data);
+V8_EXPORT_PRIVATE void f64_floor_wrapper(Address data);
-void f64_ceil_wrapper(Address data);
+V8_EXPORT_PRIVATE void f64_ceil_wrapper(Address data);
-void f64_nearest_int_wrapper(Address data);
+V8_EXPORT_PRIVATE void f64_nearest_int_wrapper(Address data);
-void int64_to_float32_wrapper(Address data);
+V8_EXPORT_PRIVATE void int64_to_float32_wrapper(Address data);
-void uint64_to_float32_wrapper(Address data);
+V8_EXPORT_PRIVATE void uint64_to_float32_wrapper(Address data);
-void int64_to_float64_wrapper(Address data);
+V8_EXPORT_PRIVATE void int64_to_float64_wrapper(Address data);
-void uint64_to_float64_wrapper(Address data);
+V8_EXPORT_PRIVATE void uint64_to_float64_wrapper(Address data);
-int32_t float32_to_int64_wrapper(Address data);
+V8_EXPORT_PRIVATE int32_t float32_to_int64_wrapper(Address data);
-int32_t float32_to_uint64_wrapper(Address data);
+V8_EXPORT_PRIVATE int32_t float32_to_uint64_wrapper(Address data);
-int32_t float64_to_int64_wrapper(Address data);
+V8_EXPORT_PRIVATE int32_t float64_to_int64_wrapper(Address data);
-int32_t float64_to_uint64_wrapper(Address data);
+V8_EXPORT_PRIVATE int32_t float64_to_uint64_wrapper(Address data);
-int32_t int64_div_wrapper(Address data);
+V8_EXPORT_PRIVATE int32_t int64_div_wrapper(Address data);
-int32_t int64_mod_wrapper(Address data);
+V8_EXPORT_PRIVATE int32_t int64_mod_wrapper(Address data);
-int32_t uint64_div_wrapper(Address data);
+V8_EXPORT_PRIVATE int32_t uint64_div_wrapper(Address data);
-int32_t uint64_mod_wrapper(Address data);
+V8_EXPORT_PRIVATE int32_t uint64_mod_wrapper(Address data);
-uint32_t word32_ctz_wrapper(Address data);
+V8_EXPORT_PRIVATE uint32_t word32_ctz_wrapper(Address data);
-uint32_t word64_ctz_wrapper(Address data);
+V8_EXPORT_PRIVATE uint32_t word64_ctz_wrapper(Address data);
-uint32_t word32_popcnt_wrapper(Address data);
+V8_EXPORT_PRIVATE uint32_t word32_popcnt_wrapper(Address data);
-uint32_t word64_popcnt_wrapper(Address data);
+V8_EXPORT_PRIVATE uint32_t word64_popcnt_wrapper(Address data);
-uint32_t word32_rol_wrapper(Address data);
+V8_EXPORT_PRIVATE uint32_t word32_rol_wrapper(Address data);
-uint32_t word32_ror_wrapper(Address data);
+V8_EXPORT_PRIVATE uint32_t word32_ror_wrapper(Address data);
-void float64_pow_wrapper(Address data);
+V8_EXPORT_PRIVATE void float64_pow_wrapper(Address data);
void memory_copy_wrapper(Address dst, Address src, uint32_t size);
void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size);
-typedef void (*WasmTrapCallbackForTesting)();
+using WasmTrapCallbackForTesting = void (*)();
-void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback);
+V8_EXPORT_PRIVATE void set_trap_callback_for_testing(
+ WasmTrapCallbackForTesting callback);
-void call_trap_callback_for_testing();
+V8_EXPORT_PRIVATE void call_trap_callback_for_testing();
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index d32ac3a788..77d46fdc0d 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -6,25 +6,28 @@
#define V8_WASM_WASM_FEATURE_FLAGS_H_
// The SEPARATOR argument allows generating proper comma-separated lists.
-#define FOREACH_WASM_FEATURE_FLAG(V, SEPARATOR) \
- V(mv, "multi-value support", false) \
- SEPARATOR \
- V(eh, "exception handling opcodes", false) \
- SEPARATOR \
- V(se, "sign extension opcodes", true) \
- SEPARATOR \
- V(sat_f2i_conversions, "saturating float conversion opcodes", false) \
- SEPARATOR \
- V(threads, "thread opcodes", false) \
- SEPARATOR \
- V(simd, "SIMD opcodes", false) \
- SEPARATOR \
- V(anyref, "anyref opcodes", false) \
- SEPARATOR \
- V(bigint, "JS BigInt support", false) \
- SEPARATOR \
- V(bulk_memory, "bulk memory opcodes", false) \
- SEPARATOR \
- V(return_call, "return call opcodes", false)
-
+#define FOREACH_WASM_FEATURE_FLAG(V, SEPARATOR) \
+ V(mv, "multi-value support", false) \
+ SEPARATOR \
+ V(eh, "exception handling opcodes", false) \
+ SEPARATOR \
+ V(se, "sign extension opcodes", true) \
+ SEPARATOR \
+ V(sat_f2i_conversions, "saturating float conversion opcodes", true) \
+ SEPARATOR \
+ V(threads, "thread opcodes", false) \
+ SEPARATOR \
+ V(simd, "SIMD opcodes", false) \
+ SEPARATOR \
+ V(anyref, "anyref opcodes", false) \
+ SEPARATOR \
+ V(bigint, "JS BigInt support", false) \
+ SEPARATOR \
+ V(bulk_memory, "bulk memory opcodes", true) \
+ SEPARATOR \
+ V(return_call, "return call opcodes", false) \
+ SEPARATOR \
+ V(type_reflection, "wasm type reflection in JS", false) \
+ SEPARATOR \
+ V(compilation_hints, "compilation hints section", false)
#endif // V8_WASM_WASM_FEATURE_FLAGS_H_
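One way such an X-macro list with a SEPARATOR argument is typically consumed, shown here with illustrative names (FeatureIndexSketch and kIdx_ are not V8's; they only demonstrate a comma-separated expansion):

  #define DECLARE_INDEX(name, desc, default_value) kIdx_##name
  #define COMMA ,
  enum class FeatureIndexSketch {
    FOREACH_WASM_FEATURE_FLAG(DECLARE_INDEX, COMMA)  // kIdx_mv, kIdx_eh, ...
  };
  #undef DECLARE_INDEX
  #undef COMMA

For statement-like expansions (e.g. declaring one bool field per feature), the separator can simply be a macro that expands to nothing.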
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache-inl.h b/deps/v8/src/wasm/wasm-import-wrapper-cache-inl.h
deleted file mode 100644
index 290df24898..0000000000
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache-inl.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_WASM_IMPORT_WRAPPER_CACHE_INL_H_
-#define V8_WASM_WASM_IMPORT_WRAPPER_CACHE_INL_H_
-
-#include "src/compiler/wasm-compiler.h"
-#include "src/counters.h"
-#include "src/wasm/value-type.h"
-#include "src/wasm/wasm-code-manager.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// Implements a cache for import wrappers.
-class WasmImportWrapperCache {
- public:
- WasmCode* GetOrCompile(WasmEngine* wasm_engine, Counters* counters,
- compiler::WasmImportCallKind kind, FunctionSig* sig) {
- base::MutexGuard lock(&mutex_);
- CacheKey key(static_cast<uint8_t>(kind), *sig);
- WasmCode*& cached = entry_map_[key];
- if (cached == nullptr) {
- // TODO(wasm): no need to hold the lock while compiling an import wrapper.
- bool source_positions = native_module_->module()->origin == kAsmJsOrigin;
- cached = compiler::CompileWasmImportCallWrapper(
- wasm_engine, native_module_, kind, sig, source_positions);
- counters->wasm_generated_code_size()->Increment(
- cached->instructions().length());
- counters->wasm_reloc_size()->Increment(cached->reloc_info().length());
- }
- return cached;
- }
-
- private:
- friend class NativeModule;
- mutable base::Mutex mutex_;
- NativeModule* native_module_;
- using CacheKey = std::pair<uint8_t, FunctionSig>;
- std::unordered_map<CacheKey, WasmCode*, base::hash<CacheKey>> entry_map_;
-
- explicit WasmImportWrapperCache(NativeModule* native_module)
- : native_module_(native_module) {}
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_WASM_IMPORT_WRAPPER_CACHE_INL_H_
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
new file mode 100644
index 0000000000..caa9eb7904
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -0,0 +1,46 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-import-wrapper-cache.h"
+
+#include <vector>
+
+#include "src/counters.h"
+#include "src/wasm/wasm-code-manager.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+WasmImportWrapperCache::~WasmImportWrapperCache() {
+ std::vector<WasmCode*> ptrs;
+ ptrs.reserve(entry_map_.size());
+ for (auto& e : entry_map_) ptrs.push_back(e.second);
+ WasmCode::DecrementRefCount(VectorOf(ptrs));
+}
+
+WasmCode* WasmImportWrapperCache::GetOrCompile(
+ WasmEngine* wasm_engine, Counters* counters,
+ compiler::WasmImportCallKind kind, FunctionSig* sig) {
+ base::MutexGuard lock(&mutex_);
+ CacheKey key(static_cast<uint8_t>(kind), *sig);
+ WasmCode*& cached = entry_map_[key];
+ if (cached == nullptr) {
+ // TODO(wasm): no need to hold the lock while compiling an import wrapper.
+ bool source_positions = native_module_->module()->origin == kAsmJsOrigin;
+ // Keep the {WasmCode} alive until we explicitly call {IncRef}.
+ WasmCodeRefScope code_ref_scope;
+ cached = compiler::CompileWasmImportCallWrapper(
+ wasm_engine, native_module_, kind, sig, source_positions);
+ cached->IncRef();
+ counters->wasm_generated_code_size()->Increment(
+ cached->instructions().length());
+ counters->wasm_reloc_size()->Increment(cached->reloc_info().length());
+ }
+ return cached;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
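A hedged usage sketch of the cache; the accessor {import_wrapper_cache()} on {NativeModule} is an assumption here (the friend declaration in the header below suggests the cache is owned there), and {kind}/{sig} describe the import:

  wasm::WasmCode* wrapper = native_module->import_wrapper_cache()->GetOrCompile(
      isolate->wasm_engine(), isolate->counters(), kind, sig);
  // A second call with the same (kind, *sig) key returns the cached wrapper
  // without recompiling; each cached entry holds one ref until cache teardown.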
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
new file mode 100644
index 0000000000..91fe1c7b23
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_IMPORT_WRAPPER_CACHE_H_
+#define V8_WASM_WASM_IMPORT_WRAPPER_CACHE_H_
+
+#include "src/base/platform/mutex.h"
+#include "src/compiler/wasm-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+class Counters;
+
+namespace wasm {
+
+class WasmCode;
+class WasmEngine;
+
+using FunctionSig = Signature<ValueType>;
+
+// Implements a cache for import wrappers.
+class WasmImportWrapperCache {
+ public:
+ ~WasmImportWrapperCache();
+
+ V8_EXPORT_PRIVATE WasmCode* GetOrCompile(WasmEngine* wasm_engine,
+ Counters* counters,
+ compiler::WasmImportCallKind kind,
+ FunctionSig* sig);
+
+ private:
+ friend class NativeModule;
+ using CacheKey = std::pair<uint8_t, FunctionSig>;
+
+ mutable base::Mutex mutex_;
+ NativeModule* native_module_;
+ std::unordered_map<CacheKey, WasmCode*, base::hash<CacheKey>> entry_map_;
+
+ explicit WasmImportWrapperCache(NativeModule* native_module)
+ : native_module_(native_module) {}
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_IMPORT_WRAPPER_CACHE_H_
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index aca754095a..9118719def 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -803,6 +803,9 @@ class SideTable : public ZoneObject {
DCHECK_GE(control_stack.size() - 1, exception_stack.back());
const Control* c = &control_stack[exception_stack.back()];
if (!unreachable) c->else_label->Ref(i.pc(), exceptional_stack_height);
+ if (exceptional_stack_height + kCatchInArity > max_stack_height_) {
+ max_stack_height_ = exceptional_stack_height + kCatchInArity;
+ }
TRACE("handler @%u: %s -> try @%u\n", i.pc_offset(), OpcodeName(opcode),
static_cast<uint32_t>(c->pc - code->start));
}
@@ -893,6 +896,19 @@ class SideTable : public ZoneObject {
stack_height = c->end_label->target_stack_height + kCatchInArity;
break;
}
+ case kExprBrOnExn: {
+ BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ uint32_t depth = imm.depth.depth; // Extracted for convenience.
+ imm.index.exception = &module->exceptions[imm.index.index];
+ DCHECK_EQ(0, imm.index.exception->sig->return_count());
+ size_t params = imm.index.exception->sig->parameter_count();
+ // Taken branches pop the exception and push the encoded values.
+ uint32_t height = stack_height - 1 + static_cast<uint32_t>(params);
+ TRACE("control @%u: BrOnExn[depth=%u]\n", i.pc_offset(), depth);
+ Control* c = &control_stack[control_stack.size() - depth - 1];
+ if (!unreachable) c->end_label->Ref(i.pc(), height);
+ break;
+ }
case kExprEnd: {
Control* c = &control_stack.back();
TRACE("control @%u: End\n", i.pc_offset());
@@ -966,9 +982,6 @@ class CodeMap {
Zone* zone_;
const WasmModule* module_;
ZoneVector<InterpreterCode> interpreter_code_;
- // TODO(wasm): Remove this testing wart. It is needed because interpreter
- // entry stubs are not generated in testing the interpreter in cctests.
- bool call_indirect_through_module_ = false;
public:
CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
@@ -986,12 +999,6 @@ class CodeMap {
}
}
- bool call_indirect_through_module() { return call_indirect_through_module_; }
-
- void set_call_indirect_through_module(bool val) {
- call_indirect_through_module_ = val;
- }
-
const WasmModule* module() const { return module_; }
InterpreterCode* GetCode(const WasmFunction* function) {
@@ -1005,36 +1012,6 @@ class CodeMap {
return Preprocess(&interpreter_code_[function_index]);
}
- InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
- uint32_t saved_index;
- USE(saved_index);
- if (table_index >= module_->tables.size()) return nullptr;
- // Mask table index for SSCA mitigation.
- saved_index = table_index;
- table_index &= static_cast<int32_t>((table_index - module_->tables.size()) &
- ~static_cast<int32_t>(table_index)) >>
- 31;
- DCHECK_EQ(table_index, saved_index);
- const WasmTable* table = &module_->tables[table_index];
- if (entry_index >= table->values.size()) return nullptr;
- // Mask entry_index for SSCA mitigation.
- saved_index = entry_index;
- entry_index &= static_cast<int32_t>((entry_index - table->values.size()) &
- ~static_cast<int32_t>(entry_index)) >>
- 31;
- DCHECK_EQ(entry_index, saved_index);
- uint32_t index = table->values[entry_index];
- if (index >= interpreter_code_.size()) return nullptr;
- // Mask index for SSCA mitigation.
- saved_index = index;
- index &= static_cast<int32_t>((index - interpreter_code_.size()) &
- ~static_cast<int32_t>(index)) >>
- 31;
- DCHECK_EQ(index, saved_index);
-
- return GetCode(index);
- }
-
InterpreterCode* Preprocess(InterpreterCode* code) {
DCHECK_EQ(code->function->imported, code->start == nullptr);
if (!code->side_table && code->start) {
@@ -1147,9 +1124,12 @@ class ThreadImpl {
public:
ThreadImpl(Zone* zone, CodeMap* codemap,
- Handle<WasmInstanceObject> instance_object)
+ Handle<WasmInstanceObject> instance_object,
+ Handle<Cell> reference_stack_cell)
: codemap_(codemap),
+ isolate_(instance_object->GetIsolate()),
instance_object_(instance_object),
+ reference_stack_cell_(reference_stack_cell),
frames_(zone),
activations_(zone) {}
@@ -1214,12 +1194,12 @@ class ThreadImpl {
WasmValue GetStackValue(sp_t index) {
DCHECK_GT(StackHeight(), index);
- return stack_[index];
+ return stack_[index].ExtractValue(this, index);
}
void SetStackValue(sp_t index, WasmValue value) {
DCHECK_GT(StackHeight(), index);
- stack_[index] = value;
+ stack_[index] = StackValue(value, this, index);
}
TrapReason GetTrapReason() { return trap_reason_; }
@@ -1234,6 +1214,8 @@ class ThreadImpl {
void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
+ Handle<Cell> reference_stack_cell() const { return reference_stack_cell_; }
+
uint32_t NumActivations() {
return static_cast<uint32_t>(activations_.size());
}
@@ -1292,10 +1274,7 @@ class ThreadImpl {
InterpreterCode* code = frame.code;
if (code->side_table->HasEntryAt(frame.pc)) {
TRACE("----- HANDLE -----\n");
- // TODO(mstarzinger): Push a reference to the pending exception instead
- // of a bogus {int32_t(0)} value here once the interpreter supports it.
- USE(isolate->pending_exception());
- Push(WasmValue(int32_t{0}));
+ Push(WasmValue(handle(isolate->pending_exception(), isolate)));
isolate->clear_pending_exception();
frame.pc += JumpToHandlerDelta(code, frame.pc);
TRACE(" => handler #%zu (#%u @%zu)\n", frames_.size() - 1,
@@ -1326,13 +1305,53 @@ class ThreadImpl {
sp_t llimit() { return plimit() + code->locals.type_list.size(); }
};
+ // Safety wrapper for values on the operand stack represented as {WasmValue}.
+ // Most values are stored directly on the stack, only reference values are
+ // kept in a separate on-heap reference stack to make the GC trace them.
+ // TODO(mstarzinger): Optimize simple stack operations (like "get_local",
+ // "set_local", and "tee_local") so that they don't require a handle scope.
+ // TODO(mstarzinger): Ensure unused slots on the reference stack are cleared
+ // so that they don't keep alive old/stale references unnecessarily long.
+ // TODO(mstarzinger): Consider optimizing activations that use no reference
+ // values to avoid allocating the reference stack entirely.
+ class StackValue {
+ public:
+ StackValue() = default; // Only needed for resizing the stack.
+ StackValue(WasmValue v, ThreadImpl* thread, sp_t index) : value_(v) {
+ if (IsReferenceValue()) {
+ value_ = WasmValue(Handle<Object>::null());
+ int ref_index = static_cast<int>(index);
+ thread->reference_stack()->set(ref_index, *v.to_anyref());
+ }
+ }
+
+ WasmValue ExtractValue(ThreadImpl* thread, sp_t index) {
+ if (!IsReferenceValue()) return value_;
+ DCHECK(value_.to_anyref().is_null());
+ int ref_index = static_cast<int>(index);
+ Isolate* isolate = thread->isolate_;
+ Handle<Object> ref(thread->reference_stack()->get(ref_index), isolate);
+ return WasmValue(ref);
+ }
+
+ bool IsReferenceValue() const { return value_.type() == kWasmAnyRef; }
+
+ private:
+ WasmValue value_;
+ };
+
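
A minimal standalone sketch of the split-stack idea behind {StackValue}: plain values stay in an ordinary slot array, while reference values are parked in a parallel array at the same index so a garbage collector can trace them. Standard-library stand-ins (std::variant for WasmValue, std::shared_ptr for a traced reference) replace the V8 types; all names below are illustrative only, not part of these sources.

#include <cassert>
#include <cstdint>
#include <memory>
#include <variant>
#include <vector>

// Stand-in for a reference the garbage collector must be able to trace.
using Ref = std::shared_ptr<int>;
// Stand-in for WasmValue: either a plain 64-bit scalar or a reference.
using Value = std::variant<uint64_t, Ref>;

class SplitStack {
 public:
  void Push(const Value& v) {
    refs_.emplace_back();                     // keep both stacks the same height
    if (const Ref* r = std::get_if<Ref>(&v)) {
      values_.push_back(Value(uint64_t{0}));  // placeholder slot (like a null handle)
      refs_.back() = *r;                      // the real reference is GC-visible here
    } else {
      values_.push_back(v);
    }
  }

  Value Pop() {
    Value v = values_.back();
    if (refs_.back()) v = refs_.back();       // re-materialize the reference
    values_.pop_back();
    refs_.pop_back();
    return v;
  }

 private:
  std::vector<Value> values_;  // corresponds to stack_ (plain StackValue slots)
  std::vector<Ref> refs_;      // corresponds to the on-heap reference stack
};

int main() {
  SplitStack stack;
  stack.Push(Value(uint64_t{42}));
  stack.Push(Value(std::make_shared<int>(7)));
  assert(*std::get<Ref>(stack.Pop()) == 7);
  assert(std::get<uint64_t>(stack.Pop()) == 42);
  return 0;
}
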
friend class InterpretedFrameImpl;
CodeMap* codemap_;
+ Isolate* isolate_;
Handle<WasmInstanceObject> instance_object_;
- std::unique_ptr<WasmValue[]> stack_;
- WasmValue* stack_limit_ = nullptr; // End of allocated stack space.
- WasmValue* sp_ = nullptr; // Current stack pointer.
+ std::unique_ptr<StackValue[]> stack_;
+ StackValue* stack_limit_ = nullptr; // End of allocated stack space.
+ StackValue* sp_ = nullptr; // Current stack pointer.
+ // The reference stack is pointed to by a {Cell} to be able to replace the
+ // underlying {FixedArray} when growing the stack. This avoids having to
+ // recreate or update the global handle keeping this object alive.
+ Handle<Cell> reference_stack_cell_; // References are on an on-heap stack.
ZoneVector<Frame> frames_;
WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
pc_t break_pc_ = kInvalidPc;
@@ -1346,6 +1365,9 @@ class ThreadImpl {
CodeMap* codemap() const { return codemap_; }
const WasmModule* module() const { return codemap_->module(); }
+ FixedArray reference_stack() const {
+ return FixedArray::cast(reference_stack_cell_->value());
+ }
void DoTrap(TrapReason trap, pc_t pc) {
TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
@@ -1388,6 +1410,12 @@ class ThreadImpl {
break;
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef: {
+ val = WasmValue(isolate_->factory()->null_value());
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -1445,7 +1473,8 @@ class ThreadImpl {
return pc + 1 + imm.length;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ CallIndirectImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
+ decoder, code->at(pc));
return pc + 1 + imm.length;
}
default:
@@ -1456,7 +1485,7 @@ class ThreadImpl {
bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
size_t arity) {
DCHECK_GT(frames_.size(), 0);
- WasmValue* sp_dest = stack_.get() + frames_.back().sp;
+ StackValue* sp_dest = stack_.get() + frames_.back().sp;
frames_.pop_back();
if (frames_.size() == current_activation().fp) {
// A return from the last frame terminates the execution.
@@ -1504,7 +1533,7 @@ class ThreadImpl {
Frame* top = &frames_.back();
// Drop everything except current parameters.
- WasmValue* sp_dest = stack_.get() + top->sp;
+ StackValue* sp_dest = stack_.get() + top->sp;
size_t arity = target->function->sig->parameter_count();
DoStackTransfer(sp_dest, arity);
@@ -1528,7 +1557,7 @@ class ThreadImpl {
// Copies {arity} values on the top of the stack down the stack to {dest},
// dropping the values in-between.
- void DoStackTransfer(WasmValue* dest, size_t arity) {
+ void DoStackTransfer(StackValue* dest, size_t arity) {
// before: |---------------| pop_count | arity |
// ^ 0 ^ dest ^ sp_
//
@@ -1536,10 +1565,26 @@ class ThreadImpl {
// ^ 0 ^ sp_
DCHECK_LE(dest, sp_);
DCHECK_LE(dest + arity, sp_);
- if (arity) memmove(dest, sp_ - arity, arity * sizeof(*sp_));
+ if (arity && (dest != sp_ - arity)) {
+ memmove(dest, sp_ - arity, arity * sizeof(*sp_));
+ // Also move elements on the reference stack accordingly.
+ // TODO(mstarzinger): Refactor the interface so that we don't have to
+ // recompute values here which are already known at the call-site.
+ int dst = static_cast<int>(StackHeight() - (sp_ - dest));
+ int src = static_cast<int>(StackHeight() - arity);
+ int len = static_cast<int>(arity);
+ isolate_->heap()->MoveElements(reference_stack(), dst, src, len);
+ }
sp_ = dest + arity;
}
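
The stack-transfer diagram above can be made concrete with a small standalone model: the top {arity} slots are moved down over the dropped region, and the same index range is moved on the parallel reference array, mirroring the MoveElements call in the diff. Vectors stand in for the interpreter's stacks; the names are illustrative.

#include <cassert>
#include <cstring>
#include <vector>

// Toy model of DoStackTransfer: move the top {arity} slots down to index
// {dest}, dropping everything in between, and mirror the move on a parallel
// array (the reference stack in the diff).
void StackTransfer(std::vector<int>* stack, std::vector<char>* refs,
                   size_t dest, size_t arity) {
  size_t height = stack->size();
  assert(dest + arity <= height);
  size_t src = height - arity;  // index of the first of the top {arity} slots
  if (arity != 0 && dest != src) {
    std::memmove(&(*stack)[dest], &(*stack)[src], arity * sizeof((*stack)[0]));
    std::memmove(&(*refs)[dest], &(*refs)[src], arity * sizeof((*refs)[0]));
  }
  stack->resize(dest + arity);  // sp_ = dest + arity
  refs->resize(dest + arity);
}

int main() {
  // before: | a  b  c | d  e |   (dest = 1, arity = 2)   after: | a  d  e |
  std::vector<int> stack = {1, 2, 3, 4, 5};
  std::vector<char> refs = {0, 0, 0, 1, 0};
  StackTransfer(&stack, &refs, 1, 2);
  assert((stack == std::vector<int>{1, 4, 5}));
  assert((refs == std::vector<char>{0, 1, 0}));
  return 0;
}
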
+ inline Address EffectiveAddress(uint32_t index) {
+ // Compute the effective address of the access, making sure to condition
+ // the index even in the in-bounds case.
+ return reinterpret_cast<Address>(instance_object_->memory_start()) +
+ (index & instance_object_->memory_mask());
+ }
+
template <typename mtype>
inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
uint32_t effective_index = offset + index;
@@ -1550,10 +1595,15 @@ class ThreadImpl {
instance_object_->memory_size())) {
return kNullAddress; // oob
}
- // Compute the effective address of the access, making sure to condition
- // the index even in the in-bounds case.
- return reinterpret_cast<Address>(instance_object_->memory_start()) +
- (effective_index & instance_object_->memory_mask());
+ return EffectiveAddress(effective_index);
+ }
+
+ inline bool BoundsCheckMemRange(uint32_t index, uint32_t* size,
+ Address* out_address) {
+ bool ok = ClampToBounds(
+ index, size, static_cast<uint32_t>(instance_object_->memory_size()));
+ *out_address = EffectiveAddress(index);
+ return ok;
}
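
BoundsCheckMemRange builds on ClampToBounds, which is not shown in this hunk. The following is a plausible sketch of its contract as inferred from the call sites here: clamp the size to what still fits into the memory and report whether the access was fully in bounds to begin with.

#include <cassert>
#include <cstdint>

// Assumed contract: shrink *size so that [index, index + *size) fits into a
// region of {max} bytes, and return whether nothing had to be cut off.
bool ClampToBounds(uint32_t index, uint32_t* size, uint32_t max) {
  if (index > max) {
    *size = 0;
    return false;
  }
  uint32_t avail = max - index;
  if (*size <= avail) return true;
  *size = avail;
  return false;
}

int main() {
  uint32_t size = 16;
  assert(ClampToBounds(0, &size, 64) && size == 16);    // fully in bounds
  size = 16;
  assert(!ClampToBounds(60, &size, 64) && size == 4);   // clamp: 4 bytes remain
  size = 16;
  assert(!ClampToBounds(100, &size, 64) && size == 0);  // nothing accessible
  return 0;
}
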
template <typename ctype, typename mtype>
@@ -1609,6 +1659,24 @@ class ThreadImpl {
return true;
}
+ bool CheckDataSegmentIsPassiveAndNotDropped(uint32_t index, pc_t pc) {
+ DCHECK_LT(index, module()->num_declared_data_segments);
+ if (instance_object_->dropped_data_segments()[index]) {
+ DoTrap(kTrapDataSegmentDropped, pc);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckElemSegmentIsPassiveAndNotDropped(uint32_t index, pc_t pc) {
+ DCHECK_LT(index, module()->elem_segments.size());
+ if (instance_object_->dropped_elem_segments()[index]) {
+ DoTrap(kTrapElemSegmentDropped, pc);
+ return false;
+ }
+ return true;
+ }
+
template <typename type, typename op_type>
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
Address& address, pc_t pc, int& len,
@@ -1654,6 +1722,113 @@ class ThreadImpl {
case kExprI64UConvertSatF64:
Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
return true;
+ case kExprMemoryInit: {
+ MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
+ len += imm.length;
+ if (!CheckDataSegmentIsPassiveAndNotDropped(imm.data_segment_index,
+ pc)) {
+ return false;
+ }
+ auto size = Pop().to<uint32_t>();
+ auto src = Pop().to<uint32_t>();
+ auto dst = Pop().to<uint32_t>();
+ Address dst_addr;
+ bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
+ auto src_max =
+ instance_object_->data_segment_sizes()[imm.data_segment_index];
+ // Use & instead of && so the clamp is not short-circuited.
+ ok &= ClampToBounds(src, &size, src_max);
+ Address src_addr =
+ instance_object_->data_segment_starts()[imm.data_segment_index] +
+ src;
+ memory_copy_wrapper(dst_addr, src_addr, size);
+ if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
+ return ok;
+ }
+ case kExprDataDrop: {
+ DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ len += imm.length;
+ if (!CheckDataSegmentIsPassiveAndNotDropped(imm.index, pc)) {
+ return false;
+ }
+ instance_object_->dropped_data_segments()[imm.index] = 1;
+ return true;
+ }
+ case kExprMemoryCopy: {
+ MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ auto size = Pop().to<uint32_t>();
+ auto src = Pop().to<uint32_t>();
+ auto dst = Pop().to<uint32_t>();
+ Address dst_addr;
+ bool copy_backward = src < dst && dst - src < size;
+ bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
+ // Trap without copying any bytes if we are copying backward and the
+ // copy is partially out-of-bounds. We only need to check the dst
+ // region, because we know that {src < dst}, so whenever the src region
+ // is out-of-bounds the dst region is out-of-bounds as well.
+ if (ok || !copy_backward) {
+ Address src_addr;
+ // Use & instead of && so the bounds check is not short-circuited.
+ ok &= BoundsCheckMemRange(src, &size, &src_addr);
+ memory_copy_wrapper(dst_addr, src_addr, size);
+ }
+ if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
+ len += imm.length;
+ return ok;
+ }
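
The backward-copy special case is easiest to see with concrete numbers. Below is a small sketch of the predicate plus a worked example, using only values made up for illustration.

#include <cassert>
#include <cstdint>

// The backward-copy predicate from the diff: the two regions overlap and the
// destination starts above the source, so the copy effectively runs
// back-to-front (the high end of dst would be written first).
bool CopyBackward(uint32_t dst, uint32_t src, uint32_t size) {
  return src < dst && dst - src < size;
}

int main() {
  assert(CopyBackward(20, 10, 50));   // overlapping, dst above src
  assert(!CopyBackward(20, 10, 5));   // disjoint regions: plain clamped copy
  assert(!CopyBackward(10, 20, 50));  // dst below src: forward copy
  // Example with a 64-byte memory: dst = 20, src = 10, size = 50. The dst
  // range [20, 70) is partially out of bounds and the copy is backward, so
  // the interpreter traps without writing any bytes at all.
  return 0;
}
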
+ case kExprMemoryFill: {
+ MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ auto size = Pop().to<uint32_t>();
+ auto value = Pop().to<uint32_t>();
+ auto dst = Pop().to<uint32_t>();
+ Address dst_addr;
+ bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
+ memory_fill_wrapper(dst_addr, value, size);
+ if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
+ len += imm.length;
+ return ok;
+ }
+ case kExprTableInit: {
+ TableInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ len += imm.length;
+ if (!CheckElemSegmentIsPassiveAndNotDropped(imm.elem_segment_index,
+ pc)) {
+ return false;
+ }
+ auto size = Pop().to<uint32_t>();
+ auto src = Pop().to<uint32_t>();
+ auto dst = Pop().to<uint32_t>();
+ HandleScope scope(isolate_); // Avoid leaking handles.
+ bool ok = WasmInstanceObject::InitTableEntries(
+ instance_object_->GetIsolate(), instance_object_, imm.table.index,
+ imm.elem_segment_index, dst, src, size);
+ if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
+ return ok;
+ }
+ case kExprElemDrop: {
+ ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ len += imm.length;
+ if (!CheckElemSegmentIsPassiveAndNotDropped(imm.index, pc)) {
+ return false;
+ }
+ instance_object_->dropped_elem_segments()[imm.index] = 1;
+ return true;
+ }
+ case kExprTableCopy: {
+ TableCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ auto size = Pop().to<uint32_t>();
+ auto src = Pop().to<uint32_t>();
+ auto dst = Pop().to<uint32_t>();
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
+ bool ok = WasmInstanceObject::CopyTableEntries(
+ isolate_, instance_object_, imm.table_dst.index,
+ imm.table_src.index, dst, src, size);
+ if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
+ len += imm.length;
+ return ok;
+ }
default:
FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
OpcodeName(code->start[pc]));
@@ -1864,6 +2039,7 @@ class ThreadImpl {
}
byte* GetGlobalPtr(const WasmGlobal* global) {
+ DCHECK(!ValueTypes::IsReferenceType(global->type));
if (global->mutability && global->imported) {
return reinterpret_cast<byte*>(
instance_object_->imported_mutable_globals()[global->index]);
@@ -1872,6 +2048,24 @@ class ThreadImpl {
}
}
+ void GetGlobalBufferAndIndex(const WasmGlobal* global,
+ Handle<FixedArray>* buffer, uint32_t* index) {
+ DCHECK(ValueTypes::IsReferenceType(global->type));
+ if (global->mutability && global->imported) {
+ *buffer =
+ handle(FixedArray::cast(
+ instance_object_->imported_mutable_globals_buffers()->get(
+ global->index)),
+ isolate_);
+ Address idx = instance_object_->imported_mutable_globals()[global->index];
+ DCHECK_LE(idx, std::numeric_limits<uint32_t>::max());
+ *index = static_cast<uint32_t>(idx);
+ } else {
+ *buffer = handle(instance_object_->tagged_globals_buffer(), isolate_);
+ *index = global->offset;
+ }
+ }
+
bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
pc_t pc, int& len) {
switch (opcode) {
@@ -2174,9 +2368,9 @@ class ThreadImpl {
true)
#undef PACK_CASE
case kExprS128Select: {
+ int4 bool_val = Pop().to_s128().to_i32x4();
int4 v2 = Pop().to_s128().to_i32x4();
int4 v1 = Pop().to_s128().to_i32x4();
- int4 bool_val = Pop().to_s128().to_i32x4();
int4 res;
for (size_t i = 0; i < 4; ++i) {
res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
@@ -2220,21 +2414,26 @@ class ThreadImpl {
Push(WasmValue(Simd128(res)));
return true;
}
+ case kExprS1x4AnyTrue:
+ case kExprS1x8AnyTrue:
+ case kExprS1x16AnyTrue: {
+ int4 s = Pop().to_s128().to_i32x4();
+ bool res = s.val[0] | s.val[1] | s.val[2] | s.val[3];
+ Push(WasmValue((res)));
+ return true;
+ }
#define REDUCTION_CASE(op, name, stype, count, operation) \
case kExpr##op: { \
stype s = Pop().to_s128().to_##name(); \
- int32_t res = s.val[0]; \
- for (size_t i = 1; i < count; ++i) { \
- res = res operation static_cast<int32_t>(s.val[i]); \
+ bool res = true; \
+ for (size_t i = 0; i < count; ++i) { \
+ res = res & static_cast<bool>(s.val[i]); \
} \
Push(WasmValue(res)); \
return true; \
}
- REDUCTION_CASE(S1x4AnyTrue, i32x4, int4, 4, |)
REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
- REDUCTION_CASE(S1x8AnyTrue, i16x8, int8, 8, |)
REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
- REDUCTION_CASE(S1x16AnyTrue, i8x16, int16, 16, |)
REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
#undef REDUCTION_CASE
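
The rewritten reductions boil down to an OR over the raw lanes for AnyTrue and an AND over per-lane truth values for AllTrue. A standalone illustration, with std::array replacing the interpreter's int4 type:

#include <array>
#include <cassert>
#include <cstdint>

// AnyTrue: non-zero iff at least one lane is non-zero.
bool AnyTrue(const std::array<int32_t, 4>& lanes) {
  return (lanes[0] | lanes[1] | lanes[2] | lanes[3]) != 0;
}

// AllTrue: true iff every lane is non-zero.
bool AllTrue(const std::array<int32_t, 4>& lanes) {
  bool res = true;
  for (int32_t lane : lanes) res = res & static_cast<bool>(lane);
  return res;
}

int main() {
  assert(AnyTrue({{0, 0, -1, 0}}) && !AllTrue({{0, 0, -1, 0}}));
  assert(!AnyTrue({{0, 0, 0, 0}}));
  assert(AllTrue({{1, 2, 3, 4}}));
  return 0;
}
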
default:
@@ -2255,18 +2454,16 @@ class ThreadImpl {
// stack actually lies in zone memory.
const size_t stack_size_limit = FLAG_stack_size * KB;
// Sum up the value stack size and the control stack size.
- const size_t current_stack_size =
- (sp_ - stack_.get()) + frames_.size() * sizeof(Frame);
+ const size_t current_stack_size = (sp_ - stack_.get()) * sizeof(*sp_) +
+ frames_.size() * sizeof(frames_[0]);
if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
return true;
}
// The pc of the top frame is initialized to the first instruction. We reset
// it to 0 here such that we report the same position as in compiled code.
frames_.back().pc = 0;
- Isolate* isolate = instance_object_->GetIsolate();
- HandleScope handle_scope(isolate);
- isolate->StackOverflow();
- return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
+ isolate_->StackOverflow();
+ return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
}
void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
@@ -2288,66 +2485,162 @@ class ThreadImpl {
// handled locally by the interpreter, false otherwise (interpreter exits).
bool DoThrowException(const WasmException* exception,
uint32_t index) V8_WARN_UNUSED_RESULT {
- Isolate* isolate = instance_object_->GetIsolate();
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<WasmExceptionTag> exception_tag(
WasmExceptionTag::cast(
instance_object_->exceptions_table()->get(index)),
- isolate);
+ isolate_);
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
Handle<Object> exception_object =
- WasmExceptionPackage::New(isolate, exception_tag, encoded_size);
+ WasmExceptionPackage::New(isolate_, exception_tag, encoded_size);
Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
- WasmExceptionPackage::GetExceptionValues(isolate, exception_object));
+ WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
// Encode the exception values on the operand stack into the exception
// package allocated above. This encoding has to be in sync with other
// backends so that exceptions can be passed between them.
- const wasm::WasmExceptionSig* sig = exception->sig;
+ const WasmExceptionSig* sig = exception->sig;
uint32_t encoded_index = 0;
+ sp_t base_index = StackHeight() - sig->parameter_count();
for (size_t i = 0; i < sig->parameter_count(); ++i) {
- WasmValue value = sp_[i - sig->parameter_count()];
+ WasmValue value = GetStackValue(base_index + i);
switch (sig->GetParam(i)) {
- case wasm::kWasmI32: {
+ case kWasmI32: {
uint32_t u32 = value.to_u32();
EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
break;
}
- case wasm::kWasmF32: {
+ case kWasmF32: {
uint32_t f32 = value.to_f32_boxed().get_bits();
EncodeI32ExceptionValue(encoded_values, &encoded_index, f32);
break;
}
- case wasm::kWasmI64: {
+ case kWasmI64: {
uint64_t u64 = value.to_u64();
EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
break;
}
- case wasm::kWasmF64: {
+ case kWasmF64: {
uint64_t f64 = value.to_f64_boxed().get_bits();
EncodeI64ExceptionValue(encoded_values, &encoded_index, f64);
break;
}
- case wasm::kWasmAnyRef:
- UNIMPLEMENTED();
+ case kWasmS128: {
+ int4 s128 = value.to_s128().to_i32x4();
+ EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[0]);
+ EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[1]);
+ EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[2]);
+ EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[3]);
+ break;
+ }
+ case kWasmAnyRef: {
+ Handle<Object> anyref = value.to_anyref();
+ encoded_values->set(encoded_index++, *anyref);
break;
+ }
default:
UNREACHABLE();
}
}
DCHECK_EQ(encoded_size, encoded_index);
- PopN(static_cast<int>(sig->parameter_count()));
+ Drop(static_cast<int>(sig->parameter_count()));
// Now that the exception is ready, set it as pending.
- isolate->Throw(*exception_object);
- return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
+ isolate_->Throw(*exception_object);
+ return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
}
// Throw a given existing exception. Returns true if the exception is being
// handled locally by the interpreter, false otherwise (interpreter exits).
- bool DoRethrowException(WasmValue* exception) {
- Isolate* isolate = instance_object_->GetIsolate();
- // TODO(mstarzinger): Use the passed {exception} here once reference types
- // as values on the operand stack are supported by the interpreter.
- isolate->ReThrow(*isolate->factory()->undefined_value());
- return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
+ bool DoRethrowException(WasmValue exception) {
+ isolate_->ReThrow(*exception.to_anyref());
+ return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
+ }
+
+ // Determines whether the given exception has a tag matching the expected tag
+ // for the given index within the exception table of the current instance.
+ bool MatchingExceptionTag(Handle<Object> exception_object, uint32_t index) {
+ Handle<Object> caught_tag =
+ WasmExceptionPackage::GetExceptionTag(isolate_, exception_object);
+ Handle<Object> expected_tag =
+ handle(instance_object_->exceptions_table()->get(index), isolate_);
+ DCHECK(expected_tag->IsWasmExceptionTag());
+ return expected_tag.is_identical_to(caught_tag);
+ }
+
+ void DecodeI32ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint32_t* value) {
+ uint32_t msb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
+ uint32_t lsb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
+ *value = (msb << 16) | (lsb & 0xffff);
+ }
+
+ void DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint64_t* value) {
+ uint32_t lsb = 0, msb = 0;
+ DecodeI32ExceptionValue(encoded_values, encoded_index, &msb);
+ DecodeI32ExceptionValue(encoded_values, encoded_index, &lsb);
+ *value = (static_cast<uint64_t>(msb) << 32) | static_cast<uint64_t>(lsb);
+ }
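
The decoders above imply the encoding used by EncodeI32/I64ExceptionValue (not visible in this hunk): each 32-bit value is split into two 16-bit halves so that every stored element is Smi-sized, most significant half first, and 64-bit values are simply two such encodings with the high word first. A standalone sketch of that round trip, with std::vector standing in for the FixedArray of encoded values:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

void EncodeU32(std::vector<uint32_t>* out, uint32_t value) {
  out->push_back(value >> 16);     // most significant 16 bits
  out->push_back(value & 0xffff);  // least significant 16 bits
}

uint32_t DecodeU32(const std::vector<uint32_t>& in, size_t* index) {
  uint32_t msb = in[(*index)++];
  uint32_t lsb = in[(*index)++];
  return (msb << 16) | (lsb & 0xffff);
}

int main() {
  std::vector<uint32_t> encoded;
  EncodeU32(&encoded, 0xDEADBEEF);
  uint64_t v64 = 0x0123456789ABCDEFull;
  EncodeU32(&encoded, static_cast<uint32_t>(v64 >> 32));  // high word first
  EncodeU32(&encoded, static_cast<uint32_t>(v64));
  size_t index = 0;
  assert(DecodeU32(encoded, &index) == 0xDEADBEEF);
  uint64_t hi = DecodeU32(encoded, &index);
  uint64_t lo = DecodeU32(encoded, &index);
  assert(((hi << 32) | lo) == v64);
  return 0;
}
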
+
+ // Unpack the values encoded in the given exception. The exception values are
+ // pushed onto the operand stack. Callers must perform a tag check to ensure
+ // the encoded values match the expected signature of the exception.
+ void DoUnpackException(const WasmException* exception,
+ Handle<Object> exception_object) {
+ Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
+ WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
+ // Decode the exception values from the given exception package and push
+ // them onto the operand stack. This encoding has to be in sync with other
+ // backends so that exceptions can be passed between them.
+ const WasmExceptionSig* sig = exception->sig;
+ uint32_t encoded_index = 0;
+ for (size_t i = 0; i < sig->parameter_count(); ++i) {
+ WasmValue value;
+ switch (sig->GetParam(i)) {
+ case kWasmI32: {
+ uint32_t u32 = 0;
+ DecodeI32ExceptionValue(encoded_values, &encoded_index, &u32);
+ value = WasmValue(u32);
+ break;
+ }
+ case kWasmF32: {
+ uint32_t f32_bits = 0;
+ DecodeI32ExceptionValue(encoded_values, &encoded_index, &f32_bits);
+ value = WasmValue(Float32::FromBits(f32_bits));
+ break;
+ }
+ case kWasmI64: {
+ uint64_t u64 = 0;
+ DecodeI64ExceptionValue(encoded_values, &encoded_index, &u64);
+ value = WasmValue(u64);
+ break;
+ }
+ case kWasmF64: {
+ uint64_t f64_bits = 0;
+ DecodeI64ExceptionValue(encoded_values, &encoded_index, &f64_bits);
+ value = WasmValue(Float64::FromBits(f64_bits));
+ break;
+ }
+ case kWasmS128: {
+ int4 s128 = {0, 0, 0, 0};
+ uint32_t* vals = reinterpret_cast<uint32_t*>(s128.val);
+ DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[0]);
+ DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[1]);
+ DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[2]);
+ DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[3]);
+ value = WasmValue(Simd128(s128));
+ break;
+ }
+ case kWasmAnyRef: {
+ Handle<Object> anyref(encoded_values->get(encoded_index++), isolate_);
+ value = WasmValue(anyref);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ Push(value);
+ }
+ DCHECK_EQ(WasmExceptionPackage::GetEncodedSize(exception), encoded_index);
}
void Execute(InterpreterCode* code, pc_t pc, int max) {
@@ -2359,6 +2652,10 @@ class ThreadImpl {
code->locals.type_list.size() +
code->side_table->max_stack_height_,
stack_limit_ - stack_.get() - frames_.back().sp);
+ // Seal the surrounding {HandleScope} to ensure that all cases within the
+ // interpreter switch below which deal with handles open their own scope.
+ // This avoids leaking / accumulating handles in the surrounding scope.
+ SealHandleScope shs(isolate_);
Decoder decoder(code->start, code->end);
pc_t limit = code->end - code->start;
@@ -2461,12 +2758,31 @@ class ThreadImpl {
continue; // Do not bump pc.
}
case kExprRethrow: {
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue ex = Pop();
CommitPc(pc); // Needed for local unwinding.
- if (!DoRethrowException(&ex)) return;
+ if (!DoRethrowException(ex)) return;
ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
continue; // Do not bump pc.
}
+ case kExprBrOnExn: {
+ BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
+ WasmValue ex = Pop();
+ Handle<Object> exception = ex.to_anyref();
+ if (MatchingExceptionTag(exception, imm.index.index)) {
+ imm.index.exception = &module()->exceptions[imm.index.index];
+ DoUnpackException(imm.index.exception, exception);
+ len = DoBreak(code, pc, imm.depth.depth);
+ TRACE(" match => @%zu\n", pc + len);
+ } else {
+ Push(ex); // Exception remains on stack.
+ TRACE(" false => fallthrough\n");
+ len = 1 + imm.length;
+ }
+ break;
+ }
case kExprSelect: {
WasmValue cond = Pop();
WasmValue fval = Pop();
@@ -2546,14 +2862,20 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
+ case kExprRefNull: {
+ Push(WasmValue(isolate_->factory()->null_value()));
+ break;
+ }
case kExprGetLocal: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
Push(GetStackValue(frames_.back().sp + imm.index));
len = 1 + imm.length;
break;
}
case kExprSetLocal: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue val = Pop();
SetStackValue(frames_.back().sp + imm.index, val);
len = 1 + imm.length;
@@ -2561,6 +2883,7 @@ class ThreadImpl {
}
case kExprTeeLocal: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue val = Pop();
SetStackValue(frames_.back().sp + imm.index, val);
Push(val);
@@ -2568,7 +2891,7 @@ class ThreadImpl {
break;
}
case kExprDrop: {
- Pop();
+ Drop();
break;
}
case kExprCallFunction: {
@@ -2608,8 +2931,8 @@ class ThreadImpl {
} break;
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
+ CallIndirectImmediate<Decoder::kNoValidate> imm(
+ kAllWasmFeatures, &decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
DCHECK_LE(module()->tables.size(), 1u);
@@ -2680,8 +3003,8 @@ class ThreadImpl {
} break;
case kExprReturnCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
+ CallIndirectImmediate<Decoder::kNoValidate> imm(
+ kAllWasmFeatures, &decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
DCHECK_LE(module()->tables.size(), 1u);
@@ -2728,20 +3051,30 @@ class ThreadImpl {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
const WasmGlobal* global = &module()->globals[imm.index];
- byte* ptr = GetGlobalPtr(global);
- WasmValue val;
switch (global->type) {
-#define CASE_TYPE(wasm, ctype) \
- case kWasm##wasm: \
- val = WasmValue( \
- ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr))); \
- break;
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: { \
+ byte* ptr = GetGlobalPtr(global); \
+ Push(WasmValue( \
+ ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr)))); \
+ break; \
+ }
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef: {
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
+ Handle<FixedArray> global_buffer; // The buffer of the global.
+ uint32_t global_index = 0; // The index into the buffer.
+ GetGlobalBufferAndIndex(global, &global_buffer, &global_index);
+ Handle<Object> value(global_buffer->get(global_index), isolate_);
+ Push(WasmValue(value));
+ break;
+ }
default:
UNREACHABLE();
}
- Push(val);
len = 1 + imm.length;
break;
}
@@ -2749,16 +3082,26 @@ class ThreadImpl {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
const WasmGlobal* global = &module()->globals[imm.index];
- byte* ptr = GetGlobalPtr(global);
- WasmValue val = Pop();
switch (global->type) {
#define CASE_TYPE(wasm, ctype) \
- case kWasm##wasm: \
+ case kWasm##wasm: { \
+ byte* ptr = GetGlobalPtr(global); \
WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \
- val.to<ctype>()); \
- break;
+ Pop().to<ctype>()); \
+ break; \
+ }
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef: {
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
+ Handle<FixedArray> global_buffer; // The buffer of the global.
+ uint32_t global_index = 0; // The index into the buffer.
+ GetGlobalBufferAndIndex(global, &global_buffer, &global_index);
+ global_buffer->set(global_index, *Pop().to_anyref());
+ break;
+ }
default:
UNREACHABLE();
}
@@ -2856,10 +3199,11 @@ class ThreadImpl {
MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
- instance_object_->GetIsolate());
- Isolate* isolate = memory->GetIsolate();
- int32_t result = WasmMemoryObject::Grow(isolate, memory, delta_pages);
+ isolate_);
+ int32_t result =
+ WasmMemoryObject::Grow(isolate_, memory, delta_pages);
Push(WasmValue(result));
len = 1 + imm.length;
// Treat one grow_memory instruction like 1000 other instructions,
@@ -2900,6 +3244,12 @@ class ThreadImpl {
SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
#undef SIGN_EXTENSION_CASE
+ case kExprRefIsNull: {
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
+ uint32_t result = Pop().to_anyref()->IsNull() ? 1 : 0;
+ Push(WasmValue(result));
+ break;
+ }
case kNumericPrefix: {
++len;
if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
@@ -2994,10 +3344,13 @@ class ThreadImpl {
WasmValue Pop() {
DCHECK_GT(frames_.size(), 0);
DCHECK_GT(StackHeight(), frames_.back().llimit()); // can't pop into locals
- return *--sp_;
+ StackValue stack_value = *--sp_;
+ // Note that {StackHeight} depends on the current {sp} value, hence this
+ // operation is split into two statements to ensure proper evaluation order.
+ return stack_value.ExtractValue(this, StackHeight());
}
- void PopN(int n) {
+ void Drop(int n = 1) {
DCHECK_GE(StackHeight(), n);
DCHECK_GT(frames_.size(), 0);
// Check that we don't pop into locals.
@@ -3014,18 +3367,18 @@ class ThreadImpl {
void Push(WasmValue val) {
DCHECK_NE(kWasmStmt, val.type());
DCHECK_LE(1, stack_limit_ - sp_);
- *sp_++ = val;
+ StackValue stack_value(val, this, StackHeight());
+ // Note that {StackHeight} depends on the current {sp} value, hence this
+ // operation is split into two statements to ensure proper evaluation order.
+ *sp_++ = stack_value;
}
void Push(WasmValue* vals, size_t arity) {
DCHECK_LE(arity, stack_limit_ - sp_);
for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
DCHECK_NE(kWasmStmt, val->type());
+ Push(*val);
}
- if (arity > 0) {
- memcpy(sp_, vals, arity * sizeof(*sp_));
- }
- sp_ += arity;
}
void EnsureStackSpace(size_t size) {
@@ -3034,13 +3387,20 @@ class ThreadImpl {
size_t requested_size =
base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
- std::unique_ptr<WasmValue[]> new_stack(new WasmValue[new_size]);
+ std::unique_ptr<StackValue[]> new_stack(new StackValue[new_size]);
if (old_size > 0) {
memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
}
sp_ = new_stack.get() + (sp_ - stack_.get());
stack_ = std::move(new_stack);
stack_limit_ = stack_.get() + new_size;
+ // Also resize the reference stack to the same size.
+ int grow_by = static_cast<int>(new_size - old_size);
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
+ Handle<FixedArray> old_ref_stack(reference_stack(), isolate_);
+ Handle<FixedArray> new_ref_stack =
+ isolate_->factory()->CopyFixedArrayAndGrow(old_ref_stack, grow_by);
+ reference_stack_cell_->set_value(*new_ref_stack);
}
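
The growth policy in EnsureStackSpace (a minimum of 8 slots, at least doubling, and at least the next power of two of what is needed) also determines how far the reference stack must grow, since both stacks keep identical indexing. A small sketch of just that size computation, with hypothetical helper names:

#include <algorithm>
#include <cassert>
#include <cstddef>

size_t RoundUpToPowerOfTwo(size_t v) {
  size_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

// new_size = max(8, max(2 * old_size, round_up_pow2(used + extra)));
// the reference stack then grows by (new_size - old_size).
size_t NewStackSize(size_t old_size, size_t used, size_t extra) {
  size_t requested = RoundUpToPowerOfTwo(used + extra);
  return std::max<size_t>(8, std::max(2 * old_size, requested));
}

int main() {
  assert(NewStackSize(0, 0, 3) == 8);      // minimum size
  assert(NewStackSize(8, 8, 1) == 16);     // at least doubling
  assert(NewStackSize(8, 8, 100) == 128);  // next power of two above 108
  return 0;
}
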
sp_t StackHeight() { return sp_ - stack_.get(); }
@@ -3048,6 +3408,7 @@ class ThreadImpl {
void TraceValueStack() {
#ifdef DEBUG
if (!FLAG_trace_wasm_interpreter) return;
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
sp_t sp = top ? top->sp : 0;
sp_t plimit = top ? top->plimit() : 0;
@@ -3073,6 +3434,23 @@ class ThreadImpl {
case kWasmF64:
PrintF("f64:%lf", val.to<double>());
break;
+ case kWasmS128: {
+ // This defaults to tracing all S128 values as i32x4 values for now;
+ // once there is more state to know what type of values are on the
+ // stack, the right format should be printed here.
+ int4 s = val.to_s128().to_i32x4();
+ PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
+ break;
+ }
+ case kWasmAnyRef: {
+ Handle<Object> ref = val.to_anyref();
+ if (ref->IsNull()) {
+ PrintF("ref:null");
+ } else {
+ PrintF("ref:0x%" V8PRIxPTR, ref->ptr());
+ }
+ break;
+ }
case kWasmStmt:
PrintF("void");
break;
@@ -3097,8 +3475,7 @@ class ThreadImpl {
const WasmCode* code,
FunctionSig* sig) {
int num_args = static_cast<int>(sig->parameter_count());
- wasm::WasmFeatures enabled_features =
- wasm::WasmFeaturesFromIsolate(isolate);
+ WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
if (code->kind() == WasmCode::kWasmToJsWrapper &&
!IsJSCompatibleSignature(sig, enabled_features.bigint)) {
@@ -3119,25 +3496,32 @@ class ThreadImpl {
// con-/destruction.
std::vector<uint8_t> arg_buffer(num_args * 8);
size_t offset = 0;
- WasmValue* wasm_args = sp_ - num_args;
+ sp_t base_index = StackHeight() - num_args;
for (int i = 0; i < num_args; ++i) {
int param_size = ValueTypes::ElementSizeInBytes(sig->GetParam(i));
if (arg_buffer.size() < offset + param_size) {
arg_buffer.resize(std::max(2 * arg_buffer.size(), offset + param_size));
}
Address address = reinterpret_cast<Address>(arg_buffer.data()) + offset;
+ WasmValue arg = GetStackValue(base_index + i);
switch (sig->GetParam(i)) {
case kWasmI32:
- WriteUnalignedValue(address, wasm_args[i].to<uint32_t>());
+ WriteUnalignedValue(address, arg.to<uint32_t>());
break;
case kWasmI64:
- WriteUnalignedValue(address, wasm_args[i].to<uint64_t>());
+ WriteUnalignedValue(address, arg.to<uint64_t>());
break;
case kWasmF32:
- WriteUnalignedValue(address, wasm_args[i].to<float>());
+ WriteUnalignedValue(address, arg.to<float>());
break;
case kWasmF64:
- WriteUnalignedValue(address, wasm_args[i].to<double>());
+ WriteUnalignedValue(address, arg.to<double>());
+ break;
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef:
+ DCHECK_EQ(kSystemPointerSize, param_size);
+ WriteUnalignedValue<Object>(address, *arg.to_anyref());
break;
default:
UNIMPLEMENTED();
@@ -3210,6 +3594,13 @@ class ThreadImpl {
case kWasmF64:
Push(WasmValue(ReadUnalignedValue<double>(address)));
break;
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef: {
+ Handle<Object> ref(ReadUnalignedValue<Object>(address), isolate);
+ Push(WasmValue(ref));
+ break;
+ }
default:
UNIMPLEMENTED();
}
@@ -3223,7 +3614,7 @@ class ThreadImpl {
if (native_module->is_jump_table_slot(target)) {
uint32_t func_index =
native_module->GetFunctionIndexFromJumpTableSlot(target);
- return native_module->code(func_index);
+ return native_module->GetCode(func_index);
}
WasmCode* code = native_module->Lookup(target);
DCHECK_EQ(code->instruction_start(), target);
@@ -3232,45 +3623,19 @@ class ThreadImpl {
ExternalCallResult CallImportedFunction(uint32_t function_index) {
DCHECK_GT(module()->num_imported_functions, function_index);
- // Use a new HandleScope to avoid leaking / accumulating handles in the
- // outer scope.
- Isolate* isolate = instance_object_->GetIsolate();
- HandleScope handle_scope(isolate);
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
ImportedFunctionEntry entry(instance_object_, function_index);
- Handle<Object> object_ref(entry.object_ref(), isolate);
+ Handle<Object> object_ref(entry.object_ref(), isolate_);
WasmCode* code =
- GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
+ GetTargetCode(isolate_->wasm_engine()->code_manager(), entry.target());
FunctionSig* sig = module()->functions[function_index].sig;
- return CallExternalWasmFunction(isolate, object_ref, code, sig);
+ return CallExternalWasmFunction(isolate_, object_ref, code, sig);
}
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
- if (codemap()->call_indirect_through_module()) {
- // Rely on the information stored in the WasmModule.
- InterpreterCode* code =
- codemap()->GetIndirectCode(table_index, entry_index);
- if (!code) return {ExternalCallResult::INVALID_FUNC};
- if (code->function->sig_index != sig_index) {
- // If not an exact match, we have to do a canonical check.
- int function_canonical_id =
- module()->signature_ids[code->function->sig_index];
- int expected_canonical_id = module()->signature_ids[sig_index];
- DCHECK_EQ(function_canonical_id,
- module()->signature_map.Find(*code->function->sig));
- if (function_canonical_id != expected_canonical_id) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
- }
- if (code->function->imported) {
- return CallImportedFunction(code->function->func_index);
- }
- return {ExternalCallResult::INTERNAL, code};
- }
-
- Isolate* isolate = instance_object_->GetIsolate();
uint32_t expected_sig_id = module()->signature_ids[sig_index];
DCHECK_EQ(expected_sig_id,
module()->signature_map.Find(*module()->signatures[sig_index]));
@@ -3289,15 +3654,15 @@ class ThreadImpl {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
- HandleScope scope(isolate);
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
FunctionSig* signature = module()->signatures[sig_index];
- Handle<Object> object_ref = handle(entry.object_ref(), isolate);
+ Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
WasmCode* code =
- GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
+ GetTargetCode(isolate_->wasm_engine()->code_manager(), entry.target());
if (!object_ref->IsWasmInstanceObject() || /* call to an import */
!instance_object_.is_identical_to(object_ref) /* cross-instance */) {
- return CallExternalWasmFunction(isolate, object_ref, code, signature);
+ return CallExternalWasmFunction(isolate_, object_ref, code, signature);
}
DCHECK(code->kind() == WasmCode::kInterpreterEntry ||
@@ -3477,7 +3842,15 @@ class WasmInterpreterInternals : public ZoneObject {
: module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
codemap_(module, module_bytes_.data(), zone),
threads_(zone) {
- threads_.emplace_back(zone, &codemap_, instance_object);
+ Isolate* isolate = instance_object->GetIsolate();
+ Handle<Cell> reference_stack = isolate->global_handles()->Create(
+ *isolate->factory()->NewCell(isolate->factory()->empty_fixed_array()));
+ threads_.emplace_back(zone, &codemap_, instance_object, reference_stack);
+ }
+
+ ~WasmInterpreterInternals() {
+ DCHECK_EQ(1, threads_.size());
+ GlobalHandles::Destroy(threads_[0].reference_stack_cell().location());
}
};
@@ -3569,10 +3942,6 @@ void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
internals_->codemap_.SetFunctionCode(function, start, end);
}
-void WasmInterpreter::SetCallIndirectTestMode() {
- internals_->codemap_.set_call_indirect_through_module(true);
-}
-
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
// Create some dummy structures, to avoid special-casing the implementation
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 1de6a491b6..9432446fb8 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -16,7 +16,7 @@ class WasmInstanceObject;
namespace wasm {
-// forward declarations.
+// Forward declarations.
struct ModuleWireBytes;
struct WasmFunction;
struct WasmModule;
@@ -56,7 +56,7 @@ using ControlTransferMap = ZoneMap<pc_t, ControlTransferEntry>;
// param #0       _/·
// -----------------
//
-class InterpretedFrame {
+class V8_EXPORT_PRIVATE InterpretedFrame {
public:
const WasmFunction* function() const;
int pc() const;
@@ -77,7 +77,7 @@ class InterpretedFrame {
// Deleter struct to delete the underlying InterpretedFrameImpl without
// violating language specifications.
-struct InterpretedFrameDeleter {
+struct V8_EXPORT_PRIVATE InterpretedFrameDeleter {
void operator()(InterpretedFrame* ptr);
};
@@ -203,7 +203,6 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Manually adds code to the interpreter for the given function.
void SetFunctionCodeForTesting(const WasmFunction* function,
const byte* start, const byte* end);
- void SetCallIndirectTestMode();
// Computes the control transfers for the given bytecode. Used internally in
// the interpreter, but exposed for testing.
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 302002b7c9..9aafc45b7e 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -176,18 +176,26 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
return Utils::ToLocal(v8_str(reinterpret_cast<i::Isolate*>(isolate), str));
}
-i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
- const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
- i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
- if (!arg0->IsWasmModuleObject()) {
- thrower->TypeError("Argument 0 must be a WebAssembly.Module");
- return {};
+#define GET_FIRST_ARGUMENT_AS(Type) \
+ i::MaybeHandle<i::Wasm##Type##Object> GetFirstArgumentAs##Type( \
+ const v8::FunctionCallbackInfo<v8::Value>& args, \
+ ErrorThrower* thrower) { \
+ i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]); \
+ if (!arg0->IsWasm##Type##Object()) { \
+ thrower->TypeError("Argument 0 must be a WebAssembly." #Type); \
+ return {}; \
+ } \
+ Local<Object> obj = Local<Object>::Cast(args[0]); \
+ return i::Handle<i::Wasm##Type##Object>::cast( \
+ v8::Utils::OpenHandle(*obj)); \
}
- Local<Object> module_obj = Local<Object>::Cast(args[0]);
- return i::Handle<i::WasmModuleObject>::cast(
- v8::Utils::OpenHandle(*module_obj));
-}
+GET_FIRST_ARGUMENT_AS(Module)
+GET_FIRST_ARGUMENT_AS(Memory)
+GET_FIRST_ARGUMENT_AS(Table)
+GET_FIRST_ARGUMENT_AS(Global)
+
+#undef GET_FIRST_ARGUMENT_AS
i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower,
@@ -525,6 +533,36 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
std::move(resolver), bytes, is_shared);
}
+void WasmStreamingCallbackForTesting(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ HandleScope scope(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+
+ std::shared_ptr<v8::WasmStreaming> streaming =
+ v8::WasmStreaming::Unpack(args.GetIsolate(), args.Data());
+
+ bool is_shared = false;
+ i::wasm::ModuleWireBytes bytes =
+ GetFirstArgumentAsBytes(args, &thrower, &is_shared);
+ if (thrower.error()) {
+ streaming->Abort(Utils::ToLocal(thrower.Reify()));
+ return;
+ }
+ streaming->OnBytesReceived(bytes.start(), bytes.length());
+ streaming->Finish();
+ CHECK(!thrower.error());
+}
+
+void WasmStreamingPromiseFailedCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ std::shared_ptr<v8::WasmStreaming> streaming =
+ v8::WasmStreaming::Unpack(args.GetIsolate(), args.Data());
+ streaming->Abort(args[0]);
+}
+
// WebAssembly.compileStreaming(Promise<Response>) -> Promise
void WebAssemblyCompileStreaming(
const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -563,6 +601,10 @@ void WebAssemblyCompileStreaming(
v8::Function, compile_callback,
v8::Function::New(context, i_isolate->wasm_streaming_callback(),
Utils::ToLocal(i::Handle<i::Object>::cast(data)), 1));
+ ASSIGN(
+ v8::Function, reject_callback,
+ v8::Function::New(context, WasmStreamingPromiseFailedCallback,
+ Utils::ToLocal(i::Handle<i::Object>::cast(data)), 1));
// The parameter may be of type {Response} or of type {Promise<Response>}.
// Treat either case of parameter as Promise.resolve(parameter)
@@ -576,7 +618,8 @@ void WebAssemblyCompileStreaming(
// We do not have any use of the result here. The {compile_callback} will
// start streaming compilation, which will eventually resolve the promise we
// set as result value.
- USE(input_resolver->GetPromise()->Then(context, compile_callback));
+ USE(input_resolver->GetPromise()->Then(context, compile_callback,
+ reject_callback));
}
// WebAssembly.validate(bytes) -> bool
@@ -837,6 +880,10 @@ void WebAssemblyInstantiateStreaming(
v8::Function, compile_callback,
v8::Function::New(context, i_isolate->wasm_streaming_callback(),
Utils::ToLocal(i::Handle<i::Object>::cast(data)), 1));
+ ASSIGN(
+ v8::Function, reject_callback,
+ v8::Function::New(context, WasmStreamingPromiseFailedCallback,
+ Utils::ToLocal(i::Handle<i::Object>::cast(data)), 1));
// The parameter may be of type {Response} or of type {Promise<Response>}.
// Treat either case of parameter as Promise.resolve(parameter)
@@ -850,7 +897,8 @@ void WebAssemblyInstantiateStreaming(
// We do not have any use of the result here. The {compile_callback} will
// start streaming compilation, which will eventually resolve the promise we
// set as result value.
- USE(input_resolver->GetPromise()->Then(context, compile_callback));
+ USE(input_resolver->GetPromise()->Then(context, compile_callback,
+ reject_callback));
}
// WebAssembly.instantiate(module, imports) -> WebAssembly.Instance
@@ -959,50 +1007,59 @@ bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
return true;
}
-bool GetRequiredIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
+bool GetOptionalIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
Local<Context> context,
Local<v8::Object> object,
- Local<String> property, int64_t* result,
- int64_t lower_bound, uint64_t upper_bound) {
+ Local<String> property, bool* has_property,
+ int64_t* result, int64_t lower_bound,
+ uint64_t upper_bound) {
v8::Local<v8::Value> value;
if (!object->Get(context, property).ToLocal(&value)) {
return false;
}
- i::Handle<i::String> property_name = v8::Utils::OpenHandle(*property);
-
// Web IDL: dictionary presence
// https://heycam.github.io/webidl/#dfn-present
if (value->IsUndefined()) {
- thrower->TypeError("Property '%s' is required",
- property_name->ToCString().get());
- return false;
+ if (has_property != nullptr) *has_property = false;
+ return true;
}
+ if (has_property != nullptr) *has_property = true;
+ i::Handle<i::String> property_name = v8::Utils::OpenHandle(*property);
+
return GetIntegerProperty(isolate, thrower, context, value, property_name,
result, lower_bound, upper_bound);
}
-bool GetOptionalIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
- Local<Context> context,
- Local<v8::Object> object,
- Local<String> property, int64_t* result,
- int64_t lower_bound, uint64_t upper_bound) {
- v8::Local<v8::Value> value;
- if (!object->Get(context, property).ToLocal(&value)) {
+// Fetch 'initial' or 'minimum' property from object. If both are provided,
+// 'initial' is used.
+// TODO(aseemgarg): change behavior when the following bug is resolved:
+// https://github.com/WebAssembly/js-types/issues/6
+bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
+ Local<Context> context,
+ Local<v8::Object> object, int64_t* result,
+ int64_t lower_bound, uint64_t upper_bound) {
+ bool has_initial = false;
+ if (!GetOptionalIntegerProperty(isolate, thrower, context, object,
+ v8_str(isolate, "initial"), &has_initial,
+ result, lower_bound, upper_bound)) {
return false;
}
-
- // Web IDL: dictionary presence
- // https://heycam.github.io/webidl/#dfn-present
- if (value->IsUndefined()) {
- return true;
+ auto enabled_features = i::wasm::WasmFeaturesFromFlags();
+ if (!has_initial && enabled_features.type_reflection) {
+ if (!GetOptionalIntegerProperty(isolate, thrower, context, object,
+ v8_str(isolate, "minimum"), &has_initial,
+ result, lower_bound, upper_bound)) {
+ return false;
+ }
}
-
- i::Handle<i::String> property_name = v8::Utils::OpenHandle(*property);
-
- return GetIntegerProperty(isolate, thrower, context, value, property_name,
- result, lower_bound, upper_bound);
+ if (!has_initial) {
+ // TODO(aseemgarg): update error message when the spec issue is resolved.
+ thrower->TypeError("Property 'initial' is required");
+ return false;
+ }
+ return true;
}
// new WebAssembly.Table(args) -> WebAssembly.Table
@@ -1021,6 +1078,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<Context> context = isolate->GetCurrentContext();
Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
+ i::wasm::ValueType type;
// The descriptor's 'element'.
{
v8::MaybeLocal<v8::Value> maybe =
@@ -1029,30 +1087,37 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!maybe.ToLocal(&value)) return;
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
- if (!string->StringEquals(v8_str(isolate, "anyfunc"))) {
+ auto enabled_features = i::wasm::WasmFeaturesFromFlags();
+ if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
+ type = i::wasm::kWasmAnyFunc;
+ } else if (enabled_features.anyref &&
+ string->StringEquals(v8_str(isolate, "anyref"))) {
+ type = i::wasm::kWasmAnyRef;
+ } else {
thrower.TypeError("Descriptor property 'element' must be 'anyfunc'");
return;
}
}
- // The descriptor's 'initial'.
+
int64_t initial = 0;
- if (!GetRequiredIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "initial"), &initial, 0,
- i::wasm::max_table_init_entries())) {
+ if (!GetInitialOrMinimumProperty(isolate, &thrower, context, descriptor,
+ &initial, 0,
+ i::wasm::max_table_init_entries())) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = -1;
- if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "maximum"), &maximum, initial,
- i::wasm::max_table_init_entries())) {
+ bool has_maximum = true;
+ if (!GetOptionalIntegerProperty(
+ isolate, &thrower, context, descriptor, v8_str(isolate, "maximum"),
+ &has_maximum, &maximum, initial, i::wasm::max_table_init_entries())) {
return;
}
i::Handle<i::FixedArray> fixed_array;
- i::Handle<i::JSObject> table_obj =
- i::WasmTableObject::New(i_isolate, static_cast<uint32_t>(initial),
- static_cast<uint32_t>(maximum), &fixed_array);
+ i::Handle<i::JSObject> table_obj = i::WasmTableObject::New(
+ i_isolate, type, static_cast<uint32_t>(initial), has_maximum,
+ static_cast<uint32_t>(maximum), &fixed_array);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(table_obj));
}
@@ -1072,18 +1137,17 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<Context> context = isolate->GetCurrentContext();
Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
- // The descriptor's 'initial'.
+
int64_t initial = 0;
- if (!GetRequiredIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "initial"), &initial, 0,
- i::wasm::max_mem_pages())) {
+ if (!GetInitialOrMinimumProperty(isolate, &thrower, context, descriptor,
+ &initial, 0, i::wasm::max_mem_pages())) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = -1;
if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "maximum"), &maximum, initial,
- i::wasm::kSpecMaxWasmMemoryPages)) {
+ v8_str(isolate, "maximum"), nullptr, &maximum,
+ initial, i::wasm::kSpecMaxWasmMemoryPages)) {
return;
}
@@ -1168,6 +1232,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
if (string->StringEquals(v8_str(isolate, "i32"))) {
type = i::wasm::kWasmI32;
} else if (string->StringEquals(v8_str(isolate, "f32"))) {
@@ -1176,8 +1241,12 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
type = i::wasm::kWasmI64;
} else if (string->StringEquals(v8_str(isolate, "f64"))) {
type = i::wasm::kWasmF64;
- } else if (string->StringEquals(v8_str(isolate, "anyref"))) {
+ } else if (enabled_features.anyref &&
+ string->StringEquals(v8_str(isolate, "anyref"))) {
type = i::wasm::kWasmAnyRef;
+ } else if (enabled_features.anyref &&
+ string->StringEquals(v8_str(isolate, "anyfunc"))) {
+ type = i::wasm::kWasmAnyFunc;
} else {
thrower.TypeError(
"Descriptor property 'value' must be 'i32', 'i64', 'f32', or "
@@ -1253,13 +1322,27 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 2) {
// When no initial value is provided, we have to use the WebAssembly
// default value 'null', and not the JS default value 'undefined'.
- global_obj->SetAnyRef(
- handle(i::ReadOnlyRoots(i_isolate).null_value(), i_isolate));
+ global_obj->SetAnyRef(i_isolate->factory()->null_value());
break;
}
global_obj->SetAnyRef(Utils::OpenHandle(*value));
break;
}
+ case i::wasm::kWasmAnyFunc: {
+ if (args.Length() < 2) {
+ // When no initial value is provided, we have to use the WebAssembly
+ // default value 'null', and not the JS default value 'undefined'.
+ global_obj->SetAnyFunc(i_isolate, i_isolate->factory()->null_value());
+ break;
+ }
+
+ if (!global_obj->SetAnyFunc(i_isolate, Utils::OpenHandle(*value))) {
+ thrower.TypeError(
+ "The value of anyfunc globals must be null or an "
+ "exported function");
+ }
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1273,7 +1356,7 @@ void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Excepion()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Exception()");
thrower.TypeError("WebAssembly.Exception cannot be called");
}
@@ -1332,7 +1415,9 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::FixedArray> old_array(receiver->elements(), i_isolate);
uint32_t old_size = static_cast<uint32_t>(old_array->length());
- uint64_t max_size64 = receiver->maximum_length()->Number();
+ uint64_t max_size64 = receiver->maximum_length().IsUndefined(i_isolate)
+ ? i::FLAG_wasm_max_table_size
+ : receiver->maximum_length()->Number();
if (max_size64 > i::FLAG_wasm_max_table_size) {
max_size64 = i::FLAG_wasm_max_table_size;
}
@@ -1373,21 +1458,21 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
- i::Handle<i::FixedArray> array(receiver->elements(), i_isolate);
uint32_t index;
if (!EnforceUint32("Argument 0", args[0], context, &thrower, &index)) {
return;
}
-
- v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- if (index >= static_cast<uint32_t>(array->length())) {
- thrower.RangeError("Index out of bounds");
+ if (!i::WasmTableObject::IsInBounds(i_isolate, receiver, index)) {
+ thrower.RangeError("invalid index %u into function table", index);
return;
}
- i::Handle<i::Object> value(array->get(static_cast<int>(index)), i_isolate);
- return_value.Set(Utils::ToLocal(value));
+ i::Handle<i::Object> result =
+ i::WasmTableObject::Get(i_isolate, receiver, index);
+
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(Utils::ToLocal(result));
}
// WebAssembly.Table.set(num, JSFunction)
@@ -1397,31 +1482,79 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
Local<Context> context = isolate->GetCurrentContext();
- EXTRACT_THIS(receiver, WasmTableObject);
+ EXTRACT_THIS(table_object, WasmTableObject);
// Parameter 0.
uint32_t index;
if (!EnforceUint32("Argument 0", args[0], context, &thrower, &index)) {
return;
}
+ if (!i::WasmTableObject::IsInBounds(i_isolate, table_object, index)) {
+ thrower.RangeError("invalid index %u into function table", index);
+ return;
+ }
- // Parameter 1.
- i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
- if (!value->IsNull(i_isolate) &&
- !i::WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
+ if (!i::WasmTableObject::IsValidElement(i_isolate, table_object, element)) {
thrower.TypeError("Argument 1 must be null or a WebAssembly function");
return;
}
+ i::WasmTableObject::Set(i_isolate, table_object, index, element);
+}
+
+// WebAssembly.Table.type(WebAssembly.Table) -> TableType
+void WebAssemblyTableGetType(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.type()");
+
+ auto maybe_table = GetFirstArgumentAsTable(args, &thrower);
+ if (thrower.error()) return;
+ i::Handle<i::WasmTableObject> table = maybe_table.ToHandleChecked();
+ v8::Local<v8::Object> ret = v8::Object::New(isolate);
+
+ Local<String> element;
+ auto enabled_features = i::wasm::WasmFeaturesFromFlags();
+ if (table->type() == i::wasm::ValueType::kWasmAnyFunc) {
+ element = v8_str(isolate, "anyfunc");
+ } else if (enabled_features.anyref &&
+ table->type() == i::wasm::ValueType::kWasmAnyRef) {
+ element = v8_str(isolate, "anyref");
+ } else {
+ UNREACHABLE();
+ }
+ // TODO(aseemgarg): update anyfunc to funcref
+ if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
+ v8_str(isolate, "element"), element)
+ .IsJust()) {
+ return;
+ }
- if (index >= static_cast<uint64_t>(receiver->elements()->length())) {
- thrower.RangeError("index out of bounds");
+ uint32_t curr_size = table->current_length();
+ DCHECK_LE(curr_size, std::numeric_limits<uint32_t>::max());
+ if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
+ v8_str(isolate, "minimum"),
+ v8::Integer::NewFromUnsigned(
+ isolate, static_cast<uint32_t>(curr_size)))
+ .IsJust()) {
return;
}
- i::WasmTableObject::Set(i_isolate, receiver, index,
- value->IsNull(i_isolate)
- ? i::Handle<i::JSFunction>::null()
- : i::Handle<i::JSFunction>::cast(value));
+ if (!table->maximum_length()->IsUndefined()) {
+ uint64_t max_size = table->maximum_length()->Number();
+ DCHECK_LE(max_size, std::numeric_limits<uint32_t>::max());
+ if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
+ v8_str(isolate, "maximum"),
+ v8::Integer::NewFromUnsigned(
+ isolate, static_cast<uint32_t>(max_size)))
+ .IsJust()) {
+ return;
+ }
+ }
+
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(ret);
}
// WebAssembly.Memory.grow(num) -> num
@@ -1443,10 +1576,6 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
max_size64 = i::wasm::max_mem_pages();
}
i::Handle<i::JSArrayBuffer> old_buffer(receiver->array_buffer(), i_isolate);
- if (!old_buffer->is_growable()) {
- thrower.RangeError("This memory cannot be grown");
- return;
- }
DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
@@ -1495,6 +1624,45 @@ void WebAssemblyMemoryGetBuffer(
return_value.Set(Utils::ToLocal(buffer));
}
+// WebAssembly.Memory.type(WebAssembly.Memory) -> MemoryType
+void WebAssemblyMemoryGetType(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.type()");
+
+ auto maybe_memory = GetFirstArgumentAsMemory(args, &thrower);
+ if (thrower.error()) return;
+ i::Handle<i::WasmMemoryObject> memory = maybe_memory.ToHandleChecked();
+ v8::Local<v8::Object> ret = v8::Object::New(isolate);
+ i::Handle<i::JSArrayBuffer> buffer(memory->array_buffer(), i_isolate);
+
+ size_t curr_size = buffer->byte_length() / i::wasm::kWasmPageSize;
+ DCHECK_LE(curr_size, std::numeric_limits<uint32_t>::max());
+ if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
+ v8_str(isolate, "minimum"),
+ v8::Integer::NewFromUnsigned(
+ isolate, static_cast<uint32_t>(curr_size)))
+ .IsJust()) {
+ return;
+ }
+
+ if (memory->has_maximum_pages()) {
+ uint64_t max_size = memory->maximum_pages();
+ DCHECK_LE(max_size, std::numeric_limits<uint32_t>::max());
+ if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
+ v8_str(isolate, "maximum"),
+ v8::Integer::NewFromUnsigned(
+ isolate, static_cast<uint32_t>(max_size)))
+ .IsJust()) {
+ return;
+ }
+ }
+
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(ret);
+}
+
void WebAssemblyGlobalGetValueCommon(
const v8::FunctionCallbackInfo<v8::Value>& args, const char* name) {
v8::Isolate* isolate = args.GetIsolate();
@@ -1527,7 +1695,9 @@ void WebAssemblyGlobalGetValueCommon(
return_value.Set(receiver->GetF64());
break;
case i::wasm::kWasmAnyRef:
- return_value.Set(Utils::ToLocal(receiver->GetAnyRef()));
+ case i::wasm::kWasmAnyFunc:
+ case i::wasm::kWasmExceptRef:
+ return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
default:
UNREACHABLE();
@@ -1594,13 +1764,76 @@ void WebAssemblyGlobalSetValue(
receiver->SetF64(f64_value);
break;
}
- case i::wasm::kWasmAnyRef: {
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmExceptRef: {
receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
break;
}
+ case i::wasm::kWasmAnyFunc: {
+ if (!receiver->SetAnyFunc(i_isolate, Utils::OpenHandle(*args[0]))) {
+ thrower.TypeError(
+ "value of an anyfunc reference must be either null or an "
+ "exported function");
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+// WebAssembly.Global.type(WebAssembly.Global) -> GlobalType
+void WebAssemblyGlobalGetType(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Global.type()");
+
+ auto maybe_global = GetFirstArgumentAsGlobal(args, &thrower);
+ if (thrower.error()) return;
+ i::Handle<i::WasmGlobalObject> global = maybe_global.ToHandleChecked();
+ v8::Local<v8::Object> ret = v8::Object::New(isolate);
+
+ if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
+ v8_str(isolate, "mutable"),
+ v8::Boolean::New(isolate, global->is_mutable()))
+ .IsJust()) {
+ return;
+ }
+
+ Local<String> type;
+ switch (global->type()) {
+ case i::wasm::kWasmI32: {
+ type = v8_str(isolate, "i32");
+ break;
+ }
+ case i::wasm::kWasmI64: {
+ type = v8_str(isolate, "i64");
+ break;
+ }
+ case i::wasm::kWasmF32: {
+ type = v8_str(isolate, "f32");
+ break;
+ }
+ case i::wasm::kWasmF64: {
+ type = v8_str(isolate, "f64");
+ break;
+ }
+ case i::wasm::kWasmAnyRef: {
+ type = v8_str(isolate, "anyref");
+ break;
+ }
default:
UNREACHABLE();
}
+ if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
+ v8_str(isolate, "value"), type)
+ .IsJust()) {
+ return;
+ }
+
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(ret);
}
} // namespace
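
The three type() reflection entry points added above (Table.type, Memory.type, Global.type) each build a plain JS object describing the entity, one CreateDataProperty call per field. Assuming only the property names visible in the hunks, the descriptors have roughly the shapes below; these structs are an illustrative restatement, not types that exist in V8:

    #include <cstdint>
    #include <optional>
    #include <string>

    // Shape of the object returned by WebAssembly.Table.type().
    struct TableType {
      std::string element;              // "anyfunc", or "anyref" when the anyref feature is enabled
      uint32_t minimum;                 // current table length
      std::optional<uint32_t> maximum;  // absent if the table has no maximum
    };

    // Shape of the object returned by WebAssembly.Memory.type().
    struct MemoryType {
      uint32_t minimum;                 // current size in wasm pages (64 KiB each)
      std::optional<uint32_t> maximum;  // absent if the memory has no maximum
    };

    // Shape of the object returned by WebAssembly.Global.type().
    struct GlobalType {
      bool is_mutable;    // the "mutable" property
      std::string value;  // "i32", "i64", "f32", "f64", or "anyref"
    };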
@@ -1719,7 +1952,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
name, isolate->strict_function_map(), LanguageMode::kStrict);
Handle<JSFunction> cons = factory->NewFunction(args);
JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
- Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
+ Handle<JSObject> webassembly =
+ factory->NewJSObject(cons, AllocationType::kOld);
PropertyAttributes ro_attributes =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
@@ -1729,6 +1963,10 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
+ if (FLAG_wasm_test_streaming) {
+ isolate->set_wasm_streaming_callback(WasmStreamingCallbackForTesting);
+ }
+
if (isolate->wasm_streaming_callback() != nullptr) {
InstallFunc(isolate, webassembly, "compileStreaming",
WebAssemblyCompileStreaming, 1);
@@ -1778,6 +2016,10 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Instance"), ro_attributes);
+ // The context is not set up completely yet. That's why we cannot use
+ // {WasmFeaturesFromIsolate} and have to use {WasmFeaturesFromFlags} instead.
+ auto enabled_features = i::wasm::WasmFeaturesFromFlags();
+
// Setup Table
Handle<JSFunction> table_constructor =
InstallConstructorFunc(isolate, webassembly, "Table", WebAssemblyTable);
@@ -1793,6 +2035,9 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1);
InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
+ if (enabled_features.type_reflection) {
+ InstallFunc(isolate, table_constructor, "type", WebAssemblyTableGetType, 1);
+ }
JSObject::AddProperty(isolate, table_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Table"), ro_attributes);
@@ -1809,13 +2054,13 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
+ if (enabled_features.type_reflection) {
+ InstallFunc(isolate, memory_constructor, "type", WebAssemblyMemoryGetType,
+ 1);
+ }
JSObject::AddProperty(isolate, memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
- // The context is not set up completely yet. That's why we cannot use
- // {WasmFeaturesFromIsolate} and have to use {WasmFeaturesFromFlags} instead.
- auto enabled_features = i::wasm::WasmFeaturesFromFlags();
-
// Setup Global
Handle<JSFunction> global_constructor =
InstallConstructorFunc(isolate, webassembly, "Global", WebAssemblyGlobal);
@@ -1830,6 +2075,10 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
WebAssemblyGlobalSetValue);
+ if (enabled_features.type_reflection) {
+ InstallFunc(isolate, global_constructor, "type", WebAssemblyGlobalGetType,
+ 1);
+ }
JSObject::AddProperty(isolate, global_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 78ea260f64..914b61244d 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -27,13 +27,20 @@ void AddAllocationStatusSample(Isolate* isolate,
static_cast<int>(status));
}
-size_t GetAllocationLength(uint32_t size, bool require_full_guard_regions) {
- if (require_full_guard_regions) {
- return RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize());
- } else {
- return RoundUp(
- base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
- kWasmPageSize);
+bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
+ bool* did_retry) {
+ // Try up to three times; getting rid of dead JSArrayBuffer allocations might
+  // require two GCs because the first GC may be incremental and may have
+ // floating garbage.
+ static constexpr int kAllocationRetries = 2;
+
+ for (int trial = 0;; ++trial) {
+ if (fn()) return true;
+ // {fn} failed. If {kAllocationRetries} is reached, fail.
+ *did_retry = true;
+ if (trial == kAllocationRetries) return false;
+ // Otherwise, collect garbage and retry.
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
}
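
RunWithGCAndRetry factors the previous per-call-site retry loops into a single helper: run a callback, and on failure request a critical memory-pressure GC and try again a bounded number of times. A self-contained sketch of the same pattern with the Heap dependency replaced by a generic callback (all names here are illustrative, not V8 API):

    #include <functional>

    // Runs {fn} until it succeeds, retrying up to {max_retries} times. After
    // each failure {collect_garbage} is invoked so a retry can observe freed
    // memory. Returns whether {fn} eventually succeeded; {did_retry} reports
    // whether any retry was needed.
    bool RunWithRetry(const std::function<bool()>& fn,
                      const std::function<void()>& collect_garbage,
                      int max_retries, bool* did_retry) {
      for (int trial = 0;; ++trial) {
        if (fn()) return true;
        *did_retry = true;
        if (trial == max_retries) return false;
        collect_garbage();  // e.g. a critical MemoryPressureNotification in V8
      }
    }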
@@ -43,100 +50,80 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
size_t* allocation_length) {
using AllocationStatus = WasmMemoryTracker::AllocationStatus;
#if V8_TARGET_ARCH_64_BIT
- bool require_full_guard_regions = true;
+ constexpr bool kRequireFullGuardRegions = true;
#else
- bool require_full_guard_regions = false;
+ constexpr bool kRequireFullGuardRegions = false;
#endif
// Let the WasmMemoryTracker know we are going to reserve a bunch of
// address space.
- // Try up to three times; getting rid of dead JSArrayBuffer allocations might
- // require two GCs because the first GC maybe incremental and may have
- // floating garbage.
- static constexpr int kAllocationRetries = 2;
- // TODO(7881): do not use static_cast<uint32_t>() here
- uint32_t reservation_size =
- static_cast<uint32_t>((max_size > size) ? max_size : size);
- // TODO(8898): Cleanup the allocation retry flow
+ size_t reservation_size = std::max(max_size, size);
bool did_retry = false;
- for (int trial = 0;; ++trial) {
- // For guard regions, we always allocate the largest possible offset into
- // the heap, so the addressable memory after the guard page can be made
- // inaccessible.
+
+ auto reserve_memory_space = [&] {
+ // For guard regions, we always allocate the largest possible offset
+ // into the heap, so the addressable memory after the guard page can
+ // be made inaccessible.
//
- // To protect against 32-bit integer overflow issues, we also protect the
- // 2GiB before the valid part of the memory buffer.
+ // To protect against 32-bit integer overflow issues, we also
+ // protect the 2GiB before the valid part of the memory buffer.
*allocation_length =
- GetAllocationLength(reservation_size, require_full_guard_regions);
+ kRequireFullGuardRegions
+ ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
+ : RoundUp(base::bits::RoundUpToPowerOfTwo(reservation_size),
+ kWasmPageSize);
DCHECK_GE(*allocation_length, size);
DCHECK_GE(*allocation_length, kWasmPageSize);
- auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
- : WasmMemoryTracker::kHardLimit;
- if (memory_tracker->ReserveAddressSpace(*allocation_length, limit)) break;
-
- did_retry = true;
- // After first and second GC: retry.
- if (trial == kAllocationRetries) {
- // Always reset reservation_size to initial size so that at least the
- // initial size can be allocated if maximum size reservation is not
- // possible.
- reservation_size = static_cast<uint32_t>(size);
-
- // If we fail to allocate guard regions and the fallback is enabled, then
- // retry without full guard regions.
- if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
- require_full_guard_regions = false;
- --trial; // one more try.
- continue;
- }
-
- // We are over the address space limit. Fail.
- //
- // When running under the correctness fuzzer (i.e.
- // --abort-on-stack-or-string-length-overflow is preset), we crash
- // instead so it is not incorrectly reported as a correctness
- // violation. See https://crbug.com/828293#c4
- if (FLAG_abort_on_stack_or_string_length_overflow) {
- FATAL("could not allocate wasm memory");
- }
- AddAllocationStatusSample(
- heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
- return nullptr;
+ return memory_tracker->ReserveAddressSpace(*allocation_length);
+ };
+ if (!RunWithGCAndRetry(reserve_memory_space, heap, &did_retry)) {
+ // Reset reservation_size to initial size so that at least the initial size
+ // can be allocated if maximum size reservation is not possible.
+ reservation_size = size;
+
+ // We are over the address space limit. Fail.
+ //
+ // When running under the correctness fuzzer (i.e.
+ // --abort-on-stack-or-string-length-overflow is preset), we crash
+ // instead so it is not incorrectly reported as a correctness
+ // violation. See https://crbug.com/828293#c4
+ if (FLAG_abort_on_stack_or_string_length_overflow) {
+ FATAL("could not allocate wasm memory");
}
- // Collect garbage and retry.
- heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
+ AddAllocationStatusSample(
+ heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
+ return nullptr;
}
// The Reserve makes the whole region inaccessible by default.
DCHECK_NULL(*allocation_base);
- for (int trial = 0;; ++trial) {
+ auto allocate_pages = [&] {
*allocation_base =
AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
kWasmPageSize, PageAllocator::kNoAccess);
- if (*allocation_base != nullptr) break;
- if (trial == kAllocationRetries) {
- memory_tracker->ReleaseReservation(*allocation_length);
- AddAllocationStatusSample(heap->isolate(),
- AllocationStatus::kOtherFailure);
- return nullptr;
- }
- heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
+ return *allocation_base != nullptr;
+ };
+ if (!RunWithGCAndRetry(allocate_pages, heap, &did_retry)) {
+ memory_tracker->ReleaseReservation(*allocation_length);
+ AddAllocationStatusSample(heap->isolate(), AllocationStatus::kOtherFailure);
+ return nullptr;
}
+
byte* memory = reinterpret_cast<byte*>(*allocation_base);
- if (require_full_guard_regions) {
+ if (kRequireFullGuardRegions) {
memory += kNegativeGuardSize;
}
// Make the part we care about accessible.
- if (size > 0) {
- bool result =
- SetPermissions(GetPlatformPageAllocator(), memory,
- RoundUp(size, kWasmPageSize), PageAllocator::kReadWrite);
- // SetPermissions commits the extra memory, which may put us over the
- // process memory limit. If so, report this as an OOM.
- if (!result) {
- V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
- }
+ auto commit_memory = [&] {
+ return size == 0 || SetPermissions(GetPlatformPageAllocator(), memory,
+ RoundUp(size, kWasmPageSize),
+ PageAllocator::kReadWrite);
+ };
+ // SetPermissions commits the extra memory, which may put us over the
+ // process memory limit. If so, report this as an OOM.
+ if (!RunWithGCAndRetry(commit_memory, heap, &did_retry)) {
+ V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
}
memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
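
The reservation-size computation above picks between two layouts: with full guard regions (64-bit targets) it reserves a fixed region covering the maximum heap offset plus a 2 GiB negative guard, rounded to the commit page size; otherwise it reserves the requested size rounded up to a power of two and then to the wasm page size. A compilable sketch of that arithmetic, with assumed constants where the real values live in V8's wasm limits (64-bit size_t assumed):

    #include <cstddef>

    // Illustrative constants; the real values are platform dependent and
    // defined in V8's wasm limits.
    constexpr size_t KB = 1024, MB = 1024 * KB, GB = 1024 * MB;
    constexpr size_t kWasmPageSize = 64 * KB;
    constexpr size_t kWasmMaxHeapOffset = 8 * GB;  // assumption for illustration
    constexpr size_t kNegativeGuardSize = 2 * GB;  // guards 32-bit offset overflow
    constexpr size_t kCommitPageSize = 4 * KB;     // assumption for illustration

    constexpr size_t RoundUpTo(size_t value, size_t alignment) {
      return (value + alignment - 1) / alignment * alignment;
    }

    // Round up to the next power of two (for sizes > 0).
    size_t RoundUpToPowerOfTwo(size_t value) {
      size_t result = 1;
      while (result < value) result <<= 1;
      return result;
    }

    // Mirrors the reservation-size choice made in the hunk above.
    size_t AllocationLength(size_t reservation_size, bool full_guard_regions) {
      return full_guard_regions
                 ? RoundUpTo(kWasmMaxHeapOffset + kNegativeGuardSize,
                             kCommitPageSize)
                 : RoundUpTo(RoundUpToPowerOfTwo(reservation_size),
                             kWasmPageSize);
    }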
@@ -150,14 +137,11 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors,
// address space limits needs to be smaller.
-constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L; // 132 GiB
-constexpr size_t kAddressSpaceHardLimit = 0x4000000000L; // 256 GiB
+constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB
#elif V8_TARGET_ARCH_64_BIT
-constexpr size_t kAddressSpaceSoftLimit = 0x6000000000L; // 384 GiB
-constexpr size_t kAddressSpaceHardLimit = 0x10100000000L; // 1 TiB + 4 GiB
+constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
#else
-constexpr size_t kAddressSpaceSoftLimit = 0x90000000; // 2 GiB + 256 MiB
-constexpr size_t kAddressSpaceHardLimit = 0xC0000000; // 3 GiB
+constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
#endif
} // namespace
@@ -167,6 +151,7 @@ WasmMemoryTracker::~WasmMemoryTracker() {
// is destroyed.
DCHECK_EQ(reserved_address_space_, 0u);
DCHECK_EQ(allocated_address_space_, 0u);
+ DCHECK(allocations_.empty());
}
void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
@@ -178,15 +163,14 @@ void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
void* buffer_start) {
- ReleaseAllocation(nullptr, buffer_start);
+ base::MutexGuard scope_lock(&mutex_);
+ ReleaseAllocation_Locked(nullptr, buffer_start);
CHECK(FreePages(GetPlatformPageAllocator(),
reinterpret_cast<void*>(memory.begin()), memory.size()));
}
-bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
- ReservationLimit limit) {
- size_t reservation_limit =
- limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
+bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
+ size_t reservation_limit = kAddressSpaceLimit;
while (true) {
size_t old_count = reserved_address_space_.load();
if (old_count > reservation_limit) return false;
@@ -212,36 +196,30 @@ void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
base::MutexGuard scope_lock(&mutex_);
allocated_address_space_ += allocation_length;
- AddAddressSpaceSample(isolate);
+ // Report address space usage in MiB so the full range fits in an int on all
+ // platforms.
+ isolate->counters()->wasm_address_space_usage_mb()->AddSample(
+ static_cast<int>(allocated_address_space_ / MB));
allocations_.emplace(buffer_start,
AllocationData{allocation_base, allocation_length,
buffer_start, buffer_length});
}
-WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
+WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation_Locked(
Isolate* isolate, const void* buffer_start) {
- base::MutexGuard scope_lock(&mutex_);
-
auto find_result = allocations_.find(buffer_start);
CHECK_NE(find_result, allocations_.end());
- if (find_result != allocations_.end()) {
- size_t num_bytes = find_result->second.allocation_length;
- DCHECK_LE(num_bytes, reserved_address_space_);
- DCHECK_LE(num_bytes, allocated_address_space_);
- reserved_address_space_ -= num_bytes;
- allocated_address_space_ -= num_bytes;
- // ReleaseAllocation might be called with a nullptr as isolate if the
- // embedder is releasing the allocation and not a specific isolate. This
- // happens if the allocation was shared between multiple isolates (threads).
- if (isolate) AddAddressSpaceSample(isolate);
-
- AllocationData allocation_data = find_result->second;
- allocations_.erase(find_result);
- return allocation_data;
- }
- UNREACHABLE();
+ size_t num_bytes = find_result->second.allocation_length;
+ DCHECK_LE(num_bytes, reserved_address_space_);
+ DCHECK_LE(num_bytes, allocated_address_space_);
+ reserved_address_space_ -= num_bytes;
+ allocated_address_space_ -= num_bytes;
+
+ AllocationData allocation_data = find_result->second;
+ allocations_.erase(find_result);
+ return allocation_data;
}
const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
@@ -259,49 +237,325 @@ bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
return allocations_.find(buffer_start) != allocations_.end();
}
-bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
+bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) {
base::MutexGuard scope_lock(&mutex_);
- const auto allocation = allocations_.find(buffer_start);
+ const auto& result = allocations_.find(buffer_start);
+ // Should be a wasm allocation, and registered as a shared allocation.
+ return (result != allocations_.end() && result->second.is_shared);
+}
- if (allocation == allocations_.end()) {
- return false;
- }
+void WasmMemoryTracker::MarkWasmMemoryNotGrowable(
+ Handle<JSArrayBuffer> buffer) {
+ base::MutexGuard scope_lock(&mutex_);
+ const auto& allocation = allocations_.find(buffer->backing_store());
+ if (allocation == allocations_.end()) return;
+ allocation->second.is_growable = false;
+}
- Address start = reinterpret_cast<Address>(buffer_start);
- Address limit =
- reinterpret_cast<Address>(allocation->second.allocation_base) +
- allocation->second.allocation_length;
- return start + kWasmMaxHeapOffset < limit;
+bool WasmMemoryTracker::IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer) {
+ base::MutexGuard scope_lock(&mutex_);
+ if (buffer->backing_store() == nullptr) return true;
+ const auto& allocation = allocations_.find(buffer->backing_store());
+ if (allocation == allocations_.end()) return false;
+ return allocation->second.is_growable;
}
bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
const void* buffer_start) {
- if (IsWasmMemory(buffer_start)) {
- const AllocationData allocation = ReleaseAllocation(isolate, buffer_start);
+ base::MutexGuard scope_lock(&mutex_);
+ const auto& result = allocations_.find(buffer_start);
+ if (result == allocations_.end()) return false;
+ if (result->second.is_shared) {
+ // This is a shared WebAssembly.Memory allocation
+ FreeMemoryIfNotShared_Locked(isolate, buffer_start);
+ return true;
+ }
+ // This is a WebAssembly.Memory allocation
+ const AllocationData allocation =
+ ReleaseAllocation_Locked(isolate, buffer_start);
+ CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
+ allocation.allocation_length));
+ return true;
+}
+
+void WasmMemoryTracker::RegisterWasmMemoryAsShared(
+ Handle<WasmMemoryObject> object, Isolate* isolate) {
+ const void* backing_store = object->array_buffer()->backing_store();
+ // TODO(V8:8810): This should be a DCHECK, currently some tests do not
+ // use a full WebAssembly.Memory, and fail on registering so return early.
+ if (!IsWasmMemory(backing_store)) return;
+ {
+ base::MutexGuard scope_lock(&mutex_);
+    // Register the allocation as shared when it is post-messaged. This
+    // happens only the first time a buffer is shared over PostMessage; also
+    // track all memory objects associated with this backing store.
+ RegisterSharedWasmMemory_Locked(object, isolate);
+ // Add isolate to backing store mapping.
+ isolates_per_buffer_[backing_store].emplace(isolate);
+ }
+}
+
+void WasmMemoryTracker::SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
+ size_t new_size) {
+ base::MutexGuard scope_lock(&mutex_);
+ // Keep track of the new size of the buffer associated with each backing
+ // store.
+ AddBufferToGrowMap_Locked(old_buffer, new_size);
+  // Request a GROW_SHARED_MEMORY interrupt on all other isolates.
+ TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer);
+}
+
+void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) {
+ base::MutexGuard scope_lock(&mutex_);
+  // For every buffer in the grow_update_map_, update the size for all the
+ // memory objects associated with this isolate.
+ for (auto it = grow_update_map_.begin(); it != grow_update_map_.end();) {
+ UpdateSharedMemoryStateOnInterrupt_Locked(isolate, it->first, it->second);
+ // If all the isolates that share this buffer have hit a stack check, their
+ // memory objects are updated, and this grow entry can be erased.
+ if (AreAllIsolatesUpdated_Locked(it->first)) {
+ it = grow_update_map_.erase(it);
+ } else {
+ it++;
+ }
+ }
+}
+
+void WasmMemoryTracker::RegisterSharedWasmMemory_Locked(
+ Handle<WasmMemoryObject> object, Isolate* isolate) {
+ DCHECK(object->array_buffer()->is_shared());
+
+ void* backing_store = object->array_buffer()->backing_store();
+ // The allocation of a WasmMemoryObject should always be registered with the
+ // WasmMemoryTracker.
+ const auto& result = allocations_.find(backing_store);
+ if (result == allocations_.end()) return;
+
+  // Register the allocation as shared, if not already marked as shared.
+ if (!result->second.is_shared) result->second.is_shared = true;
+
+ // Create persistent global handles for the memory objects that are shared
+ GlobalHandles* global_handles = isolate->global_handles();
+ object = global_handles->Create(*object);
+
+  // Add to memory_object_vector to track memory objects and instance objects
+  // that will need to be updated on a Grow call.
+ result->second.memory_object_vector.push_back(
+ SharedMemoryObjectState(object, isolate));
+}
+
+void WasmMemoryTracker::AddBufferToGrowMap_Locked(
+ Handle<JSArrayBuffer> old_buffer, size_t new_size) {
+ void* backing_store = old_buffer->backing_store();
+ auto entry = grow_update_map_.find(old_buffer->backing_store());
+ if (entry == grow_update_map_.end()) {
+ // No pending grow for this backing store, add to map.
+ grow_update_map_.emplace(backing_store, new_size);
+ return;
+ }
+  // If a grow on the same buffer is requested before the update is complete,
+  // the new_size should always be greater than or equal to the old_size. They
+  // are equal when grow(0) is called, but new buffer handles are mandated by
+  // the spec.
+ CHECK_LE(entry->second, new_size);
+ entry->second = new_size;
+  // Flush the set of updated isolates every time a new grow size is recorded.
+ ClearUpdatedInstancesOnPendingGrow_Locked(backing_store);
+}
+
+void WasmMemoryTracker::TriggerSharedGrowInterruptOnAllIsolates_Locked(
+ Handle<JSArrayBuffer> old_buffer) {
+  // Request a GrowSharedMemory interrupt on all the isolates that share
+ // the backing store.
+ const auto& isolates = isolates_per_buffer_.find(old_buffer->backing_store());
+ for (const auto& isolate : isolates->second) {
+ isolate->stack_guard()->RequestGrowSharedMemory();
+ }
+}
+
+void WasmMemoryTracker::UpdateSharedMemoryStateOnInterrupt_Locked(
+ Isolate* isolate, void* backing_store, size_t new_size) {
+ // Update objects only if there are memory objects that share this backing
+ // store, and this isolate is marked as one of the isolates that shares this
+ // buffer.
+ if (MemoryObjectsNeedUpdate_Locked(isolate, backing_store)) {
+ UpdateMemoryObjectsForIsolate_Locked(isolate, backing_store, new_size);
+ // As the memory objects are updated, add this isolate to a set of isolates
+ // that are updated on grow. This state is maintained to track if all the
+ // isolates that share the backing store have hit a StackCheck.
+ isolates_updated_on_grow_[backing_store].emplace(isolate);
+ }
+}
+
+bool WasmMemoryTracker::AreAllIsolatesUpdated_Locked(
+ const void* backing_store) {
+ const auto& buffer_isolates = isolates_per_buffer_.find(backing_store);
+ // No isolates share this buffer.
+ if (buffer_isolates == isolates_per_buffer_.end()) return true;
+ const auto& updated_isolates = isolates_updated_on_grow_.find(backing_store);
+ // Some isolates share the buffer, but no isolates have been updated yet.
+ if (updated_isolates == isolates_updated_on_grow_.end()) return false;
+ if (buffer_isolates->second == updated_isolates->second) {
+ // If all the isolates that share this backing_store have hit a stack check,
+ // and the memory objects have been updated, remove the entry from the
+ // updatemap, and return true.
+ isolates_updated_on_grow_.erase(backing_store);
+ return true;
+ }
+ return false;
+}
+
+void WasmMemoryTracker::ClearUpdatedInstancesOnPendingGrow_Locked(
+ const void* backing_store) {
+ // On multiple grows to the same buffer, the entries for that buffer should be
+ // flushed. This is done so that any consecutive grows to the same buffer will
+ // update all instances that share this buffer.
+ const auto& value = isolates_updated_on_grow_.find(backing_store);
+ if (value != isolates_updated_on_grow_.end()) {
+ value->second.clear();
+ }
+}
+
+void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked(
+ Isolate* isolate, void* backing_store, size_t new_size) {
+ const auto& result = allocations_.find(backing_store);
+ if (result == allocations_.end() || !result->second.is_shared) return;
+ for (const auto& memory_obj_state : result->second.memory_object_vector) {
+ DCHECK_NE(memory_obj_state.isolate, nullptr);
+ if (isolate == memory_obj_state.isolate) {
+ HandleScope scope(isolate);
+ Handle<WasmMemoryObject> memory_object = memory_obj_state.memory_object;
+ DCHECK(memory_object->IsWasmMemoryObject());
+ DCHECK(memory_object->array_buffer()->is_shared());
+      // The permissions have already been adjusted on grow; here we only
+      // create a new buffer with the new size and the old attributes,
+      // reusing the already allocated backing store.
+ bool is_external = memory_object->array_buffer()->is_external();
+ Handle<JSArrayBuffer> new_buffer = SetupArrayBuffer(
+ isolate, backing_store, new_size, is_external, SharedFlag::kShared);
+ memory_obj_state.memory_object->update_instances(isolate, new_buffer);
+ }
+ }
+}
+
+bool WasmMemoryTracker::MemoryObjectsNeedUpdate_Locked(
+ Isolate* isolate, const void* backing_store) {
+ // Return true if this buffer has memory_objects it needs to update.
+ const auto& result = allocations_.find(backing_store);
+ if (result == allocations_.end() || !result->second.is_shared) return false;
+ // Only update if the buffer has memory objects that need to be updated.
+ if (result->second.memory_object_vector.empty()) return false;
+ const auto& isolate_entry = isolates_per_buffer_.find(backing_store);
+ return (isolate_entry != isolates_per_buffer_.end() &&
+ isolate_entry->second.count(isolate) != 0);
+}
+
+void WasmMemoryTracker::FreeMemoryIfNotShared_Locked(
+ Isolate* isolate, const void* backing_store) {
+ RemoveSharedBufferState_Locked(isolate, backing_store);
+ if (CanFreeSharedMemory_Locked(backing_store)) {
+ const AllocationData allocation =
+ ReleaseAllocation_Locked(isolate, backing_store);
CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
allocation.allocation_length));
+ }
+}
+
+bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) {
+ const auto& value = isolates_per_buffer_.find(backing_store);
+ // If no isolates share this buffer, backing store can be freed.
+ // Erase the buffer entry.
+ if (value == isolates_per_buffer_.end()) return true;
+ if (value->second.empty()) {
+ // If no isolates share this buffer, the global handles to memory objects
+ // associated with this buffer should have been destroyed.
+ // DCHECK(shared_memory_map_.find(backing_store) ==
+ // shared_memory_map_.end());
return true;
}
return false;
}
-void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) {
- // Report address space usage in MiB so the full range fits in an int on all
- // platforms.
- isolate->counters()->wasm_address_space_usage_mb()->AddSample(
- static_cast<int>(allocated_address_space_ >> 20));
+void WasmMemoryTracker::RemoveSharedBufferState_Locked(
+ Isolate* isolate, const void* backing_store) {
+ if (isolate != nullptr) {
+ DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
+ RemoveIsolateFromBackingStore_Locked(isolate, backing_store);
+ } else {
+    // This happens for externalized contents: clean up the shared memory
+    // state associated with this buffer across all isolates.
+ DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(backing_store);
+ }
+}
+
+void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
+ const void* backing_store) {
+ const auto& result = allocations_.find(backing_store);
+ CHECK(result != allocations_.end() && result->second.is_shared);
+ auto& object_vector = result->second.memory_object_vector;
+ if (object_vector.empty()) return;
+ for (const auto& mem_obj_state : object_vector) {
+ GlobalHandles::Destroy(mem_obj_state.memory_object.location());
+ }
+ object_vector.clear();
+ // Remove isolate from backing store map.
+ isolates_per_buffer_.erase(backing_store);
+}
+
+void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
+ Isolate* isolate, const void* backing_store) {
+  // This gets called when an internal handle to the ArrayBuffer should be
+  // freed, i.e. on heap tear-down for that isolate. Remove the memory objects
+  // that are associated with this buffer and isolate.
+ const auto& result = allocations_.find(backing_store);
+ CHECK(result != allocations_.end() && result->second.is_shared);
+ auto& object_vector = result->second.memory_object_vector;
+ if (object_vector.empty()) return;
+ for (auto it = object_vector.begin(); it != object_vector.end();) {
+ if (isolate == it->isolate) {
+ GlobalHandles::Destroy(it->memory_object.location());
+ it = object_vector.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+void WasmMemoryTracker::RemoveIsolateFromBackingStore_Locked(
+ Isolate* isolate, const void* backing_store) {
+ const auto& isolates = isolates_per_buffer_.find(backing_store);
+ if (isolates == isolates_per_buffer_.end() || isolates->second.empty())
+ return;
+ isolates->second.erase(isolate);
+}
+
+void WasmMemoryTracker::DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate) {
+ base::MutexGuard scope_lock(&mutex_);
+  // This is possible for buffers that are externalized and whose handles have
+  // been freed; the backing store was not released because externalized
+  // contents were still using it.
+ if (isolates_per_buffer_.empty()) return;
+ for (auto& entry : isolates_per_buffer_) {
+ if (entry.second.find(isolate) == entry.second.end()) continue;
+ const void* backing_store = entry.first;
+ entry.second.erase(isolate);
+ DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
+ }
+ for (auto& buffer_isolates : isolates_updated_on_grow_) {
+ auto& isolates = buffer_isolates.second;
+ isolates.erase(isolate);
+ }
}
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
size_t size, bool is_external,
SharedFlag shared) {
Handle<JSArrayBuffer> buffer =
- isolate->factory()->NewJSArrayBuffer(shared, TENURED);
+ isolate->factory()->NewJSArrayBuffer(shared, AllocationType::kOld);
constexpr bool is_wasm_memory = true;
JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
shared, is_wasm_memory);
buffer->set_is_detachable(false);
- buffer->set_is_growable(true);
return buffer;
}
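
The shared-memory grow machinery added to WasmMemoryTracker keeps three pieces of state per backing store: the set of isolates sharing it, the pending post-grow size, and the set of isolates whose memory objects have already been updated after the grow interrupt; the pending entry can be dropped once all sharing isolates appear in the updated set. A toy model of just that bookkeeping, with isolates reduced to integer ids and all names invented for illustration:

    #include <cstddef>
    #include <unordered_map>
    #include <unordered_set>

    // Toy model of the cross-isolate grow bookkeeping, not V8's classes.
    class SharedGrowTracker {
     public:
      void RegisterSharer(const void* store, int isolate) {
        isolates_per_buffer_[store].insert(isolate);
      }

      // Called by the growing isolate after enlarging the backing store.
      void SetPendingGrow(const void* store, size_t new_size) {
        grow_update_map_[store] = new_size;
        isolates_updated_on_grow_[store].clear();  // restart the "who saw it" set
      }

      // Called from each isolate's interrupt handler; returns the size it
      // should now expose and records that this isolate has been updated.
      size_t HandleGrowInterrupt(const void* store, int isolate) {
        auto it = grow_update_map_.find(store);
        if (it == grow_update_map_.end()) return 0;  // no grow pending
        isolates_updated_on_grow_[store].insert(isolate);
        return it->second;
      }

      // The pending entry can be dropped once every sharing isolate was updated.
      bool AllIsolatesUpdated(const void* store) const {
        auto sharers = isolates_per_buffer_.find(store);
        if (sharers == isolates_per_buffer_.end()) return true;
        auto updated = isolates_updated_on_grow_.find(store);
        return updated != isolates_updated_on_grow_.end() &&
               updated->second == sharers->second;
      }

     private:
      std::unordered_map<const void*, std::unordered_set<int>> isolates_per_buffer_;
      std::unordered_map<const void*, std::unordered_set<int>> isolates_updated_on_grow_;
      std::unordered_map<const void*, size_t> grow_update_map_;
    };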
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 09832146b7..8cda54eaf9 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -7,6 +7,7 @@
#include <atomic>
#include <unordered_map>
+#include <unordered_set>
#include "src/base/platform/mutex.h"
#include "src/flags.h"
@@ -30,20 +31,36 @@ class WasmMemoryTracker {
// ReserveAddressSpace attempts to increase the reserved address space counter
// by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
// and reserve {num_bytes} bytes), false otherwise.
- // Use {kSoftLimit} if you can implement a fallback which needs less reserved
- // memory.
- enum ReservationLimit { kSoftLimit, kHardLimit };
- bool ReserveAddressSpace(size_t num_bytes, ReservationLimit limit);
+ bool ReserveAddressSpace(size_t num_bytes);
void RegisterAllocation(Isolate* isolate, void* allocation_base,
size_t allocation_length, void* buffer_start,
size_t buffer_length);
+ struct SharedMemoryObjectState {
+ Handle<WasmMemoryObject> memory_object;
+ Isolate* isolate;
+
+ SharedMemoryObjectState() = default;
+ SharedMemoryObjectState(Handle<WasmMemoryObject> memory_object,
+ Isolate* isolate)
+ : memory_object(memory_object), isolate(isolate) {}
+ };
+
struct AllocationData {
void* allocation_base = nullptr;
size_t allocation_length = 0;
void* buffer_start = nullptr;
size_t buffer_length = 0;
+ bool is_shared = false;
+    // Wasm memories are growable by default; this will be false only when
+    // shared with an asm.js module.
+ bool is_growable = true;
+
+    // Track Wasm Memory instances across isolates; this is populated on
+ // PostMessage using persistent handles for memory objects.
+ std::vector<WasmMemoryTracker::SharedMemoryObjectState>
+ memory_object_vector;
private:
AllocationData() = default;
@@ -81,24 +98,44 @@ class WasmMemoryTracker {
// Decreases the amount of reserved address space.
void ReleaseReservation(size_t num_bytes);
- // Removes an allocation from the tracker.
- AllocationData ReleaseAllocation(Isolate* isolate, const void* buffer_start);
-
- bool IsWasmMemory(const void* buffer_start);
+ V8_EXPORT_PRIVATE bool IsWasmMemory(const void* buffer_start);
- // Returns whether the given buffer is a Wasm memory with guard regions large
- // enough to safely use trap handlers.
- bool HasFullGuardRegions(const void* buffer_start);
+ bool IsWasmSharedMemory(const void* buffer_start);
// Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
// buffer is not tracked.
- const AllocationData* FindAllocationData(const void* buffer_start);
+ V8_EXPORT_PRIVATE const AllocationData* FindAllocationData(
+ const void* buffer_start);
// Checks if a buffer points to a Wasm memory and if so does any necessary
// work to reclaim the buffer. If this function returns false, the caller must
// free the buffer manually.
bool FreeMemoryIfIsWasmMemory(Isolate* isolate, const void* buffer_start);
+ void MarkWasmMemoryNotGrowable(Handle<JSArrayBuffer> buffer);
+
+ bool IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer);
+
+ // When WebAssembly.Memory is transferred over PostMessage, register the
+ // allocation as shared and track the memory objects that will need
+ // updating if memory is resized.
+ void RegisterWasmMemoryAsShared(Handle<WasmMemoryObject> object,
+ Isolate* isolate);
+
+ // This method is called when the underlying backing store is grown, but
+ // instances that share the backing_store have not yet been updated.
+ void SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
+ size_t new_size);
+
+ // Interrupt handler for GROW_SHARED_MEMORY interrupt. Update memory objects
+ // and instances that share the memory objects after a Grow call.
+ void UpdateSharedMemoryInstances(Isolate* isolate);
+
+  // Due to the timing of when buffers are garbage collected vs. when isolate
+  // object handles are destroyed, it is possible to leak global handles. To
+  // avoid this, clean up any remaining global handles on isolate destruction.
+ void DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate);
+
// Allocation results are reported to UMA
//
// See wasm_memory_allocation_result in counters.h
@@ -114,7 +151,68 @@ class WasmMemoryTracker {
};
private:
- void AddAddressSpaceSample(Isolate* isolate);
+  // Helper methods to free memory only if it is not shared by other isolates
+  // or memory objects.
+ void FreeMemoryIfNotShared_Locked(Isolate* isolate,
+ const void* backing_store);
+ bool CanFreeSharedMemory_Locked(const void* backing_store);
+ void RemoveSharedBufferState_Locked(Isolate* isolate,
+ const void* backing_store);
+
+ // Registers the allocation as shared, and tracks all the memory objects
+  // associated with this allocation across isolates.
+ void RegisterSharedWasmMemory_Locked(Handle<WasmMemoryObject> object,
+ Isolate* isolate);
+
+ // Map the new size after grow to the buffer backing store, so that instances
+ // and memory objects that share the WebAssembly.Memory across isolates can
+  // be updated.
+ void AddBufferToGrowMap_Locked(Handle<JSArrayBuffer> old_buffer,
+ size_t new_size);
+
+ // Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have memory
+ // objects that share this buffer.
+ void TriggerSharedGrowInterruptOnAllIsolates_Locked(
+ Handle<JSArrayBuffer> old_buffer);
+
+ // When isolates hit a stack check, update the memory objects associated with
+ // that isolate.
+ void UpdateSharedMemoryStateOnInterrupt_Locked(Isolate* isolate,
+ void* backing_store,
+ size_t new_size);
+
+ // Check if all the isolates that share a backing_store have hit a stack
+ // check. If a stack check is hit, and the backing store is pending grow,
+ // this isolate will have updated memory objects.
+ bool AreAllIsolatesUpdated_Locked(const void* backing_store);
+
+ // If a grow call is made to a buffer with a pending grow, and all the
+ // isolates that share this buffer have not hit a StackCheck, clear the set of
+ // already updated instances so they can be updated with the new size on the
+ // most recent grow call.
+ void ClearUpdatedInstancesOnPendingGrow_Locked(const void* backing_store);
+
+ // Helper functions to update memory objects on grow, and maintain state for
+ // which isolates hit a stack check.
+ void UpdateMemoryObjectsForIsolate_Locked(Isolate* isolate,
+ void* backing_store,
+ size_t new_size);
+ bool MemoryObjectsNeedUpdate_Locked(Isolate* isolate,
+ const void* backing_store);
+
+ // Destroy global handles to memory objects, and remove backing store from
+ // isolates_per_buffer on Free.
+ void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
+ Isolate* isolate, const void* backing_store);
+ void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
+ const void* backing_store);
+
+ void RemoveIsolateFromBackingStore_Locked(Isolate* isolate,
+ const void* backing_store);
+
+ // Removes an allocation from the tracker.
+ AllocationData ReleaseAllocation_Locked(Isolate* isolate,
+ const void* buffer_start);
// Clients use a two-part process. First they "reserve" the address space,
// which signifies an intent to actually allocate it. This determines whether
@@ -132,31 +230,59 @@ class WasmMemoryTracker {
size_t allocated_address_space_ = 0;
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {mutex_}:
+
// Track Wasm memory allocation information. This is keyed by the start of the
// buffer, rather than by the start of the allocation.
std::unordered_map<const void*, AllocationData> allocations_;
+ // Maps each buffer to the isolates that share the backing store.
+ std::unordered_map<const void*, std::unordered_set<Isolate*>>
+ isolates_per_buffer_;
+
+ // Maps which isolates have had a grow interrupt handled on the buffer. This
+ // is maintained to ensure that the instances are updated with the right size
+ // on Grow.
+ std::unordered_map<const void*, std::unordered_set<Isolate*>>
+ isolates_updated_on_grow_;
+
+  // Maps backing stores (void*) to the size of the underlying memory in
+  // (size_t). An entry in this map is made on a grow call to the corresponding
+ // backing store. On consecutive grow calls to the same backing store,
+ // the size entry is updated. This entry is made right after the mprotect
+ // call to change the protections on a backing_store, so the memory objects
+ // have not been updated yet. The backing store entry in this map is erased
+ // when all the memory objects, or instances that share this backing store
+ // have their bounds updated.
+ std::unordered_map<void*, size_t> grow_update_map_;
+
+ // End of fields protected by {mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
+
DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
};
// Attempts to allocate an array buffer with guard regions suitable for trap
// handling. If address space is not available, it will return a buffer with
// mini-guards that will require bounds checks.
-MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*, size_t size);
+V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*,
+ size_t size);
// Attempts to allocate a SharedArrayBuffer with guard regions suitable for
// trap handling. If address space is not available, it will try to reserve
// up to the maximum for that memory. If all else fails, it will return a
// buffer with mini-guards of initial size.
-MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(Isolate*, size_t initial_size,
- size_t max_size);
+V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(
+ Isolate*, size_t initial_size, size_t max_size);
Handle<JSArrayBuffer> SetupArrayBuffer(
Isolate*, void* backing_store, size_t size, bool is_external,
SharedFlag shared = SharedFlag::kNotShared);
-void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
- bool free_memory);
+V8_EXPORT_PRIVATE void DetachMemoryBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> buffer,
+ bool free_memory);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 15a4b0bbf1..20a33f2cb9 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -363,7 +363,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(functions_.size());
for (auto function : functions_) {
function->WriteSignature(buffer);
- if (!function->name_.is_empty()) ++num_function_names;
+ if (!function->name_.empty()) ++num_function_names;
}
FixupSection(buffer, start);
}
@@ -544,7 +544,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
uint32_t function_index = 0;
for (; function_index < num_imports; ++function_index) {
const WasmFunctionImport* import = &function_imports_[function_index];
- DCHECK(!import->name.is_empty());
+ DCHECK(!import->name.empty());
buffer.write_u32v(function_index);
buffer.write_string(import->name);
}
@@ -552,7 +552,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
for (auto function : functions_) {
DCHECK_EQ(function_index,
function->func_index() + function_imports_.size());
- if (!function->name_.is_empty()) {
+ if (!function->name_.empty()) {
buffer.write_u32v(function_index);
buffer.write_string(function->name_);
}
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 3502a03272..53fe290d55 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -27,6 +27,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+// static
+const uint32_t WasmElemSegment::kNullIndex;
+
WireBytesRef WasmModule::LookupFunctionName(const ModuleWireBytes& wire_bytes,
uint32_t function_index) const {
if (!function_names) {
@@ -63,7 +66,7 @@ WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
os << "#" << name.function_->func_index;
- if (!name.name_.is_empty()) {
+ if (!name.name_.empty()) {
if (name.name_.start()) {
os << ":";
os.write(name.name_.start(), name.name_.length());
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index bd507e5c15..c4f171ecf8 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -72,7 +72,7 @@ struct WasmGlobal {
// Note: An exception signature only uses the params portion of a
// function signature.
-typedef FunctionSig WasmExceptionSig;
+using WasmExceptionSig = FunctionSig;
// Static representation of a wasm exception type.
struct WasmException {
@@ -103,8 +103,6 @@ struct WasmTable {
uint32_t initial_size = 0; // initial table size.
uint32_t maximum_size = 0; // maximum table size.
bool has_maximum_size = false; // true if there is a maximum size.
- // TODO(titzer): Move this to WasmInstance. Needed by interpreter only.
- std::vector<int32_t> values; // function table, -1 indicating invalid.
bool imported = false; // true if imported.
bool exported = false; // true if exported.
};
@@ -122,7 +120,7 @@ struct WasmElemSegment {
// Used in the {entries} vector to represent a `ref.null` entry in a passive
// segment.
- static const uint32_t kNullIndex = ~0u;
+ V8_EXPORT_PRIVATE static const uint32_t kNullIndex = ~0u;
uint32_t table_index;
WasmInitExpr offset;
@@ -132,17 +130,37 @@ struct WasmElemSegment {
// Static representation of a wasm import.
struct WasmImport {
- WireBytesRef module_name; // module name.
- WireBytesRef field_name; // import name.
+ WireBytesRef module_name; // module name.
+ WireBytesRef field_name; // import name.
ImportExportKindCode kind; // kind of the import.
- uint32_t index; // index into the respective space.
+ uint32_t index; // index into the respective space.
};
// Static representation of a wasm export.
struct WasmExport {
- WireBytesRef name; // exported name.
+ WireBytesRef name; // exported name.
ImportExportKindCode kind; // kind of the export.
- uint32_t index; // index into the respective space.
+ uint32_t index; // index into the respective space.
+};
+
+enum class WasmCompilationHintStrategy : uint8_t {
+ kDefault = 0,
+ kLazy = 1,
+ kEager = 2,
+};
+
+enum class WasmCompilationHintTier : uint8_t {
+ kDefault = 0,
+ kInterpreter = 1,
+ kBaseline = 2,
+ kOptimized = 3,
+};
+
+// Static representation of a wasm compilation hint
+struct WasmCompilationHint {
+ WasmCompilationHintStrategy strategy;
+ WasmCompilationHintTier baseline_tier;
+ WasmCompilationHintTier top_tier;
};
enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
@@ -177,6 +195,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section.
+ uint32_t num_lazy_compilation_hints = 0; // From compilation hints section.
WireBytesRef name = {0, 0};
std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index
@@ -187,6 +206,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmExport> export_table;
std::vector<WasmException> exceptions;
std::vector<WasmElemSegment> elem_segments;
+ std::vector<WasmCompilationHint> compilation_hints;
SignatureMap signature_map; // canonicalizing map for signature indexes.
ModuleOrigin origin = kWasmOrigin; // origin of the module
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index b5180804cf..c1f9e7876a 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -97,8 +97,8 @@ OPTIONAL_ACCESSORS(WasmModuleObject, breakpoint_infos, FixedArray,
wasm::NativeModule* WasmModuleObject::native_module() const {
return managed_native_module()->raw();
}
-std::shared_ptr<wasm::NativeModule> WasmModuleObject::shared_native_module()
- const {
+const std::shared_ptr<wasm::NativeModule>&
+WasmModuleObject::shared_native_module() const {
return managed_native_module()->get();
}
const wasm::WasmModule* WasmModuleObject::module() const {
@@ -120,6 +120,7 @@ bool WasmModuleObject::is_asm_js() {
ACCESSORS(WasmTableObject, elements, FixedArray, kElementsOffset)
ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
+SMI_ACCESSORS(WasmTableObject, raw_type, kRawTypeOffset)
// WasmMemoryObject
ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
@@ -162,8 +163,9 @@ double WasmGlobalObject::GetF64() {
return ReadLittleEndianValue<double>(address());
}
-Handle<Object> WasmGlobalObject::GetAnyRef() {
- DCHECK_EQ(type(), wasm::kWasmAnyRef);
+Handle<Object> WasmGlobalObject::GetRef() {
+ // We use this getter for anyref, anyfunc, and except_ref.
+ DCHECK(wasm::ValueTypes::IsReferenceType(type()));
return handle(tagged_buffer()->get(offset()), GetIsolate());
}
@@ -184,10 +186,21 @@ void WasmGlobalObject::SetF64(double value) {
}
void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
- DCHECK_EQ(type(), wasm::kWasmAnyRef);
+ // We use this getter anyref and except_ref.
+ DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExceptRef);
tagged_buffer()->set(offset(), *value);
}
+bool WasmGlobalObject::SetAnyFunc(Isolate* isolate, Handle<Object> value) {
+ DCHECK_EQ(type(), wasm::kWasmAnyFunc);
+ if (!value->IsNull(isolate) &&
+ !WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ return false;
+ }
+ tagged_buffer()->set(offset(), *value);
+ return true;
+}
+
// WasmInstanceObject
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
@@ -235,8 +248,6 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, imported_mutable_globals_buffers,
FixedArray, kImportedMutableGlobalsBuffersOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
- kTableObjectOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tables, FixedArray, kTablesOffset)
ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
kImportedFunctionRefsOffset)
@@ -315,6 +326,10 @@ OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
uint32_t WasmTableObject::current_length() { return elements()->length(); }
+wasm::ValueType WasmTableObject::type() {
+ return static_cast<wasm::ValueType>(raw_type());
+}
+
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
// WasmExceptionTag
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 80e4f0f110..950fc8bc45 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -21,6 +21,7 @@
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
@@ -232,8 +233,8 @@ Handle<WasmModuleObject> WasmModuleObject::New(
Handle<Script> script, size_t code_size_estimate) {
const WasmModule* module = native_module->module();
int export_wrapper_size = static_cast<int>(module->num_exported_functions);
- Handle<FixedArray> export_wrappers =
- isolate->factory()->NewFixedArray(export_wrapper_size, TENURED);
+ Handle<FixedArray> export_wrappers = isolate->factory()->NewFixedArray(
+ export_wrapper_size, AllocationType::kOld);
return New(isolate, std::move(native_module), script, export_wrappers,
code_size_estimate);
}
@@ -345,7 +346,8 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
if (module_object->has_breakpoint_infos()) {
breakpoint_infos = handle(module_object->breakpoint_infos(), isolate);
} else {
- breakpoint_infos = isolate->factory()->NewFixedArray(4, TENURED);
+ breakpoint_infos =
+ isolate->factory()->NewFixedArray(4, AllocationType::kOld);
module_object->set_breakpoint_infos(*breakpoint_infos);
}
@@ -369,7 +371,7 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
Handle<FixedArray> new_breakpoint_infos = breakpoint_infos;
if (need_realloc) {
new_breakpoint_infos = isolate->factory()->NewFixedArray(
- 2 * breakpoint_infos->length(), TENURED);
+ 2 * breakpoint_infos->length(), AllocationType::kOld);
module_object->set_breakpoint_infos(*new_breakpoint_infos);
// Copy over the entries [0, insert_pos).
for (int i = 0; i < insert_pos; ++i)
@@ -472,7 +474,7 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
int total_size = 1 + num_entries * kOTESize * kIntSize;
Handle<ByteArray> decoded_table =
- isolate->factory()->NewByteArray(total_size, TENURED);
+ isolate->factory()->NewByteArray(total_size, AllocationType::kOld);
decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
module_object->set_asm_js_offset_table(*decoded_table);
@@ -776,26 +778,35 @@ bool WasmModuleObject::GetPositionInfo(uint32_t position,
return true;
}
-Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
+Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
+ wasm::ValueType type,
+ uint32_t initial, bool has_maximum,
uint32_t maximum,
- Handle<FixedArray>* js_functions) {
- Handle<JSFunction> table_ctor(
- isolate->native_context()->wasm_table_constructor(), isolate);
- auto table_obj = Handle<WasmTableObject>::cast(
- isolate->factory()->NewJSObject(table_ctor));
-
+ Handle<FixedArray>* elements) {
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray(initial);
Object null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
backing_store->set(i, null);
}
+
+ Handle<JSFunction> table_ctor(
+ isolate->native_context()->wasm_table_constructor(), isolate);
+ auto table_obj = Handle<WasmTableObject>::cast(
+ isolate->factory()->NewJSObject(table_ctor));
+
+ table_obj->set_raw_type(static_cast<int>(type));
table_obj->set_elements(*backing_store);
- Handle<Object> max = isolate->factory()->NewNumberFromUint(maximum);
+ Handle<Object> max;
+ if (has_maximum) {
+ max = isolate->factory()->NewNumberFromUint(maximum);
+ } else {
+ max = isolate->factory()->undefined_value();
+ }
table_obj->set_maximum_length(*max);
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
- if (js_functions != nullptr) {
- *js_functions = backing_store;
+ if (elements != nullptr) {
+ *elements = backing_store;
}
return Handle<WasmTableObject>::cast(table_obj);
}
@@ -848,30 +859,121 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
}
}
+bool WasmTableObject::IsInBounds(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ uint32_t entry_index) {
+ return (entry_index <
+ static_cast<uint32_t>(std::numeric_limits<int>::max()) &&
+ static_cast<int>(entry_index) < table->elements()->length());
+}
+
+bool WasmTableObject::IsValidElement(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ Handle<Object> element) {
+ // Anyref tables take everything.
+ if (table->type() == wasm::kWasmAnyRef) return true;
+ // Anyfunc tables can store {null} or {WasmExportedFunction} objects.
+ if (element->IsNull(isolate)) return true;
+ return WasmExportedFunction::IsWasmExportedFunction(*element);
+}
+
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t table_index, Handle<JSFunction> function) {
- Handle<FixedArray> array(table->elements(), isolate);
- if (function.is_null()) {
- ClearDispatchTables(isolate, table, table_index); // Degenerate case.
- array->set(table_index, ReadOnlyRoots(isolate).null_value());
+ uint32_t index, Handle<Object> element) {
+ // Callers need to perform bounds checks, type checking, and error handling.
+ DCHECK(IsInBounds(isolate, table, index));
+ DCHECK(IsValidElement(isolate, table, element));
+
+ Handle<FixedArray> elements(table->elements(), isolate);
+ // The FixedArray is addressed with ints.
+ int entry_index = static_cast<int>(index);
+ if (table->type() == wasm::kWasmAnyRef) {
+ elements->set(entry_index, *element);
+ return;
+ }
+
+ if (element->IsNull(isolate)) {
+ ClearDispatchTables(isolate, table, entry_index); // Degenerate case.
+ elements->set(entry_index, ReadOnlyRoots(isolate).null_value());
return;
}
- auto exported_function = Handle<WasmExportedFunction>::cast(function);
+ DCHECK(WasmExportedFunction::IsWasmExportedFunction(*element));
+ auto exported_function = Handle<WasmExportedFunction>::cast(element);
Handle<WasmInstanceObject> target_instance(exported_function->instance(),
isolate);
int func_index = exported_function->function_index();
auto* wasm_function = &target_instance->module()->functions[func_index];
DCHECK_NOT_NULL(wasm_function);
DCHECK_NOT_NULL(wasm_function->sig);
- UpdateDispatchTables(isolate, table, table_index, wasm_function->sig,
+ UpdateDispatchTables(isolate, table, entry_index, wasm_function->sig,
handle(exported_function->instance(), isolate),
func_index);
- array->set(table_index, *function);
+ elements->set(entry_index, *element);
+}
+
+Handle<Object> WasmTableObject::Get(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ uint32_t index) {
+ Handle<FixedArray> elements(table->elements(), isolate);
+ // Callers need to perform bounds checks and error handling.
+ DCHECK(IsInBounds(isolate, table, index));
+
+ // The FixedArray is addressed with ints.
+ int entry_index = static_cast<int>(index);
+
+ Handle<Object> element(elements->get(entry_index), isolate);
+
+ // First we handle the easy anyref table case.
+ if (table->type() == wasm::kWasmAnyRef) return element;
+
+ // Now we handle the anyfunc case.
+ if (WasmExportedFunction::IsWasmExportedFunction(*element)) {
+ return element;
+ }
+
+ if (element->IsNull(isolate)) {
+ return element;
+ }
+
+ // {element} is not a valid entry in the table. It has to be a placeholder
+ // for lazy initialization.
+ Handle<Tuple2> tuple = Handle<Tuple2>::cast(element);
+ auto instance = handle(WasmInstanceObject::cast(tuple->value1()), isolate);
+ int function_index = Smi::cast(tuple->value2()).value();
+
+ // Check if we already compiled a wrapper for the function but did not store
+ // it in the table slot yet.
+ MaybeHandle<Object> maybe_element =
+ WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
+ function_index);
+ if (maybe_element.ToHandle(&element)) {
+ elements->set(entry_index, *element);
+ return element;
+ }
+
+ const WasmModule* module = instance->module_object()->module();
+ const WasmFunction& function = module->functions[function_index];
+ // Exported functions get their wrappers compiled during instantiation.
+ CHECK(!function.exported);
+ Handle<Code> wrapper_code =
+ compiler::CompileJSToWasmWrapper(isolate, function.sig, function.imported)
+ .ToHandleChecked();
+
+ MaybeHandle<String> function_name = WasmModuleObject::GetFunctionNameOrNull(
+ isolate, handle(instance->module_object(), isolate), function_index);
+
+ Handle<WasmExportedFunction> result = WasmExportedFunction::New(
+ isolate, instance, function_name, function_index,
+ static_cast<int>(function.sig->parameter_count()), wrapper_code);
+
+ elements->set(entry_index, *result);
+ WasmInstanceObject::SetWasmExportedFunction(isolate, instance, function_index,
+ result);
+ return result;
}
void WasmTableObject::UpdateDispatchTables(
- Isolate* isolate, Handle<WasmTableObject> table, int table_index,
+ Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
wasm::FunctionSig* sig, Handle<WasmInstanceObject> target_instance,
int target_func_index) {
// We simply need to update the IFTs for each instance that imports
@@ -881,6 +983,13 @@ void WasmTableObject::UpdateDispatchTables(
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset))->value();
+ if (table_index > 0) {
+ // Only table 0 has a dispatch table in the instance at the moment.
+ // TODO(ahaas): Introduce dispatch tables for the other tables as well.
+ continue;
+ }
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
@@ -888,7 +997,7 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(*sig);
- IndirectFunctionTableEntry(instance, table_index)
+ IndirectFunctionTableEntry(instance, entry_index)
.Set(sig_id, target_instance, target_func_index);
}
}
@@ -909,13 +1018,69 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
}
}
+void WasmTableObject::SetFunctionTablePlaceholder(
+ Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
+ Handle<WasmInstanceObject> instance, int func_index) {
+ // Put (instance, func_index) as a Tuple2 into the table at {entry_index}.
+ // The {WasmExportedFunction} will be created lazily.
+ Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
+ instance, Handle<Smi>(Smi::FromInt(func_index), isolate),
+ AllocationType::kYoung);
+ table->elements()->set(entry_index, *tuple);
+}
+
+void WasmTableObject::GetFunctionTableEntry(
+ Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
+ bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
+ int* function_index) {
+ DCHECK_EQ(table->type(), wasm::kWasmAnyFunc);
+ DCHECK_LT(entry_index, table->elements()->length());
+ // We initialize {is_valid} with {true}. We may change it later.
+ *is_valid = true;
+ Handle<Object> element(table->elements()->get(entry_index), isolate);
+
+ *is_null = element->IsNull(isolate);
+ if (*is_null) return;
+
+ if (WasmExportedFunction::IsWasmExportedFunction(*element)) {
+ auto target_func = Handle<WasmExportedFunction>::cast(element);
+ *instance = handle(target_func->instance(), isolate);
+ *function_index = target_func->function_index();
+ return;
+ } else if (element->IsTuple2()) {
+ auto tuple = Handle<Tuple2>::cast(element);
+ *instance = handle(WasmInstanceObject::cast(tuple->value1()), isolate);
+ *function_index = Smi::cast(tuple->value2()).value();
+ return;
+ }
+ *is_valid = false;
+}
+
namespace {
+bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer,
+ size_t new_size) {
+ if (new_size > old_buffer->allocation_length()) return false;
+ void* old_mem_start = old_buffer->backing_store();
+ size_t old_size = old_buffer->byte_length();
+ if (old_size != new_size) {
+ DCHECK_NOT_NULL(old_mem_start);
+ DCHECK_GE(new_size, old_size);
+ // If adjusting permissions fails, propagate the error back to report
+ // failure to grow.
+ if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start, new_size,
+ PageAllocator::kReadWrite)) {
+ return false;
+ }
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
+ }
+ return true;
+}
+
MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
size_t new_size) {
CHECK_EQ(0, new_size % wasm::kWasmPageSize);
- size_t old_size = old_buffer->byte_length();
- void* old_mem_start = old_buffer->backing_store();
// Reusing the backing store from externalized buffers causes problems with
// Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and
@@ -927,15 +1092,8 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
if (!wasm::NewArrayBuffer(isolate, new_size).ToHandle(&new_buffer)) {
return {};
}
- wasm::WasmMemoryTracker* const memory_tracker =
- isolate->wasm_engine()->memory_tracker();
- // If the old buffer had full guard regions, we can only safely use the new
- // buffer if it also has full guard regions. Otherwise, we'd have to
- // recompile all the instances using this memory to insert bounds checks.
- if (memory_tracker->HasFullGuardRegions(old_mem_start) &&
- !memory_tracker->HasFullGuardRegions(new_buffer->backing_store())) {
- return {};
- }
+ void* old_mem_start = old_buffer->backing_store();
+ size_t old_size = old_buffer->byte_length();
if (old_size == 0) return new_buffer;
memcpy(new_buffer->backing_store(), old_mem_start, old_size);
DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
@@ -943,18 +1101,7 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
} else {
- if (old_size != new_size) {
- DCHECK_NOT_NULL(old_buffer->backing_store());
- // If adjusting permissions fails, propagate error back to return
- // failure to grow.
- if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start,
- new_size, PageAllocator::kReadWrite)) {
- return {};
- }
- DCHECK_GE(new_size, old_size);
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
- }
+ if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) return {};
// NOTE: We must allocate a new array buffer here because the spec
// assumes that ArrayBuffers do not change size.
void* backing_store = old_buffer->backing_store();
@@ -993,20 +1140,21 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<WasmMemoryObject> WasmMemoryObject::New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
uint32_t maximum) {
+ Handle<JSArrayBuffer> buffer;
+ if (!maybe_buffer.ToHandle(&buffer)) {
+ // If no buffer was provided, create a 0-length one.
+ buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false);
+ }
+
// TODO(kschimpf): Do we need to add an argument that defines the
// style of memory the user prefers (with/without trap handling), so
// that the memory will match the style of the compiled wasm module.
// See issue v8:7143
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor(), isolate);
- auto memory_obj = Handle<WasmMemoryObject>::cast(
- isolate->factory()->NewJSObject(memory_ctor, TENURED));
- Handle<JSArrayBuffer> buffer;
- if (!maybe_buffer.ToHandle(&buffer)) {
- // If no buffer was provided, create a 0-length one.
- buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false);
- }
+ auto memory_obj = Handle<WasmMemoryObject>::cast(
+ isolate->factory()->NewJSObject(memory_ctor, AllocationType::kOld));
memory_obj->set_array_buffer(*buffer);
memory_obj->set_maximum_pages(maximum);
@@ -1035,30 +1183,6 @@ MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
return New(isolate, buffer, maximum);
}
-bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) {
- const wasm::WasmMemoryTracker::AllocationData* allocation =
- isolate->wasm_engine()->memory_tracker()->FindAllocationData(
- array_buffer()->backing_store());
- CHECK_NOT_NULL(allocation);
-
- Address allocation_base =
- reinterpret_cast<Address>(allocation->allocation_base);
- Address buffer_start = reinterpret_cast<Address>(allocation->buffer_start);
-
- // Return whether the allocation covers every possible Wasm heap index.
- //
- // We always have the following relationship:
- // allocation_base <= buffer_start <= buffer_start + memory_size <=
- // allocation_base + allocation_length
- // (in other words, the buffer fits within the allocation)
- //
- // The space between buffer_start + memory_size and allocation_base +
- // allocation_length is the guard region. Here we make sure the guard region
- // is large enough for any Wasm heap offset.
- return buffer_start + wasm::kWasmMaxHeapOffset <=
- allocation_base + allocation->allocation_length;
-}
-
void WasmMemoryObject::AddInstance(Isolate* isolate,
Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> instance) {
@@ -1074,15 +1198,34 @@ void WasmMemoryObject::AddInstance(Isolate* isolate,
SetInstanceMemory(instance, buffer);
}
+void WasmMemoryObject::update_instances(Isolate* isolate,
+ Handle<JSArrayBuffer> buffer) {
+ if (has_instances()) {
+ Handle<WeakArrayList> instances(this->instances(), isolate);
+ for (int i = 0; i < instances->length(); i++) {
+ MaybeObject elem = instances->Get(i);
+ HeapObject heap_object;
+ if (elem->GetHeapObjectIfWeak(&heap_object)) {
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(heap_object), isolate);
+ SetInstanceMemory(instance, buffer);
+ } else {
+ DCHECK(elem->IsCleared());
+ }
+ }
+ }
+ set_array_buffer(*buffer);
+}
+
// static
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
uint32_t pages) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
- // TODO(gdeepti): Remove check for is_shared when Growing Shared memory
- // is supported.
- if (!old_buffer->is_growable() || old_buffer->is_shared()) return -1;
+ if (old_buffer->is_shared() && !FLAG_wasm_grow_shared_memory) return -1;
+ auto* memory_tracker = isolate->wasm_engine()->memory_tracker();
+ if (!memory_tracker->IsWasmMemoryGrowable(old_buffer)) return -1;
// Checks for maximum memory size, compute new size.
uint32_t maximum_pages = wasm::max_mem_pages();
@@ -1102,28 +1245,43 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
size_t new_size =
static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
- // Grow the buffer.
+ // Memory is grown, but the memory objects and instances are not yet updated.
+ // Handle this in the interrupt handler so that all the isolates that share
+ // this buffer can be updated safely.
Handle<JSArrayBuffer> new_buffer;
- if (!MemoryGrowBuffer(isolate, old_buffer, new_size).ToHandle(&new_buffer)) {
- return -1;
- }
-
- // Update instances if any.
- if (memory_object->has_instances()) {
- Handle<WeakArrayList> instances(memory_object->instances(), isolate);
- for (int i = 0; i < instances->length(); i++) {
- MaybeObject elem = instances->Get(i);
- HeapObject heap_object;
- if (elem->GetHeapObjectIfWeak(&heap_object)) {
- Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(heap_object), isolate);
- SetInstanceMemory(instance, new_buffer);
- } else {
- DCHECK(elem->IsCleared());
- }
+ if (old_buffer->is_shared()) {
+ // Adjust protections for the buffer.
+ if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) {
+ return -1;
+ }
+ void* backing_store = old_buffer->backing_store();
+ if (memory_tracker->IsWasmSharedMemory(backing_store)) {
+ // This memory is shared between different isolates.
+ DCHECK(old_buffer->is_shared());
+ // Update pending grow state, and trigger a grow interrupt on all the
+ // isolates that share this buffer.
+ memory_tracker->SetPendingUpdateOnGrow(old_buffer, new_size);
+ // Handle interrupts for this isolate so that the instances with this
+ // isolate are updated.
+ isolate->stack_guard()->HandleInterrupts();
+ // Failure to allocate or to adjust permissions is already handled here;
+ // updates to instances are handled in the interrupt handler, so it is safe to return.
+ return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
+ }
+ // SharedArrayBuffer, but not shared across isolates. Set up a new buffer
+ // with updated permissions and update the instances.
+ new_buffer =
+ wasm::SetupArrayBuffer(isolate, backing_store, new_size,
+ old_buffer->is_external(), SharedFlag::kShared);
+ memory_object->update_instances(isolate, new_buffer);
+ } else {
+ if (!MemoryGrowBuffer(isolate, old_buffer, new_size)
+ .ToHandle(&new_buffer)) {
+ return -1;
}
}
- memory_object->set_array_buffer(*new_buffer);
+ // Update instances if any.
+ memory_object->update_instances(isolate, new_buffer);
return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
}
@@ -1137,12 +1295,13 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
auto global_obj = Handle<WasmGlobalObject>::cast(
isolate->factory()->NewJSObject(global_ctor));
- if (type == wasm::kWasmAnyRef) {
+ if (wasm::ValueTypes::IsReferenceType(type)) {
DCHECK(maybe_untagged_buffer.is_null());
Handle<FixedArray> tagged_buffer;
if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) {
// If no buffer was provided, create one.
- tagged_buffer = isolate->factory()->NewFixedArray(1, TENURED);
+ tagged_buffer =
+ isolate->factory()->NewFixedArray(1, AllocationType::kOld);
CHECK_EQ(offset, 0);
}
global_obj->set_tagged_buffer(*tagged_buffer);
@@ -1152,8 +1311,8 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) {
// If no buffer was provided, create one long enough for the given type.
- untagged_buffer =
- isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+ untagged_buffer = isolate->factory()->NewJSArrayBuffer(
+ SharedFlag::kNotShared, AllocationType::kOld);
const bool initialize = true;
if (!JSArrayBuffer::SetupAllocatingData(untagged_buffer, isolate,
@@ -1243,7 +1402,7 @@ void ImportedFunctionEntry::SetWasmToJs(
wasm_to_js_wrapper->instructions().start());
DCHECK_EQ(wasm::WasmCode::kWasmToJsWrapper, wasm_to_js_wrapper->kind());
Handle<Tuple2> tuple =
- isolate->factory()->NewTuple2(instance_, callable, TENURED);
+ isolate->factory()->NewTuple2(instance_, callable, AllocationType::kOld);
instance_->imported_function_refs()->set(index_, *tuple);
instance_->imported_function_targets()[index_] =
wasm_to_js_wrapper->instruction_start();
@@ -1281,6 +1440,10 @@ Address ImportedFunctionEntry::target() {
return instance_->imported_function_targets()[index_];
}
+// static
+constexpr uint16_t WasmInstanceObject::kTaggedFieldOffsets[];
+
+// static
bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
Handle<WasmInstanceObject> instance, uint32_t minimum_size) {
uint32_t old_size = instance->indirect_function_table_size();
@@ -1334,7 +1497,7 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
Handle<JSFunction> instance_cons(
isolate->native_context()->wasm_instance_constructor(), isolate);
Handle<JSObject> instance_object =
- isolate->factory()->NewJSObject(instance_cons, TENURED);
+ isolate->factory()->NewJSObject(instance_cons, AllocationType::kOld);
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(*instance_object), isolate);
@@ -1412,9 +1575,11 @@ void WasmInstanceObject::InitDataSegmentArrays(
instance->dropped_data_segments()[i] = segment.active ? 1 : 0;
// Initialize the pointer and size of passive segments.
+ auto source_bytes = wire_bytes.SubVector(segment.source.offset(),
+ segment.source.end_offset());
instance->data_segment_starts()[i] =
- reinterpret_cast<Address>(&wire_bytes[segment.source.offset()]);
- instance->data_segment_sizes()[i] = segment.source.length();
+ reinterpret_cast<Address>(source_bytes.start());
+ instance->data_segment_sizes()[i] = source_bytes.length();
}
}
@@ -1442,9 +1607,9 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
namespace {
void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
- uint32_t src, uint32_t count) {
+ uint32_t src, uint32_t count, bool copy_backward) {
DCHECK(IsInBounds(dst, count, instance->indirect_function_table_size()));
- if (src < dst) {
+ if (copy_backward) {
for (uint32_t i = count; i > 0; i--) {
auto to_entry = IndirectFunctionTableEntry(instance, dst + i - 1);
auto from_entry = IndirectFunctionTableEntry(instance, src + i - 1);
@@ -1467,22 +1632,31 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
uint32_t table_dst_index,
uint32_t dst, uint32_t src,
uint32_t count) {
+ if (static_cast<int>(table_dst_index) >= instance->tables()->length()) {
+ return false;
+ }
+ if (static_cast<int>(table_src_index) >= instance->tables()->length()) {
+ return false;
+ }
+
// TODO(titzer): multiple tables in TableCopy
CHECK_EQ(0, table_src_index);
CHECK_EQ(0, table_dst_index);
auto max = instance->indirect_function_table_size();
- if (!IsInBounds(dst, count, max)) return false;
- if (!IsInBounds(src, count, max)) return false;
- if (dst == src) return true; // no-op
+ bool copy_backward = src < dst && dst - src < count;
+ bool ok = ClampToBounds(dst, &count, max);
+ // Use & instead of && so the clamp is not short-circuited.
+ ok &= ClampToBounds(src, &count, max);
- if (!instance->has_table_object()) {
- // No table object, only need to update this instance.
- CopyTableEntriesImpl(instance, dst, src, count);
- return true;
- }
+ // If performing a partial copy when copying backward, then the first access
+ // will be out-of-bounds, so no entries should be copied.
+ if (copy_backward && !ok) return ok;
+
+ if (dst == src || count == 0) return ok; // no-op
- Handle<WasmTableObject> table =
- Handle<WasmTableObject>(instance->table_object(), isolate);
+ // TODO(titzer): multiple tables in TableCopy
+ auto table = handle(
+ WasmTableObject::cast(instance->tables()->get(table_src_index)), isolate);
// Broadcast table copy operation to all instances that import this table.
Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
for (int i = 0; i < dispatch_tables->length();
@@ -1491,21 +1665,25 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
isolate);
- CopyTableEntriesImpl(target_instance, dst, src, count);
+ CopyTableEntriesImpl(target_instance, dst, src, count, copy_backward);
}
// Copy the function entries.
- Handle<FixedArray> functions(table->elements(), isolate);
- if (src < dst) {
+ auto dst_table = handle(
+ WasmTableObject::cast(instance->tables()->get(table_dst_index)), isolate);
+ auto src_table = handle(
+ WasmTableObject::cast(instance->tables()->get(table_src_index)), isolate);
+ if (copy_backward) {
for (uint32_t i = count; i > 0; i--) {
- functions->set(dst + i - 1, functions->get(src + i - 1));
+ dst_table->elements()->set(dst + i - 1,
+ src_table->elements()->get(src + i - 1));
}
} else {
for (uint32_t i = 0; i < count; i++) {
- functions->set(dst + i, functions->get(src + i));
+ dst_table->elements()->set(dst + i, src_table->elements()->get(src + i));
}
}
- return true;
+ return ok;
}
// static
@@ -1556,21 +1734,22 @@ Handle<WasmExceptionObject> WasmExceptionObject::New(
Handle<HeapObject> exception_tag) {
Handle<JSFunction> exception_cons(
isolate->native_context()->wasm_exception_constructor(), isolate);
- Handle<JSObject> exception_object =
- isolate->factory()->NewJSObject(exception_cons, TENURED);
- Handle<WasmExceptionObject> exception =
- Handle<WasmExceptionObject>::cast(exception_object);
// Serialize the signature.
DCHECK_EQ(0, sig->return_count());
DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
int sig_size = static_cast<int>(sig->parameter_count());
Handle<PodArray<wasm::ValueType>> serialized_sig =
- PodArray<wasm::ValueType>::New(isolate, sig_size, TENURED);
+ PodArray<wasm::ValueType>::New(isolate, sig_size, AllocationType::kOld);
int index = 0; // Index into the {PodArray} above.
for (wasm::ValueType param : sig->parameters()) {
serialized_sig->set(index++, param);
}
+
+ Handle<JSObject> exception_object =
+ isolate->factory()->NewJSObject(exception_cons, AllocationType::kOld);
+ Handle<WasmExceptionObject> exception =
+ Handle<WasmExceptionObject>::cast(exception_object);
exception->set_serialized_signature(*serialized_sig);
exception->set_exception_tag(*exception_tag);
@@ -1722,7 +1901,7 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
}
Handle<WasmExportedFunctionData> function_data =
Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct(
- WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED));
+ WASM_EXPORTED_FUNCTION_DATA_TYPE, AllocationType::kOld));
function_data->set_wrapper_code(*export_wrapper);
function_data->set_instance(*instance);
function_data->set_jump_table_offset(jump_table_offset);
@@ -1756,8 +1935,9 @@ wasm::FunctionSig* WasmExportedFunction::sig() {
}
Handle<WasmExceptionTag> WasmExceptionTag::New(Isolate* isolate, int index) {
- Handle<WasmExceptionTag> result = Handle<WasmExceptionTag>::cast(
- isolate->factory()->NewStruct(WASM_EXCEPTION_TAG_TYPE, TENURED));
+ Handle<WasmExceptionTag> result =
+ Handle<WasmExceptionTag>::cast(isolate->factory()->NewStruct(
+ WASM_EXCEPTION_TAG_TYPE, AllocationType::kOld));
result->set_index(index);
return result;
}
@@ -1774,7 +1954,7 @@ Handle<AsmWasmData> AsmWasmData::New(
Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
std::move(native_module));
Handle<AsmWasmData> result = Handle<AsmWasmData>::cast(
- isolate->factory()->NewStruct(ASM_WASM_DATA_TYPE, TENURED));
+ isolate->factory()->NewStruct(ASM_WASM_DATA_TYPE, AllocationType::kOld));
result->set_managed_native_module(*managed_native_module);
result->set_export_wrappers(*export_wrappers);
result->set_asm_js_offset_table(*asm_js_offset_table);
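
The reworked WasmInstanceObject::CopyTableEntries above clamps both ranges to the table size and detects overlapping copies that must run backward; a partial backward copy would fault on its first access, so in that case nothing is copied. Below is a minimal standalone sketch of those semantics, with a simplified ClampToBounds stand-in rather than V8's own helper:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-in for V8's ClampToBounds: clamp {*count} so that
// [index, index + *count) fits into [0, max). Returns whether the original
// range was already fully in bounds.
bool ClampToBounds(uint32_t index, uint32_t* count, uint32_t max) {
  if (index > max) {
    *count = 0;
    return false;
  }
  uint32_t avail = max - index;
  bool ok = *count <= avail;
  *count = std::min(*count, avail);
  return ok;
}

// Mirrors the copy semantics of WasmInstanceObject::CopyTableEntries:
// overlapping copies where the source precedes the destination must run
// backward, and a partial backward copy would fault on its first access,
// so nothing is copied in that case.
bool CopyTableEntriesLike(std::vector<int>& table, uint32_t dst, uint32_t src,
                          uint32_t count) {
  uint32_t max = static_cast<uint32_t>(table.size());
  bool copy_backward = src < dst && dst - src < count;
  bool ok = ClampToBounds(dst, &count, max);
  // Use & instead of && so the clamp is not short-circuited.
  ok &= ClampToBounds(src, &count, max);
  if (copy_backward && !ok) return ok;      // partial backward copy: copy nothing
  if (dst == src || count == 0) return ok;  // no-op
  if (copy_backward) {
    for (uint32_t i = count; i > 0; i--) table[dst + i - 1] = table[src + i - 1];
  } else {
    for (uint32_t i = 0; i < count; i++) table[dst + i] = table[src + i];
  }
  return ok;
}

int main() {
  std::vector<int> t{0, 1, 2, 3, 4, 5};
  bool ok = CopyTableEntriesLike(t, /*dst=*/2, /*src=*/0, /*count=*/3);
  std::printf("%d: %d %d %d %d %d %d\n", ok, t[0], t[1], t[2], t[3], t[4], t[5]);
  // Expected: 1: 0 1 0 1 2 5 (overlapping copy handled backward)
}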
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index ba1c4abb4e..10169ea2bb 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -30,6 +30,7 @@ struct WasmException;
struct WasmFeatures;
class WasmInterpreter;
struct WasmModule;
+class WasmValue;
class WireBytesRef;
} // namespace wasm
@@ -61,8 +62,9 @@ class IndirectFunctionTableEntry {
inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int index);
void clear();
- void Set(int sig_id, Handle<WasmInstanceObject> target_instance,
- int target_func_index);
+ V8_EXPORT_PRIVATE void Set(int sig_id,
+ Handle<WasmInstanceObject> target_instance,
+ int target_func_index);
void CopyFrom(const IndirectFunctionTableEntry& that);
@@ -91,8 +93,8 @@ class ImportedFunctionEntry {
// Initialize this entry as a WASM to JS call. This accepts the isolate as a
// parameter, since it must allocate a tuple.
- void SetWasmToJs(Isolate*, Handle<JSReceiver> callable,
- const wasm::WasmCode* wasm_to_js_wrapper);
+ V8_EXPORT_PRIVATE void SetWasmToJs(Isolate*, Handle<JSReceiver> callable,
+ const wasm::WasmCode* wasm_to_js_wrapper);
// Initialize this entry as a WASM to WASM call.
void SetWasmToWasm(WasmInstanceObject target_instance, Address call_target);
@@ -118,7 +120,8 @@ class WasmModuleObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
inline wasm::NativeModule* native_module() const;
- inline std::shared_ptr<wasm::NativeModule> shared_native_module() const;
+ inline const std::shared_ptr<wasm::NativeModule>& shared_native_module()
+ const;
inline const wasm::WasmModule* module() const;
inline void reset_breakpoint_infos();
@@ -126,22 +129,11 @@ class WasmModuleObject : public JSObject {
DECL_PRINTER(WasmModuleObject)
DECL_VERIFIER(WasmModuleObject)
-// Layout description.
-#define WASM_MODULE_OBJECT_FIELDS(V) \
- V(kNativeModuleOffset, kTaggedSize) \
- V(kExportWrappersOffset, kTaggedSize) \
- V(kScriptOffset, kTaggedSize) \
- V(kWeakInstanceListOffset, kTaggedSize) \
- V(kAsmJsOffsetTableOffset, kTaggedSize) \
- V(kBreakPointInfosOffset, kTaggedSize) \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- WASM_MODULE_OBJECT_FIELDS)
-#undef WASM_MODULE_OBJECT_FIELDS
+ TORQUE_GENERATED_WASM_MODULE_OBJECT_FIELDS)
// Creates a new {WasmModuleObject} with a new {NativeModule} underneath.
- static Handle<WasmModuleObject> New(
+ V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
Isolate* isolate, const wasm::WasmFeatures& enabled,
std::shared_ptr<const wasm::WasmModule> module,
OwnedVector<const uint8_t> wire_bytes, Handle<Script> script,
@@ -149,10 +141,10 @@ class WasmModuleObject : public JSObject {
// Creates a new {WasmModuleObject} for an existing {NativeModule} that is
// reference counted and might be shared between multiple Isolates.
- static Handle<WasmModuleObject> New(
+ V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
Handle<Script> script, size_t code_size_estimate);
- static Handle<WasmModuleObject> New(
+ V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
Handle<Script> script, Handle<FixedArray> export_wrappers,
size_t code_size_estimate);
@@ -163,8 +155,9 @@ class WasmModuleObject : public JSObject {
// location inside the same function.
// If it points outside a function, or behind the last breakable location,
// this function returns false and does not set any breakpoint.
- static bool SetBreakPoint(Handle<WasmModuleObject>, int* position,
- Handle<BreakPoint> break_point);
+ V8_EXPORT_PRIVATE static bool SetBreakPoint(Handle<WasmModuleObject>,
+ int* position,
+ Handle<BreakPoint> break_point);
// Check whether this module was generated from asm.js source.
inline bool is_asm_js();
@@ -224,7 +217,7 @@ class WasmModuleObject : public JSObject {
// entries, mapping wasm byte offsets to line and column in the disassembly.
// The list is guaranteed to be ordered by the byte_offset.
// Returns an empty string and empty vector if the function index is invalid.
- debug::WasmDisassembly DisassembleFunction(int func_index);
+ V8_EXPORT_PRIVATE debug::WasmDisassembly DisassembleFunction(int func_index);
// Extract a portion of the wire bytes as UTF-8 string.
// Returns a null handle if the respective bytes do not form a valid UTF-8
@@ -236,9 +229,9 @@ class WasmModuleObject : public JSObject {
wasm::WireBytesRef ref);
// Get a list of all possible breakpoints within a given range of this module.
- bool GetPossibleBreakpoints(const debug::Location& start,
- const debug::Location& end,
- std::vector<debug::BreakLocation>* locations);
+ V8_EXPORT_PRIVATE bool GetPossibleBreakpoints(
+ const debug::Location& start, const debug::Location& end,
+ std::vector<debug::BreakLocation>* locations);
// Return an empty handle if no breakpoint is hit at that location, or a
// FixedArray with all hit breakpoint objects.
@@ -250,7 +243,7 @@ class WasmModuleObject : public JSObject {
};
// Representation of a WebAssembly.Table JavaScript-level object.
-class WasmTableObject : public JSObject {
+class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
public:
DECL_CAST(WasmTableObject)
@@ -258,39 +251,65 @@ class WasmTableObject : public JSObject {
// TODO(titzer): introduce DECL_I64_ACCESSORS macro
DECL_ACCESSORS(maximum_length, Object)
DECL_ACCESSORS(dispatch_tables, FixedArray)
+ DECL_INT_ACCESSORS(raw_type)
-// Layout description.
-#define WASM_TABLE_OBJECT_FIELDS(V) \
- V(kElementsOffset, kTaggedSize) \
- V(kMaximumLengthOffset, kTaggedSize) \
- V(kDispatchTablesOffset, kTaggedSize) \
- V(kSize, 0)
+ // Dispatched behavior.
+ DECL_PRINTER(WasmTableObject)
+ DECL_VERIFIER(WasmTableObject)
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, WASM_TABLE_OBJECT_FIELDS)
-#undef WASM_TABLE_OBJECT_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_WASM_TABLE_OBJECT_FIELDS)
inline uint32_t current_length();
+ inline wasm::ValueType type();
void Grow(Isolate* isolate, uint32_t count);
- static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
+ static Handle<WasmTableObject> New(Isolate* isolate, wasm::ValueType type,
+ uint32_t initial, bool has_maximum,
uint32_t maximum,
- Handle<FixedArray>* js_functions);
+ Handle<FixedArray>* elements);
+
static void AddDispatchTable(Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance,
int table_index);
+ static bool IsInBounds(Isolate* isolate, Handle<WasmTableObject> table,
+ uint32_t entry_index);
+
+ static bool IsValidElement(Isolate* isolate, Handle<WasmTableObject> table,
+ Handle<Object> entry);
+
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t index, Handle<JSFunction> function);
+ uint32_t index, Handle<Object> element);
+
+ static Handle<Object> Get(Isolate* isolate, Handle<WasmTableObject> table,
+ uint32_t index);
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
- int table_index, wasm::FunctionSig* sig,
+ int entry_index, wasm::FunctionSig* sig,
Handle<WasmInstanceObject> target_instance,
int target_func_index);
static void ClearDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table, int index);
+ static void SetFunctionTablePlaceholder(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index,
+ Handle<WasmInstanceObject> instance,
+ int func_index);
+
+ // This function reads the content of a function table entry and returns it
+ // through the out parameters {is_valid}, {is_null}, {instance}, and
+ // {function_index}.
+ static void GetFunctionTableEntry(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index, bool* is_valid,
+ bool* is_null,
+ MaybeHandle<WasmInstanceObject>* instance,
+ int* function_index);
+
OBJECT_CONSTRUCTORS(WasmTableObject, JSObject);
};
@@ -303,26 +322,19 @@ class WasmMemoryObject : public JSObject {
DECL_INT_ACCESSORS(maximum_pages)
DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
-// Layout description.
-#define WASM_MEMORY_OBJECT_FIELDS(V) \
- V(kArrayBufferOffset, kTaggedSize) \
- V(kMaximumPagesOffset, kTaggedSize) \
- V(kInstancesOffset, kTaggedSize) \
- V(kSize, 0)
+ // Dispatched behavior.
+ DECL_PRINTER(WasmMemoryObject)
+ DECL_VERIFIER(WasmMemoryObject)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- WASM_MEMORY_OBJECT_FIELDS)
-#undef WASM_MEMORY_OBJECT_FIELDS
+ TORQUE_GENERATED_WASM_MEMORY_OBJECT_FIELDS)
// Add an instance to the internal (weak) list.
- static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
- Handle<WasmInstanceObject> object);
+ V8_EXPORT_PRIVATE static void AddInstance(Isolate* isolate,
+ Handle<WasmMemoryObject> memory,
+ Handle<WasmInstanceObject> object);
inline bool has_maximum_pages();
- // Return whether the underlying backing store has guard regions large enough
- // to be used with trap handlers.
- bool has_full_guard_region(Isolate* isolate);
-
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
@@ -330,7 +342,10 @@ class WasmMemoryObject : public JSObject {
Isolate* isolate, uint32_t initial, uint32_t maximum,
bool is_shared_memory);
- static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
+ void update_instances(Isolate* isolate, Handle<JSArrayBuffer> buffer);
+
+ V8_EXPORT_PRIVATE static int32_t Grow(Isolate*, Handle<WasmMemoryObject>,
+ uint32_t pages);
OBJECT_CONSTRUCTORS(WasmMemoryObject, JSObject);
};
@@ -347,6 +362,10 @@ class WasmGlobalObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(type, wasm::ValueType)
DECL_BOOLEAN_ACCESSORS(is_mutable)
+ // Dispatched behavior.
+ DECL_PRINTER(WasmGlobalObject)
+ DECL_VERIFIER(WasmGlobalObject)
+
#define WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS(V, _) \
V(TypeBits, wasm::ValueType, 8, _) \
V(IsMutableBit, bool, 1, _)
@@ -355,17 +374,8 @@ class WasmGlobalObject : public JSObject {
#undef WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS
-// Layout description.
-#define WASM_GLOBAL_OBJECT_FIELDS(V) \
- V(kUntaggedBufferOffset, kTaggedSize) \
- V(kTaggedBufferOffset, kTaggedSize) \
- V(kOffsetOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- WASM_GLOBAL_OBJECT_FIELDS)
-#undef WASM_GLOBAL_OBJECT_FIELDS
+ TORQUE_GENERATED_WASM_GLOBAL_OBJECT_FIELDS)
V8_EXPORT_PRIVATE static MaybeHandle<WasmGlobalObject> New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_untagged_buffer,
@@ -378,13 +388,14 @@ class WasmGlobalObject : public JSObject {
inline int64_t GetI64();
inline float GetF32();
inline double GetF64();
- inline Handle<Object> GetAnyRef();
+ inline Handle<Object> GetRef();
inline void SetI32(int32_t value);
inline void SetI64(int64_t value);
inline void SetF32(float value);
inline void SetF64(double value);
inline void SetAnyRef(Handle<Object> value);
+ inline bool SetAnyFunc(Isolate* isolate, Handle<Object> value);
private:
// This function returns the address of the global's data in the
@@ -408,7 +419,6 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(tagged_globals_buffer, FixedArray)
DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
- DECL_OPTIONAL_ACCESSORS(table_object, WasmTableObject)
DECL_OPTIONAL_ACCESSORS(tables, FixedArray)
DECL_ACCESSORS(imported_function_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
@@ -446,7 +456,25 @@ class WasmInstanceObject : public JSObject {
// Layout description.
#define WASM_INSTANCE_OBJECT_FIELDS(V) \
- /* Tagged values. */ \
+ /* Often-accessed fields go first to minimize generated code size. */ \
+ V(kMemoryStartOffset, kSystemPointerSize) \
+ V(kMemorySizeOffset, kSizetSize) \
+ V(kMemoryMaskOffset, kSizetSize) \
+ V(kStackLimitAddressOffset, kSystemPointerSize) \
+ V(kImportedFunctionRefsOffset, kTaggedSize) \
+ V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
+ V(kIndirectFunctionTableRefsOffset, kTaggedSize) \
+ V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
+ V(kIndirectFunctionTableSigIdsOffset, kSystemPointerSize) \
+ V(kIndirectFunctionTableSizeOffset, kUInt32Size) \
+ /* Optional padding to align system pointer size fields */ \
+ V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
+ V(kGlobalsStartOffset, kSystemPointerSize) \
+ V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
+ V(kUndefinedValueOffset, kTaggedSize) \
+ V(kIsolateRootOffset, kSystemPointerSize) \
+ V(kJumpTableStartOffset, kSystemPointerSize) \
+ /* End of often-accessed fields. */ \
V(kModuleObjectOffset, kTaggedSize) \
V(kExportsObjectOffset, kTaggedSize) \
V(kNativeContextOffset, kTaggedSize) \
@@ -455,34 +483,13 @@ class WasmInstanceObject : public JSObject {
V(kTaggedGlobalsBufferOffset, kTaggedSize) \
V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
V(kDebugInfoOffset, kTaggedSize) \
- V(kTableObjectOffset, kTaggedSize) \
V(kTablesOffset, kTaggedSize) \
- V(kImportedFunctionRefsOffset, kTaggedSize) \
- V(kIndirectFunctionTableRefsOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
V(kExceptionsTableOffset, kTaggedSize) \
- V(kUndefinedValueOffset, kTaggedSize) \
V(kNullValueOffset, kTaggedSize) \
V(kCEntryStubOffset, kTaggedSize) \
V(kWasmExportedFunctionsOffset, kTaggedSize) \
- V(kEndOfTaggedFieldsOffset, 0) \
- /* Raw data. */ \
- V(kIndirectFunctionTableSizeOffset, kUInt32Size) \
- /* Optional padding to align system pointer size fields */ \
- V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
- V(kFirstSystemPointerFieldOffset, 0) \
- V(kMemoryStartOffset, kSystemPointerSize) \
- V(kMemorySizeOffset, kSizetSize) \
- V(kMemoryMaskOffset, kSizetSize) \
- V(kIsolateRootOffset, kSystemPointerSize) \
- V(kStackLimitAddressOffset, kSystemPointerSize) \
V(kRealStackLimitAddressOffset, kSystemPointerSize) \
- V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
- V(kGlobalsStartOffset, kSystemPointerSize) \
- V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
- V(kIndirectFunctionTableSigIdsOffset, kSystemPointerSize) \
- V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
- V(kJumpTableStartOffset, kSystemPointerSize) \
V(kDataSegmentStartsOffset, kSystemPointerSize) \
V(kDataSegmentSizesOffset, kSystemPointerSize) \
V(kDroppedDataSegmentsOffset, kSystemPointerSize) \
@@ -492,25 +499,54 @@ class WasmInstanceObject : public JSObject {
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
WASM_INSTANCE_OBJECT_FIELDS)
+ STATIC_ASSERT(IsAligned(kSize, kTaggedSize));
+  // TODO(ishell, v8:8875): When pointer compression is enabled, 8-byte size
+  // fields (external pointers, doubles, and BigInt data) are only kTaggedSize
+  // aligned, so checking for alignments of fields bigger than kTaggedSize
+  // doesn't make sense until v8:8875 is fixed.
+#define ASSERT_FIELD_ALIGNED(offset, size) \
+ STATIC_ASSERT(size == 0 || IsAligned(offset, size) || \
+ (COMPRESS_POINTERS_BOOL && (size == kSystemPointerSize) && \
+ IsAligned(offset, kTaggedSize)));
+ WASM_INSTANCE_OBJECT_FIELDS(ASSERT_FIELD_ALIGNED)
+#undef ASSERT_FIELD_ALIGNED
#undef WASM_INSTANCE_OBJECT_FIELDS
- STATIC_ASSERT(IsAligned(kFirstSystemPointerFieldOffset, kSystemPointerSize));
- STATIC_ASSERT(IsAligned(kSize, kTaggedSize));
+ static constexpr uint16_t kTaggedFieldOffsets[] = {
+ kImportedFunctionRefsOffset,
+ kIndirectFunctionTableRefsOffset,
+ kUndefinedValueOffset,
+ kModuleObjectOffset,
+ kExportsObjectOffset,
+ kNativeContextOffset,
+ kMemoryObjectOffset,
+ kUntaggedGlobalsBufferOffset,
+ kTaggedGlobalsBufferOffset,
+ kImportedMutableGlobalsBuffersOffset,
+ kDebugInfoOffset,
+ kTablesOffset,
+ kManagedNativeAllocationsOffset,
+ kExceptionsTableOffset,
+ kNullValueOffset,
+ kCEntryStubOffset,
+ kWasmExportedFunctionsOffset};
V8_EXPORT_PRIVATE const wasm::WasmModule* module();
- static bool EnsureIndirectFunctionTableWithMinimumSize(
+ V8_EXPORT_PRIVATE static bool EnsureIndirectFunctionTableWithMinimumSize(
Handle<WasmInstanceObject> instance, uint32_t minimum_size);
bool has_indirect_function_table();
- void SetRawMemory(byte* mem_start, size_t mem_size);
+ V8_EXPORT_PRIVATE void SetRawMemory(byte* mem_start, size_t mem_size);
// Get the debug info associated with the given wasm object.
// If no debug info exists yet, it is created automatically.
- static Handle<WasmDebugInfo> GetOrCreateDebugInfo(Handle<WasmInstanceObject>);
+ V8_EXPORT_PRIVATE static Handle<WasmDebugInfo> GetOrCreateDebugInfo(
+ Handle<WasmInstanceObject>);
- static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmModuleObject>);
+ V8_EXPORT_PRIVATE static Handle<WasmInstanceObject> New(
+ Isolate*, Handle<WasmModuleObject>);
Address GetCallTarget(uint32_t func_index);
@@ -557,15 +593,12 @@ class WasmExceptionObject : public JSObject {
DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
DECL_ACCESSORS(exception_tag, HeapObject)
-// Layout description.
-#define WASM_EXCEPTION_OBJECT_FIELDS(V) \
- V(kSerializedSignatureOffset, kTaggedSize) \
- V(kExceptionTagOffset, kTaggedSize) \
- V(kSize, 0)
+ // Dispatched behavior.
+ DECL_PRINTER(WasmExceptionObject)
+ DECL_VERIFIER(WasmExceptionObject)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- WASM_EXCEPTION_OBJECT_FIELDS)
-#undef WASM_EXCEPTION_OBJECT_FIELDS
+ TORQUE_GENERATED_WASM_EXCEPTION_OBJECT_FIELDS)
// Checks whether the given {sig} has the same parameter types as the
// serialized signature stored within this exception object.
@@ -605,11 +638,10 @@ class WasmExportedFunction : public JSFunction {
V8_EXPORT_PRIVATE static bool IsWasmExportedFunction(Object object);
- static Handle<WasmExportedFunction> New(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- MaybeHandle<String> maybe_name,
- int func_index, int arity,
- Handle<Code> export_wrapper);
+ V8_EXPORT_PRIVATE static Handle<WasmExportedFunction> New(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ MaybeHandle<String> maybe_name, int func_index, int arity,
+ Handle<Code> export_wrapper);
Address GetWasmCallTarget();
@@ -636,16 +668,9 @@ class WasmExportedFunctionData : public Struct {
DECL_VERIFIER(WasmExportedFunctionData)
// Layout description.
-#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
- V(kWrapperCodeOffset, kTaggedSize) \
- V(kInstanceOffset, kTaggedSize) \
- V(kJumpTableOffsetOffset, kTaggedSize) /* Smi */ \
- V(kFunctionIndexOffset, kTaggedSize) /* Smi */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- WASM_EXPORTED_FUNCTION_DATA_FIELDS)
-#undef WASM_EXPORTED_FUNCTION_DATA_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WASM_EXPORTED_FUNCTION_DATA_FIELDS)
OBJECT_CONSTRUCTORS(WasmExportedFunctionData, Struct);
};
@@ -667,17 +692,8 @@ class WasmDebugInfo : public Struct {
DECL_VERIFIER(WasmDebugInfo)
// Layout description.
-#define WASM_DEBUG_INFO_FIELDS(V) \
- V(kInstanceOffset, kTaggedSize) \
- V(kInterpreterHandleOffset, kTaggedSize) \
- V(kInterpretedFunctionsOffset, kTaggedSize) \
- V(kLocalsNamesOffset, kTaggedSize) \
- V(kCWasmEntriesOffset, kTaggedSize) \
- V(kCWasmEntryMapOffset, kTaggedSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WASM_DEBUG_INFO_FIELDS)
-#undef WASM_DEBUG_INFO_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WASM_DEBUG_INFO_FIELDS)
static Handle<WasmDebugInfo> New(Handle<WasmInstanceObject>);
@@ -691,35 +707,38 @@ class WasmDebugInfo : public Struct {
// Set a breakpoint in the given function at the given byte offset within that
// function. This will redirect all future calls to this function to the
// interpreter and will always pause at the given offset.
- static void SetBreakpoint(Handle<WasmDebugInfo>, int func_index, int offset);
+ V8_EXPORT_PRIVATE static void SetBreakpoint(Handle<WasmDebugInfo>,
+ int func_index, int offset);
// Make a set of functions always execute in the interpreter without setting
// breakpoints.
- static void RedirectToInterpreter(Handle<WasmDebugInfo>,
- Vector<int> func_indexes);
+ V8_EXPORT_PRIVATE static void RedirectToInterpreter(Handle<WasmDebugInfo>,
+ Vector<int> func_indexes);
void PrepareStep(StepAction);
- // Execute the specified function in the interpreter. Read arguments from
- // arg_buffer.
+ // Execute the specified function in the interpreter. Read arguments from the
+ // {argument_values} vector and write to {return_values} on regular exit.
// The frame_pointer will be used to identify the new activation of the
// interpreter for unwinding and frame inspection.
// Returns true if exited regularly, false if a trap occurred. In the latter
// case, a pending exception will have been set on the isolate.
static bool RunInterpreter(Isolate* isolate, Handle<WasmDebugInfo>,
Address frame_pointer, int func_index,
- Address arg_buffer);
+ Vector<wasm::WasmValue> argument_values,
+ Vector<wasm::WasmValue> return_values);
// Get the stack of the wasm interpreter as pairs of <function index, byte
// offset>. The list is ordered bottom-to-top, i.e. caller before callee.
std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
Address frame_pointer);
+ V8_EXPORT_PRIVATE
std::unique_ptr<wasm::InterpretedFrame, wasm::InterpretedFrameDeleter>
GetInterpretedFrame(Address frame_pointer, int frame_index);
// Returns the number of calls / function frames executed in the interpreter.
- uint64_t NumInterpretedCalls();
+ V8_EXPORT_PRIVATE uint64_t NumInterpretedCalls();
// Get scope details for a specific interpreted frame.
// Both of these methods return a JSArrays (for the global scope and local
@@ -735,8 +754,8 @@ class WasmDebugInfo : public Struct {
Address frame_pointer,
int frame_index);
- static Handle<JSFunction> GetCWasmEntry(Handle<WasmDebugInfo>,
- wasm::FunctionSig*);
+ V8_EXPORT_PRIVATE static Handle<JSFunction> GetCWasmEntry(
+ Handle<WasmDebugInfo>, wasm::FunctionSig*);
OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct);
};
@@ -747,7 +766,8 @@ class WasmDebugInfo : public Struct {
// - {WasmInstanceObject::exceptions_table}: List of tags used by an instance.
class WasmExceptionTag : public Struct {
public:
- static Handle<WasmExceptionTag> New(Isolate* isolate, int index);
+ V8_EXPORT_PRIVATE static Handle<WasmExceptionTag> New(Isolate* isolate,
+ int index);
// Note that this index is only useful for debugging purposes and it is not
// unique across modules. The GC however does not allow objects without at
@@ -780,17 +800,8 @@ class AsmWasmData : public Struct {
DECL_PRINTER(AsmWasmData)
DECL_VERIFIER(AsmWasmData)
-// Layout description.
-#define ASM_WASM_DATA_FIELDS(V) \
- V(kManagedNativeModuleOffset, kTaggedSize) \
- V(kExportWrappersOffset, kTaggedSize) \
- V(kAsmJsOffsetTableOffset, kTaggedSize) \
- V(kUsesBitsetOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, ASM_WASM_DATA_FIELDS)
-#undef ASM_WASM_DATA_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_ASM_WASM_DATA_FIELDS)
OBJECT_CONSTRUCTORS(AsmWasmData, Struct);
};
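
The new kTaggedFieldOffsets member declared above is paired with the out-of-line definition `constexpr uint16_t WasmInstanceObject::kTaggedFieldOffsets[];` added in wasm-objects.cc: under pre-C++17 rules, a static constexpr data member that is odr-used (for example, iterated over at runtime) still needs a namespace-scope definition. Below is a minimal standalone sketch of the same pattern, with hypothetical names:

#include <cstdint>
#include <cstdio>

struct FieldTable {
  // Declaration with an in-class initializer; mirrors kTaggedFieldOffsets.
  static constexpr uint16_t kOffsets[] = {8, 16, 24};
};

// Out-of-line definition required in C++14 once kOffsets is odr-used
// (taking its address, iterating over it, ...). C++17 makes this implicit.
constexpr uint16_t FieldTable::kOffsets[];

int main() {
  for (uint16_t offset : FieldTable::kOffsets) std::printf("%d\n", offset);
}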
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 67e52a3a94..c37a94524c 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -268,7 +268,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
- CASE_OP(AtomicWake, "atomic_wake")
+ CASE_OP(AtomicNotify, "atomic_notify")
CASE_INT_OP(AtomicWait, "atomic_wait")
CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic_load")
CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic_store")
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 96e96f20b0..71829b6479 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -423,7 +423,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(TableCopy, 0xfc0e, v_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
- V(AtomicWake, 0xfe00, i_ii) \
+ V(AtomicNotify, 0xfe00, i_ii) \
V(I32AtomicWait, 0xfe01, i_iil) \
V(I64AtomicWait, 0xfe02, i_ill) \
V(I32AtomicLoad, 0xfe10, i_i) \
@@ -609,7 +609,7 @@ struct WasmInitExpr {
kI64Const,
kF32Const,
kF64Const,
- kAnyRefConst,
+ kRefNullConst,
} kind;
union {
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index aa27ba8035..1b7973aac9 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -188,19 +188,20 @@ constexpr size_t kHeaderSize =
sizeof(uint32_t); // imported functions (index of first wasm function)
constexpr size_t kCodeHeaderSize =
- sizeof(size_t) + // size of code section
- sizeof(size_t) + // offset of constant pool
- sizeof(size_t) + // offset of safepoint table
- sizeof(size_t) + // offset of handler table
- sizeof(size_t) + // offset of code comments
- sizeof(size_t) + // unpadded binary size
- sizeof(uint32_t) + // stack slots
- sizeof(uint32_t) + // tagged parameter slots
- sizeof(size_t) + // code size
- sizeof(size_t) + // reloc size
- sizeof(size_t) + // source positions size
- sizeof(size_t) + // protected instructions size
- sizeof(WasmCode::Tier); // tier
+ sizeof(size_t) + // size of code section
+ sizeof(size_t) + // offset of constant pool
+ sizeof(size_t) + // offset of safepoint table
+ sizeof(size_t) + // offset of handler table
+ sizeof(size_t) + // offset of code comments
+ sizeof(size_t) + // unpadded binary size
+ sizeof(uint32_t) + // stack slots
+ sizeof(uint32_t) + // tagged parameter slots
+ sizeof(size_t) + // code size
+ sizeof(size_t) + // reloc size
+ sizeof(size_t) + // source positions size
+ sizeof(size_t) + // protected instructions size
+ sizeof(WasmCode::Kind) + // code kind
+ sizeof(ExecutionTier); // tier
// A List of all isolate-independent external references. This is used to create
// a tag from the Address of an external reference and vice versa.
@@ -307,7 +308,8 @@ NativeModuleSerializer::NativeModuleSerializer(
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
if (code == nullptr) return sizeof(size_t);
- DCHECK_EQ(WasmCode::kFunction, code->kind());
+ DCHECK(code->kind() == WasmCode::kFunction ||
+ code->kind() == WasmCode::kInterpreterEntry);
return kCodeHeaderSize + code->instructions().size() +
code->reloc_info().size() + code->source_positions().size() +
code->protected_instructions().size() *
@@ -335,7 +337,8 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(size_t{0});
return;
}
- DCHECK_EQ(WasmCode::kFunction, code->kind());
+ DCHECK(code->kind() == WasmCode::kFunction ||
+ code->kind() == WasmCode::kInterpreterEntry);
// Write the size of the entire code section, followed by the code header.
writer->Write(MeasureCode(code));
writer->Write(code->constant_pool_offset());
@@ -349,6 +352,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(code->reloc_info().size());
writer->Write(code->source_positions().size());
writer->Write(code->protected_instructions().size());
+ writer->Write(code->kind());
writer->Write(code->tier());
// Get a pointer to the destination buffer, to hold relocated code.
@@ -499,7 +503,12 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t code_section_size = reader->Read<size_t>();
- if (code_section_size == 0) return true;
+ if (code_section_size == 0) {
+ DCHECK(FLAG_wasm_lazy_compilation ||
+ native_module_->enabled_features().compilation_hints);
+ native_module_->UseLazyStub(fn_index);
+ return true;
+ }
size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>();
@@ -511,7 +520,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t reloc_size = reader->Read<size_t>();
size_t source_position_size = reader->Read<size_t>();
size_t protected_instructions_size = reader->Read<size_t>();
- WasmCode::Tier tier = reader->Read<WasmCode::Tier>();
+ WasmCode::Kind kind = reader->Read<WasmCode::Kind>();
+ ExecutionTier tier = reader->Read<ExecutionTier>();
Vector<const byte> code_buffer = {reader->current_location(), code_size};
reader->Skip(code_size);
@@ -530,7 +540,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comment_offset, unpadded_binary_size,
std::move(protected_instructions), std::move(reloc_info),
- std::move(source_pos), tier);
+ std::move(source_pos), kind, tier);
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
@@ -603,9 +613,10 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
ModuleWireBytes wire_bytes(wire_bytes_vec);
// TODO(titzer): module features should be part of the serialization format.
WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
- ModuleResult decode_result = DecodeWasmModule(
- enabled_features, wire_bytes.start(), wire_bytes.end(), false,
- i::wasm::kWasmOrigin, isolate->counters(), isolate->allocator());
+ ModuleResult decode_result =
+ DecodeWasmModule(enabled_features, wire_bytes.start(), wire_bytes.end(),
+ false, i::wasm::kWasmOrigin, isolate->counters(),
+ isolate->wasm_engine()->allocator());
if (decode_result.failed()) return {};
CHECK_NOT_NULL(decode_result.value());
WasmModule* module = decode_result.value().get();
@@ -620,10 +631,10 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
NativeModule* native_module = module_object->native_module();
- if (FLAG_wasm_lazy_compilation) {
- native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
- }
+ native_module->set_lazy_compilation(FLAG_wasm_lazy_compilation);
+
NativeModuleDeserializer deserializer(native_module);
+ WasmCodeRefScope wasm_code_ref_scope;
Reader reader(data + kVersionSize);
if (!deserializer.Read(&reader)) return {};
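The ReadCode change above gives a zero code-section size an explicit meaning: nothing was serialized for this function (which the DCHECK ties to lazy compilation or compilation hints), so the deserializer installs a lazy stub instead of silently skipping the entry. The same sentinel pattern in isolation, with made-up names (Entry, ReadEntry) rather than the V8 API:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <optional>
    #include <vector>

    // Each entry starts with its payload size; zero is the sentinel for
    // "no code was serialized here".
    struct Entry {
      std::vector<uint8_t> code;
      bool lazy = false;  // true: compile on first call instead
    };

    std::optional<Entry> ReadEntry(const uint8_t*& pos, const uint8_t* end) {
      uint64_t size = 0;
      if (static_cast<size_t>(end - pos) < sizeof(size)) return std::nullopt;
      std::memcpy(&size, pos, sizeof(size));
      pos += sizeof(size);
      if (size == 0) return Entry{{}, /*lazy=*/true};  // no code: lazy stub
      if (static_cast<size_t>(end - pos) < size) return std::nullopt;
      Entry entry;
      entry.code.assign(pos, pos + size);
      pos += size;
      return entry;
    }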
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index eaa1ee7ffe..f35bc73460 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -14,7 +14,7 @@ namespace wasm {
// Support for serializing WebAssembly {NativeModule} objects. This class takes
// a snapshot of the module state at instantiation, and other code that modifies
// the module after that won't affect the serialized result.
-class WasmSerializer {
+class V8_EXPORT_PRIVATE WasmSerializer {
public:
explicit WasmSerializer(NativeModule* native_module);
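Adding V8_EXPORT_PRIVATE does not change behavior; it only exports the class so code outside the wasm component (for example tests) can link against it when V8 is built as a shared library. A hedged usage sketch, assuming the size-query and serialize-into-buffer methods declared further down in this header (signatures are taken from the surrounding sources, not from this patch, and may differ):

    // native_module is a NativeModule* obtained from a compiled module.
    wasm::WasmSerializer serializer(native_module);
    std::vector<uint8_t> buffer(serializer.GetSerializedNativeModuleSize());
    bool ok = serializer.SerializeNativeModule({buffer.data(), buffer.size()});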
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 1bd0b0ce89..7b654ec1cf 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -20,7 +20,7 @@ namespace wasm {
namespace {
bool IsValidFunctionName(const Vector<const char> &name) {
- if (name.is_empty()) return false;
+ if (name.empty()) return false;
const char *special_chars = "_.+-*/\\^~=<>!?@#$%&|:'`";
for (char c : name) {
bool valid_char = (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') ||
@@ -120,11 +120,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
break;
}
case kExprBrOnExn: {
- BranchDepthImmediate<Decoder::kNoValidate> imm_br(&i, i.pc());
- ExceptionIndexImmediate<Decoder::kNoValidate> imm_idx(
- &i, i.pc() + imm_br.length);
- os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm_br.depth << ' '
- << imm_idx.index;
+ BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.depth.depth << ' '
+ << imm.index.index;
break;
}
case kExprElse:
@@ -143,7 +141,8 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
break;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ CallIndirectImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
+ i.pc());
DCHECK_EQ(0, imm.table_index);
os << "call_indirect " << imm.sig_index;
break;
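The br_on_exn case above switches from decoding two separate immediates to one composite immediate whose sub-fields carry the branch depth and the exception index, which is why the printer now reads imm.depth.depth and imm.index.index. A small stand-alone analogue of that shape (not the V8 decoder types):

    #include <cstdint>
    #include <iostream>

    struct BranchDepth { uint32_t depth; };
    struct ExceptionIndex { uint32_t index; };
    struct BranchOnException {  // composite immediate, as in the patch
      BranchDepth depth;
      ExceptionIndex index;
    };

    int main() {
      BranchOnException imm{{1}, {0}};
      // Mirrors the printer output shape: "<opcode> <depth> <index>".
      std::cout << "br_on_exn " << imm.depth.depth << ' ' << imm.index.index
                << '\n';
    }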
diff --git a/deps/v8/src/wasm/wasm-tier.h b/deps/v8/src/wasm/wasm-tier.h
index dbdaada428..6010d3f5fb 100644
--- a/deps/v8/src/wasm/wasm-tier.h
+++ b/deps/v8/src/wasm/wasm-tier.h
@@ -13,9 +13,10 @@ namespace wasm {
// All the tiers of WASM execution.
enum class ExecutionTier : int8_t {
- kInterpreter, // interpreter (used to provide debugging services).
- kBaseline, // Liftoff.
- kOptimized // TurboFan.
+ kNone,
+ kInterpreter,
+ kLiftoff,
+ kTurbofan,
};
} // namespace wasm
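Besides renaming kBaseline and kOptimized to kLiftoff and kTurbofan, the enum gains an explicit kNone so call sites have a value for "no tier chosen / no code yet" instead of overloading one of the real tiers. A stand-alone illustration (TierName is a hypothetical helper, not part of the patch):

    #include <cstdint>

    enum class ExecutionTier : int8_t { kNone, kInterpreter, kLiftoff, kTurbofan };

    const char* TierName(ExecutionTier tier) {
      switch (tier) {
        case ExecutionTier::kNone:        return "none";
        case ExecutionTier::kInterpreter: return "interpreter";
        case ExecutionTier::kLiftoff:     return "liftoff";
        case ExecutionTier::kTurbofan:    return "turbofan";
      }
      return "unknown";  // unreachable when all enumerators are handled
    }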
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index c1538e8523..4201c14ae4 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -6,6 +6,7 @@
#define V8_WASM_WASM_VALUE_H_
#include "src/boxed-float.h"
+#include "src/handles.h"
#include "src/v8memory.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone-containers.h"
@@ -62,7 +63,10 @@ class Simd128 {
V(f32_boxed, kWasmF32, Float32) \
V(f64, kWasmF64, double) \
V(f64_boxed, kWasmF64, Float64) \
- V(s128, kWasmS128, Simd128)
+ V(s128, kWasmS128, Simd128) \
+ V(anyref, kWasmAnyRef, Handle<Object>)
+
+ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
// A wasm value with type information.
class WasmValue {