Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--  deps/v8/src/wasm/DEPS | 7
-rw-r--r--  deps/v8/src/wasm/OWNERS | 2
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 46
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 40
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 33
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc | 8
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 3
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 163
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h | 19
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 57
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 62
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 82
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 81
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 31
-rw-r--r--  deps/v8/src/wasm/c-api.cc | 19
-rw-r--r--  deps/v8/src/wasm/decoder.h | 6
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 80
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc | 6
-rw-r--r--  deps/v8/src/wasm/function-compiler.h | 2
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc | 141
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.cc | 178
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.h | 108
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 21
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 75
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 149
-rw-r--r--  deps/v8/src/wasm/module-decoder.h | 2
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc | 240
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc | 25
-rw-r--r--  deps/v8/src/wasm/value-type.h | 2
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc | 573
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 140
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h | 1
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 27
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc | 20
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h | 9
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 4
-rw-r--r--  deps/v8/src/wasm/wasm-feature-flags.h | 2
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 233
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.h | 9
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 53
-rw-r--r--  deps/v8/src/wasm/wasm-limits.h | 1
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc | 2
-rw-r--r--  deps/v8/src/wasm/wasm-memory.h | 289
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 8
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc | 70
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 34
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 16
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 497
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 102
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 23
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 29
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc | 27
-rw-r--r--  deps/v8/src/wasm/wasm-text.cc | 16
53 files changed, 2325 insertions, 1548 deletions
diff --git a/deps/v8/src/wasm/DEPS b/deps/v8/src/wasm/DEPS
index eb0780f5e3..2d310c631c 100644
--- a/deps/v8/src/wasm/DEPS
+++ b/deps/v8/src/wasm/DEPS
@@ -1,4 +1,11 @@
specific_include_rules = {
+ "jump-table-assembler\.(cc|h)": [
+ # The JumpTableAssembler should not depend on any wasm-specific headers.
+ # The only allowed include is 'src/codegen' for assembler headers.
+ "-src",
+ "+src/codegen",
+ "+src/wasm/jump-table-assembler.h",
+ ],
"c-api\.cc": [
"+include/libplatform/libplatform.h",
"+third_party/wasm-api/wasm.h",
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 8aa6e24739..bc9ec357df 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,7 +1,7 @@
ahaas@chromium.org
bbudge@chromium.org
binji@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
gdeepti@chromium.org
mstarzinger@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 834eb181d8..e6c46e4a09 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -46,10 +46,12 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize;
// Three instructions are required to sub a large constant, movw + movt + sub.
constexpr int32_t kPatchInstructionsRequired = 3;
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
inline MemOperand GetStackSlot(uint32_t index) {
- int32_t offset =
- kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(fp, -offset);
+ return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
@@ -635,6 +637,44 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
ldr(reg, liftoff::GetHalfStackSlot(index, half));
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ // We need a zero reg. Always use r0 for that, and push it before to restore
+ // its value afterwards.
+ push(r0);
+ mov(r0, Operand(0));
+
+ if (count <= 5) {
+ // Special straight-line code for up to five slots. Generates two
+ // instructions per slot.
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ str(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
+ str(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
+ }
+ } else {
+ // General case for bigger counts (9 instructions).
+ // Use r1 for start address (inclusive), r2 for end address (exclusive).
+ push(r1);
+ push(r2);
+ sub(r1, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
+ sub(r2, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ str(r0, MemOperand(r1, /* offset */ kSystemPointerSize, PostIndex));
+ cmp(r1, r2);
+ b(&loop, ne);
+
+ pop(r2);
+ pop(r1);
+ }
+
+ pop(r0);
+}
+
#define I32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
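The FillStackSlotsWithZero added above is repeated for every architecture below with the same shape: unrolled stores of a zero register (or immediate) for small counts, and a loop (rep stos on ia32/x64) once the count crosses an architecture-specific threshold. A minimal standalone C++ sketch of that size-based split, not V8 code, with kUnrollLimit standing in for the per-port threshold:

    #include <cstdint>
    #include <cstring>

    // Sketch only: zero {count} 8-byte stack slots starting at {first_slot}.
    void ZeroStackSlots(uint64_t* first_slot, uint32_t count) {
      constexpr uint32_t kUnrollLimit = 5;  // arm uses 5, x64 uses 3, arm64 up to 20
      if (count <= kUnrollLimit) {
        // Straight-line code: a store (or two) per slot, no loop overhead.
        for (uint32_t i = 0; i < count; ++i) first_slot[i] = 0;
      } else {
        // Loop / bulk fill: roughly constant code size regardless of count.
        std::memset(first_slot, 0, count * sizeof(uint64_t));
      }
    }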
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index dc68267825..dede53b7a4 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -43,10 +43,12 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0;
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
inline MemOperand GetStackSlot(uint32_t index) {
- int32_t offset =
- kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(fp, -offset);
+ return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetInstanceOperand() {
@@ -398,6 +400,38 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ int max_stp_offset = -liftoff::GetStackSlotOffset(index + count - 1);
+ if (count <= 20 && IsImmLSPair(max_stp_offset, kXRegSizeLog2)) {
+ // Special straight-line code for up to 20 slots. Generates one
+ // instruction per two slots (<= 10 instructions total).
+ for (; count > 1; count -= 2) {
+ STATIC_ASSERT(kStackSlotSize == kSystemPointerSize);
+ stp(xzr, xzr, liftoff::GetStackSlot(index + count - 1));
+ }
+ DCHECK(count == 0 || count == 1);
+ if (count) str(xzr, liftoff::GetStackSlot(index));
+ } else {
+ // General case for bigger counts (7 instructions).
+ // Use x0 for start address (inclusive), x1 for end address (exclusive).
+ Push(x1, x0);
+ Sub(x0, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
+ Sub(x1, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ str(xzr, MemOperand(x0, /* offset */ kSystemPointerSize, PostIndex));
+ cmp(x0, x1);
+ b(&loop, ne);
+
+ Pop(x0, x1);
+ }
+}
+
#define I32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 7bc3596d2e..fa88d20df6 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -41,7 +41,7 @@ inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
-// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
+// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
inline Operand GetInstanceOperand() { return Operand(ebp, -8); }
static constexpr LiftoffRegList kByteRegs =
@@ -511,6 +511,37 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
mov(reg, liftoff::GetHalfStackSlot(index, half));
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ if (count <= 2) {
+ // Special straight-line code for up to two slots (6-9 bytes per word:
+ // C7 <1-4 bytes operand> <4 bytes imm>, makes 12-18 bytes per slot).
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ mov(liftoff::GetHalfStackSlot(index + offset, kLowWord), Immediate(0));
+ mov(liftoff::GetHalfStackSlot(index + offset, kHighWord), Immediate(0));
+ }
+ } else {
+ // General case for bigger counts.
+ // This sequence takes 19-22 bytes (3 for pushes, 3-6 for lea, 2 for xor, 5
+ // for mov, 3 for rep_stos, 3 for pops).
+ // Note: rep_stos fills ECX doublewords at [EDI] with EAX.
+ push(eax);
+ push(ecx);
+ push(edi);
+ lea(edi, liftoff::GetStackSlot(last_stack_slot));
+ xor_(eax, eax);
+ // Number of words is number of slots times two.
+ mov(ecx, Immediate(count * 2));
+ rep_stos();
+ pop(edi);
+ pop(ecx);
+ pop(eax);
+ }
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
lea(dst, Operand(lhs, rhs, times_1, 0));
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 0fcfb8dbfc..389c065507 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -297,7 +297,7 @@ class StackTransferRecipe {
// process all remaining moves in that cycle. Repeat for all cycles.
uint32_t next_spill_slot = asm_->cache_state()->stack_height();
while (!move_dst_regs_.is_empty()) {
- // TODO(clemensh): Use an unused register if available.
+ // TODO(clemensb): Use an unused register if available.
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst);
LiftoffRegister spill_reg = move->src;
@@ -412,7 +412,7 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
} // namespace
-// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
+// TODO(clemensb): Don't copy the full parent state (this makes us N^2).
void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
uint32_t num_locals,
uint32_t arity,
@@ -484,7 +484,7 @@ constexpr AssemblerOptions DefaultLiftoffOptions() {
} // namespace
-// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
+// TODO(clemensb): Provide a reasonably sized buffer, based on wasm function
// size.
LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
: TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo,
@@ -526,7 +526,7 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
const CacheState& source) {
DCHECK_EQ(source.stack_height(), target.stack_height());
- // TODO(clemensh): Reuse the same StackTransferRecipe object to save some
+ // TODO(clemensb): Reuse the same StackTransferRecipe object to save some
// allocations.
StackTransferRecipe transfers(this);
for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 766ce71db1..f0d49a8782 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -228,7 +228,7 @@ class LiftoffAssembler : public TurboAssembler {
return reg;
}
- // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
+ // TODO(clemensb): Don't copy the full parent state (this makes us N^2).
void InitMerge(const CacheState& source, uint32_t num_locals,
uint32_t arity, uint32_t stack_depth);
@@ -386,6 +386,7 @@ class LiftoffAssembler : public TurboAssembler {
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, uint32_t index, RegPairHalf);
+ inline void FillStackSlotsWithZero(uint32_t index, uint32_t count);
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 02de06763c..997c8ff52b 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -6,7 +6,7 @@
#include "src/base/optional.h"
#include "src/codegen/assembler-inl.h"
-// TODO(clemensh): Remove dependences on compiler stuff.
+// TODO(clemensb): Remove dependences on compiler stuff.
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
@@ -121,7 +121,7 @@ constexpr Vector<const ValueType> kSupportedTypes =
class LiftoffCompiler {
public:
- // TODO(clemensh): Make this a template parameter.
+ // TODO(clemensb): Make this a template parameter.
static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using Value = ValueBase;
@@ -341,6 +341,24 @@ class LiftoffCompiler {
__ bind(ool.continuation.get());
}
+ bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
+ int actual_locals = __ num_locals() - num_params;
+ DCHECK_LE(0, actual_locals);
+ constexpr int kNumCacheRegisters = NumRegs(kLiftoffAssemblerGpCacheRegs);
+ // If we have many locals, we put them on the stack initially. This avoids
+ // having to spill them on merge points. Use of these initial values should
+ // be rare anyway.
+ if (actual_locals > kNumCacheRegisters / 2) return true;
+ // If there are locals which are not i32 or i64, we also spill all locals,
+ // because other types cannot be initialized to constants.
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
+ ValueType type = decoder->GetLocalType(param_idx);
+ if (type != kWasmI32 && type != kWasmI64) return true;
+ }
+ return false;
+ }
+
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kSupportedTypes, __ local_type(i),
@@ -373,6 +391,7 @@ class LiftoffCompiler {
// LiftoffAssembler methods.
if (DidAssemblerBailout(decoder)) return;
+ // Process parameters.
__ SpillInstance(instance_reg);
// Input 0 is the code target, 1 is the instance. First parameter at 2.
uint32_t input_idx = kInstanceParameterIndex + 1;
@@ -380,32 +399,20 @@ class LiftoffCompiler {
input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
}
DCHECK_EQ(input_idx, descriptor_->InputCount());
- // Set to a gp register, to mark this uninitialized.
- LiftoffRegister zero_double_reg = kGpCacheRegList.GetFirstRegSet();
- DCHECK(zero_double_reg.is_gp());
- for (uint32_t param_idx = num_params; param_idx < __ num_locals();
- ++param_idx) {
- ValueType type = decoder->GetLocalType(param_idx);
- switch (type) {
- case kWasmI32:
- __ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
- break;
- case kWasmI64:
- __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0});
- break;
- case kWasmF32:
- case kWasmF64:
- if (zero_double_reg.is_gp()) {
- // Note: This might spill one of the registers used to hold
- // parameters.
- zero_double_reg = __ GetUnusedRegister(kFpReg);
- // Zero is represented by the bit pattern 0 for both f32 and f64.
- __ LoadConstant(zero_double_reg, WasmValue(0.));
- }
- __ PushRegister(type, zero_double_reg);
- break;
- default:
- UNIMPLEMENTED();
+
+ // Initialize locals beyond parameters.
+ if (SpillLocalsInitially(decoder, num_params)) {
+ __ FillStackSlotsWithZero(num_params, __ num_locals() - num_params);
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
+ ValueType type = decoder->GetLocalType(param_idx);
+ __ cache_state()->stack_state.emplace_back(type);
+ }
+ } else {
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
+ ValueType type = decoder->GetLocalType(param_idx);
+ __ cache_state()->stack_state.emplace_back(type, int32_t{0});
}
}
@@ -488,7 +495,7 @@ class LiftoffCompiler {
// Before entering a loop, spill all locals to the stack, in order to free
// the cache registers, and to avoid unnecessarily reloading stack values
// into registers at branches.
- // TODO(clemensh): Come up with a better strategy here, involving
+ // TODO(clemensb): Come up with a better strategy here, involving
// pre-analysis of the function.
__ SpillLocals();
@@ -519,7 +526,7 @@ class LiftoffCompiler {
}
// Allocate the else state.
- if_block->else_state = base::make_unique<ElseState>();
+ if_block->else_state = std::make_unique<ElseState>();
// Test the condition, jump to else if zero.
Register value = __ PopToRegister().gp();
@@ -617,8 +624,8 @@ class LiftoffCompiler {
template <ValueType src_type, ValueType result_type, class EmitFn>
void EmitUnOp(EmitFn fn) {
- static RegClass src_rc = reg_class_for(src_type);
- static RegClass result_rc = reg_class_for(result_type);
+ constexpr RegClass src_rc = reg_class_for(src_type);
+ constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src})
@@ -693,45 +700,44 @@ class LiftoffCompiler {
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitUnOp<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst.gp(), src.gp()); \
}); \
break;
#define CASE_I32_SIGN_EXTENSION(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitUnOp<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst.gp(), src.gp()); \
}); \
break;
#define CASE_I64_SIGN_EXTENSION(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitUnOp<kWasmI64, kWasmI64>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst, src); \
}); \
break;
#define CASE_FLOAT_UNOP(opcode, type, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitUnOp<kWasm##type, kWasm##type>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst.fp(), src.fp()); \
}); \
break;
#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn); \
break;
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
- case WasmOpcode::kExpr##opcode: \
+ case kExpr##opcode: \
EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>( \
kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0); \
break;
switch (opcode) {
- CASE_I32_UNOP(I32Eqz, i32_eqz)
CASE_I32_UNOP(I32Clz, i32_clz)
CASE_I32_UNOP(I32Ctz, i32_ctz)
CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
@@ -786,29 +792,41 @@ class LiftoffCompiler {
CASE_I64_SIGN_EXTENSION(I64SExtendI8, i64_signextend_i8)
CASE_I64_SIGN_EXTENSION(I64SExtendI16, i64_signextend_i16)
CASE_I64_SIGN_EXTENSION(I64SExtendI32, i64_signextend_i32)
+ case kExprI32Eqz:
+ DCHECK(decoder->lookahead(0, kExprI32Eqz));
+ if (decoder->lookahead(1, kExprBrIf)) {
+ DCHECK(!has_outstanding_op());
+ outstanding_op_ = kExprI32Eqz;
+ break;
+ }
+ EmitUnOp<kWasmI32, kWasmI32>(
+ [=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i32_eqz(dst.gp(), src.gp());
+ });
+ break;
case kExprI32Popcnt:
EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
&ExternalReference::wasm_word32_popcnt);
break;
- case WasmOpcode::kExprI64Eqz:
+ case kExprI64Eqz:
EmitUnOp<kWasmI64, kWasmI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
__ emit_i64_eqz(dst.gp(), src);
});
break;
- case WasmOpcode::kExprI64Clz:
- case WasmOpcode::kExprI64Ctz:
- case WasmOpcode::kExprI64Popcnt:
+ case kExprI64Clz:
+ case kExprI64Ctz:
+ case kExprI64Popcnt:
return unsupported(decoder, kComplexOperation,
WasmOpcodes::OpcodeName(opcode));
- case WasmOpcode::kExprI32SConvertSatF32:
- case WasmOpcode::kExprI32UConvertSatF32:
- case WasmOpcode::kExprI32SConvertSatF64:
- case WasmOpcode::kExprI32UConvertSatF64:
- case WasmOpcode::kExprI64SConvertSatF32:
- case WasmOpcode::kExprI64UConvertSatF32:
- case WasmOpcode::kExprI64SConvertSatF64:
- case WasmOpcode::kExprI64UConvertSatF64:
+ case kExprI32SConvertSatF32:
+ case kExprI32UConvertSatF32:
+ case kExprI32SConvertSatF64:
+ case kExprI32UConvertSatF64:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
return unsupported(decoder, kNonTrappingFloatToInt,
WasmOpcodes::OpcodeName(opcode));
default:
@@ -1224,7 +1242,7 @@ class LiftoffCompiler {
ReturnImpl(decoder);
}
- void GetLocal(FullDecoder* decoder, Value* result,
+ void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto& slot = __ cache_state()->stack_state[imm.index];
DCHECK_EQ(slot.type(), imm.type);
@@ -1245,7 +1263,7 @@ class LiftoffCompiler {
}
}
- void SetLocalFromStackSlot(LiftoffAssembler::VarState* dst_slot,
+ void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
ValueType type = dst_slot->type();
@@ -1266,7 +1284,7 @@ class LiftoffCompiler {
__ cache_state()->inc_used(dst_reg);
}
- void SetLocal(uint32_t local_index, bool is_tee) {
+ void LocalSet(uint32_t local_index, bool is_tee) {
auto& state = *__ cache_state();
auto& source_slot = state.stack_state.back();
auto& target_slot = state.stack_state[local_index];
@@ -1281,20 +1299,20 @@ class LiftoffCompiler {
target_slot = source_slot;
break;
case kStack:
- SetLocalFromStackSlot(&target_slot, local_index);
+ LocalSetFromStackSlot(&target_slot, local_index);
break;
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
- void SetLocal(FullDecoder* decoder, const Value& value,
+ void LocalSet(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
- SetLocal(imm.index, false);
+ LocalSet(imm.index, false);
}
- void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
+ void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
- SetLocal(imm.index, true);
+ LocalSet(imm.index, true);
}
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
@@ -1312,7 +1330,7 @@ class LiftoffCompiler {
return addr;
}
- void GetGlobal(FullDecoder* decoder, Value* result,
+ void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
@@ -1327,7 +1345,7 @@ class LiftoffCompiler {
__ PushRegister(global->type, value);
}
- void SetGlobal(FullDecoder* decoder, const Value& value,
+ void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
@@ -1402,10 +1420,18 @@ class LiftoffCompiler {
}
}
- void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
+ void BrIf(FullDecoder* decoder, const Value& /* cond */, uint32_t depth) {
+ Condition cond = kEqual; // Unary "equal" means "equals zero".
+
+ if (has_outstanding_op()) {
+ DCHECK_EQ(kExprI32Eqz, outstanding_op_);
+ cond = kUnequal; // Unary "unequal" means "not equals zero".
+ outstanding_op_ = kNoOutstandingOp;
+ }
+
Label cont_false;
Register value = __ PopToRegister().gp();
- __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
+ __ emit_cond_jump(cond, &cont_false, kWasmI32, value);
BrOrRet(decoder, depth);
__ bind(&cont_false);
@@ -2056,7 +2082,14 @@ class LiftoffCompiler {
}
private:
+ static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
+
LiftoffAssembler asm_;
+
+ // Used for merging code generation of subsequent operations (via look-ahead).
+ // Set by the first opcode, reset by the second.
+ WasmOpcode outstanding_op_ = kNoOutstandingOp;
+
compiler::CallDescriptor* const descriptor_;
CompilationEnv* const env_;
LiftoffBailoutReason bailout_reason_ = kSuccess;
@@ -2072,6 +2105,10 @@ class LiftoffCompiler {
// patch the actually needed stack size in the end.
uint32_t pc_offset_stack_frame_construction_ = 0;
+ bool has_outstanding_op() const {
+ return outstanding_op_ != kNoOutstandingOp;
+ }
+
void TraceCacheState(FullDecoder* decoder) const {
#ifdef DEBUG
if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
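The new outstanding_op_ field implements a small look-ahead fusion: when the decoder sees i32.eqz immediately followed by br_if, the eqz emits no code and the branch condition is inverted instead. A self-contained sketch of that deferred-op pattern, not V8 code and with illustrative names only:

    #include <cassert>

    enum class Cond { kBranchIfNonZero, kBranchIfZero };

    // Sketch only: models how a deferred i32.eqz flips the br_if condition.
    class OpFuser {
     public:
      // i32.eqz, when the next opcode is known to be br_if: emit nothing yet.
      void DeferEqz() {
        assert(!pending_eqz_);
        pending_eqz_ = true;
      }
      // br_if normally branches when the popped value is non-zero; a pending
      // eqz inverts that to "branch when the value is zero".
      Cond BranchCondition() {
        Cond cond = pending_eqz_ ? Cond::kBranchIfZero : Cond::kBranchIfNonZero;
        pending_eqz_ = false;
        return cond;
      }
     private:
      bool pending_eqz_ = false;
    };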
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 267a005547..b322f7eb68 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -32,15 +32,18 @@ static inline constexpr bool needs_reg_pair(ValueType type) {
return kNeedI64RegPair && type == kWasmI64;
}
-// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
- return needs_reg_pair(type) // i64 on 32 bit
- ? kGpRegPair
- : type == kWasmI32 || type == kWasmI64 // int types
- ? kGpReg
- : type == kWasmF32 || type == kWasmF64 // float types
- ? kFpReg
- : kNoReg; // other (unsupported) types
+ switch (type) {
+ case kWasmF32:
+ case kWasmF64:
+ return kFpReg;
+ case kWasmI32:
+ return kGpReg;
+ case kWasmI64:
+ return kNeedI64RegPair ? kGpRegPair : kGpReg;
+ default:
+ return kNoReg; // unsupported type
+ }
}
// Maximum code of a gp cache register.
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index e82ffe8f67..4c69e423c1 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -13,6 +13,28 @@ namespace wasm {
namespace liftoff {
+// half
+// slot Frame
+// -----+--------------------+---------------------------
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 (high) | ^
+// -4 | slot 0 (low) | |
+// -5 | slot 1 (high) | Frame slots
+// -6 | slot 1 (low) | |
+// | | v
+// -----+--------------------+ <-- stack ptr (sp)
+//
#if defined(V8_TARGET_BIG_ENDIAN)
constexpr int32_t kLowWordOffset = 4;
constexpr int32_t kHighWordOffset = 0;
@@ -27,9 +49,12 @@ constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
inline MemOperand GetStackSlot(uint32_t index) {
- int32_t offset = index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(fp, -kFirstStackSlotOffset - offset);
+ return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
@@ -583,6 +608,34 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
lw(reg, liftoff::GetHalfStackSlot(index, half));
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ if (count <= 12) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<=12 instructions total).
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ Sw(zero_reg, liftoff::GetStackSlot(index + offset));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Addu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot)));
+ Addu(a1, fp, Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ Sw(zero_reg, MemOperand(a0, kSystemPointerSize));
+ addiu(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 9c87dca733..5314a65da5 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -13,15 +13,44 @@ namespace wasm {
namespace liftoff {
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16 byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16 byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
// slot is located at fp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
inline MemOperand GetStackSlot(uint32_t index) {
- int32_t offset = index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(fp, -kFirstStackSlotOffset - offset);
+ return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
@@ -498,6 +527,35 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ if (count <= 12) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ Sd(zero_reg, liftoff::GetStackSlot(index + offset));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Daddu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot)));
+ Daddu(a1, fp,
+ Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ Sd(zero_reg, MemOperand(a0, kSystemPointerSize));
+ daddiu(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index a690a1c090..3b436a96d5 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -12,6 +12,49 @@ namespace v8 {
namespace internal {
namespace wasm {
+namespace liftoff {
+
+// half
+// slot Frame
+// -----+--------------------+---------------------------
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (lr) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 (high) | ^
+// -4 | slot 0 (low) | |
+// -5 | slot 1 (high) | Frame slots
+// -6 | slot 1 (low) | |
+// | | v
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kFirstStackSlotOffset =
+ kInstanceOffset + 2 * kSystemPointerSize;
+
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = kFirstStackSlotOffset +
+ index * LiftoffAssembler::kStackSlotSize - half_offset;
+ return MemOperand(fp, -offset);
+}
+
+} // namespace liftoff
+
int LiftoffAssembler::PrepareStackFrame() {
bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
@@ -108,6 +151,45 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half");
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ // We need a zero reg. Always use r0 for that, and push it before to restore
+ // its value afterwards.
+ push(r0);
+ mov(r0, Operand(0));
+
+ if (count <= 5) {
+ // Special straight-line code for up to five slots. Generates two
+ // instructions per slot.
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
+ StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
+ }
+ } else {
+ // General case for bigger counts (9 instructions).
+ // Use r4 for start address (inclusive), r5 for end address (exclusive).
+ push(r4);
+ push(r5);
+ subi(r4, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
+ subi(r5, fp, Operand(liftoff::GetStackSlotOffset(index) + kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ StoreP(r0, MemOperand(r0));
+ addi(r0, r0, Operand(kSystemPointerSize));
+ cmp(r4, r5);
+ bne(&loop);
+
+ pop(r4);
+ pop(r5);
+ }
+
+ pop(r0);
+}
+
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index d17c7dada1..36267560dd 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -12,6 +12,48 @@ namespace v8 {
namespace internal {
namespace wasm {
+namespace liftoff {
+
+// half
+// slot Frame
+// -----+--------------------+---------------------------
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (lr) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 (high) | ^
+// -4 | slot 0 (low) | |
+// -5 | slot 1 (high) | Frame slots
+// -6 | slot 1 (low) | |
+// | | v
+// -----+--------------------+ <-- stack ptr (sp)
+//
+constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kFirstStackSlotOffset =
+ kInstanceOffset + 2 * kSystemPointerSize;
+
+inline int GetStackSlotOffset(uint32_t index) {
+ return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+}
+
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = kFirstStackSlotOffset +
+ index * LiftoffAssembler::kStackSlotSize - half_offset;
+ return MemOperand(fp, -offset);
+}
+
+} // namespace liftoff
+
int LiftoffAssembler::PrepareStackFrame() {
bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
@@ -108,6 +150,45 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half");
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ // We need a zero reg. Always use r0 for that, and push it before to restore
+ // its value afterwards.
+ push(r0);
+ mov(r0, Operand(0));
+
+ if (count <= 5) {
+ // Special straight-line code for up to five slots. Generates two
+ // instructions per slot.
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
+ StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
+ }
+ } else {
+ // General case for bigger counts (9 instructions).
+ // Use r3 for start address (inclusive), r4 for end address (exclusive).
+ push(r3);
+ push(r4);
+ SubP(r3, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
+ SubP(r4, fp, Operand(liftoff::GetStackSlotOffset(index) + kStackSlotSize));
+
+ Label loop;
+ bind(&loop);
+ StoreP(r0, MemOperand(r0));
+ la(r0, MemOperand(r0, kSystemPointerSize));
+ CmpLogicalP(r3, r4);
+ bne(&loop);
+
+ pop(r4);
+ pop(r3);
+ }
+
+ pop(r0);
+}
+
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 43637985d0..f4185de070 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -47,7 +47,7 @@ inline Operand GetStackSlot(uint32_t index) {
return Operand(rbp, -kFirstStackSlotOffset - offset);
}
-// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
+// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
@@ -452,6 +452,35 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
+void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
+ DCHECK_LT(0, count);
+ uint32_t last_stack_slot = index + count - 1;
+ RecordUsedSpillSlot(last_stack_slot);
+
+ if (count <= 3) {
+ // Special straight-line code for up to three slots
+ // (7-10 bytes per slot: REX C7 <1-4 bytes op> <4 bytes imm>).
+ for (uint32_t offset = 0; offset < count; ++offset) {
+ movq(liftoff::GetStackSlot(index + offset), Immediate(0));
+ }
+ } else {
+ // General case for bigger counts.
+ // This sequence takes 20-23 bytes (3 for pushes, 4-7 for lea, 2 for xor, 5
+ // for mov, 3 for repstosq, 3 for pops).
+ // From intel manual: repstosq fills RCX quadwords at [RDI] with RAX.
+ pushq(rax);
+ pushq(rcx);
+ pushq(rdi);
+ leaq(rdi, liftoff::GetStackSlot(last_stack_slot));
+ xorl(rax, rax);
+ movl(rcx, Immediate(count));
+ repstosq();
+ popq(rdi);
+ popq(rcx);
+ popq(rax);
+ }
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
leal(dst, Operand(lhs, rhs, times_1, 0));
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index e812dd7994..31b68e9cdc 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -1692,17 +1692,17 @@ auto Global::type() const -> own<GlobalType> {
auto Global::get() const -> Val {
i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
- switch (type()->content()->kind()) {
- case I32:
+ switch (v8_global->type()) {
+ case i::wasm::kWasmI32:
return Val(v8_global->GetI32());
- case I64:
+ case i::wasm::kWasmI64:
return Val(v8_global->GetI64());
- case F32:
+ case i::wasm::kWasmF32:
return Val(v8_global->GetF32());
- case F64:
+ case i::wasm::kWasmF64:
return Val(v8_global->GetF64());
- case ANYREF:
- case FUNCREF: {
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmFuncRef: {
StoreImpl* store = impl(this)->store();
i::HandleScope scope(store->i_isolate());
return Val(V8RefValueToWasm(store, v8_global->GetRef()));
@@ -1883,9 +1883,10 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory> {
if (maximum < minimum) return nullptr;
if (maximum > i::wasm::kSpecMaxWasmMemoryPages) return nullptr;
}
- bool is_shared = false; // TODO(wasm+): Support shared memory.
+ // TODO(wasm+): Support shared memory.
+ i::SharedFlag shared = i::SharedFlag::kNotShared;
i::Handle<i::WasmMemoryObject> memory_obj;
- if (!i::WasmMemoryObject::New(isolate, minimum, maximum, is_shared)
+ if (!i::WasmMemoryObject::New(isolate, minimum, maximum, shared)
.ToHandle(&memory_obj)) {
return own<Memory>();
}
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index abb7b8ee86..71c06467f1 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -267,6 +267,12 @@ class Decoder {
}
const byte* end() const { return end_; }
+ // Check if the byte at {offset} from the current pc equals {expected}.
+ bool lookahead(int offset, byte expected) {
+ DCHECK_LE(pc_, end_);
+ return end_ - pc_ > offset && pc_[offset] == expected;
+ }
+
protected:
const byte* start_;
const byte* pc_;
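The lookahead helper added here is what feeds the i32.eqz/br_if fusion in liftoff-compiler.cc above: it bounds-checks against end_ before reading, so peeking past the end of the function body simply returns false rather than over-reading. A trivial standalone sketch of the same check, not V8 code:

    #include <cstddef>
    #include <cstdint>

    // Sketch only: bounds-checked one-byte lookahead over a byte stream.
    struct ByteCursor {
      const uint8_t* pc;
      const uint8_t* end;
      bool lookahead(ptrdiff_t offset, uint8_t expected) const {
        // Never reads past end: if fewer than offset+1 bytes remain, fail.
        return end - pc > offset && pc[offset] == expected;
      }
    };
    // Usage: with the wasm opcodes i32.eqz (0x45) and br_if (0x0d),
    // cursor.lookahead(0, 0x45) && cursor.lookahead(1, 0x0d) detects the
    // fusible "i32.eqz; br_if" pair.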
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 582934e19f..1f29571e40 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -714,12 +714,12 @@ struct ControlBase {
F(RefFunc, uint32_t function_index, Value* result) \
F(Drop, const Value& value) \
F(DoReturn, Vector<Value> values) \
- F(GetLocal, Value* result, const LocalIndexImmediate<validate>& imm) \
- F(SetLocal, const Value& value, const LocalIndexImmediate<validate>& imm) \
- F(TeeLocal, const Value& value, Value* result, \
+ F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
+ F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
+ F(LocalTee, const Value& value, Value* result, \
const LocalIndexImmediate<validate>& imm) \
- F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \
- F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \
+ F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
+ F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
F(TableGet, const Value& index, Value* result, \
const TableIndexImmediate<validate>& imm) \
F(TableSet, const Value& index, const Value& value, \
@@ -910,8 +910,8 @@ class WasmDecoder : public Decoder {
length = OpcodeLength(decoder, pc);
depth++;
break;
- case kExprSetLocal: // fallthru
- case kExprTeeLocal: {
+ case kExprLocalSet: // fallthru
+ case kExprLocalTee: {
LocalIndexImmediate<validate> imm(decoder, pc);
if (assigned->length() > 0 &&
imm.index < static_cast<uint32_t>(assigned->length())) {
@@ -1045,8 +1045,8 @@ class WasmDecoder : public Decoder {
bool Validate(const byte* pc, BranchTableImmediate<validate>& imm,
size_t block_depth) {
- if (!VALIDATE(imm.table_count < kV8MaxWasmFunctionSize)) {
- errorf(pc + 1, "invalid table count (> max function size): %u",
+ if (!VALIDATE(imm.table_count <= kV8MaxWasmFunctionBrTableSize)) {
+ errorf(pc + 1, "invalid table count (> max br_table size): %u",
imm.table_count);
return false;
}
@@ -1069,11 +1069,13 @@ class WasmDecoder : public Decoder {
case kExprI32x4ReplaceLane:
num_lanes = 4;
break;
- case kExprI16x8ExtractLane:
+ case kExprI16x8ExtractLaneS:
+ case kExprI16x8ExtractLaneU:
case kExprI16x8ReplaceLane:
num_lanes = 8;
break;
- case kExprI8x16ExtractLane:
+ case kExprI8x16ExtractLaneS:
+ case kExprI8x16ExtractLaneU:
case kExprI8x16ReplaceLane:
num_lanes = 16;
break;
@@ -1252,8 +1254,8 @@ class WasmDecoder : public Decoder {
BranchDepthImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
- case kExprGetGlobal:
- case kExprSetGlobal: {
+ case kExprGlobalGet:
+ case kExprGlobalSet: {
GlobalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
@@ -1291,9 +1293,9 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
- case kExprSetLocal:
- case kExprTeeLocal:
- case kExprGetLocal: {
+ case kExprLocalGet:
+ case kExprLocalSet:
+ case kExprLocalTee: {
LocalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
@@ -1458,19 +1460,19 @@ class WasmDecoder : public Decoder {
return {2, 0};
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
case kExprTableGet:
- case kExprTeeLocal:
+ case kExprLocalTee:
case kExprMemoryGrow:
return {1, 1};
- case kExprSetLocal:
- case kExprSetGlobal:
+ case kExprLocalSet:
+ case kExprGlobalSet:
case kExprDrop:
case kExprBrIf:
case kExprBrTable:
case kExprIf:
case kExprRethrow:
return {1, 0};
- case kExprGetLocal:
- case kExprGetGlobal:
+ case kExprLocalGet:
+ case kExprGlobalGet:
case kExprI32Const:
case kExprI64Const:
case kExprF32Const:
@@ -2125,28 +2127,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1 + imm.length;
break;
}
- case kExprGetLocal: {
+ case kExprLocalGet: {
LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto* value = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GetLocal, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(LocalGet, value, imm);
len = 1 + imm.length;
break;
}
- case kExprSetLocal: {
+ case kExprLocalSet: {
LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(0, local_type_vec_[imm.index]);
- CALL_INTERFACE_IF_REACHABLE(SetLocal, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(LocalSet, value, imm);
len = 1 + imm.length;
break;
}
- case kExprTeeLocal: {
+ case kExprLocalTee: {
LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(0, local_type_vec_[imm.index]);
auto* result = Push(value.type);
- CALL_INTERFACE_IF_REACHABLE(TeeLocal, value, result, imm);
+ CALL_INTERFACE_IF_REACHABLE(LocalTee, value, result, imm);
len = 1 + imm.length;
break;
}
@@ -2155,15 +2157,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_REACHABLE(Drop, value);
break;
}
- case kExprGetGlobal: {
+ case kExprGlobalGet: {
GlobalIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
auto* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GetGlobal, result, imm);
+ CALL_INTERFACE_IF_REACHABLE(GlobalGet, result, imm);
break;
}
- case kExprSetGlobal: {
+ case kExprGlobalSet: {
GlobalIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
@@ -2173,7 +2175,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
auto value = Pop(0, imm.type);
- CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(GlobalSet, value, imm);
break;
}
case kExprTableGet: {
@@ -2447,15 +2449,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TRACE_PART("[%d]", imm.value);
break;
}
- case kExprGetLocal:
- case kExprSetLocal:
- case kExprTeeLocal: {
+ case kExprLocalGet:
+ case kExprLocalSet:
+ case kExprLocalTee: {
LocalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
TRACE_PART("[%u]", imm.index);
break;
}
- case kExprGetGlobal:
- case kExprSetGlobal: {
+ case kExprGlobalGet:
+ case kExprGlobalSet: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
TRACE_PART("[%u]", imm.index);
break;
@@ -2700,8 +2702,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprI32x4ExtractLane:
- case kExprI16x8ExtractLane:
- case kExprI8x16ExtractLane: {
+ case kExprI16x8ExtractLaneS:
+ case kExprI16x8ExtractLaneU:
+ case kExprI8x16ExtractLaneS:
+ case kExprI8x16ExtractLaneU: {
len = SimdExtractLane(opcode, kWasmI32);
break;
}
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 4940134d53..e89c31d729 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -49,7 +49,7 @@ class WasmInstructionBufferImpl {
holder_->old_buffer_ = std::move(holder_->buffer_);
holder_->buffer_ = OwnedVector<uint8_t>::New(new_size);
- return base::make_unique<View>(holder_->buffer_.as_vector(), holder_);
+ return std::make_unique<View>(holder_->buffer_.as_vector(), holder_);
}
private:
@@ -59,7 +59,7 @@ class WasmInstructionBufferImpl {
std::unique_ptr<AssemblerBuffer> CreateView() {
DCHECK_NOT_NULL(buffer_);
- return base::make_unique<View>(buffer_.as_vector(), this);
+ return std::make_unique<View>(buffer_.as_vector(), this);
}
std::unique_ptr<uint8_t[]> ReleaseBuffer() {
@@ -170,7 +170,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
if (FLAG_trace_wasm_compiler) {
- PrintF("Compiling wasm function %d with %s\n\n", func_index_,
+ PrintF("Compiling wasm function %d with %s\n", func_index_,
ExecutionTierToString(tier_));
}
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 2da028a047..bdebfebe14 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_FUNCTION_COMPILER_H_
#define V8_WASM_FUNCTION_COMPILER_H_
+#include <memory>
+
#include "src/codegen/code-desc.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-environment.h"
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 923e1154ea..b08aa9215e 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -166,7 +166,8 @@ class WasmGraphBuildingInterface {
// Wrap input merge into phis.
for (uint32_t i = 0; i < block->start_merge.arity; ++i) {
Value& val = block->start_merge[i];
- val.node = builder_->Phi(val.type, 1, &val.node, block->end_env->control);
+ TFNode* inputs[] = {val.node, block->end_env->control};
+ val.node = builder_->Phi(val.type, 1, inputs);
}
}
@@ -212,7 +213,10 @@ class WasmGraphBuildingInterface {
if (block->is_onearmed_if()) {
// Merge the else branch into the end merge.
SetEnv(block->false_env);
- MergeValuesInto(decoder, block, &block->end_merge);
+ DCHECK_EQ(block->start_merge.arity, block->end_merge.arity);
+ Value* values =
+ block->start_merge.arity > 0 ? &block->start_merge[0] : nullptr;
+ MergeValuesInto(decoder, block, &block->end_merge, values);
}
// Now continue with the merged environment.
SetEnv(block->end_env);
@@ -258,37 +262,38 @@ class WasmGraphBuildingInterface {
void Drop(FullDecoder* decoder, const Value& value) {}
void DoReturn(FullDecoder* decoder, Vector<Value> values) {
- Vector<TFNode*> nodes = GetNodes(values);
- BUILD(Return, nodes);
+ base::SmallVector<TFNode*, 8> nodes(values.size());
+ GetNodes(nodes.begin(), values);
+ BUILD(Return, VectorOf(nodes));
}
- void GetLocal(FullDecoder* decoder, Value* result,
+ void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
if (!ssa_env_->locals) return; // unreachable
result->node = ssa_env_->locals[imm.index];
}
- void SetLocal(FullDecoder* decoder, const Value& value,
+ void LocalSet(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
- void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
+ void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
result->node = value.node;
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
- void GetGlobal(FullDecoder* decoder, Value* result,
+ void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
- result->node = BUILD(GetGlobal, imm.index);
+ result->node = BUILD(GlobalGet, imm.index);
}
- void SetGlobal(FullDecoder* decoder, const Value& value,
+ void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
- BUILD(SetGlobal, imm.index, value.node);
+ BUILD(GlobalSet, imm.index, value.node);
}
void TableGet(FullDecoder* decoder, const Value& index, Value* result,
@@ -310,8 +315,8 @@ class WasmGraphBuildingInterface {
TFNode* controls[2];
BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
TFNode* merge = BUILD(Merge, 2, controls);
- TFNode* vals[2] = {tval.node, fval.node};
- TFNode* phi = BUILD(Phi, tval.type, 2, vals, merge);
+ TFNode* inputs[] = {tval.node, fval.node, merge};
+ TFNode* phi = BUILD(Phi, tval.type, 2, inputs);
result->node = phi;
ssa_env_->control = merge;
}
@@ -319,10 +324,11 @@ class WasmGraphBuildingInterface {
void BrOrRet(FullDecoder* decoder, uint32_t depth) {
if (depth == decoder->control_depth() - 1) {
uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
- Vector<TFNode*> values =
- ret_count == 0 ? Vector<TFNode*>{}
- : GetNodes(decoder->stack_value(ret_count), ret_count);
- BUILD(Return, values);
+ base::SmallVector<TFNode*, 8> values(ret_count);
+ if (ret_count > 0) {
+ GetNodes(values.begin(), decoder->stack_value(ret_count), ret_count);
+ }
+ BUILD(Return, VectorOf(values));
} else {
Br(decoder, decoder->control_at(depth));
}
@@ -431,7 +437,8 @@ class WasmGraphBuildingInterface {
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
- Vector<TFNode*> inputs = GetNodes(args);
+ base::SmallVector<TFNode*, 8> inputs(args.size());
+ GetNodes(inputs.begin(), args);
TFNode* node = BUILD(SimdOp, opcode, inputs.begin());
if (result) result->node = node;
}
@@ -439,7 +446,8 @@ class WasmGraphBuildingInterface {
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate> imm, Vector<Value> inputs,
Value* result) {
- Vector<TFNode*> nodes = GetNodes(inputs);
+ base::SmallVector<TFNode*, 8> nodes(inputs.size());
+ GetNodes(nodes.begin(), inputs);
result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes.begin());
}
@@ -486,12 +494,11 @@ class WasmGraphBuildingInterface {
// If the tags match we extract the values from the exception object and
// push them onto the operand stack using the passed {values} vector.
SetEnv(if_match_env);
- // TODO(mstarzinger): Can't use BUILD() here, GetExceptionValues() returns
- // TFNode** rather than TFNode*. Fix to add landing pads.
- Vector<TFNode*> caught_values =
- builder_->GetExceptionValues(exception.node, imm.exception);
+ base::SmallVector<TFNode*, 8> caught_values(values.size());
+ Vector<TFNode*> caught_vector = VectorOf(caught_values);
+ BUILD(GetExceptionValues, exception.node, imm.exception, caught_vector);
for (size_t i = 0, e = values.size(); i < e; ++i) {
- values[i].node = caught_values[i];
+ values[i].node = caught_vector[i];
}
BrOrRet(decoder, depth);
@@ -519,7 +526,8 @@ class WasmGraphBuildingInterface {
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
- Vector<TFNode*> inputs = GetNodes(args);
+ base::SmallVector<TFNode*, 8> inputs(args.size());
+ GetNodes(inputs.begin(), args);
TFNode* node = BUILD(AtomicOp, opcode, inputs.begin(), imm.alignment,
imm.offset, decoder->position());
if (result) result->node = node;
@@ -591,16 +599,14 @@ class WasmGraphBuildingInterface {
->try_info;
}
- Vector<TFNode*> GetNodes(Value* values, size_t count) {
- Vector<TFNode*> nodes = builder_->Buffer(count);
+ void GetNodes(TFNode** nodes, Value* values, size_t count) {
for (size_t i = 0; i < count; ++i) {
nodes[i] = values[i].node;
}
- return nodes;
}
- Vector<TFNode*> GetNodes(Vector<Value> values) {
- return GetNodes(values.begin(), values.size());
+ void GetNodes(TFNode** nodes, Vector<Value> values) {
+ GetNodes(nodes, values.begin(), values.size());
}
void SetEnv(SsaEnv* env) {
@@ -656,10 +662,10 @@ class WasmGraphBuildingInterface {
SsaEnv* exception_env = Split(decoder, success_env);
exception_env->control = if_exception;
+ exception_env->effect = if_exception;
TryInfo* try_info = current_try_info(decoder);
Goto(decoder, exception_env, try_info->catch_env);
- TFNode* exception = try_info->exception;
- if (exception == nullptr) {
+ if (try_info->exception == nullptr) {
DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
try_info->exception = if_exception;
} else {
@@ -694,7 +700,8 @@ class WasmGraphBuildingInterface {
}
}
- void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
+ void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge,
+ Value* values) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
SsaEnv* target = c->end_env;
@@ -703,13 +710,8 @@ class WasmGraphBuildingInterface {
if (merge->arity == 0) return;
- uint32_t avail =
- decoder->stack_size() - decoder->control_at(0)->stack_depth;
- DCHECK_GE(avail, merge->arity);
- uint32_t start = avail >= merge->arity ? 0 : merge->arity - avail;
- Value* stack_values = decoder->stack_value(merge->arity);
- for (uint32_t i = start; i < merge->arity; ++i) {
- Value& val = stack_values[i];
+ for (uint32_t i = 0; i < merge->arity; ++i) {
+ Value& val = values[i];
Value& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
DCHECK(val.type == kWasmBottom ||
@@ -722,6 +724,17 @@ class WasmGraphBuildingInterface {
}
}
+ void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
+#ifdef DEBUG
+ uint32_t avail =
+ decoder->stack_size() - decoder->control_at(0)->stack_depth;
+ DCHECK_GE(avail, merge->arity);
+#endif
+ Value* stack_values =
+ merge->arity > 0 ? decoder->stack_value(merge->arity) : nullptr;
+ MergeValuesInto(decoder, c, merge, stack_values);
+ }
+
void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) {
DCHECK_NOT_NULL(to);
switch (to->state) {
@@ -741,17 +754,16 @@ class WasmGraphBuildingInterface {
to->control = merge;
// Merge effects.
if (from->effect != to->effect) {
- TFNode* effects[] = {to->effect, from->effect, merge};
- to->effect = builder_->EffectPhi(2, effects, merge);
+ TFNode* inputs[] = {to->effect, from->effect, merge};
+ to->effect = builder_->EffectPhi(2, inputs);
}
// Merge SSA values.
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
TFNode* a = to->locals[i];
TFNode* b = from->locals[i];
if (a != b) {
- TFNode* vals[] = {a, b};
- to->locals[i] =
- builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
+ TFNode* inputs[] = {a, b, merge};
+ to->locals[i] = builder_->Phi(decoder->GetLocalType(i), 2, inputs);
}
}
// Start a new merge from the instance cache.
@@ -787,7 +799,8 @@ class WasmGraphBuildingInterface {
env->state = SsaEnv::kMerged;
env->control = builder_->Loop(env->control);
- env->effect = builder_->EffectPhi(1, &env->effect, env->control);
+ TFNode* effect_inputs[] = {env->effect, env->control};
+ env->effect = builder_->EffectPhi(1, effect_inputs);
builder_->TerminateLoop(env->effect, env->control);
// The '+ 1' here is to be able to set the instance cache as assigned.
BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
@@ -798,8 +811,8 @@ class WasmGraphBuildingInterface {
int instance_cache_index = decoder->total_locals();
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
- env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
- &env->locals[i], env->control);
+ TFNode* inputs[] = {env->locals[i], env->control};
+ env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, inputs);
}
// Introduce phis for instance cache pointers if necessary.
if (assigned->Contains(instance_cache_index)) {
@@ -815,8 +828,8 @@ class WasmGraphBuildingInterface {
// Conservatively introduce phis for all local variables.
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
- env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
- &env->locals[i], env->control);
+ TFNode* inputs[] = {env->locals[i], env->control};
+ env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, inputs);
}
// Conservatively introduce phis for instance cache.
@@ -877,22 +890,22 @@ class WasmGraphBuildingInterface {
void DoCall(FullDecoder* decoder, uint32_t table_index, TFNode* index_node,
FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- int param_count = static_cast<int>(sig->parameter_count());
- Vector<TFNode*> arg_nodes = builder_->Buffer(param_count + 1);
- TFNode** return_nodes = nullptr;
+ size_t param_count = sig->parameter_count();
+ size_t return_count = sig->return_count();
+ base::SmallVector<TFNode*, 16> arg_nodes(param_count + 1);
+ base::SmallVector<TFNode*, 1> return_nodes(return_count);
arg_nodes[0] = index_node;
- for (int i = 0; i < param_count; ++i) {
+ for (size_t i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
if (index_node) {
- BUILD(CallIndirect, table_index, sig_index, arg_nodes.begin(),
- &return_nodes, decoder->position());
+ BUILD(CallIndirect, table_index, sig_index, VectorOf(arg_nodes),
+ VectorOf(return_nodes), decoder->position());
} else {
- BUILD(CallDirect, sig_index, arg_nodes.begin(), &return_nodes,
+ BUILD(CallDirect, sig_index, VectorOf(arg_nodes), VectorOf(return_nodes),
decoder->position());
}
- int return_count = static_cast<int>(sig->return_count());
- for (int i = 0; i < return_count; ++i) {
+ for (size_t i = 0; i < return_count; ++i) {
returns[i].node = return_nodes[i];
}
// The invoked function could have used grow_memory, so we need to
@@ -903,17 +916,17 @@ class WasmGraphBuildingInterface {
void DoReturnCall(FullDecoder* decoder, uint32_t table_index,
TFNode* index_node, FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- int arg_count = static_cast<int>(sig->parameter_count());
- Vector<TFNode*> arg_nodes = builder_->Buffer(arg_count + 1);
+ size_t arg_count = sig->parameter_count();
+ base::SmallVector<TFNode*, 16> arg_nodes(arg_count + 1);
arg_nodes[0] = index_node;
- for (int i = 0; i < arg_count; ++i) {
+ for (size_t i = 0; i < arg_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
if (index_node) {
- BUILD(ReturnCallIndirect, table_index, sig_index, arg_nodes.begin(),
+ BUILD(ReturnCallIndirect, table_index, sig_index, VectorOf(arg_nodes),
decoder->position());
} else {
- BUILD(ReturnCall, sig_index, arg_nodes.begin(), decoder->position());
+ BUILD(ReturnCall, sig_index, VectorOf(arg_nodes), decoder->position());
}
}
};
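The hunks above switch every Phi and EffectPhi call site to a single inputs array in which the merge (control) node is appended after the value inputs, instead of being passed as a separate argument. A minimal sketch of that convention in plain C++, with a hypothetical Node type and BuildPhi helper (not the V8 TurboFan API):

#include <vector>

// Hypothetical node type standing in for TFNode (illustration only).
struct Node {
  std::vector<Node*> inputs;  // value inputs followed by the control node
};

// Sketch of the updated convention: {inputs} holds {value_count} value
// nodes plus the merge/control node appended at inputs[value_count].
Node* BuildPhi(int value_count, Node* const* inputs) {
  Node* phi = new Node();
  phi->inputs.assign(inputs, inputs + value_count + 1);
  return phi;
}

Node* TwoWayPhi(Node* a, Node* b, Node* merge) {
  Node* ins[] = {a, b, merge};  // control appended last, as in the diff
  return BuildPhi(2, ins);
}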
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 7c41c0a209..adb7e19158 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -21,17 +21,37 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
EmitJumpSlot(lazy_compile_target); // 5 bytes
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) {
- // On x64, all code is allocated within a single code section, so we can use
- // relative jumps.
- static_assert(kMaxWasmCodeMemory <= size_t{2} * GB, "can use relative jump");
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
intptr_t displacement = static_cast<intptr_t>(
reinterpret_cast<byte*>(target) - pc_ - kNearJmpInstrSize);
- near_jmp(displacement, RelocInfo::NONE);
+ if (!is_int32(displacement)) return false;
+ near_jmp(displacement, RelocInfo::NONE); // 5 bytes
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ Label data;
+ int start_offset = pc_offset();
+ jmp(Operand(&data)); // 6 bytes
+ Nop(2); // 2 bytes
+ // The data must be properly aligned, so it can be patched atomically (see
+ // {PatchFarJumpSlot}).
+ DCHECK_EQ(start_offset + kSystemPointerSize, pc_offset());
+ USE(start_offset);
+ bind(&data);
+ dq(target); // 8 bytes
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ // The slot needs to be pointer-size aligned so we can atomically update it.
+ DCHECK(IsAligned(slot, kSystemPointerSize));
+ // The target is stored at offset 8 (kSystemPointerSize); see {EmitFarJumpSlot}.
+ reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize)
+ ->store(target, std::memory_order_relaxed);
+ // The update is atomic because the address is properly aligned.
+ // Because of cache coherence, the data update will eventually be seen by all
+ // cores. It's ok if they temporarily jump to the old target.
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -46,14 +66,20 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ jmp(target, RelocInfo::NONE);
+ return true;
}
-void JumpTableAssembler::EmitJumpSlot(Address target) {
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
jmp(target, RelocInfo::NONE);
}
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
+}
+
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
Nop(bytes);
@@ -74,16 +100,26 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
EmitJumpSlot(lazy_compile_target);
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
- CheckConstPool(true, false); // force emit of const pool
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) {
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
// Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
// mode used below, we need this to allow concurrent patching of this slot.
Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL));
CheckConstPool(true, false); // force emit of const pool
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ // Load from [pc + kInstrSize] to pc. Note that {pc} points two instructions
+ // after the currently executing one.
+ ldr_pcrel(pc, -kInstrSize); // 1 instruction
+ dd(target); // 4 bytes (== 1 instruction)
+ STATIC_ASSERT(kInstrSize == kInt32Size);
+ STATIC_ASSERT(kFarJumpTableSlotSize == 2 * kInstrSize);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -105,19 +141,43 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
if (nop_bytes) nop();
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
- ForceConstantPoolEmissionWithoutJump();
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ if (!TurboAssembler::IsNearCallOffset(
+ (reinterpret_cast<byte*>(target) - pc_) / kInstrSize)) {
+ return false;
+ }
+
+ Jump(target, RelocInfo::NONE);
+ return true;
}
-void JumpTableAssembler::EmitJumpSlot(Address target) {
- // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
- // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
- // sure concurrent patching is still supported.
- DCHECK(TurboAssembler::IsNearCallOffset(
- (reinterpret_cast<byte*>(target) - pc_) / kInstrSize));
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ // This code uses hard-coded registers and instructions (and avoids
+ // {UseScratchRegisterScope} or {InstructionAccurateScope}) because this code
+ // will only be called for the very specific runtime slot table, and we want
+ // to have maximum control over the generated code.
+ // Do not reuse this code without validating that the same assumptions hold.
+ constexpr Register kTmpReg = x16;
+ DCHECK(TmpList()->IncludesAliasOf(kTmpReg));
+ // Load from [pc + 2 * kInstrSize] to {kTmpReg}, then branch there.
+ ldr_pcrel(kTmpReg, 2); // 1 instruction
+ br(kTmpReg); // 1 instruction
+ dq(target); // 8 bytes (== 2 instructions)
+ STATIC_ASSERT(2 * kInstrSize == kSystemPointerSize);
+ STATIC_ASSERT(kFarJumpTableSlotSize == 4 * kInstrSize);
+}
- Jump(target, RelocInfo::NONE);
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ // The slot needs to be pointer-size aligned so we can atomically update it.
+ DCHECK(IsAligned(slot, kSystemPointerSize));
+ // The target is stored at offset 8 (kSystemPointerSize); see {EmitFarJumpSlot}.
+ reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize)
+ ->store(target, std::memory_order_relaxed);
+ // The data update is guaranteed to be atomic since it's a properly aligned
+ // store of a single machine word. This update will eventually be observed
+ // by any concurrent [ldr] on the same address because of the data cache
+ // coherence. It's ok if other cores temporarily jump to the old target.
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -138,13 +198,19 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
b(r1); // 2 bytes
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) {
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
mov(r1, Operand(target));
b(r1);
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -168,12 +234,18 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ Jump(target, RelocInfo::NONE);
+ return true;
}
-void JumpTableAssembler::EmitJumpSlot(Address target) {
- Jump(target, RelocInfo::NONE);
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -199,14 +271,20 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- JumpToInstructionStream(builtin_target);
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) {
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
mov(r0, Operand(target));
mtctr(r0);
bctr();
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
}
void JumpTableAssembler::NopBytes(int bytes) {
@@ -218,21 +296,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
#else
-void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
- Address lazy_compile_target) {
- UNIMPLEMENTED();
-}
-
-void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
- UNIMPLEMENTED();
-}
-
-void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
-
-void JumpTableAssembler::NopBytes(int bytes) {
- DCHECK_LE(0, bytes);
- UNIMPLEMENTED();
-}
+#error Unknown architecture.
#endif
} // namespace wasm
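The far jump slots emitted above place the 8-byte target word at a pointer-aligned offset so it can be swapped with a single relaxed atomic store while other threads may still be executing the jump. A minimal sketch of that patching idea, assuming 64-bit pointers and a hypothetical PatchFarJumpSlotSketch helper (this is not the V8 implementation):

#include <atomic>
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;
// Assumption: pointer-sized slots on a 64-bit target (x64 / arm64).
constexpr size_t kSystemPointerSize = sizeof(void*);

void PatchFarJumpSlotSketch(Address slot, Address new_target) {
  // The target word lives one pointer past the slot start, mirroring
  // {EmitFarJumpSlot} above; the slot itself is pointer-size aligned.
  auto* word =
      reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize);
  word->store(new_target, std::memory_order_relaxed);
  // Threads that already loaded the old word just jump to the old target
  // one more time; cache coherence makes the new value visible soon after.
}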
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 8889c18e9c..2100e44199 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -6,7 +6,6 @@
#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_
#include "src/codegen/macro-assembler.h"
-#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -19,9 +18,11 @@ namespace wasm {
//
// Additionally to this main jump table, there exist special jump tables for
// other purposes:
-// - the runtime stub table contains one entry per wasm runtime stub (see
+// - the far stub table contains one entry per wasm runtime stub (see
// {WasmCode::RuntimeStubId}), which jumps to the corresponding embedded
-// builtin.
+// builtin, plus (if {FLAG_wasm_far_jump_table} is enabled and the full
+// address space cannot be reached via the jump table) one entry per wasm
+// function.
// - the lazy compile table contains one entry per wasm function which jumps to
// the common {WasmCompileLazy} builtin and passes the function index that was
// invoked.
@@ -73,16 +74,28 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
// Determine the size of a jump table containing the given number of slots.
static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
- // TODO(wasm): Once the {RoundUp} utility handles non-powers of two values,
- // use: {RoundUp<kJumpTableSlotsPerLine>(slot_count) * kJumpTableLineSize}
return ((slot_count + kJumpTableSlotsPerLine - 1) /
kJumpTableSlotsPerLine) *
kJumpTableLineSize;
}
- // Translate a stub slot index to an offset into the continuous jump table.
- static uint32_t StubSlotIndexToOffset(uint32_t slot_index) {
- return slot_index * kJumpTableStubSlotSize;
+ // Translate a far jump table index to an offset into the table.
+ static uint32_t FarJumpSlotIndexToOffset(uint32_t slot_index) {
+ return slot_index * kFarJumpTableSlotSize;
+ }
+
+ // Translate a far jump table offset to the index into the table.
+ static uint32_t FarJumpSlotOffsetToIndex(uint32_t offset) {
+ DCHECK_EQ(0, offset % kFarJumpTableSlotSize);
+ return offset / kFarJumpTableSlotSize;
+ }
+
+ // Determine the size of a far jump table containing the given number of
+ // slots.
+ static constexpr uint32_t SizeForNumberOfFarJumpSlots(
+ int num_runtime_slots, int num_function_slots) {
+ int num_entries = num_runtime_slots + num_function_slots;
+ return num_entries * kFarJumpTableSlotSize;
}
// Translate a slot index to an offset into the lazy compile table.
@@ -90,11 +103,6 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
return slot_index * kLazyCompileTableSlotSize;
}
- // Determine the size of a jump table containing only runtime stub slots.
- static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) {
- return slot_count * kJumpTableStubSlotSize;
- }
-
// Determine the size of a lazy compile table.
static constexpr uint32_t SizeForNumberOfLazyFunctions(uint32_t slot_count) {
return slot_count * kLazyCompileTableSlotSize;
@@ -115,32 +123,41 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
FlushInstructionCache(base, lazy_compile_table_size);
}
- static void GenerateRuntimeStubTable(Address base, Address* targets,
- int num_stubs) {
- uint32_t table_size = num_stubs * kJumpTableStubSlotSize;
+ static void GenerateFarJumpTable(Address base, Address* stub_targets,
+ int num_runtime_slots,
+ int num_function_slots) {
+ uint32_t table_size =
+ SizeForNumberOfFarJumpSlots(num_runtime_slots, num_function_slots);
// Assume enough space, so the Assembler does not try to grow the buffer.
JumpTableAssembler jtasm(base, table_size + 256);
int offset = 0;
- for (int index = 0; index < num_stubs; ++index) {
- DCHECK_EQ(offset, StubSlotIndexToOffset(index));
+ for (int index = 0; index < num_runtime_slots + num_function_slots;
+ ++index) {
+ DCHECK_EQ(offset, FarJumpSlotIndexToOffset(index));
+ // Function slots initially jump to themselves. They are patched before
+ // being used.
+ Address target =
+ index < num_runtime_slots ? stub_targets[index] : base + offset;
+ jtasm.EmitFarJumpSlot(target);
+ offset += kFarJumpTableSlotSize;
DCHECK_EQ(offset, jtasm.pc_offset());
- jtasm.EmitRuntimeStubSlot(targets[index]);
- offset += kJumpTableStubSlotSize;
- jtasm.NopBytes(offset - jtasm.pc_offset());
}
FlushInstructionCache(base, table_size);
}
- static void PatchJumpTableSlot(Address base, uint32_t slot_index,
- Address new_target,
- WasmCode::FlushICache flush_i_cache) {
- Address slot = base + JumpSlotIndexToOffset(slot_index);
- JumpTableAssembler jtasm(slot);
- jtasm.EmitJumpSlot(new_target);
- jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
- if (flush_i_cache) {
- FlushInstructionCache(slot, kJumpTableSlotSize);
+ static void PatchJumpTableSlot(Address jump_table_slot,
+ Address far_jump_table_slot, Address target) {
+ // First, try to patch the jump table slot.
+ JumpTableAssembler jtasm(jump_table_slot);
+ if (!jtasm.EmitJumpSlot(target)) {
+ // If that fails, we need to patch the far jump table slot, and then
+ // update the jump table slot to jump to this far jump table slot.
+ DCHECK_NE(kNullAddress, far_jump_table_slot);
+ JumpTableAssembler::PatchFarJumpSlot(far_jump_table_slot, target);
+ CHECK(jtasm.EmitJumpSlot(far_jump_table_slot));
}
+ jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
+ FlushInstructionCache(jump_table_slot, kJumpTableSlotSize);
}
private:
@@ -157,48 +174,45 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kFarJumpTableSlotSize = 16;
static constexpr int kLazyCompileTableSlotSize = 10;
- static constexpr int kJumpTableStubSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kFarJumpTableSlotSize = 5;
static constexpr int kLazyCompileTableSlotSize = 10;
- static constexpr int kJumpTableStubSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
static constexpr int kJumpTableLineSize = 3 * kInstrSize;
static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 2 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 5 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableLineSize = 1 * kInstrSize;
static constexpr int kJumpTableSlotSize = 1 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 3 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
static constexpr int kJumpTableLineSize = 128;
static constexpr int kJumpTableSlotSize = 14;
+ static constexpr int kFarJumpTableSlotSize = 14;
static constexpr int kLazyCompileTableSlotSize = 20;
- static constexpr int kJumpTableStubSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 7 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 7 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS
static constexpr int kJumpTableLineSize = 6 * kInstrSize;
static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 6 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kJumpTableLineSize = 8 * kInstrSize;
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#else
- static constexpr int kJumpTableLineSize = 1;
- static constexpr int kJumpTableSlotSize = 1;
- static constexpr int kLazyCompileTableSlotSize = 1;
- static constexpr int kJumpTableStubSlotSize = 1;
+#error Unknown architecture.
#endif
static constexpr int kJumpTableSlotsPerLine =
@@ -218,9 +232,15 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
void EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target);
- void EmitRuntimeStubSlot(Address builtin_target);
+ // Returns {true} if the jump fits in the jump table slot, {false} otherwise.
+ bool EmitJumpSlot(Address target);
+
+ // Initially emit a far jump slot.
+ void EmitFarJumpSlot(Address target);
- void EmitJumpSlot(Address target);
+ // Patch an existing far jump slot, and make sure that this update eventually
+ // becomes available to all execution units that might execute this code.
+ static void PatchFarJumpSlot(Address slot, Address target);
void NopBytes(int bytes);
};
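PatchJumpTableSlot above first tries to emit a near jump directly to the target and only routes through the far jump table when the displacement does not fit. The sketch below illustrates that fallback with hypothetical helpers (TryEmitNearJump, PatchFarSlot); it models only the decision logic, not the actual instruction emission:

#include <atomic>
#include <cassert>
#include <cstdint>

using Address = uintptr_t;

// Stub for illustration: a near jump only fits if the signed 32-bit
// displacement from the slot reaches the target (a real assembler would
// emit the jump instruction here).
bool TryEmitNearJump(Address slot, Address target) {
  intptr_t disp = static_cast<intptr_t>(target - slot);
  return disp == static_cast<int32_t>(disp);
}

// Stub for illustration: atomically swap the far slot's data word.
void PatchFarSlot(Address far_slot_data, Address target) {
  reinterpret_cast<std::atomic<Address>*>(far_slot_data)
      ->store(target, std::memory_order_relaxed);
}

void PatchSlotSketch(Address jump_slot, Address far_slot_data, Address target) {
  if (TryEmitNearJump(jump_slot, target)) return;
  // Out of near-jump range: patch the far slot, then point the near slot
  // at the far slot, which sits next to the jump table and is always near.
  assert(far_slot_data != 0);
  PatchFarSlot(far_slot_data, target);
  bool ok = TryEmitNearJump(jump_slot, far_slot_data);
  assert(ok);
  (void)ok;
}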
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index b11a557195..300c7afcf9 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -16,7 +16,7 @@ namespace wasm {
void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
int func_index, int position, uint8_t* mem_start) {
- EmbeddedVector<char, 64> value;
+ EmbeddedVector<char, 91> value;
auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
switch (mem_rep) {
#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
@@ -34,6 +34,25 @@ void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
TRACE_TYPE(kFloat32, "f32", "%f / %08x", float, uint32_t)
TRACE_TYPE(kFloat64, "f64", "%f / %016" PRIx64, double, uint64_t)
#undef TRACE_TYPE
+ case MachineRepresentation::kSimd128:
+ SNPrintF(value, "s128:%d %d %d %d / %08x %08x %08x %08x",
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 4),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 8),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 12),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 4),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 8),
+ base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(mem_start) + info->address + 12));
+ break;
default:
SNPrintF(value, "???");
}
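The new kSimd128 case above prints an s128 value as four 32-bit lanes, read little-endian at byte offsets 0, 4, 8 and 12 from the accessed address, once in decimal and once in hex. A self-contained sketch of the same formatting, assuming a little-endian host and plain C++ instead of V8's base::ReadLittleEndianValue:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumes a little-endian host; V8 abstracts the byte order away.
uint32_t ReadU32LE(const uint8_t* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

void TraceS128(const uint8_t* mem_start, size_t address) {
  const uint8_t* p = mem_start + address;
  std::printf("s128:%u %u %u %u / %08x %08x %08x %08x\n",
              ReadU32LE(p), ReadU32LE(p + 4), ReadU32LE(p + 8),
              ReadU32LE(p + 12), ReadU32LE(p), ReadU32LE(p + 4),
              ReadU32LE(p + 8), ReadU32LE(p + 12));
}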
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 2847b02c64..9e08f8d109 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -14,7 +14,6 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
-#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
@@ -31,7 +30,6 @@
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -152,6 +150,9 @@ class CompilationUnitQueues {
for (int task_id = 0; task_id < max_tasks; ++task_id) {
queues_[task_id].next_steal_task_id = next_task_id(task_id);
}
+ for (auto& atomic_counter : num_units_) {
+ std::atomic_init(&atomic_counter, size_t{0});
+ }
}
base::Optional<WasmCompilationUnit> GetNextUnit(
@@ -254,15 +255,14 @@ class CompilationUnitQueues {
};
struct BigUnitsQueue {
- BigUnitsQueue() = default;
+ BigUnitsQueue() {
+ for (auto& atomic : has_units) std::atomic_init(&atomic, false);
+ }
base::Mutex mutex;
// Can be read concurrently to check whether any elements are in the queue.
- std::atomic_bool has_units[kNumTiers] = {
- ATOMIC_VAR_INIT(false),
- ATOMIC_VAR_INIT(false)
- };
+ std::atomic<bool> has_units[kNumTiers];
// Protected by {mutex}:
std::priority_queue<BigUnit> units[kNumTiers];
@@ -271,11 +271,8 @@ class CompilationUnitQueues {
std::vector<Queue> queues_;
BigUnitsQueue big_units_queue_;
- std::atomic_size_t num_units_[kNumTiers] = {
- ATOMIC_VAR_INIT(0),
- ATOMIC_VAR_INIT(0)
- };
- std::atomic_int next_queue_to_add{0};
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<int> next_queue_to_add{0};
int next_task_id(int task_id) const {
int next = task_id + 1;
@@ -482,7 +479,7 @@ class CompilationStateImpl {
// Compilation error, atomically updated. This flag can be updated and read
// using relaxed semantics.
- std::atomic_bool compile_failed_{false};
+ std::atomic<bool> compile_failed_{false};
const int max_background_tasks_ = 0;
@@ -967,6 +964,10 @@ bool ExecuteJSToWasmWrapperCompilationUnits(
return true;
}
+bool NeedsDeterministicCompile() {
+ return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1;
+}
+
// Run by the main thread and background tasks to take part in compilation.
// Returns whether any units were executed.
bool ExecuteCompilationUnits(
@@ -994,6 +995,7 @@ bool ExecuteCompilationUnits(
// These fields are initialized in a {BackgroundCompileScope} before
// starting compilation.
double deadline = 0;
+ const bool deterministic = NeedsDeterministicCompile();
base::Optional<CompilationEnv> env;
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
@@ -1087,7 +1089,7 @@ bool ExecuteCompilationUnits(
}
// Get next unit.
- if (deadline < platform->MonotonicallyIncreasingTime()) {
+ if (deterministic || deadline < platform->MonotonicallyIncreasingTime()) {
unit = {};
} else {
unit = compile_scope.compilation_state()->GetNextCompilationUnit(
@@ -1199,10 +1201,6 @@ void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
builder.Commit();
}
-bool NeedsDeterministicCompile() {
- return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1;
-}
-
bool MayCompriseLazyFunctions(const WasmModule* module,
const WasmFeatures& enabled_features,
bool lazy_module) {
@@ -1373,7 +1371,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
auto native_module = isolate->wasm_engine()->NewNativeModule(
isolate, enabled, std::move(module));
native_module->SetWireBytes(std::move(wire_bytes_copy));
- native_module->SetRuntimeStubs(isolate);
CompileNativeModule(isolate, thrower, wasm_module, native_module.get());
if (thrower->error()) return {};
@@ -1468,7 +1465,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
DCHECK_NULL(stream_);
stream_.reset(
- new StreamingDecoder(base::make_unique<AsyncStreamingProcessor>(this)));
+ new StreamingDecoder(std::make_unique<AsyncStreamingProcessor>(this)));
return stream_;
}
@@ -1504,7 +1501,7 @@ void AsyncCompileJob::CreateNativeModule(
// Create the module object and populate with compiled functions and
// information needed at instantiation time.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // TODO(clemensb): For the same module (same bytes / same hash), we should
// only have one {WasmModuleObject}. Otherwise, we might only set
// breakpoints on a (potentially empty) subset of the instances.
// Create the module object.
@@ -1512,7 +1509,6 @@ void AsyncCompileJob::CreateNativeModule(
native_module_ = isolate_->wasm_engine()->NewNativeModule(
isolate_, enabled_features_, std::move(module));
native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
- native_module_->SetRuntimeStubs(isolate_);
if (stream_) stream_->NotifyNativeModuleCreated(native_module_);
}
@@ -1707,7 +1703,7 @@ class AsyncCompileJob::CompileTask : public CancelableTask {
void AsyncCompileJob::StartForegroundTask() {
DCHECK_NULL(pending_foreground_task_);
- auto new_task = base::make_unique<CompileTask>(this, true);
+ auto new_task = std::make_unique<CompileTask>(this, true);
pending_foreground_task_ = new_task.get();
foreground_task_runner_->PostTask(std::move(new_task));
}
@@ -1715,7 +1711,7 @@ void AsyncCompileJob::StartForegroundTask() {
void AsyncCompileJob::ExecuteForegroundTaskImmediately() {
DCHECK_NULL(pending_foreground_task_);
- auto new_task = base::make_unique<CompileTask>(this, true);
+ auto new_task = std::make_unique<CompileTask>(this, true);
pending_foreground_task_ = new_task.get();
new_task->Run();
}
@@ -1727,7 +1723,7 @@ void AsyncCompileJob::CancelPendingForegroundTask() {
}
void AsyncCompileJob::StartBackgroundTask() {
- auto task = base::make_unique<CompileTask>(this, false);
+ auto task = std::make_unique<CompileTask>(this, false);
// If --wasm-num-compilation-tasks=0 is passed, do only spawn foreground
// tasks. This is used to make timing deterministic.
@@ -2210,11 +2206,9 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
}
int GetMaxBackgroundTasks() {
- if (NeedsDeterministicCompile()) return 1;
+ if (NeedsDeterministicCompile()) return 0;
int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
- int num_compile_tasks =
- std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
- return std::max(1, num_compile_tasks);
+ return std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
}
CompilationStateImpl::CompilationStateImpl(
@@ -2228,7 +2222,7 @@ CompilationStateImpl::CompilationStateImpl(
? CompileMode::kTiering
: CompileMode::kRegular),
async_counters_(std::move(async_counters)),
- max_background_tasks_(GetMaxBackgroundTasks()),
+ max_background_tasks_(std::max(GetMaxBackgroundTasks(), 1)),
compilation_unit_queues_(max_background_tasks_),
available_task_ids_(max_background_tasks_) {
for (int i = 0; i < max_background_tasks_; ++i) {
@@ -2617,7 +2611,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
auto& function = module->functions[exp.index];
JSToWasmWrapperKey key(function.imported, *function.sig);
if (queue.insert(key)) {
- auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>(
+ auto unit = std::make_unique<JSToWasmWrapperCompilationUnit>(
isolate, isolate->wasm_engine(), function.sig, function.imported,
enabled_features);
compilation_units.emplace(key, std::move(unit));
@@ -2628,7 +2622,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
CancelableTaskManager task_manager;
const int max_background_tasks = GetMaxBackgroundTasks();
for (int i = 0; i < max_background_tasks; ++i) {
- auto task = base::make_unique<CompileJSToWasmWrapperTask>(
+ auto task = std::make_unique<CompileJSToWasmWrapperTask>(
&task_manager, &queue, &compilation_units);
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
@@ -2699,12 +2693,21 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
const int kBufferSize = 32;
char buffer[kBufferSize];
+ Handle<String> url_prefix =
+ isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/"));
+
int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
DCHECK(name_chars >= 0 && name_chars < kBufferSize);
- MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
- AllocationType::kOld);
- script->set_name(*name_str.ToHandleChecked());
+ Handle<String> name_str =
+ isolate->factory()
+ ->NewStringFromOneByte(
+ VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ AllocationType::kOld)
+ .ToHandleChecked();
+ script->set_name(*name_str);
+ MaybeHandle<String> url_str =
+ isolate->factory()->NewConsString(url_prefix, name_str);
+ script->set_source_url(*url_str.ToHandleChecked());
if (source_map_url.size() != 0) {
MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
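The queue changes above replace the ATOMIC_VAR_INIT aggregate initializers with per-element initialization in the constructors, since arrays of std::atomic members cannot be brace-initialized portably. A minimal sketch of that pattern (QueueSketch and its member names are illustrative, not the actual class):

#include <atomic>
#include <cstddef>

constexpr int kNumTiers = 2;

struct QueueSketch {
  QueueSketch() {
    // Initialize each atomic element explicitly instead of using
    // ATOMIC_VAR_INIT in an aggregate initializer.
    for (auto& counter : num_units_) {
      std::atomic_init(&counter, size_t{0});
    }
    for (auto& flag : has_units_) {
      std::atomic_init(&flag, false);
    }
  }

  std::atomic<size_t> num_units_[kNumTiers];
  std::atomic<bool> has_units_[kNumTiers];
};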
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 56712977b1..b89d06b881 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -6,7 +6,6 @@
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
-#include "src/base/template-utils.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
@@ -31,6 +30,7 @@ namespace {
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kCompilationHintsString[] = "compilationHints";
+constexpr char kDebugInfoString[] = ".debug_info";
template <size_t N>
constexpr size_t num_chars(const char (&)[N]) {
@@ -89,6 +89,8 @@ const char* SectionName(SectionCode code) {
return kNameString;
case kSourceMappingURLSectionCode:
return kSourceMappingURLString;
+ case kDebugInfoSectionCode:
+ return kDebugInfoString;
case kCompilationHintsSectionCode:
return kCompilationHintsString;
default:
@@ -304,7 +306,7 @@ class ModuleDecoderImpl : public Decoder {
CHECK_NULL(module_);
SetCounters(counters);
module_.reset(
- new WasmModule(base::make_unique<Zone>(allocator, "signatures")));
+ new WasmModule(std::make_unique<Zone>(allocator, "signatures")));
module_->initial_pages = 0;
module_->maximum_pages = 0;
module_->mem_export = false;
@@ -399,6 +401,10 @@ class ModuleDecoderImpl : public Decoder {
// sourceMappingURL is a custom section and currently can occur anywhere
// in the module. In case of multiple sourceMappingURL sections, all
// except the first occurrence are ignored.
+ case kDebugInfoSectionCode:
+ // .debug_info is a custom section containing core DWARF information
+ // if produced by the compiler. Its presence likely means that the Wasm
+ // module was built in debug mode.
case kCompilationHintsSectionCode:
// TODO(frgossen): report out of place compilation hints section as a
// warning.
@@ -453,6 +459,13 @@ class ModuleDecoderImpl : public Decoder {
case kSourceMappingURLSectionCode:
DecodeSourceMappingURLSection();
break;
+ case kDebugInfoSectionCode:
+ // If there is an explicit source map, prefer it over DWARF info.
+ if (!has_seen_unordered_section(kSourceMappingURLSectionCode)) {
+ module_->source_map_url.assign("wasm://dwarf");
+ }
+ consume_bytes(static_cast<uint32_t>(end_ - start_), ".debug_info");
+ break;
case kCompilationHintsSectionCode:
if (enabled_features_.compilation_hints) {
DecodeCompilationHintsSection();
@@ -798,9 +811,11 @@ class ModuleDecoderImpl : public Decoder {
const byte* pos = pc();
bool is_active;
+ bool functions_as_elements;
uint32_t table_index;
WasmInitExpr offset;
- consume_segment_header("table index", &is_active, &table_index, &offset);
+ consume_element_segment_header(&is_active, &functions_as_elements,
+ &table_index, &offset);
if (failed()) return;
if (is_active) {
@@ -815,12 +830,6 @@ class ModuleDecoderImpl : public Decoder {
table_index);
break;
}
- } else {
- ValueType type = consume_reference_type();
- if (!ValueTypes::IsSubType(kWasmFuncRef, type)) {
- error(pc_ - 1, "invalid element segment type");
- break;
- }
}
uint32_t num_elem =
@@ -833,8 +842,8 @@ class ModuleDecoderImpl : public Decoder {
WasmElemSegment* init = &module_->elem_segments.back();
for (uint32_t j = 0; j < num_elem; j++) {
- uint32_t index = is_active ? consume_element_func_index()
- : consume_passive_element();
+ uint32_t index = functions_as_elements ? consume_element_expr()
+ : consume_element_func_index();
if (failed()) break;
init->entries.push_back(index);
}
@@ -911,8 +920,7 @@ class ModuleDecoderImpl : public Decoder {
bool is_active;
uint32_t memory_index;
WasmInitExpr dest_addr;
- consume_segment_header("memory index", &is_active, &memory_index,
- &dest_addr);
+ consume_data_segment_header(&is_active, &memory_index, &dest_addr);
if (failed()) break;
if (is_active && memory_index != 0) {
@@ -1483,7 +1491,7 @@ class ModuleDecoderImpl : public Decoder {
WasmInitExpr expr;
uint32_t len = 0;
switch (opcode) {
- case kExprGetGlobal: {
+ case kExprGlobalGet: {
GlobalIndexImmediate<Decoder::kValidate> imm(this, pc() - 1);
if (module->globals.size() <= imm.index) {
error("global index is out of bounds");
@@ -1544,6 +1552,10 @@ class ModuleDecoderImpl : public Decoder {
case kExprRefFunc: {
if (enabled_features_.anyref) {
FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() - 1);
+ if (module->functions.size() <= imm.index) {
+ errorf(pc() - 1, "invalid function index: %u", imm.index);
+ break;
+ }
expr.kind = WasmInitExpr::kRefFuncConst;
expr.val.function_index = imm.index;
len = imm.length;
@@ -1678,8 +1690,103 @@ class ModuleDecoderImpl : public Decoder {
return attribute;
}
- void consume_segment_header(const char* name, bool* is_active,
- uint32_t* index, WasmInitExpr* offset) {
+ void consume_element_segment_header(bool* is_active,
+ bool* functions_as_elements,
+ uint32_t* table_index,
+ WasmInitExpr* offset) {
+ const byte* pos = pc();
+ uint8_t flag;
+ if (enabled_features_.bulk_memory || enabled_features_.anyref) {
+ flag = consume_u8("flag");
+ } else {
+ uint32_t table_index = consume_u32v("table index");
+ // The only valid flag value without bulk_memory or anyref is '0'.
+ if (table_index != 0) {
+ error(
+ "Element segments with table indices require "
+ "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ return;
+ }
+ flag = 0;
+ }
+
+ // The mask for the bit in the flag which indicates if the segment is
+ // active or not.
+ constexpr uint8_t kIsPassiveMask = 0x01;
+ // The mask for the bit in the flag which indicates if the segment has an
+ // explicit table index field.
+ constexpr uint8_t kHasTableIndexMask = 0x02;
+ // The mask for the bit in the flag which indicates if the functions of this
+ // segment are defined as function indices (=0) or elements (=1).
+ constexpr uint8_t kFunctionsAsElementsMask = 0x04;
+ constexpr uint8_t kFullMask =
+ kIsPassiveMask | kHasTableIndexMask | kFunctionsAsElementsMask;
+
+ bool is_passive = flag & kIsPassiveMask;
+ *is_active = !is_passive;
+ *functions_as_elements = flag & kFunctionsAsElementsMask;
+ bool has_table_index = flag & kHasTableIndexMask;
+
+ if (is_passive && !enabled_features_.bulk_memory) {
+ error("Passive element segments require --experimental-wasm-bulk-memory");
+ return;
+ }
+ if (*functions_as_elements && !enabled_features_.bulk_memory) {
+ error(
+ "Illegal segment flag. Did you forget "
+ "--experimental-wasm-bulk-memory?");
+ return;
+ }
+ if (flag != 0 && !enabled_features_.bulk_memory &&
+ !enabled_features_.anyref) {
+ error(
+ "Invalid segment flag. Did you forget "
+ "--experimental-wasm-bulk-memory or --experimental-wasm-anyref?");
+ return;
+ }
+ if ((flag & kFullMask) != flag || (!(*is_active) && has_table_index)) {
+ errorf(pos, "illegal flag value %u. Must be 0, 1, 2, 4, 5 or 6", flag);
+ }
+
+ if (has_table_index) {
+ *table_index = consume_u32v("table index");
+ } else {
+ *table_index = 0;
+ }
+
+ if (*is_active) {
+ *offset = consume_init_expr(module_.get(), kWasmI32);
+ }
+
+ if (*is_active && !has_table_index) {
+ // Active segments without table indices are a special case for backwards
+ // compatibility. These cases have an implicit element kind or element
+ // type, so we are already done with the segment header.
+ return;
+ }
+
+ if (*functions_as_elements) {
+ // We have to check that the element type is FuncRef. All other
+ // element types are not valid yet.
+ ValueType type = consume_reference_type();
+ if (!ValueTypes::IsSubType(kWasmFuncRef, type)) {
+ error(pc_ - 1, "invalid element segment type");
+ return;
+ }
+ } else {
+ // We have to check that the element kind is Function. All other
+ // element kinds are not valid yet.
+ uint8_t val = consume_u8("element kind");
+ ImportExportKindCode kind = static_cast<ImportExportKindCode>(val);
+ if (kind != kExternalFunction) {
+ errorf(pos, "illegal element kind %x. Must be 0x00", val);
+ return;
+ }
+ }
+ }
+
+ void consume_data_segment_header(bool* is_active, uint32_t* index,
+ WasmInitExpr* offset) {
const byte* pos = pc();
uint32_t flag = consume_u32v("flag");
@@ -1715,7 +1822,7 @@ class ModuleDecoderImpl : public Decoder {
}
if (flag == SegmentFlags::kActiveWithIndex) {
*is_active = true;
- *index = consume_u32v(name);
+ *index = consume_u32v("memory index");
*offset = consume_init_expr(module_.get(), kWasmI32);
}
}
@@ -1731,7 +1838,7 @@ class ModuleDecoderImpl : public Decoder {
return index;
}
- uint32_t consume_passive_element() {
+ uint32_t consume_element_expr() {
uint32_t index = WasmElemSegment::kNullIndex;
uint8_t opcode = consume_u8("element opcode");
if (failed()) return index;
@@ -1857,6 +1964,10 @@ SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder* decoder,
kCompilationHintsString,
num_chars(kCompilationHintsString)) == 0) {
return kCompilationHintsSectionCode;
+ } else if (string.length() == num_chars(kDebugInfoString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kDebugInfoString, num_chars(kDebugInfoString)) == 0) {
+ return kDebugInfoSectionCode;
}
return kUnknownSectionCode;
}
@@ -1895,7 +2006,7 @@ FunctionResult DecodeWasmFunctionForTesting(
ModuleDecoderImpl decoder(enabled, function_start, function_end, kWasmOrigin);
decoder.SetCounters(counters);
return decoder.DecodeSingleFunction(zone, wire_bytes, module,
- base::make_unique<WasmFunction>());
+ std::make_unique<WasmFunction>());
}
AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
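consume_element_segment_header above decodes three independent flag bits: passive vs. active, explicit table index, and functions-as-elements. The sketch below shows how those bits combine into the legal flag values 0, 1, 2, 4, 5 and 6 (the ElemSegmentHeader struct and DecodeFlag helper are assumptions for illustration, not the decoder's API):

#include <cstdint>
#include <optional>

constexpr uint8_t kIsPassiveMask = 0x01;
constexpr uint8_t kHasTableIndexMask = 0x02;
constexpr uint8_t kFunctionsAsElementsMask = 0x04;

struct ElemSegmentHeader {
  bool is_active;
  bool has_table_index;
  bool functions_as_elements;
};

// Returns std::nullopt for the illegal flag values (3, 7, and anything > 7),
// matching the "Must be 0, 1, 2, 4, 5 or 6" check above.
std::optional<ElemSegmentHeader> DecodeFlag(uint8_t flag) {
  constexpr uint8_t kFullMask =
      kIsPassiveMask | kHasTableIndexMask | kFunctionsAsElementsMask;
  if (flag & ~kFullMask) return std::nullopt;  // unknown bits set
  ElemSegmentHeader h;
  h.is_active = (flag & kIsPassiveMask) == 0;
  h.has_table_index = (flag & kHasTableIndexMask) != 0;
  h.functions_as_elements = (flag & kFunctionsAsElementsMask) != 0;
  // An explicit table index is only allowed for active segments.
  if (!h.is_active && h.has_table_index) return std::nullopt;
  return h;
}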
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 8e121c9d30..5ee324b109 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_MODULE_DECODER_H_
#define V8_WASM_MODULE_DECODER_H_
+#include <memory>
+
#include "src/common/globals.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-constants.h"
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 976c3cde00..95d892ab50 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -93,7 +93,7 @@ class InstanceBuilder {
InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory);
+ MaybeHandle<JSArrayBuffer> memory_buffer);
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> Build();
@@ -114,7 +114,8 @@ class InstanceBuilder {
ErrorThrower* thrower_;
Handle<WasmModuleObject> module_object_;
MaybeHandle<JSReceiver> ffi_;
- MaybeHandle<JSArrayBuffer> memory_;
+ MaybeHandle<JSArrayBuffer> memory_buffer_;
+ Handle<WasmMemoryObject> memory_object_;
Handle<JSArrayBuffer> untagged_globals_;
Handle<FixedArray> tagged_globals_;
std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
@@ -165,9 +166,11 @@ class InstanceBuilder {
void SanitizeImports();
- // Find the imported memory buffer if there is one. This is used to see if we
- // need to recompile with bounds checks before creating the instance.
- MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const;
+ // Find the imported memory if there is one.
+ bool FindImportedMemory();
+
+ // Allocate the memory.
+ bool AllocateMemory();
// Processes a single imported function.
bool ProcessImportedFunction(Handle<WasmInstanceObject> instance,
@@ -221,9 +224,6 @@ class InstanceBuilder {
// Process initialization of globals.
void InitGlobals(Handle<WasmInstanceObject> instance);
- // Allocate memory for a module instance as a new JSArrayBuffer.
- Handle<JSArrayBuffer> AllocateMemory(uint32_t initial_pages,
- uint32_t maximum_pages);
bool NeedsWrappers() const;
@@ -243,8 +243,9 @@ class InstanceBuilder {
MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
+ MaybeHandle<JSArrayBuffer> memory_buffer) {
+ InstanceBuilder builder(isolate, thrower, module_object, imports,
+ memory_buffer);
auto instance = builder.Build();
if (!instance.is_null() && builder.ExecuteStartFunction()) {
return instance;
@@ -256,14 +257,14 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory)
+ MaybeHandle<JSArrayBuffer> memory_buffer)
: isolate_(isolate),
enabled_(module_object->native_module()->enabled_features()),
module_(module_object->module()),
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
- memory_(memory) {
+ memory_buffer_(memory_buffer) {
sanitized_imports_.reserve(module_->import_table.size());
}
@@ -289,7 +290,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
NativeModule* native_module = module_object_->native_module();
//--------------------------------------------------------------------------
- // Allocate the memory array buffer.
+ // Set up the memory buffer and memory objects.
//--------------------------------------------------------------------------
uint32_t initial_pages = module_->initial_pages;
auto initial_pages_counter = SELECT_WASM_COUNTER(
@@ -301,29 +302,41 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
isolate_->counters()->wasm_wasm_max_mem_pages_count();
max_pages_counter->AddSample(module_->maximum_pages);
}
- // Asm.js has memory_ already set at this point, so we don't want to
- // overwrite it.
- if (memory_.is_null()) {
- memory_ = FindImportedMemoryBuffer();
- }
- if (!memory_.is_null()) {
- // Set externally passed ArrayBuffer non detachable.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- memory->set_is_detachable(false);
-
- DCHECK_IMPLIES(native_module->use_trap_handler(),
- is_asmjs_module(module_) || memory->is_wasm_memory() ||
- memory->backing_store() == nullptr);
- } else if (initial_pages > 0 || native_module->use_trap_handler()) {
- // We need to unconditionally create a guard region if using trap handlers,
- // even when the size is zero to prevent null-dereference issues
- // (e.g. https://crbug.com/769637).
- // Allocate memory if the initial size is more than 0 pages.
- memory_ = AllocateMemory(initial_pages, module_->maximum_pages);
- if (memory_.is_null()) {
- // failed to allocate memory
- DCHECK(isolate_->has_pending_exception() || thrower_->error());
- return {};
+
+ if (is_asmjs_module(module_)) {
+ Handle<JSArrayBuffer> buffer;
+ if (memory_buffer_.ToHandle(&buffer)) {
+ // asm.js instantiation should have changed the state of the buffer.
+ CHECK(!buffer->is_detachable());
+ CHECK(buffer->is_asmjs_memory());
+ } else {
+ // Use an empty JSArrayBuffer for degenerate asm.js modules.
+ memory_buffer_ = isolate_->factory()->NewJSArrayBufferAndBackingStore(
+ 0, InitializedFlag::kUninitialized);
+ if (!memory_buffer_.ToHandle(&buffer)) {
+ thrower_->RangeError("Out of memory: asm.js memory");
+ return {};
+ }
+ buffer->set_is_asmjs_memory(true);
+ buffer->set_is_detachable(false);
+ }
+
+ // The maximum number of pages isn't strictly necessary for memory
+ // objects used for asm.js, as they are never visible, but we might
+ // as well make it accurate.
+ auto maximum_pages = static_cast<uint32_t>(
+ RoundUp(buffer->byte_length(), wasm::kWasmPageSize) /
+ wasm::kWasmPageSize);
+ memory_object_ =
+ WasmMemoryObject::New(isolate_, memory_buffer_, maximum_pages);
+ } else {
+ // Actual wasm module must have either imported or created memory.
+ CHECK(memory_buffer_.is_null());
+ if (!FindImportedMemory()) {
+ if (module_->has_memory && !AllocateMemory()) {
+ DCHECK(isolate_->has_pending_exception() || thrower_->error());
+ return {};
+ }
}
}
@@ -333,33 +346,42 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TRACE("New module instantiation for %p\n", native_module);
Handle<WasmInstanceObject> instance =
WasmInstanceObject::New(isolate_, module_object_);
- NativeModuleModificationScope native_modification_scope(native_module);
+
+ //--------------------------------------------------------------------------
+ // Attach the memory to the instance.
+ //--------------------------------------------------------------------------
+ if (module_->has_memory) {
+ DCHECK(!memory_object_.is_null());
+ if (!instance->has_memory_object()) {
+ instance->set_memory_object(*memory_object_);
+ }
+ // Add the instance object to the list of instances for this memory.
+ WasmMemoryObject::AddInstance(isolate_, memory_object_, instance);
+
+ // Double-check the {memory} array buffer matches the instance.
+ Handle<JSArrayBuffer> memory = memory_buffer_.ToHandleChecked();
+ CHECK_EQ(instance->memory_size(), memory->byte_length());
+ CHECK_EQ(instance->memory_start(), memory->backing_store());
+ }
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
//--------------------------------------------------------------------------
uint32_t untagged_globals_buffer_size = module_->untagged_globals_buffer_size;
if (untagged_globals_buffer_size > 0) {
- void* backing_store = isolate_->array_buffer_allocator()->Allocate(
- untagged_globals_buffer_size);
- if (backing_store == nullptr) {
- thrower_->RangeError("Out of memory: wasm globals");
- return {};
- }
- untagged_globals_ = isolate_->factory()->NewJSArrayBuffer(
- SharedFlag::kNotShared, AllocationType::kOld);
- constexpr bool is_external = false;
- constexpr bool is_wasm_memory = false;
- JSArrayBuffer::Setup(untagged_globals_, isolate_, is_external,
- backing_store, untagged_globals_buffer_size,
- SharedFlag::kNotShared, is_wasm_memory);
- if (untagged_globals_.is_null()) {
+ MaybeHandle<JSArrayBuffer> result =
+ isolate_->factory()->NewJSArrayBufferAndBackingStore(
+ untagged_globals_buffer_size, InitializedFlag::kZeroInitialized,
+ AllocationType::kOld);
+
+ if (!result.ToHandle(&untagged_globals_)) {
thrower_->RangeError("Out of memory: wasm globals");
return {};
}
+
+ instance->set_untagged_globals_buffer(*untagged_globals_);
instance->set_globals_start(
reinterpret_cast<byte*>(untagged_globals_->backing_store()));
- instance->set_untagged_globals_buffer(*untagged_globals_);
}
uint32_t tagged_globals_buffer_size = module_->tagged_globals_buffer_size;
@@ -425,6 +447,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
instance->set_indirect_function_tables(*tables);
}
+ NativeModuleModificationScope native_modification_scope(native_module);
+
//--------------------------------------------------------------------------
// Process the imports for the module.
//--------------------------------------------------------------------------
@@ -450,30 +474,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
InitializeExceptions(instance);
}
- //--------------------------------------------------------------------------
- // Create the WebAssembly.Memory object.
- //--------------------------------------------------------------------------
- if (module_->has_memory) {
- if (!instance->has_memory_object()) {
- // No memory object exists. Create one.
- Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
- isolate_, memory_,
- module_->maximum_pages != 0 ? module_->maximum_pages : -1);
- instance->set_memory_object(*memory_object);
- }
-
- // Add the instance object to the list of instances for this memory.
- Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_);
- WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
-
- if (!memory_.is_null()) {
- // Double-check the {memory} array buffer matches the instance.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- CHECK_EQ(instance->memory_size(), memory->byte_length());
- CHECK_EQ(instance->memory_start(), memory->backing_store());
- }
- }
-
// The bulk memory proposal changes the MVP behavior here; the segments are
// written as if `memory.init` and `table.init` are executed directly, and
// not bounds checked ahead of time.
@@ -536,7 +536,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Debugging support.
//--------------------------------------------------------------------------
// Set all breakpoints that were set on the shared module.
- WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance);
+ WasmModuleObject::SetBreakpointsOnNewInstance(
+ handle(module_object_->script(), isolate_), instance);
//--------------------------------------------------------------------------
// Create a wrapper for the start function.
@@ -547,7 +548,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<Code> wrapper_code =
JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
isolate_, function.sig, function.imported);
- // TODO(clemensh): Don't generate an exported function for the start
+ // TODO(clemensb): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
isolate_, instance, start_index,
@@ -807,22 +808,21 @@ void InstanceBuilder::SanitizeImports() {
}
}
-MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const {
+bool InstanceBuilder::FindImportedMemory() {
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
for (size_t index = 0; index < module_->import_table.size(); index++) {
- const WasmImport& import = module_->import_table[index];
+ WasmImport import = module_->import_table[index];
if (import.kind == kExternalMemory) {
- const auto& value = sanitized_imports_[index].value;
- if (!value->IsWasmMemoryObject()) {
- return {};
- }
- auto memory = Handle<WasmMemoryObject>::cast(value);
- Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
- return buffer;
+ auto& value = sanitized_imports_[index].value;
+ if (!value->IsWasmMemoryObject()) return false;
+ memory_object_ = Handle<WasmMemoryObject>::cast(value);
+ memory_buffer_ =
+ Handle<JSArrayBuffer>(memory_object_->array_buffer(), isolate_);
+ return true;
}
}
- return {};
+ return false;
}
bool InstanceBuilder::ProcessImportedFunction(
@@ -1016,19 +1016,19 @@ bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance,
Handle<String> module_name,
Handle<String> import_name,
Handle<Object> value) {
- // Validation should have failed if more than one memory object was
- // provided.
- DCHECK(!instance->has_memory_object());
if (!value->IsWasmMemoryObject()) {
ReportLinkError("memory import must be a WebAssembly.Memory object",
import_index, module_name, import_name);
return false;
}
- auto memory = Handle<WasmMemoryObject>::cast(value);
- instance->set_memory_object(*memory);
- Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
+ auto memory_object = Handle<WasmMemoryObject>::cast(value);
+
+ // The imported memory should have already been set up earlier, in Build().
+ CHECK_EQ(instance->memory_object(), *memory_object);
+
+ Handle<JSArrayBuffer> buffer(memory_object_->array_buffer(), isolate_);
// memory_ should have already been assigned in Build().
- DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
+ DCHECK_EQ(*memory_buffer_.ToHandleChecked(), *buffer);
uint32_t imported_cur_pages =
static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize);
if (imported_cur_pages < module_->initial_pages) {
@@ -1037,7 +1037,7 @@ bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance,
imported_cur_pages);
return false;
}
- int32_t imported_maximum_pages = memory->maximum_pages();
+ int32_t imported_maximum_pages = memory_object_->maximum_pages();
if (module_->has_maximum_pages) {
if (imported_maximum_pages < 0) {
thrower_->LinkError(
@@ -1186,13 +1186,8 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
return true;
}
- if (enabled_.bigint && global.type == kWasmI64) {
- Handle<BigInt> bigint;
-
- if (!BigInt::FromObject(isolate_, value).ToHandle(&bigint)) {
- return false;
- }
- WriteGlobalValue(global, bigint->AsInt64());
+ if (enabled_.bigint && global.type == kWasmI64 && value->IsBigInt()) {
+ WriteGlobalValue(global, BigInt::cast(*value).AsInt64());
return true;
}
@@ -1241,7 +1236,7 @@ void InstanceBuilder::CompileImportWrappers(
CancelableTaskManager task_manager;
const int max_background_tasks = GetMaxBackgroundTasks();
for (int i = 0; i < max_background_tasks; ++i) {
- auto task = base::make_unique<CompileImportWrapperTask>(
+ auto task = std::make_unique<CompileImportWrapperTask>(
&task_manager, isolate_->wasm_engine(), isolate_->counters(),
native_module, &import_wrapper_queue, &cache_scope);
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
@@ -1411,27 +1406,28 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
}
// Allocate memory for a module instance as a new JSArrayBuffer.
-Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages,
- uint32_t maximum_pages) {
+bool InstanceBuilder::AllocateMemory() {
+ auto initial_pages = module_->initial_pages;
+ auto maximum_pages = module_->has_maximum_pages ? module_->maximum_pages
+ : wasm::max_mem_pages();
if (initial_pages > max_mem_pages()) {
thrower_->RangeError("Out of memory: wasm memory too large");
- return Handle<JSArrayBuffer>::null();
- }
- const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
- Handle<JSArrayBuffer> mem_buffer;
- if (is_shared_memory) {
- if (!NewSharedArrayBuffer(isolate_, initial_pages * kWasmPageSize,
- maximum_pages * kWasmPageSize)
- .ToHandle(&mem_buffer)) {
- thrower_->RangeError("Out of memory: wasm shared memory");
- }
- } else {
- if (!NewArrayBuffer(isolate_, initial_pages * kWasmPageSize)
- .ToHandle(&mem_buffer)) {
- thrower_->RangeError("Out of memory: wasm memory");
- }
+ return false;
}
- return mem_buffer;
+ auto shared = (module_->has_shared_memory && enabled_.threads)
+ ? SharedFlag::kShared
+ : SharedFlag::kNotShared;
+
+ MaybeHandle<WasmMemoryObject> result =
+ WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared);
+
+ if (!result.ToHandle(&memory_object_)) {
+ thrower_->RangeError("Out of memory: wasm memory");
+ return false;
+ }
+ memory_buffer_ =
+ Handle<JSArrayBuffer>(memory_object_->array_buffer(), isolate_);
+ return true;
}
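The rewritten {AllocateMemory} derives byte sizes from the module's page counts (a wasm page is 64 KiB) and falls back to the engine-wide page limit when no maximum is declared. A standalone sketch of that arithmetic; the constants and function names below are illustrative, not V8's:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kWasmPageSize = 64 * 1024;   // 64 KiB per wasm page
    constexpr uint32_t kEngineMaxPages = 65536;     // illustrative engine limit

    // Mirrors the page-count logic above: reject oversized initial sizes and
    // default the maximum to the engine limit when none is declared.
    bool ComputeMemorySizes(uint32_t initial_pages, bool has_maximum,
                            uint32_t declared_maximum, uint64_t* initial_bytes,
                            uint64_t* maximum_bytes) {
      if (initial_pages > kEngineMaxPages) return false;  // "memory too large"
      uint32_t maximum_pages = has_maximum ? declared_maximum : kEngineMaxPages;
      *initial_bytes = uint64_t{initial_pages} * kWasmPageSize;
      *maximum_bytes = uint64_t{maximum_pages} * kWasmPageSize;
      return true;
    }

    int main() {
      uint64_t initial = 0, maximum = 0;
      if (ComputeMemorySizes(16, /*has_maximum=*/false, 0, &initial, &maximum)) {
        std::printf("initial=%llu maximum=%llu\n",
                    static_cast<unsigned long long>(initial),
                    static_cast<unsigned long long>(maximum));
      }
    }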
bool InstanceBuilder::NeedsWrappers() const {
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 94945ea58a..37aaf05605 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -4,7 +4,6 @@
#include "src/wasm/streaming-decoder.h"
-#include "src/base/template-utils.h"
#include "src/handles/handles.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
@@ -364,14 +363,14 @@ StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeModuleHeader\n");
streaming->ProcessModuleHeader();
if (!streaming->ok()) return nullptr;
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionID: %s section\n",
SectionName(static_cast<SectionCode>(id_)));
- return base::make_unique<DecodeSectionLength>(id_, module_offset_);
+ return std::make_unique<DecodeSectionLength>(id_, module_offset_);
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -391,7 +390,7 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
streaming->ProcessSection(buf);
if (!streaming->ok()) return nullptr;
// There is no payload, we go to the next section immediately.
- return base::make_unique<DecodeSectionID>(streaming->module_offset_);
+ return std::make_unique<DecodeSectionID>(streaming->module_offset_);
} else {
if (section_id_ == SectionCode::kCodeSectionCode) {
// Explicitly check for multiple code sections as module decoder never
@@ -404,9 +403,9 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
streaming->code_section_processed_ = true;
// We reached the code section. All functions of the code section are put
// into the same SectionBuffer.
- return base::make_unique<DecodeNumberOfFunctions>(buf);
+ return std::make_unique<DecodeNumberOfFunctions>(buf);
}
- return base::make_unique<DecodeSectionPayload>(buf);
+ return std::make_unique<DecodeSectionPayload>(buf);
}
}
@@ -415,7 +414,7 @@ StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionPayload\n");
streaming->ProcessSection(section_buffer_);
if (!streaming->ok()) return nullptr;
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -434,14 +433,14 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
if (payload_buf.size() != bytes_consumed_) {
return streaming->Error("not all code section bytes were used");
}
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
DCHECK_GE(kMaxInt, value_);
streaming->StartCodeSection(static_cast<int>(value_),
streaming->section_buffers_.back());
if (!streaming->ok()) return nullptr;
- return base::make_unique<DecodeFunctionLength>(
+ return std::make_unique<DecodeFunctionLength>(
section_buffer_, section_buffer_->payload_offset() + bytes_consumed_,
value_);
}
@@ -464,7 +463,7 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
return streaming->Error("not enough code section bytes");
}
- return base::make_unique<DecodeFunctionBody>(
+ return std::make_unique<DecodeFunctionBody>(
section_buffer_, buffer_offset_ + bytes_consumed_, value_,
num_remaining_functions_, streaming->module_offset());
}
@@ -477,14 +476,14 @@ StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
size_t end_offset = buffer_offset_ + function_body_length_;
if (num_remaining_functions_ > 0) {
- return base::make_unique<DecodeFunctionLength>(section_buffer_, end_offset,
- num_remaining_functions_);
+ return std::make_unique<DecodeFunctionLength>(section_buffer_, end_offset,
+ num_remaining_functions_);
}
// We just read the last function body. Continue with the next section.
if (end_offset != section_buffer_->length()) {
return streaming->Error("not all code section bytes were used");
}
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
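All of these states form a small state machine: each {Next} (or {NextWithValue}) constructs and returns the following {DecodingState}, which is why replacing base::make_unique with std::make_unique is purely mechanical. A self-contained sketch of that pattern, with invented state names:

    #include <cstdio>
    #include <memory>

    // Sketch of the streaming-decoder shape: each state returns the next
    // state (or nullptr to stop) as a unique_ptr.
    struct State {
      virtual ~State() = default;
      virtual std::unique_ptr<State> Next() = 0;
    };

    struct DecodeBody : State {
      std::unique_ptr<State> Next() override {
        std::puts("decode body, done");
        return nullptr;  // end of the chain
      }
    };

    struct DecodeHeader : State {
      std::unique_ptr<State> Next() override {
        std::puts("decode header");
        return std::make_unique<DecodeBody>();
      }
    };

    int main() {
      std::unique_ptr<State> state = std::make_unique<DecodeHeader>();
      while (state) state = state->Next();  // drive the chain to completion
    }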
StreamingDecoder::StreamingDecoder(
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index bca5c2b941..49f348b714 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -44,7 +44,7 @@ using FunctionSig = Signature<ValueType>;
inline size_t hash_value(ValueType type) { return static_cast<size_t>(type); }
-// TODO(clemensh): Compute memtype and size from ValueType once we have c++14
+// TODO(clemensb): Compute memtype and size from ValueType once we have c++14
// constexpr support.
#define FOREACH_LOAD_TYPE(V) \
V(I32, , Int32, 2) \
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 91cfc01cea..55695259f0 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -6,7 +6,7 @@
#include <iomanip>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/small-vector.h"
@@ -192,7 +192,7 @@ void WasmCode::LogCode(Isolate* isolate) const {
Local<v8::String> source_map_str =
load_wasm_source_map(v8_isolate, source_map_url.c_str());
native_module()->SetWasmSourceMap(
- base::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
+ std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
}
if (!name_vec.empty()) {
@@ -235,7 +235,10 @@ void WasmCode::Validate() const {
switch (mode) {
case RelocInfo::WASM_CALL: {
Address target = it.rinfo()->wasm_call_address();
- DCHECK(native_module_->is_jump_table_slot(target));
+ WasmCode* code = native_module_->Lookup(target);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(WasmCode::kJumpTable, code->kind());
+ CHECK(code->contains(target));
break;
}
case RelocInfo::WASM_STUB_CALL: {
@@ -244,7 +247,6 @@ void WasmCode::Validate() const {
CHECK_NOT_NULL(code);
#ifdef V8_EMBEDDED_BUILTINS
CHECK_EQ(WasmCode::kJumpTable, code->kind());
- CHECK_EQ(native_module()->runtime_stub_table_, code);
CHECK(code->contains(target));
#else
CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
@@ -385,8 +387,6 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
return "wasm-to-capi";
case WasmCode::kWasmToJsWrapper:
return "wasm-to-js";
- case WasmCode::kRuntimeStub:
- return "runtime-stub";
case WasmCode::kInterpreterEntry:
return "interpreter entry";
case WasmCode::kJumpTable:
@@ -430,6 +430,16 @@ void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
if (engine) engine->FreeDeadCode(dead_code);
}
+WasmCodeAllocator::OptionalLock::~OptionalLock() {
+ if (allocator_) allocator_->mutex_.Unlock();
+}
+
+void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
+ DCHECK(!is_locked());
+ allocator_ = allocator;
+ allocator->mutex_.Lock();
+}
+
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
VirtualMemory code_space,
bool can_request_more,
@@ -448,6 +458,11 @@ WasmCodeAllocator::~WasmCodeAllocator() {
committed_code_space());
}
+void WasmCodeAllocator::Init(NativeModule* native_module) {
+ DCHECK_EQ(1, owned_code_space_.size());
+ native_module->AddCodeSpace(owned_code_space_[0].region(), {});
+}
+
namespace {
// On Windows, we cannot commit a region that straddles different reservations
// of virtual memory. Because we bump-allocate, and because, if we need more
@@ -487,17 +502,70 @@ base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
#endif
return split_ranges;
}
+
+int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
+ return NativeModule::kNeedsFarJumpsBetweenCodeSpaces &&
+ FLAG_wasm_far_jump_table
+ ? static_cast<int>(num_declared_functions)
+ : 0;
+}
+
+// Returns an overapproximation of the code size overhead per new code space
+// created by the jump tables.
+size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
+ // Overhead for the jump table.
+ size_t overhead = RoundUp<kCodeAlignment>(
+ JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));
+
+#if defined(V8_OS_WIN64)
+ // On Win64, we need to reserve some pages at the beginning of an executable
+ // space. See {AddCodeSpace}.
+ overhead += Heap::GetCodeRangeReservedAreaSize();
+#endif // V8_OS_WIN64
+
+ // Overhead for the far jump table.
+ overhead +=
+ RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
+ WasmCode::kRuntimeStubCount,
+ NumWasmFunctionsInFarJumpTable(num_declared_functions)));
+
+ return overhead;
+}
+
+size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
+ size_t total_reserved) {
+ size_t overhead = OverheadPerCodeSpace(num_declared_functions);
+
+ // Reserve a power of two at least as big as any of
+ // a) needed size + overhead (this is the minimum needed)
+ // b) 2 * overhead (to not waste too much space by overhead)
+ // c) 1/4 of current total reservation size (to grow exponentially)
+ size_t reserve_size = base::bits::RoundUpToPowerOfTwo(
+ std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
+ 2 * overhead),
+ total_reserved / 4));
+
+ // Limit by the maximum supported code space size.
+ return std::min(kMaxWasmCodeSpaceSize, reserve_size);
+}
+
} // namespace
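The policy in {ReservationSize} grows reservations geometrically while keeping the jump-table overhead amortized. A rough standalone sketch of the same max-of-three rule; the constants and helper below are placeholders, not V8's values:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    constexpr size_t kMaxCodeSpace = size_t{1} << 30;  // hypothetical bound

    size_t RoundUpToPowerOfTwo(size_t value) {
      size_t result = 1;
      while (result < value) result <<= 1;
      return result;
    }

    // Mirrors the ReservationSize() policy: at least the needed size plus
    // overhead, at least twice the overhead, and at least a quarter of what is
    // already reserved, rounded up to a power of two and clamped to a maximum.
    size_t ReservationSizeSketch(size_t code_size, size_t overhead,
                                 size_t total_reserved) {
      size_t reserve = RoundUpToPowerOfTwo(
          std::max({code_size + overhead, 2 * overhead, total_reserved / 4}));
      return std::min(kMaxCodeSpace, reserve);
    }

    int main() {
      std::printf("%zu\n", ReservationSizeSketch(150 * 1024, 8 * 1024, 4 << 20));
    }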
Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
size_t size) {
return AllocateForCodeInRegion(
- native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()});
+ native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()},
+ WasmCodeAllocator::OptionalLock{});
}
Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
- NativeModule* native_module, size_t size, base::AddressRegion region) {
- base::MutexGuard lock(&mutex_);
+ NativeModule* native_module, size_t size, base::AddressRegion region,
+ const WasmCodeAllocator::OptionalLock& optional_lock) {
+ OptionalLock new_lock;
+ if (!optional_lock.is_locked()) new_lock.Lock(this);
+ const auto& locked_lock =
+ optional_lock.is_locked() ? optional_lock : new_lock;
+ DCHECK(locked_lock.is_locked());
DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
DCHECK_LT(0, size);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
@@ -517,12 +585,10 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
Address hint = owned_code_space_.empty() ? kNullAddress
: owned_code_space_.back().end();
- // Reserve at least 20% of the total generated code size so far, and of
- // course at least {size}. Round up to the next power of two.
size_t total_reserved = 0;
for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
- size_t reserve_size =
- base::bits::RoundUpToPowerOfTwo(std::max(size, total_reserved / 5));
+ size_t reserve_size = ReservationSize(
+ size, native_module->module()->num_declared_functions, total_reserved);
VirtualMemory new_mem =
code_manager_->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
if (!new_mem.IsReserved()) {
@@ -534,7 +600,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
code_manager_->AssignRange(new_region, native_module);
free_code_space_.Merge(new_region);
owned_code_space_.emplace_back(std::move(new_mem));
- native_module->AddCodeSpace(new_region);
+ native_module->AddCodeSpace(new_region, locked_lock);
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
@@ -660,10 +726,9 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
}
}
-base::AddressRegion WasmCodeAllocator::GetSingleCodeRegion() const {
+size_t WasmCodeAllocator::GetNumCodeSpaces() const {
base::MutexGuard lock(&mutex_);
- DCHECK_EQ(1, owned_code_space_.size());
- return owned_code_space_[0].region();
+ return owned_code_space_.size();
}
NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
@@ -689,27 +754,34 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
CompilationState::New(*shared_this, std::move(async_counters));
DCHECK_NOT_NULL(module_);
if (module_->num_declared_functions > 0) {
- code_table_.reset(new WasmCode* [module_->num_declared_functions] {});
+ code_table_ =
+ std::make_unique<WasmCode*[]>(module_->num_declared_functions);
}
- AddCodeSpace(code_allocator_.GetSingleCodeRegion());
+ code_allocator_.Init(this);
}
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
WasmCodeRefScope code_ref_scope;
- DCHECK_LE(num_functions(), max_functions);
- WasmCode** new_table = new WasmCode* [max_functions] {};
+ DCHECK_LE(module_->num_declared_functions, max_functions);
+ auto new_table = std::make_unique<WasmCode*[]>(max_functions);
if (module_->num_declared_functions > 0) {
- memcpy(new_table, code_table_.get(),
- module_->num_declared_functions * sizeof(*new_table));
+ memcpy(new_table.get(), code_table_.get(),
+ module_->num_declared_functions * sizeof(WasmCode*));
}
- code_table_.reset(new_table);
+ code_table_ = std::move(new_table);
- CHECK_EQ(1, code_space_data_.size());
+ base::AddressRegion single_code_space_region;
+ {
+ base::MutexGuard guard(&allocation_mutex_);
+ CHECK_EQ(1, code_space_data_.size());
+ single_code_space_region = code_space_data_[0].region;
+ }
// Re-allocate jump table.
- code_space_data_[0].jump_table = CreateEmptyJumpTableInRegion(
+ main_jump_table_ = CreateEmptyJumpTableInRegion(
JumpTableAssembler::SizeForNumberOfSlots(max_functions),
- code_space_data_[0].region);
- main_jump_table_ = code_space_data_[0].jump_table;
+ single_code_space_region, WasmCodeAllocator::OptionalLock{});
+ base::MutexGuard guard(&allocation_mutex_);
+ code_space_data_[0].jump_table = main_jump_table_;
}
void NativeModule::LogWasmCodes(Isolate* isolate) {
@@ -731,89 +803,6 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
- return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
-}
-
-void NativeModule::UseLazyStub(uint32_t func_index) {
- DCHECK_LE(module_->num_imported_functions, func_index);
- DCHECK_LT(func_index,
- module_->num_imported_functions + module_->num_declared_functions);
-
- if (!lazy_compile_table_) {
- uint32_t num_slots = module_->num_declared_functions;
- WasmCodeRefScope code_ref_scope;
- DCHECK_EQ(1, code_space_data_.size());
- lazy_compile_table_ = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
- code_space_data_[0].region);
- JumpTableAssembler::GenerateLazyCompileTable(
- lazy_compile_table_->instruction_start(), num_slots,
- module_->num_imported_functions,
- runtime_stub_entry(WasmCode::kWasmCompileLazy));
- }
-
- // Add jump table entry for jump to the lazy compile stub.
- uint32_t slot_index = func_index - module_->num_imported_functions;
- DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
- Address lazy_compile_target =
- lazy_compile_table_->instruction_start() +
- JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
- JumpTableAssembler::PatchJumpTableSlot(main_jump_table_->instruction_start(),
- slot_index, lazy_compile_target,
- WasmCode::kFlushICache);
-}
-
-// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
-// was removed and embedded builtins are no longer optional.
-void NativeModule::SetRuntimeStubs(Isolate* isolate) {
- DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]); // Only called once.
-#ifdef V8_EMBEDDED_BUILTINS
- WasmCodeRefScope code_ref_scope;
- DCHECK_EQ(1, code_space_data_.size());
- WasmCode* jump_table = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfStubSlots(WasmCode::kRuntimeStubCount),
- code_space_data_[0].region);
- Address base = jump_table->instruction_start();
- EmbeddedData embedded_data = EmbeddedData::FromBlob();
-#define RUNTIME_STUB(Name) Builtins::k##Name,
-#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
- Builtins::Name wasm_runtime_stubs[WasmCode::kRuntimeStubCount] = {
- WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
-#undef RUNTIME_STUB
-#undef RUNTIME_STUB_TRAP
- Address builtin_address[WasmCode::kRuntimeStubCount];
- for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
- Builtins::Name builtin = wasm_runtime_stubs[i];
- CHECK(embedded_data.ContainsBuiltin(builtin));
- builtin_address[i] = embedded_data.InstructionStartOfBuiltin(builtin);
- runtime_stub_entries_[i] =
- base + JumpTableAssembler::StubSlotIndexToOffset(i);
- }
- JumpTableAssembler::GenerateRuntimeStubTable(base, builtin_address,
- WasmCode::kRuntimeStubCount);
- DCHECK_NULL(runtime_stub_table_);
- runtime_stub_table_ = jump_table;
-#else // V8_EMBEDDED_BUILTINS
- HandleScope scope(isolate);
- WasmCodeRefScope code_ref_scope;
- USE(runtime_stub_table_); // Actually unused, but avoids ifdef's in header.
-#define COPY_BUILTIN(Name) \
- runtime_stub_entries_[WasmCode::k##Name] = \
- AddAndPublishAnonymousCode( \
- isolate->builtins()->builtin_handle(Builtins::k##Name), \
- WasmCode::kRuntimeStub, #Name) \
- ->instruction_start();
-#define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
- WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP)
-#undef COPY_BUILTIN_TRAP
-#undef COPY_BUILTIN
-#endif // V8_EMBEDDED_BUILTINS
- DCHECK_NE(kNullAddress, runtime_stub_entries_[0]);
-}
-
-WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
- WasmCode::Kind kind,
- const char* name) {
// For off-heap builtins, we create a copy of the off-heap instruction stream
// instead of the on-heap code object containing the trampoline. Ensure that
// we do not apply the on-heap reloc info to the off-heap instructions.
@@ -859,8 +848,10 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
code->InstructionStart();
int mode_mask = RelocInfo::kApplyMask |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
- Address constant_pool_start =
- reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset;
+ auto jump_tables_ref =
+ FindJumpTablesForCode(reinterpret_cast<Address>(dst_code_bytes.begin()));
+ Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
+ Address constant_pool_start = dst_code_addr + constant_pool_offset;
RelocIterator orig_it(*code, mode_mask);
for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
constant_pool_start, mode_mask);
@@ -869,8 +860,8 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
if (RelocInfo::IsWasmStubCall(mode)) {
uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
- Address entry = runtime_stub_entry(
- static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+ Address entry = GetNearRuntimeStubEntry(
+ static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
} else {
it.rinfo()->apply(delta);
@@ -880,7 +871,6 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
// Flush the i-cache after relocation.
FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
- DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry);
std::unique_ptr<WasmCode> new_code{new WasmCode{
this, // native_module
kAnonymousFuncIndex, // index
@@ -895,24 +885,63 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
OwnedVector<ProtectedInstructionData>{}, // protected_instructions
std::move(reloc_info), // reloc_info
std::move(source_pos), // source positions
- kind, // kind
+ WasmCode::kFunction, // kind
ExecutionTier::kNone}}; // tier
- new_code->MaybePrint(name);
+ new_code->MaybePrint(nullptr);
new_code->Validate();
return PublishCode(std::move(new_code));
}
+void NativeModule::UseLazyStub(uint32_t func_index) {
+ DCHECK_LE(module_->num_imported_functions, func_index);
+ DCHECK_LT(func_index,
+ module_->num_imported_functions + module_->num_declared_functions);
+
+ if (!lazy_compile_table_) {
+ uint32_t num_slots = module_->num_declared_functions;
+ WasmCodeRefScope code_ref_scope;
+ base::AddressRegion single_code_space_region;
+ {
+ base::MutexGuard guard(&allocation_mutex_);
+ DCHECK_EQ(1, code_space_data_.size());
+ single_code_space_region = code_space_data_[0].region;
+ }
+ lazy_compile_table_ = CreateEmptyJumpTableInRegion(
+ JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
+ single_code_space_region, WasmCodeAllocator::OptionalLock{});
+ JumpTableAssembler::GenerateLazyCompileTable(
+ lazy_compile_table_->instruction_start(), num_slots,
+ module_->num_imported_functions,
+ GetNearRuntimeStubEntry(
+ WasmCode::kWasmCompileLazy,
+ FindJumpTablesForCode(lazy_compile_table_->instruction_start())));
+ }
+
+ // Add jump table entry for jump to the lazy compile stub.
+ uint32_t slot_index = func_index - module_->num_imported_functions;
+ DCHECK_NULL(code_table_[slot_index]);
+ Address lazy_compile_target =
+ lazy_compile_table_->instruction_start() +
+ JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
+ base::MutexGuard guard(&allocation_mutex_);
+ PatchJumpTablesLocked(slot_index, lazy_compile_target);
+}
+
std::unique_ptr<WasmCode> NativeModule::AddCode(
uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
uint32_t tagged_parameter_slots,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier) {
- return AddCodeWithCodeSpace(
- index, desc, stack_slots, tagged_parameter_slots,
- std::move(protected_instructions), std::move(source_position_table), kind,
- tier, code_allocator_.AllocateForCode(this, desc.instr_size));
+ Vector<byte> code_space =
+ code_allocator_.AllocateForCode(this, desc.instr_size);
+ auto jump_table_ref =
+ FindJumpTablesForCode(reinterpret_cast<Address>(code_space.begin()));
+ return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
+ std::move(protected_instructions),
+ std::move(source_position_table), kind, tier,
+ code_space, jump_table_ref);
}
std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
@@ -920,7 +949,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
uint32_t tagged_parameter_slots,
OwnedVector<ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier, Vector<uint8_t> dst_code_bytes) {
+ ExecutionTier tier, Vector<uint8_t> dst_code_bytes,
+ const JumpTablesRef& jump_tables_ref) {
OwnedVector<byte> reloc_info;
if (desc.reloc_size > 0) {
reloc_info = OwnedVector<byte>::New(desc.reloc_size);
@@ -949,21 +979,21 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
int mode_mask = RelocInfo::kApplyMask |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
- Address constant_pool_start =
- reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset;
+ Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
+ Address constant_pool_start = code_start + constant_pool_offset;
for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
constant_pool_start, mode_mask);
!it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsWasmCall(mode)) {
uint32_t call_tag = it.rinfo()->wasm_call_tag();
- Address target = GetCallTargetForFunction(call_tag);
+ Address target = GetNearCallTargetForFunction(call_tag, jump_tables_ref);
it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsWasmStubCall(mode)) {
uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
- Address entry = runtime_stub_entry(
- static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+ Address entry = GetNearRuntimeStubEntry(
+ static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
} else {
it.rinfo()->apply(delta);
@@ -1036,12 +1066,9 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// Populate optimized code to the jump table unless there is an active
// redirection to the interpreter that should be preserved.
- DCHECK_IMPLIES(
- main_jump_table_ == nullptr,
- engine_->code_manager()->IsImplicitAllocationsDisabledForTesting());
- bool update_jump_table = update_code_table &&
- !has_interpreter_redirection(code->index()) &&
- main_jump_table_;
+ DCHECK_NOT_NULL(main_jump_table_);
+ bool update_jump_table =
+ update_code_table && !has_interpreter_redirection(code->index());
// Ensure that interpreter entries always populate to the jump table.
if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
@@ -1050,9 +1077,7 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
}
if (update_jump_table) {
- JumpTableAssembler::PatchJumpTableSlot(
- main_jump_table_->instruction_start(), slot_idx,
- code->instruction_start(), WasmCode::kFlushICache);
+ PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
}
WasmCodeRefScope::AddRef(code.get());
@@ -1120,11 +1145,12 @@ WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
}
WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
- uint32_t jump_table_size, base::AddressRegion region) {
+ uint32_t jump_table_size, base::AddressRegion region,
+ const WasmCodeAllocator::OptionalLock& allocator_lock) {
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
- Vector<uint8_t> code_space =
- code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
+ Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
+ this, jump_table_size, region, allocator_lock);
DCHECK(!code_space.empty());
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{new WasmCode{
@@ -1146,12 +1172,63 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
return PublishCode(std::move(code));
}
-void NativeModule::AddCodeSpace(base::AddressRegion region) {
+void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
+ // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
+ DCHECK(!allocation_mutex_.TryLock());
+
+ for (auto& code_space_data : code_space_data_) {
+ DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
+ if (!code_space_data.jump_table) continue;
+ PatchJumpTableLocked(code_space_data, slot_index, target);
+ }
+}
+
+void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
+ uint32_t slot_index, Address target) {
+ // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
+ DCHECK(!allocation_mutex_.TryLock());
+
+ DCHECK_NOT_NULL(code_space_data.jump_table);
+ DCHECK_NOT_NULL(code_space_data.far_jump_table);
+
+ DCHECK_LT(slot_index, module_->num_declared_functions);
+ Address jump_table_slot =
+ code_space_data.jump_table->instruction_start() +
+ JumpTableAssembler::JumpSlotIndexToOffset(slot_index);
+ uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
+ WasmCode::kRuntimeStubCount + slot_index);
+ // Only pass the far jump table start if the far jump table actually has a
+ // slot for this function index (i.e. does not only contain runtime stubs).
+ bool has_far_jump_slot =
+ far_jump_table_offset <
+ code_space_data.far_jump_table->instructions().size();
+ Address far_jump_table_start =
+ code_space_data.far_jump_table->instruction_start();
+ Address far_jump_table_slot =
+ has_far_jump_slot ? far_jump_table_start + far_jump_table_offset
+ : kNullAddress;
+ JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, far_jump_table_slot,
+ target);
+}
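The far jump table starts with one slot per runtime stub and may be followed by one slot per declared function, so the patch only touches a per-function far slot when the table was built with those extra slots. A small sketch of that bounds check, with hypothetical slot sizes and counts:

    #include <cstdint>
    #include <cstdio>

    // Illustrative layout: runtime stub slots first, then (optionally) one
    // slot per declared function. The slot size and stub count are invented.
    constexpr uint32_t kFarJumpSlotSize = 16;
    constexpr uint32_t kRuntimeStubCount = 12;

    uint32_t FarJumpSlotIndexToOffset(uint32_t index) {
      return index * kFarJumpSlotSize;
    }

    // Mirrors the has_far_jump_slot check above: a per-function far slot only
    // exists if its offset fits inside the far jump table.
    bool HasFarJumpSlot(uint32_t slot_index, uint32_t far_table_byte_size) {
      uint32_t offset = FarJumpSlotIndexToOffset(kRuntimeStubCount + slot_index);
      return offset < far_table_byte_size;
    }

    int main() {
      uint32_t stubs_only = kRuntimeStubCount * kFarJumpSlotSize;
      uint32_t with_functions = (kRuntimeStubCount + 100) * kFarJumpSlotSize;
      std::printf("stubs only: %d, with functions: %d\n",
                  HasFarJumpSlot(0, stubs_only),
                  HasFarJumpSlot(0, with_functions));
    }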
+
+void NativeModule::AddCodeSpace(
+ base::AddressRegion region,
+ const WasmCodeAllocator::OptionalLock& allocator_lock) {
+#ifndef V8_EMBEDDED_BUILTINS
+ // The far jump table contains far jumps to the embedded builtins. This
+ // requires a build with embedded builtins enabled.
+ FATAL(
+ "WebAssembly is not supported in no-embed builds. no-embed builds are "
+ "deprecated. See\n"
+ " - https://groups.google.com/d/msg/v8-users/9F53xqBjpkI/9WmKSbcWBAAJ\n"
+ " - https://crbug.com/v8/8519\n"
+ " - https://crbug.com/v8/8531\n");
+#endif // V8_EMBEDDED_BUILTINS
+
// Each code space must be at least twice as large as the overhead per code
// space. Otherwise, we are wasting too much memory.
- const bool is_first_code_space = code_space_data_.empty();
- const bool implicit_alloc_disabled =
- engine_->code_manager()->IsImplicitAllocationsDisabledForTesting();
+ DCHECK_GE(region.size(),
+ 2 * OverheadPerCodeSpace(module()->num_declared_functions));
#if defined(V8_OS_WIN64)
// On some platforms, specifically Win64, we need to reserve some pages at
@@ -1160,32 +1237,82 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
// https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
// for details.
if (engine_->code_manager()
- ->CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- !implicit_alloc_disabled) {
+ ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
size_t size = Heap::GetCodeRangeReservedAreaSize();
DCHECK_LT(0, size);
- Vector<byte> padding = code_allocator_.AllocateForCode(this, size);
- CHECK(region.contains(reinterpret_cast<Address>(padding.begin()),
- padding.size()));
+ Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
+ this, size, region, allocator_lock);
+ CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
+ win64_unwindinfo::RegisterNonABICompliantCodeRange(
+ reinterpret_cast<void*>(region.begin()), region.size());
}
#endif // V8_OS_WIN64
WasmCodeRefScope code_ref_scope;
WasmCode* jump_table = nullptr;
+ WasmCode* far_jump_table = nullptr;
const uint32_t num_wasm_functions = module_->num_declared_functions;
const bool has_functions = num_wasm_functions > 0;
+ const bool is_first_code_space = code_space_data_.empty();
+ // TODO(clemensb): Avoid additional jump table if the code space is close
+ // enough to another existing code space.
const bool needs_jump_table =
- has_functions && is_first_code_space && !implicit_alloc_disabled;
+ has_functions && (kNeedsFarJumpsBetweenCodeSpaces || is_first_code_space);
if (needs_jump_table) {
jump_table = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
+ JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
+ allocator_lock);
CHECK(region.contains(jump_table->instruction_start()));
}
+ // Always allocate a far jump table, because it contains the runtime stubs.
+ int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
+ far_jump_table = CreateEmptyJumpTableInRegion(
+ JumpTableAssembler::SizeForNumberOfFarJumpSlots(
+ WasmCode::kRuntimeStubCount, num_function_slots),
+ region, allocator_lock);
+ CHECK(region.contains(far_jump_table->instruction_start()));
+ EmbeddedData embedded_data = EmbeddedData::FromBlob();
+#define RUNTIME_STUB(Name) Builtins::k##Name,
+#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
+ Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
+ WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
+#undef RUNTIME_STUB
+#undef RUNTIME_STUB_TRAP
+ Address builtin_addresses[WasmCode::kRuntimeStubCount];
+ for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
+ Builtins::Name builtin = stub_names[i];
+ CHECK(embedded_data.ContainsBuiltin(builtin));
+ builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
+ }
+ JumpTableAssembler::GenerateFarJumpTable(
+ far_jump_table->instruction_start(), builtin_addresses,
+ WasmCode::kRuntimeStubCount, num_function_slots);
+
if (is_first_code_space) main_jump_table_ = jump_table;
- code_space_data_.push_back(CodeSpaceData{region, jump_table});
+ base::MutexGuard guard(&allocation_mutex_);
+ code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
+
+ if (jump_table && !is_first_code_space) {
+ // Patch the new jump table(s) with existing functions. If this is the first
+ // code space, there cannot be any functions that have been compiled yet.
+ const CodeSpaceData& new_code_space_data = code_space_data_.back();
+ for (uint32_t slot_index = 0; slot_index < num_wasm_functions;
+ ++slot_index) {
+ if (code_table_[slot_index]) {
+ PatchJumpTableLocked(new_code_space_data, slot_index,
+ code_table_[slot_index]->instruction_start());
+ } else if (lazy_compile_table_) {
+ Address lazy_compile_target =
+ lazy_compile_table_->instruction_start() +
+ JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
+ PatchJumpTableLocked(new_code_space_data, slot_index,
+ lazy_compile_target);
+ }
+ }
+ }
}
namespace {
@@ -1241,26 +1368,86 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
return main_jump_table_->instruction_start() + slot_offset;
}
+NativeModule::JumpTablesRef NativeModule::FindJumpTablesForCode(
+ Address code_addr) const {
+ base::MutexGuard guard(&allocation_mutex_);
+ for (auto& code_space_data : code_space_data_) {
+ const bool jump_table_reachable =
+ !kNeedsFarJumpsBetweenCodeSpaces ||
+ code_space_data.region.contains(code_addr);
+ if (jump_table_reachable && code_space_data.far_jump_table) {
+ // We might not have a jump table if we have no functions.
+ return {code_space_data.jump_table
+ ? code_space_data.jump_table->instruction_start()
+ : kNullAddress,
+ code_space_data.far_jump_table->instruction_start()};
+ }
+ }
+ FATAL("code_addr is not part of a code space");
+}
+
+Address NativeModule::GetNearCallTargetForFunction(
+ uint32_t func_index, const JumpTablesRef& jump_tables) const {
+ uint32_t slot_offset = GetJumpTableOffset(func_index);
+ return jump_tables.jump_table_start + slot_offset;
+}
+
+Address NativeModule::GetNearRuntimeStubEntry(
+ WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const {
+ auto offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index);
+ return jump_tables.far_jump_table_start + offset;
+}
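A simplified sketch of the lookup that {FindJumpTablesForCode} performs: on architectures that need far jumps between code spaces, pick the jump tables of the code space containing the code address; otherwise any code space with a far jump table will do. All types and values below are illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;

    struct CodeSpace {
      uintptr_t begin;
      uintptr_t end;
      uintptr_t jump_table_start;      // 0 if this space has no jump table
      uintptr_t far_jump_table_start;  // always present in this sketch
    };

    struct JumpTablesRef {
      uintptr_t jump_table_start;
      uintptr_t far_jump_table_start;
    };

    // Pick the jump tables reachable from code_addr, as the lookup above does.
    bool FindJumpTables(const std::vector<CodeSpace>& spaces,
                        uintptr_t code_addr, JumpTablesRef* out) {
      for (const CodeSpace& space : spaces) {
        bool reachable = !kNeedsFarJumpsBetweenCodeSpaces ||
                         (code_addr >= space.begin && code_addr < space.end);
        if (!reachable) continue;
        *out = {space.jump_table_start, space.far_jump_table_start};
        return true;
      }
      return false;  // code_addr is not part of any known code space
    }

    int main() {
      std::vector<CodeSpace> spaces = {{0x1000, 0x2000, 0x1100, 0x1200},
                                       {0x8000, 0x9000, 0x8100, 0x8200}};
      JumpTablesRef ref{};
      if (FindJumpTables(spaces, 0x8500, &ref)) {
        std::printf("jump table at %#lx\n",
                    static_cast<unsigned long>(ref.jump_table_start));
      }
    }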
+
uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
Address slot_address) const {
- DCHECK(is_jump_table_slot(slot_address));
- uint32_t slot_offset = static_cast<uint32_t>(
- slot_address - main_jump_table_->instruction_start());
+ WasmCodeRefScope code_refs;
+ WasmCode* code = Lookup(slot_address);
+ DCHECK_NOT_NULL(code);
+ DCHECK_EQ(WasmCode::kJumpTable, code->kind());
+ uint32_t slot_offset =
+ static_cast<uint32_t>(slot_address - code->instruction_start());
uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
DCHECK_LT(slot_idx, module_->num_declared_functions);
+ DCHECK_EQ(slot_address,
+ code->instruction_start() +
+ JumpTableAssembler::JumpSlotIndexToOffset(slot_idx));
return module_->num_imported_functions + slot_idx;
}
-const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const {
-#define RETURN_NAME(Name) \
- if (runtime_stub_entries_[WasmCode::k##Name] == runtime_stub_entry) { \
- return #Name; \
+WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
+ base::MutexGuard guard(&allocation_mutex_);
+
+ for (auto& code_space_data : code_space_data_) {
+ if (code_space_data.far_jump_table->contains(target)) {
+ uint32_t offset = static_cast<uint32_t>(
+ target - code_space_data.far_jump_table->instruction_start());
+ uint32_t index = JumpTableAssembler::FarJumpSlotOffsetToIndex(offset);
+ if (index >= WasmCode::kRuntimeStubCount) continue;
+ if (JumpTableAssembler::FarJumpSlotIndexToOffset(index) != offset) {
+ continue;
+ }
+ return static_cast<WasmCode::RuntimeStubId>(index);
+ }
}
-#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
- WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
-#undef RETURN_NAME_TRAP
-#undef RETURN_NAME
- return "<unknown>";
+
+ // Invalid address.
+ return WasmCode::kRuntimeStubCount;
+}
+
+const char* NativeModule::GetRuntimeStubName(Address target) const {
+ WasmCode::RuntimeStubId stub_id = GetRuntimeStubId(target);
+
+#define RUNTIME_STUB_NAME(Name) #Name,
+#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
+ constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
+ RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
+#undef RUNTIME_STUB_NAME
+#undef RUNTIME_STUB_NAME_TRAP
+ STATIC_ASSERT(arraysize(runtime_stub_names) ==
+ WasmCode::kRuntimeStubCount + 1);
+
+ DCHECK_GT(arraysize(runtime_stub_names), stub_id);
+ return runtime_stub_names[stub_id];
}
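The name lookup builds a static table from the stub list macro, with one trailing "<unknown>" entry indexed by {kRuntimeStubCount} for invalid addresses. A tiny self-contained example of the same X-macro technique, using an invented stub list:

    #include <cstdio>

    // Invented stub list for illustration; V8 uses WASM_RUNTIME_STUB_LIST.
    #define MY_STUB_LIST(V) V(StackGuard) V(CompileLazy) V(Trap)

    // Generate the enum and a parallel name table from the same list, plus one
    // extra "<unknown>" entry used for out-of-range ids.
    #define ENUM_ENTRY(Name) kStub##Name,
    enum StubId { MY_STUB_LIST(ENUM_ENTRY) kStubCount };
    #undef ENUM_ENTRY

    #define NAME_ENTRY(Name) #Name,
    constexpr const char* kStubNames[] = {MY_STUB_LIST(NAME_ENTRY) "<unknown>"};
    #undef NAME_ENTRY

    static_assert(sizeof(kStubNames) / sizeof(kStubNames[0]) == kStubCount + 1,
                  "name table must cover every stub plus the fallback entry");

    const char* StubName(StubId id) { return kStubNames[id]; }

    int main() {
      std::printf("%s %s\n", StubName(kStubCompileLazy), StubName(kStubCount));
    }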
NativeModule::~NativeModule() {
@@ -1275,10 +1462,8 @@ NativeModule::~NativeModule() {
import_wrapper_cache_.reset();
}
-WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
- size_t max_committed)
- : memory_tracker_(memory_tracker),
- max_committed_code_space_(max_committed),
+WasmCodeManager::WasmCodeManager(size_t max_committed)
+ : max_committed_code_space_(max_committed),
critical_committed_code_space_(max_committed / 2) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}
@@ -1350,12 +1535,12 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK_GT(size, 0);
size_t allocate_page_size = page_allocator->AllocatePageSize();
size = RoundUp(size, allocate_page_size);
- if (!memory_tracker_->ReserveAddressSpace(size)) return {};
+ if (!BackingStore::ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
VirtualMemory mem(page_allocator, size, hint, allocate_page_size);
if (!mem.IsReserved()) {
- memory_tracker_->ReleaseReservation(size);
+ BackingStore::ReleaseReservation(size);
return {};
}
TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
@@ -1369,13 +1554,6 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
return mem;
}
-void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
- // This has to be set before committing any memory.
- DCHECK_EQ(0, total_committed_code_space_.load());
- max_committed_code_space_ = limit;
- critical_committed_code_space_.store(limit / 2);
-}
-
// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
constexpr size_t kCodeSizeMultiplier = 4;
@@ -1387,8 +1565,6 @@ size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
for (auto& function : module->functions) {
estimate += kCodeOverhead + kCodeSizeMultiplier * function.code.length();
}
- estimate +=
- JumpTableAssembler::SizeForNumberOfSlots(module->num_declared_functions);
estimate += kImportSize * module->num_imported_functions;
return estimate;
@@ -1425,9 +1601,20 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
committed + (max_committed_code_space_ - committed) / 2);
}
- // If the code must be contiguous, reserve enough address space up front.
+ // If we cannot add code space later, reserve enough address space up front.
size_t code_vmem_size =
- kRequiresCodeRange ? kMaxWasmCodeMemory : code_size_estimate;
+ can_request_more ? ReservationSize(code_size_estimate,
+ module->num_declared_functions, 0)
+ : kMaxWasmCodeSpaceSize;
+
+ // The '--wasm-max-initial-code-space-reservation' testing flag can be used
+ // to reduce the maximum size of the initial code space reservation (in MB).
+ if (FLAG_wasm_max_initial_code_space_reservation > 0) {
+ size_t flag_max_bytes =
+ static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB;
+ if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
+ }
+
// Try up to two times; getting rid of dead JSArrayBuffer allocations might
// require two GCs because the first GC maybe incremental and may have
// floating garbage.
@@ -1456,14 +1643,6 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
size);
-#if defined(V8_OS_WIN64)
- if (CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- !implicit_allocations_disabled_for_testing_) {
- win64_unwindinfo::RegisterNonABICompliantCodeRange(
- reinterpret_cast<void*>(start), size);
- }
-#endif // V8_OS_WIN64
-
base::MutexGuard lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
return ret;
@@ -1519,6 +1698,9 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
}
Vector<byte> code_space =
code_allocator_.AllocateForCode(this, total_code_space);
+ // Lookup the jump tables to use once, then use for all code objects.
+ auto jump_tables_ref =
+ FindJumpTablesForCode(reinterpret_cast<Address>(code_space.begin()));
std::vector<std::unique_ptr<WasmCode>> generated_code;
generated_code.reserve(results.size());
@@ -1533,7 +1715,7 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots, std::move(result.protected_instructions),
std::move(result.source_positions), GetCodeKind(result),
- result.result_tier, this_code_space));
+ result.result_tier, this_code_space, jump_tables_ref));
}
DCHECK_EQ(0, code_space.size());
@@ -1567,6 +1749,10 @@ void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
}
}
+size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
+ return code_allocator_.GetNumCodeSpaces();
+}
+
void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
size_t committed_size) {
base::MutexGuard lock(&native_modules_mutex_);
@@ -1576,15 +1762,14 @@ void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
code_space.address(), code_space.end(), code_space.size());
#if defined(V8_OS_WIN64)
- if (CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- !implicit_allocations_disabled_for_testing_) {
+ if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
win64_unwindinfo::UnregisterNonABICompliantCodeRange(
reinterpret_cast<void*>(code_space.address()));
}
#endif // V8_OS_WIN64
lookup_map_.erase(code_space.address());
- memory_tracker_->ReleaseReservation(code_space.size());
+ BackingStore::ReleaseReservation(code_space.size());
code_space.Free();
DCHECK(!code_space.IsReserved());
}
@@ -1616,7 +1801,7 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
}
// TODO(v8:7424): Code protection scopes are not yet supported with shared code
-// enabled and need to be revisited to work with --wasm-shared-code as well.
+// enabled and need to be revisited.
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index c2e5249e5e..7deea9032a 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -39,7 +39,6 @@ class NativeModule;
class WasmCodeManager;
struct WasmCompilationResult;
class WasmEngine;
-class WasmMemoryTracker;
class WasmImportWrapperCache;
struct WasmModule;
@@ -79,7 +78,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
kFunction,
kWasmToCapiWrapper,
kWasmToJsWrapper,
- kRuntimeStub,
kInterpreterEntry,
kJumpTable
};
@@ -282,11 +280,33 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
public:
+ // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
+ // indicate that the lock on the {WasmCodeAllocator} is already taken. It is
+ // optional so that methods can also be called without holding the lock.
+ class OptionalLock {
+ public:
+ // External users can only instantiate a non-locked {OptionalLock}.
+ OptionalLock() = default;
+ ~OptionalLock();
+ bool is_locked() const { return allocator_ != nullptr; }
+
+ private:
+ friend class WasmCodeAllocator;
+ // {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
+ // is passed.
+ void Lock(WasmCodeAllocator*);
+
+ WasmCodeAllocator* allocator_ = nullptr;
+ };
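A stripped-down analogue of this {OptionalLock} pattern over std::mutex, to show the intended call shapes; the class and method names below are illustrative, not the V8 API. External callers pass a default-constructed (non-locked) instance and the callee locks on demand, while internal callers that already hold the mutex pass a locked one:

    #include <cstdio>
    #include <mutex>

    class Allocator {
     public:
      class OptionalLock {
       public:
        OptionalLock() = default;
        ~OptionalLock() {
          if (allocator_) allocator_->mutex_.unlock();
        }
        bool is_locked() const { return allocator_ != nullptr; }

       private:
        friend class Allocator;
        void Lock(Allocator* allocator) {
          allocator_ = allocator;
          allocator->mutex_.lock();
        }
        Allocator* allocator_ = nullptr;
      };

      void Allocate(const OptionalLock& optional_lock) {
        OptionalLock new_lock;
        // Take the lock only if the caller does not hold it already.
        if (!optional_lock.is_locked()) new_lock.Lock(this);
        // The mutex is held in either case; do the allocation work here.
        std::puts("allocating under the lock");
      }

     private:
      std::mutex mutex_;
    };

    int main() {
      Allocator allocator;
      allocator.Allocate(Allocator::OptionalLock{});  // callee takes the lock
    }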
+
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
bool can_request_more,
std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
+ // Call before use, after the {NativeModule} is set up completely.
+ void Init(NativeModule*);
+
size_t committed_code_space() const {
return committed_code_space_.load(std::memory_order_acquire);
}
@@ -303,7 +323,8 @@ class WasmCodeAllocator {
// Allocate code space within a specific region. Returns a valid buffer or
// fails with OOM (crash).
Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
- base::AddressRegion);
+ base::AddressRegion,
+ const WasmCodeAllocator::OptionalLock&);
// Sets permissions of all owned code space to executable, or read-write (if
// {executable} is false). Returns true on success.
@@ -312,9 +333,8 @@ class WasmCodeAllocator {
// Free memory pages of all given code objects. Used for wasm code GC.
void FreeCode(Vector<WasmCode* const>);
- // Returns the region of the single code space managed by this code allocator.
- // Will fail if more than one code space has been created.
- base::AddressRegion GetSingleCodeRegion() const;
+ // Retrieve the number of separately reserved code spaces.
+ size_t GetNumCodeSpaces() const;
private:
// The engine-wide wasm code manager.
@@ -344,6 +364,8 @@ class WasmCodeAllocator {
bool is_executable_ = false;
+ // TODO(clemensb): Remove this field once multiple code spaces are supported
+ // everywhere.
const bool can_request_more_memory_;
std::shared_ptr<Counters> async_counters_;
@@ -352,9 +374,9 @@ class WasmCodeAllocator {
class V8_EXPORT_PRIVATE NativeModule final {
public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
- static constexpr bool kCanAllocateMoreMemory = false;
+ static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
#else
- static constexpr bool kCanAllocateMoreMemory = true;
+ static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
@@ -394,11 +416,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// table with trampolines accordingly.
void UseLazyStub(uint32_t func_index);
- // Initializes all runtime stubs by setting up entry addresses in the runtime
- // stub table. It must be called exactly once per native module before adding
- // other WasmCode so that runtime stub ids can be resolved during relocation.
- void SetRuntimeStubs(Isolate* isolate);
-
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
@@ -409,13 +426,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
WasmModuleSourceMap* GetWasmSourceMap() const;
- Address runtime_stub_entry(WasmCode::RuntimeStubId index) const {
- DCHECK_LT(index, WasmCode::kRuntimeStubCount);
- Address entry_address = runtime_stub_entries_[index];
- DCHECK_NE(kNullAddress, entry_address);
- return entry_address;
- }
-
Address jump_table_start() const {
return main_jump_table_ ? main_jump_table_->instruction_start()
: kNullAddress;
@@ -423,16 +433,33 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t GetJumpTableOffset(uint32_t func_index) const;
- bool is_jump_table_slot(Address address) const {
- return main_jump_table_->contains(address);
- }
-
// Returns the canonical target to call for the given function (the slot in
// the first jump table).
Address GetCallTargetForFunction(uint32_t func_index) const;
- // Reverse lookup from a given call target (i.e. a jump table slot as the
- // above {GetCallTargetForFunction} returns) to a function index.
+ struct JumpTablesRef {
+ const Address jump_table_start;
+ const Address far_jump_table_start;
+ };
+
+ // Finds the jump tables that should be used for the code at {code_addr}. This
+ // information is then passed to {GetNearCallTargetForFunction} and
+ // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
+ // up there.
+ JumpTablesRef FindJumpTablesForCode(Address code_addr) const;
+
+ // Similarly to {GetCallTargetForFunction}, but uses the jump table previously
+ // looked up via {FindJumpTablesForCode}.
+ Address GetNearCallTargetForFunction(uint32_t func_index,
+ const JumpTablesRef&) const;
+
+ // Get a runtime stub entry (which is a far jump table slot) in the jump table
+ // previously looked up via {FindJumpTablesForCode}.
+ Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
+ const JumpTablesRef&) const;
+
+ // Reverse lookup from a given call target (which must be a jump table slot)
+ // to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
bool SetExecutable(bool executable) {
@@ -481,7 +508,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
const WasmFeatures& enabled_features() const { return enabled_features_; }
- const char* GetRuntimeStubName(Address runtime_stub_entry) const;
+ // Returns the runtime stub id that corresponds to the given address (which
+ // must be a far jump table slot). Returns {kRuntimeStubCount} on failure.
+ WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;
+
+ const char* GetRuntimeStubName(Address runtime_stub_target) const;
// Sample the current code size of this module to the given counters.
enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
@@ -501,6 +532,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// its accounting.
void FreeCode(Vector<WasmCode* const>);
+ // Retrieve the number of separately reserved code spaces for this module.
+ size_t GetNumberOfCodeSpacesForTesting() const;
+
private:
friend class WasmCode;
friend class WasmCodeAllocator;
@@ -510,6 +544,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
struct CodeSpaceData {
base::AddressRegion region;
WasmCode* jump_table;
+ WasmCode* far_jump_table;
};
// Private constructor, called via {WasmCodeManager::NewNativeModule()}.
@@ -525,17 +560,23 @@ class V8_EXPORT_PRIVATE NativeModule final {
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier, Vector<uint8_t> code_space);
+ ExecutionTier tier, Vector<uint8_t> code_space,
+ const JumpTablesRef& jump_tables_ref);
- // Add and publish anonymous code.
- WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind,
- const char* name = nullptr);
+ WasmCode* CreateEmptyJumpTableInRegion(
+ uint32_t jump_table_size, base::AddressRegion,
+ const WasmCodeAllocator::OptionalLock&);
- WasmCode* CreateEmptyJumpTableInRegion(uint32_t jump_table_size,
- base::AddressRegion);
+ // Hold the {allocation_mutex_} when calling one of these methods.
+ // {slot_index} is the index in the declared functions, i.e. function index
+ // minus the number of imported functions.
+ void PatchJumpTablesLocked(uint32_t slot_index, Address target);
+ void PatchJumpTableLocked(const CodeSpaceData&, uint32_t slot_index,
+ Address target);
// Called by the {WasmCodeAllocator} to register a new code space.
- void AddCodeSpace(base::AddressRegion);
+ void AddCodeSpace(base::AddressRegion,
+ const WasmCodeAllocator::OptionalLock&);
// Hold the {allocation_mutex_} when calling this method.
bool has_interpreter_redirection(uint32_t func_index) {
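A small sketch of the {slot_index} convention described above, assuming a declared function index {func_index} and the owning {WasmModule*} {module}:

    // Jump table slots exist only for declared (non-imported) functions, so
    // the slot index is the function index minus the imports.
    uint32_t slot_index = func_index - module->num_imported_functions;
    // Requires holding {allocation_mutex_}, per the comment above.
    PatchJumpTablesLocked(slot_index, new_target);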
@@ -580,12 +621,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {WireBytesStorage}, held by background compile tasks.
std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
- // Contains entry points for runtime stub calls via {WASM_STUB_CALL}.
- Address runtime_stub_entries_[WasmCode::kRuntimeStubCount] = {kNullAddress};
-
- // Jump table used for runtime stubs (i.e. trampolines to embedded builtins).
- WasmCode* runtime_stub_table_ = nullptr;
-
// Jump table used by external calls (from JS). Wasm calls use one of the jump
// tables stored in {code_space_data_}.
WasmCode* main_jump_table_ = nullptr;
@@ -612,7 +647,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
// instruction start address of the value.
std::map<Address, std::unique_ptr<WasmCode>> owned_code_;
- std::unique_ptr<WasmCode* []> code_table_;
+ // Table of the latest code object per function, updated on initial
+ // compilation and tier up. The number of entries is
+ // {WasmModule::num_declared_functions}, i.e. there are no entries for
+ // imported functions.
+ std::unique_ptr<WasmCode*[]> code_table_;
// Null if no redirections exist, otherwise a bitset over all functions in
// this module marking those functions that have been redirected.
@@ -634,8 +673,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
- explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
- size_t max_committed);
+ explicit WasmCodeManager(size_t max_committed);
#ifdef DEBUG
~WasmCodeManager() {
@@ -654,16 +692,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
return total_committed_code_space_.load();
}
- void SetMaxCommittedMemoryForTesting(size_t limit);
-
- void DisableImplicitAllocationsForTesting() {
- implicit_allocations_disabled_for_testing_ = true;
- }
-
- bool IsImplicitAllocationsDisabledForTesting() const {
- return implicit_allocations_disabled_for_testing_;
- }
-
static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
@@ -686,11 +714,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void AssignRange(base::AddressRegion, NativeModule*);
- WasmMemoryTracker* const memory_tracker_;
-
- size_t max_committed_code_space_;
-
- bool implicit_allocations_disabled_for_testing_ = false;
+ const size_t max_committed_code_space_;
std::atomic<size_t> total_committed_code_space_{0};
// If the committed code space exceeds {critical_committed_code_space_}, then
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index fbbe19396c..2b5cb6c9ec 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -81,6 +81,7 @@ enum SectionCode : int8_t {
// to be consistent.
kNameSectionCode, // Name section (encoded as a string)
kSourceMappingURLSectionCode, // Source Map URL section
+ kDebugInfoSectionCode, // DWARF section .debug_info
kCompilationHintsSectionCode, // Compilation hints section
// Helper values
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 2955bc602f..ea989c081d 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -184,7 +184,7 @@ class InterpreterHandle {
argument_values.begin());
bool finished = false;
while (!finished) {
- // TODO(clemensh): Add occasional StackChecks.
+ // TODO(clemensb): Add occasional StackChecks.
WasmInterpreter::State state = ContinueExecution(thread);
switch (state) {
case WasmInterpreter::State::PAUSED:
@@ -277,9 +277,10 @@ class InterpreterHandle {
if (isolate_->debug()->break_points_active()) {
Handle<WasmModuleObject> module_object(
GetInstanceObject()->module_object(), isolate_);
+ Handle<Script> script(module_object->script(), isolate_);
int position = GetTopPosition(module_object);
Handle<FixedArray> breakpoints;
- if (WasmModuleObject::CheckBreakPoints(isolate_, module_object, position)
+ if (WasmModuleObject::CheckBreakPoints(isolate_, script, position)
.ToHandle(&breakpoints)) {
// We hit one or several breakpoints. Clear stepping, notify the
// listeners and return.
@@ -318,7 +319,8 @@ class InterpreterHandle {
DCHECK_LT(0, thread->GetFrameCount());
auto frame = thread->GetFrame(thread->GetFrameCount() - 1);
- return module_object->GetFunctionOffset(frame->function()->func_index) +
+ return GetWasmFunctionOffset(module_object->module(),
+ frame->function()->func_index) +
frame->pc();
}
@@ -502,9 +504,11 @@ wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo debug_info) {
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
DCHECK(!instance->has_debug_info());
Factory* factory = instance->GetIsolate()->factory();
+ Handle<Cell> stack_cell = factory->NewCell(factory->empty_fixed_array());
Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(
factory->NewStruct(WASM_DEBUG_INFO_TYPE, AllocationType::kOld));
debug_info->set_wasm_instance(*instance);
+ debug_info->set_interpreter_reference_stack(*stack_cell);
instance->set_debug_info(*debug_info);
return debug_info;
}
@@ -524,6 +528,7 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
return interp_handle->raw()->interpreter();
}
+// static
void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
int func_index, int offset) {
Isolate* isolate = debug_info->GetIsolate();
@@ -533,6 +538,18 @@ void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
handle->interpreter()->SetBreakpoint(func, offset, true);
}
+// static
+void WasmDebugInfo::ClearBreakpoint(Handle<WasmDebugInfo> debug_info,
+ int func_index, int offset) {
+ Isolate* isolate = debug_info->GetIsolate();
+ auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
+  // TODO(leese): If there are no more breakpoints left, it would be good to
+ // undo redirecting to the interpreter.
+ const wasm::WasmFunction* func = &handle->module()->functions[func_index];
+ handle->interpreter()->SetBreakpoint(func, offset, false);
+}
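A hypothetical caller pairing the new entry point with the existing one, assuming a {Handle<WasmDebugInfo>} {debug_info} and a valid {func_index}/{offset}:

    // Both calls route to the interpreter, which stores the breakpoint flag
    // per (function, offset); ClearBreakpoint simply passes {false}.
    WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset);
    WasmDebugInfo::ClearBreakpoint(debug_info, func_index, offset);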
+
+// static
void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
Vector<int> func_indexes) {
Isolate* isolate = debug_info->GetIsolate();
@@ -635,8 +652,8 @@ Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
if (index == -1) {
index = static_cast<int32_t>(map->FindOrInsert(*sig));
if (index == entries->length()) {
- entries = isolate->factory()->CopyFixedArrayAndGrow(
- entries, entries->length(), AllocationType::kOld);
+ entries =
+ isolate->factory()->CopyFixedArrayAndGrow(entries, entries->length());
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index).IsUndefined(isolate));
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 97111f8349..adb566cb41 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -211,8 +211,7 @@ struct WasmEngine::NativeModuleInfo {
int8_t num_code_gcs_triggered = 0;
};
-WasmEngine::WasmEngine()
- : code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {}
+WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}
WasmEngine::~WasmEngine() {
// Synchronize on all background compile tasks.
@@ -307,7 +306,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
CreateWasmScript(isolate, bytes, native_module->module()->source_map_url);
// Create the module object.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // TODO(clemensb): For the same module (same bytes / same hash), we should
// only have one WasmModuleObject. Otherwise, we might only set
// breakpoints on a (potentially empty) subset of the instances.
@@ -337,7 +336,7 @@ void WasmEngine::AsyncInstantiate(
ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
  // Instantiate a TryCatch so that caught exceptions won't propagate out.
// They will still be set as pending exceptions on the isolate.
- // TODO(clemensh): Avoid TryCatch, use Execution::TryCall internally to invoke
+ // TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke
// start function and report thrown exception explicitly via out argument.
v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
catcher.SetVerbose(false);
@@ -567,7 +566,7 @@ int GetGCTimeMicros(base::TimeTicks start) {
void WasmEngine::AddIsolate(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
DCHECK_EQ(0, isolates_.count(isolate));
- isolates_.emplace(isolate, base::make_unique<IsolateInfo>(isolate));
+ isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));
// Install sampling GC callback.
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
@@ -631,7 +630,7 @@ void WasmEngine::LogCode(WasmCode* code) {
IsolateInfo* info = isolates_[isolate].get();
if (info->log_codes == false) continue;
if (info->log_codes_task == nullptr) {
- auto new_task = base::make_unique<LogCodesTask>(
+ auto new_task = std::make_unique<LogCodesTask>(
&mutex_, &info->log_codes_task, isolate, this);
info->log_codes_task = new_task.get();
info->foreground_task_runner->PostTask(std::move(new_task));
@@ -676,7 +675,8 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
return NewNativeModule(isolate, enabled, code_size_estimate,
- wasm::NativeModule::kCanAllocateMoreMemory,
+ !wasm::NativeModule::kNeedsFarJumpsBetweenCodeSpaces ||
+ FLAG_wasm_far_jump_table,
std::move(module));
}
@@ -688,7 +688,7 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
can_request_more, std::move(module));
base::MutexGuard lock(&mutex_);
auto pair = native_modules_.insert(std::make_pair(
- native_module.get(), base::make_unique<NativeModuleInfo>()));
+ native_module.get(), std::make_unique<NativeModuleInfo>()));
DCHECK(pair.second); // inserted new entry.
pair.first->second.get()->isolates.insert(isolate);
isolates_[isolate]->native_modules.insert(native_module.get());
@@ -768,7 +768,7 @@ void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
DCHECK_EQ(1, isolates_.count(isolate));
IsolateInfo* info = isolates_[isolate].get();
info->foreground_task_runner->PostTask(
- base::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
+ std::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
}
}
@@ -880,7 +880,7 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
for (auto* isolate : native_modules_[entry.first]->isolates) {
auto& gc_task = current_gc_info_->outstanding_isolates[isolate];
if (!gc_task) {
- auto new_task = base::make_unique<WasmGCForegroundTask>(isolate);
+ auto new_task = std::make_unique<WasmGCForegroundTask>(isolate);
gc_task = new_task.get();
DCHECK_EQ(1, isolates_.count(isolate));
isolates_[isolate]->foreground_task_runner->PostTask(
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 401cf2b880..424f85fa79 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -10,7 +10,6 @@
#include "src/tasks/cancelable-task.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-tier.h"
#include "src/zone/accounting-allocator.h"
@@ -23,6 +22,7 @@ class CompilationStatistics;
class HeapNumber;
class WasmInstanceObject;
class WasmModuleObject;
+class JSArrayBuffer;
namespace wasm {
@@ -120,8 +120,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
WasmCodeManager* code_manager() { return &code_manager_; }
- WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
-
AccountingAllocator* allocator() { return &allocator_; }
// Compilation statistics for TurboFan compilations.
@@ -156,8 +154,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
template <typename T, typename... Args>
std::unique_ptr<T> NewBackgroundCompileTask(Args&&... args) {
- return base::make_unique<T>(&background_compile_task_manager_,
- std::forward<Args>(args)...);
+ return std::make_unique<T>(&background_compile_task_manager_,
+ std::forward<Args>(args)...);
}
// Trigger code logging for this WasmCode in all Isolates which have access to
@@ -243,7 +241,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
// calling this method.
void PotentiallyFinishCurrentGC();
- WasmMemoryTracker memory_tracker_;
WasmCodeManager code_manager_;
AccountingAllocator allocator_;
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 9ca45183ef..13c159c0ef 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -247,6 +247,10 @@ int32_t int64_mod_wrapper(Address data) {
if (divisor == 0) {
return 0;
}
+ if (divisor == -1 && dividend == std::numeric_limits<int64_t>::min()) {
+ WriteUnalignedValue<int64_t>(data, 0);
+ return 1;
+ }
WriteUnalignedValue<int64_t>(data, dividend % divisor);
return 1;
}
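The guard above covers the one remainder that cannot be computed directly: for INT64_MIN % -1 the corresponding quotient does not fit in int64_t, so the C++ expression is undefined behavior and the hardware divide instruction traps on common architectures, even though the mathematical remainder is 0. A plain arithmetic sketch (needs <cstdint> and <limits>):

    int64_t dividend = std::numeric_limits<int64_t>::min();
    int64_t divisor = -1;
    // dividend % divisor;  // UB in C++; raises a divide error on x64
    int64_t remainder = 0;  // the value the wrapper writes back instead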
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 36f9ebd8a4..b18fa90acf 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -10,12 +10,12 @@
V(eh, "exception handling opcodes", false) \
V(threads, "thread opcodes", false) \
V(simd, "SIMD opcodes", false) \
- V(bigint, "JS BigInt support", false) \
V(return_call, "return call opcodes", false) \
V(compilation_hints, "compilation hints section", false)
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) \
V(anyref, "anyref opcodes", false) \
+ V(bigint, "JS BigInt support", false) \
V(type_reflection, "wasm type reflection in JS", false)
#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) \
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 299128860d..7c41f6a8e0 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -1128,13 +1128,41 @@ class ThreadImpl {
};
public:
+ // The {ReferenceStackScope} sets up the reference stack in the interpreter.
+  // The handle to the reference stack has to be re-initialized every time we
+ // call into the interpreter because there is no HandleScope that could
+ // contain that handle. A global handle is not an option because it can lead
+ // to a memory leak if a reference to the {WasmInstanceObject} is put onto the
+ // reference stack and thereby transitively keeps the interpreter alive.
+ class ReferenceStackScope {
+ public:
+ explicit ReferenceStackScope(ThreadImpl* impl) : impl_(impl) {
+ // The reference stack is already initialized, we don't have to do
+ // anything.
+ if (!impl_->reference_stack_cell_.is_null()) return;
+ impl_->reference_stack_cell_ = handle(
+ impl_->instance_object_->debug_info().interpreter_reference_stack(),
+ impl_->isolate_);
+ // We initialized the reference stack, so we also have to reset it later.
+ do_reset_stack_ = true;
+ }
+
+ ~ReferenceStackScope() {
+ if (do_reset_stack_) {
+ impl_->reference_stack_cell_ = Handle<Cell>();
+ }
+ }
+
+ private:
+ ThreadImpl* impl_;
+ bool do_reset_stack_ = false;
+ };
+
ThreadImpl(Zone* zone, CodeMap* codemap,
- Handle<WasmInstanceObject> instance_object,
- Handle<Cell> reference_stack_cell)
+ Handle<WasmInstanceObject> instance_object)
: codemap_(codemap),
isolate_(instance_object->GetIsolate()),
instance_object_(instance_object),
- reference_stack_cell_(reference_stack_cell),
frames_(zone),
activations_(zone) {}
@@ -1394,6 +1422,7 @@ class ThreadImpl {
};
friend class InterpretedFrameImpl;
+ friend class ReferenceStackScope;
CodeMap* codemap_;
Isolate* isolate_;
@@ -1663,9 +1692,15 @@ class ThreadImpl {
template <typename ctype, typename mtype>
bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
- sizeof(ctype));
+ int* const len, MachineRepresentation rep,
+ int prefix_len = 0) {
+ // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that
+ // the memarg is 1 byte from pc. We don't increment pc at the caller,
+    // because we want to keep pc at the start of the operation, so that trap
+    // reporting and tracing stay accurate; otherwise they would point at the
+    // middle of an opcode.
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(
+ decoder, code->at(pc + prefix_len), sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
Address addr = BoundsCheckMem<mtype>(imm.offset, index);
if (!addr) {
@@ -1690,9 +1725,15 @@ class ThreadImpl {
template <typename ctype, typename mtype>
bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
- sizeof(ctype));
+ int* const len, MachineRepresentation rep,
+ int prefix_len = 0) {
+ // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that
+ // the memarg is 1 byte from pc. We don't increment pc at the caller,
+    // because we want to keep pc at the start of the operation, so that trap
+    // reporting and tracing stay accurate; otherwise they would point at the
+    // middle of an opcode.
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(
+ decoder, code->at(pc + prefix_len), sizeof(ctype));
ctype val = Pop().to<ctype>();
uint32_t index = Pop().to<uint32_t>();
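For context on {prefix_len}: SIMD memory opcodes carry a prefix byte, so the memarg no longer sits one byte after {pc}. A sketch of the assumed encoding for {kExprS128LoadMem}:

    // pc + 0 : 0xfd          SIMD prefix byte
    // pc + 1 : opcode byte   (S128LoadMem)
    // pc + 2 : memarg        alignment and offset immediates
    // Decoding at code->at(pc + prefix_len) with prefix_len == 1 therefore
    // finds the memarg, while pc itself keeps pointing at the prefix so traps
    // and tracing still report the start of the instruction.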
@@ -2223,9 +2264,22 @@ class ThreadImpl {
EXTRACT_LANE_CASE(F32x4, f32x4)
EXTRACT_LANE_CASE(I64x2, i64x2)
EXTRACT_LANE_CASE(I32x4, i32x4)
- EXTRACT_LANE_CASE(I16x8, i16x8)
- EXTRACT_LANE_CASE(I8x16, i8x16)
#undef EXTRACT_LANE_CASE
+#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, type) \
+ case kExpr##format##ExtractLane##sign: { \
+ SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
+ *len += 1; \
+ WasmValue val = Pop(); \
+ Simd128 s = val.to_s128(); \
+ auto ss = s.to_##name(); \
+ Push(WasmValue(static_cast<type>(ss.val[LANE(imm.lane, ss)]))); \
+ return true; \
+ }
+ EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t)
+ EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t)
+ EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, S, int32_t)
+ EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, U, uint32_t)
+#undef EXTRACT_LANE_EXTEND_CASE
#define BINOP_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
WasmValue v2 = Pop(); \
@@ -2317,8 +2371,10 @@ class ThreadImpl {
}
UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
+ UNOP_CASE(F64x2Sqrt, f64x2, float2, 2, std::sqrt(a))
UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
+ UNOP_CASE(F32x4Sqrt, f32x4, float4, 4, std::sqrt(a))
UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
@@ -2431,10 +2487,12 @@ class ThreadImpl {
#undef REPLACE_LANE_CASE
case kExprS128LoadMem:
return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
- MachineRepresentation::kSimd128);
+ MachineRepresentation::kSimd128,
+ /*prefix_len=*/1);
case kExprS128StoreMem:
return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
- MachineRepresentation::kSimd128);
+ MachineRepresentation::kSimd128,
+ /*prefix_len=*/1);
#define SHIFT_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
uint32_t shift = Pop().to<uint32_t>(); \
@@ -2448,19 +2506,26 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
- SHIFT_CASE(I64x2Shl, i64x2, int2, 2, static_cast<uint64_t>(a) << shift)
- SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> shift)
- SHIFT_CASE(I64x2ShrU, i64x2, int2, 2, static_cast<uint64_t>(a) >> shift)
- SHIFT_CASE(I32x4Shl, i32x4, int4, 4, static_cast<uint32_t>(a) << shift)
- SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> shift)
- SHIFT_CASE(I32x4ShrU, i32x4, int4, 4, static_cast<uint32_t>(a) >> shift)
- SHIFT_CASE(I16x8Shl, i16x8, int8, 8, static_cast<uint16_t>(a) << shift)
- SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> shift)
- SHIFT_CASE(I16x8ShrU, i16x8, int8, 8, static_cast<uint16_t>(a) >> shift)
- SHIFT_CASE(I8x16Shl, i8x16, int16, 16, static_cast<uint8_t>(a) << shift)
- SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> shift)
+ SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
+ static_cast<uint64_t>(a) << (shift % 64))
+ SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> (shift % 64))
+ SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
+ static_cast<uint64_t>(a) >> (shift % 64))
+ SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
+ static_cast<uint32_t>(a) << (shift % 32))
+ SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> (shift % 32))
+ SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
+ static_cast<uint32_t>(a) >> (shift % 32))
+ SHIFT_CASE(I16x8Shl, i16x8, int8, 8,
+ static_cast<uint16_t>(a) << (shift % 16))
+ SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> (shift % 16))
+ SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
+ static_cast<uint16_t>(a) >> (shift % 16))
+ SHIFT_CASE(I8x16Shl, i8x16, int16, 16,
+ static_cast<uint8_t>(a) << (shift % 8))
+ SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> (shift % 8))
SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
- static_cast<uint8_t>(a) >> shift)
+ static_cast<uint8_t>(a) >> (shift % 8))
#undef SHIFT_CASE
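A quick worked example of why the shift counts are now masked: wasm SIMD shifts take the count modulo the lane width, so an oversized count must wrap rather than shift everything out (and an unmasked C++ shift by at least the operand width would be undefined behavior):

    uint32_t lane = 5;
    uint32_t count = 33;
    uint32_t shifted = lane << (count % 32);  // 10: a count of 33 acts like 1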
#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
expr) \
@@ -2564,6 +2629,18 @@ class ThreadImpl {
ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
#undef ADD_HORIZ_CASE
+ case kExprS8x16Swizzle: {
+ int16 v2 = Pop().to_s128().to_i8x16();
+ int16 v1 = Pop().to_s128().to_i8x16();
+ int16 res;
+ for (size_t i = 0; i < kSimd128Size; ++i) {
+ int lane = v2.val[LANE(i, v1)];
+ res.val[LANE(i, v1)] =
+ lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0;
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
case kExprS8x16Shuffle: {
Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
code->at(pc));
@@ -2604,6 +2681,23 @@ class ThreadImpl {
REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
#undef REDUCTION_CASE
+#define QFM_CASE(op, name, stype, count, operation) \
+ case kExpr##op: { \
+ stype c = Pop().to_s128().to_##name(); \
+ stype b = Pop().to_s128().to_##name(); \
+ stype a = Pop().to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; i++) { \
+ res.val[i] = a.val[i] operation(b.val[i] * c.val[i]); \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
+ }
+ QFM_CASE(F32x4Qfma, f32x4, float4, 4, +)
+ QFM_CASE(F32x4Qfms, f32x4, float4, 4, -)
+ QFM_CASE(F64x2Qfma, f64x2, float2, 2, +)
+ QFM_CASE(F64x2Qfms, f64x2, float2, 2, -)
+#undef QFM_CASE
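Reading the macro above concretely: the operands are popped as c, b, a, so for values pushed in the order a, b, c each result lane is a ± b*c. One f32 lane as plain arithmetic:

    float a = 1.5f, b = 2.0f, c = 4.0f;
    float qfma = a + (b * c);  //  9.5f
    float qfms = a - (b * c);  // -6.5f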
default:
return false;
}
@@ -2658,7 +2752,7 @@ class ThreadImpl {
WasmExceptionTag::cast(instance_object_->exceptions_table().get(index)),
isolate_);
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
- Handle<Object> exception_object =
+ Handle<WasmExceptionPackage> exception_object =
WasmExceptionPackage::New(isolate_, exception_tag, encoded_size);
Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
@@ -2727,8 +2821,9 @@ class ThreadImpl {
// Determines whether the given exception has a tag matching the expected tag
// for the given index within the exception table of the current instance.
bool MatchingExceptionTag(Handle<Object> exception_object, uint32_t index) {
- Handle<Object> caught_tag =
- WasmExceptionPackage::GetExceptionTag(isolate_, exception_object);
+ if (!exception_object->IsWasmExceptionPackage(isolate_)) return false;
+ Handle<Object> caught_tag = WasmExceptionPackage::GetExceptionTag(
+ isolate_, Handle<WasmExceptionPackage>::cast(exception_object));
Handle<Object> expected_tag =
handle(instance_object_->exceptions_table().get(index), isolate_);
DCHECK(expected_tag->IsWasmExceptionTag());
@@ -2755,8 +2850,9 @@ class ThreadImpl {
// the encoded values match the expected signature of the exception.
void DoUnpackException(const WasmException* exception,
Handle<Object> exception_object) {
- Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
- WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
+ Handle<FixedArray> encoded_values =
+ Handle<FixedArray>::cast(WasmExceptionPackage::GetExceptionValues(
+ isolate_, Handle<WasmExceptionPackage>::cast(exception_object)));
// Decode the exception values from the given exception package and push
// them onto the operand stack. This encoding has to be in sync with other
// backends so that exceptions can be passed between them.
@@ -3054,14 +3150,14 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
- case kExprGetLocal: {
+ case kExprLocalGet: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Push(GetStackValue(frames_.back().sp + imm.index));
len = 1 + imm.length;
break;
}
- case kExprSetLocal: {
+ case kExprLocalSet: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue val = Pop();
@@ -3069,7 +3165,7 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
- case kExprTeeLocal: {
+ case kExprLocalTee: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue val = Pop();
@@ -3231,7 +3327,7 @@ class ThreadImpl {
}
} break;
- case kExprGetGlobal: {
+ case kExprGlobalGet: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
HandleScope handle_scope(isolate_);
@@ -3239,7 +3335,7 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
- case kExprSetGlobal: {
+ case kExprGlobalSet: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
const WasmGlobal* global = &module()->globals[imm.index];
@@ -3770,7 +3866,8 @@ class ThreadImpl {
static WasmCode* GetTargetCode(Isolate* isolate, Address target) {
WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
NativeModule* native_module = code_manager->LookupNativeModule(target);
- if (native_module->is_jump_table_slot(target)) {
+ WasmCode* code = native_module->Lookup(target);
+ if (code->kind() == WasmCode::kJumpTable) {
uint32_t func_index =
native_module->GetFunctionIndexFromJumpTableSlot(target);
@@ -3784,7 +3881,6 @@ class ThreadImpl {
return native_module->GetCode(func_index);
}
- WasmCode* code = native_module->Lookup(target);
DCHECK_EQ(code->instruction_start(), target);
return code;
}
@@ -3888,12 +3984,14 @@ class InterpretedFrameImpl {
}
WasmValue GetLocalValue(int index) const {
+ ThreadImpl::ReferenceStackScope stack_scope(thread_);
DCHECK_LE(0, index);
DCHECK_GT(GetLocalCount(), index);
return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
}
WasmValue GetStackValue(int index) const {
+ ThreadImpl::ReferenceStackScope stack_scope(thread_);
DCHECK_LE(0, index);
// Index must be within the number of stack values of this frame.
DCHECK_GT(GetStackHeight(), index);
@@ -3941,21 +4039,33 @@ const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
// translation unit anyway.
//============================================================================
WasmInterpreter::State WasmInterpreter::Thread::state() {
- return ToImpl(this)->state();
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->state();
}
void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
WasmValue* args) {
- ToImpl(this)->InitFrame(function, args);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ impl->InitFrame(function, args);
}
WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
- return ToImpl(this)->Run(num_steps);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->Run(num_steps);
}
void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
-void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
+void WasmInterpreter::Thread::Reset() {
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->Reset();
+}
WasmInterpreter::Thread::ExceptionHandlingResult
WasmInterpreter::Thread::RaiseException(Isolate* isolate,
Handle<Object> exception) {
- return ToImpl(this)->RaiseException(isolate, exception);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->RaiseException(isolate, exception);
}
pc_t WasmInterpreter::Thread::GetBreakpointPc() {
return ToImpl(this)->GetBreakpointPc();
@@ -3969,7 +4079,9 @@ WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
- return ToImpl(this)->GetReturnValue(index);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->GetReturnValue(index);
}
TrapReason WasmInterpreter::Thread::GetTrapReason() {
return ToImpl(this)->GetTrapReason();
@@ -3996,41 +4108,38 @@ uint32_t WasmInterpreter::Thread::NumActivations() {
return ToImpl(this)->NumActivations();
}
uint32_t WasmInterpreter::Thread::StartActivation() {
- return ToImpl(this)->StartActivation();
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->StartActivation();
}
void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
- ToImpl(this)->FinishActivation(id);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ impl->FinishActivation(id);
}
uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
- return ToImpl(this)->ActivationFrameBase(id);
+ ThreadImpl* impl = ToImpl(this);
+ ThreadImpl::ReferenceStackScope stack_scope(impl);
+ return impl->ActivationFrameBase(id);
}
//============================================================================
// The implementation details of the interpreter.
//============================================================================
-class WasmInterpreterInternals : public ZoneObject {
+class WasmInterpreterInternals {
public:
// Create a copy of the module bytes for the interpreter, since the passed
// pointer might be invalidated after constructing the interpreter.
const ZoneVector<uint8_t> module_bytes_;
CodeMap codemap_;
- ZoneVector<ThreadImpl> threads_;
+ std::vector<ThreadImpl> threads_;
WasmInterpreterInternals(Zone* zone, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
Handle<WasmInstanceObject> instance_object)
: module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
- codemap_(module, module_bytes_.data(), zone),
- threads_(zone) {
- Isolate* isolate = instance_object->GetIsolate();
- Handle<Cell> reference_stack = isolate->global_handles()->Create(
- *isolate->factory()->NewCell(isolate->factory()->empty_fixed_array()));
- threads_.emplace_back(zone, &codemap_, instance_object, reference_stack);
- }
-
- ~WasmInterpreterInternals() {
- DCHECK_EQ(1, threads_.size());
- GlobalHandles::Destroy(threads_[0].reference_stack_cell().location());
+ codemap_(module, module_bytes_.data(), zone) {
+ threads_.emplace_back(zone, &codemap_, instance_object);
}
};
@@ -4059,10 +4168,12 @@ WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
Handle<WasmInstanceObject> instance_object)
: zone_(isolate->allocator(), ZONE_NAME),
- internals_(new (&zone_) WasmInterpreterInternals(
+ internals_(new WasmInterpreterInternals(
&zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
-WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }
+// The destructor is defined here so that {WasmInterpreterInternals}, which is
+// held via {unique_ptr} in the header, only needs a forward declaration there.
+WasmInterpreter::~WasmInterpreter() {}
void WasmInterpreter::Run() { internals_->threads_[0].Run(); }
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index da0ce01835..4eb0675aba 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_WASM_INTERPRETER_H_
#define V8_WASM_WASM_INTERPRETER_H_
+#include <memory>
+
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
#include "src/zone/zone-containers.h"
@@ -131,7 +133,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Stack inspection and modification.
pc_t GetBreakpointPc();
- // TODO(clemensh): Make this uint32_t.
+ // TODO(clemensb): Make this uint32_t.
int GetFrameCount();
// The InterpretedFrame is only valid as long as the Thread is paused.
FramePtr GetFrame(int index);
@@ -170,9 +172,12 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
uint32_t ActivationFrameBase(uint32_t activation_id);
};
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInterpreter);
+
WasmInterpreter(Isolate* isolate, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
Handle<WasmInstanceObject> instance);
+
~WasmInterpreter();
//==========================================================================
@@ -214,7 +219,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
private:
Zone zone_;
- WasmInterpreterInternals* internals_;
+ std::unique_ptr<WasmInterpreterInternals> internals_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index f10f5ff2bf..80d2fcb059 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -26,7 +26,6 @@
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
@@ -207,20 +206,20 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
if (source->IsArrayBuffer()) {
// A raw array buffer was passed.
Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(source);
- ArrayBuffer::Contents contents = buffer->GetContents();
+ auto backing_store = buffer->GetBackingStore();
- start = reinterpret_cast<const uint8_t*>(contents.Data());
- length = contents.ByteLength();
+ start = reinterpret_cast<const uint8_t*>(backing_store->Data());
+ length = backing_store->ByteLength();
*is_shared = buffer->IsSharedArrayBuffer();
} else if (source->IsTypedArray()) {
// A TypedArray was passed.
Local<TypedArray> array = Local<TypedArray>::Cast(source);
Local<ArrayBuffer> buffer = array->Buffer();
- ArrayBuffer::Contents contents = buffer->GetContents();
+ auto backing_store = buffer->GetBackingStore();
- start =
- reinterpret_cast<const uint8_t*>(contents.Data()) + array->ByteOffset();
+ start = reinterpret_cast<const uint8_t*>(backing_store->Data()) +
+ array->ByteOffset();
length = array->ByteLength();
*is_shared = buffer->IsSharedArrayBuffer();
} else {
@@ -434,8 +433,8 @@ class AsyncInstantiateCompileResultResolver
finished_ = true;
isolate_->wasm_engine()->AsyncInstantiate(
isolate_,
- base::make_unique<InstantiateBytesResultResolver>(isolate_, promise_,
- result),
+ std::make_unique<InstantiateBytesResultResolver>(isolate_, promise_,
+ result),
result, maybe_imports_);
}
@@ -597,7 +596,7 @@ void WebAssemblyCompileStreaming(
i::Handle<i::Managed<WasmStreaming>> data =
i::Managed<WasmStreaming>::Allocate(
i_isolate, 0,
- base::make_unique<WasmStreaming::WasmStreamingImpl>(
+ std::make_unique<WasmStreaming::WasmStreamingImpl>(
isolate, kAPIMethodName, resolver));
DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback());
@@ -876,7 +875,7 @@ void WebAssemblyInstantiateStreaming(
i::Handle<i::Managed<WasmStreaming>> data =
i::Managed<WasmStreaming>::Allocate(
i_isolate, 0,
- base::make_unique<WasmStreaming::WasmStreamingImpl>(
+ std::make_unique<WasmStreaming::WasmStreamingImpl>(
isolate, kAPIMethodName, compilation_resolver));
DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback());
@@ -1156,7 +1155,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- bool is_shared_memory = false;
+ auto shared = i::SharedFlag::kNotShared;
auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
if (enabled_features.threads) {
// Shared property of descriptor
@@ -1165,10 +1164,11 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
descriptor->Get(context, shared_key);
v8::Local<v8::Value> value;
if (maybe_value.ToLocal(&value)) {
- is_shared_memory = value->BooleanValue(isolate);
+ shared = value->BooleanValue(isolate) ? i::SharedFlag::kShared
+ : i::SharedFlag::kNotShared;
}
// Throw TypeError if shared is true, and the descriptor has no "maximum"
- if (is_shared_memory && maximum == -1) {
+ if (shared == i::SharedFlag::kShared && maximum == -1) {
thrower.TypeError(
"If shared is true, maximum property should be defined.");
return;
@@ -1177,13 +1177,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::JSObject> memory_obj;
if (!i::WasmMemoryObject::New(i_isolate, static_cast<uint32_t>(initial),
- static_cast<uint32_t>(maximum),
- is_shared_memory)
+ static_cast<uint32_t>(maximum), shared)
.ToHandle(&memory_obj)) {
thrower.RangeError("could not allocate memory");
return;
}
- if (is_shared_memory) {
+ if (shared == i::SharedFlag::kShared) {
i::Handle<i::JSArrayBuffer> buffer(
i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(),
i_isolate);
@@ -2034,8 +2033,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(module_constructor);
Handle<JSObject> module_proto(
JSObject::cast(module_constructor->instance_prototype()), isolate);
- Handle<Map> module_map =
- isolate->factory()->NewMap(i::WASM_MODULE_TYPE, WasmModuleObject::kSize);
+ Handle<Map> module_map = isolate->factory()->NewMap(
+ i::WASM_MODULE_OBJECT_TYPE, WasmModuleObject::kSize);
JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
1);
@@ -2055,7 +2054,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSObject> instance_proto(
JSObject::cast(instance_constructor->instance_prototype()), isolate);
Handle<Map> instance_map = isolate->factory()->NewMap(
- i::WASM_INSTANCE_TYPE, WasmInstanceObject::kSize);
+ i::WASM_INSTANCE_OBJECT_TYPE, WasmInstanceObject::kSize);
JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto);
InstallGetter(isolate, instance_proto, "exports",
WebAssemblyInstanceGetExports);
@@ -2075,8 +2074,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(table_constructor);
Handle<JSObject> table_proto(
JSObject::cast(table_constructor->instance_prototype()), isolate);
- Handle<Map> table_map =
- isolate->factory()->NewMap(i::WASM_TABLE_TYPE, WasmTableObject::kSize);
+ Handle<Map> table_map = isolate->factory()->NewMap(i::WASM_TABLE_OBJECT_TYPE,
+ WasmTableObject::kSize);
JSFunction::SetInitialMap(table_constructor, table_map, table_proto);
InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
@@ -2096,8 +2095,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(memory_constructor);
Handle<JSObject> memory_proto(
JSObject::cast(memory_constructor->instance_prototype()), isolate);
- Handle<Map> memory_map =
- isolate->factory()->NewMap(i::WASM_MEMORY_TYPE, WasmMemoryObject::kSize);
+ Handle<Map> memory_map = isolate->factory()->NewMap(
+ i::WASM_MEMORY_OBJECT_TYPE, WasmMemoryObject::kSize);
JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
@@ -2115,8 +2114,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(global_constructor);
Handle<JSObject> global_proto(
JSObject::cast(global_constructor->instance_prototype()), isolate);
- Handle<Map> global_map =
- isolate->factory()->NewMap(i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
+ Handle<Map> global_map = isolate->factory()->NewMap(
+ i::WASM_GLOBAL_OBJECT_TYPE, WasmGlobalObject::kSize);
JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
@@ -2137,7 +2136,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSObject> exception_proto(
JSObject::cast(exception_constructor->instance_prototype()), isolate);
Handle<Map> exception_map = isolate->factory()->NewMap(
- i::WASM_EXCEPTION_TYPE, WasmExceptionObject::kSize);
+ i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kSize);
JSFunction::SetInitialMap(exception_constructor, exception_map,
exception_proto);
}
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index c7c95aca26..6dc652aba2 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -36,6 +36,7 @@ constexpr size_t kV8MaxWasmFunctionLocals = 50000;
constexpr size_t kV8MaxWasmFunctionParams = 1000;
constexpr size_t kV8MaxWasmFunctionMultiReturns = 1000;
constexpr size_t kV8MaxWasmFunctionReturns = 1;
+constexpr size_t kV8MaxWasmFunctionBrTableSize = 65520;
// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
constexpr size_t kV8MaxWasmTableSize = 10000000;
constexpr size_t kV8MaxWasmTableInitEntries = 10000000;
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index f203649542..bbb0d67f9c 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -566,7 +566,7 @@ MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate,
WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
- // Set by TryAllocateBackingStore or GetEmptyBackingStore
+ // Set by TryAllocateBackingStore.
void* allocation_base = nullptr;
size_t allocation_length = 0;
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
deleted file mode 100644
index ecb6203ac5..0000000000
--- a/deps/v8/src/wasm/wasm-memory.h
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_WASM_MEMORY_H_
-#define V8_WASM_WASM_MEMORY_H_
-
-#include <atomic>
-#include <unordered_map>
-#include <unordered_set>
-
-#include "src/base/platform/mutex.h"
-#include "src/flags/flags.h"
-#include "src/handles/handles.h"
-#include "src/objects/js-array-buffer.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// The {WasmMemoryTracker} tracks reservations and allocations for wasm memory
-// and wasm code. There is an upper limit on the total reserved memory which is
-// checked by this class. Allocations are stored so we can look them up when an
-// array buffer dies and figure out the reservation and allocation bounds for
-// that buffer.
-class WasmMemoryTracker {
- public:
- WasmMemoryTracker() = default;
- V8_EXPORT_PRIVATE ~WasmMemoryTracker();
-
- // ReserveAddressSpace attempts to increase the reserved address space counter
- // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
- // and reserve {num_bytes} bytes), false otherwise.
- bool ReserveAddressSpace(size_t num_bytes);
-
- void RegisterAllocation(Isolate* isolate, void* allocation_base,
- size_t allocation_length, void* buffer_start,
- size_t buffer_length);
-
- struct SharedMemoryObjectState {
- Handle<WasmMemoryObject> memory_object;
- Isolate* isolate;
-
- SharedMemoryObjectState() = default;
- SharedMemoryObjectState(Handle<WasmMemoryObject> memory_object,
- Isolate* isolate)
- : memory_object(memory_object), isolate(isolate) {}
- };
-
- struct AllocationData {
- void* allocation_base = nullptr;
- size_t allocation_length = 0;
- void* buffer_start = nullptr;
- size_t buffer_length = 0;
- bool is_shared = false;
- // Wasm memories are growable by default, this will be false only when
- // shared with an asmjs module.
- bool is_growable = true;
-
- // Track Wasm Memory instances across isolates, this is populated on
- // PostMessage using persistent handles for memory objects.
- std::vector<WasmMemoryTracker::SharedMemoryObjectState>
- memory_object_vector;
-
- private:
- AllocationData() = default;
- AllocationData(void* allocation_base, size_t allocation_length,
- void* buffer_start, size_t buffer_length)
- : allocation_base(allocation_base),
- allocation_length(allocation_length),
- buffer_start(buffer_start),
- buffer_length(buffer_length) {
- DCHECK_LE(reinterpret_cast<uintptr_t>(allocation_base),
- reinterpret_cast<uintptr_t>(buffer_start));
- DCHECK_GE(
- reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
- reinterpret_cast<uintptr_t>(buffer_start));
- DCHECK_GE(
- reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
- reinterpret_cast<uintptr_t>(buffer_start) + buffer_length);
- }
-
- friend WasmMemoryTracker;
- };
-
- // Allow tests to allocate a backing store the same way as we do it for
- // WebAssembly memory. This is used in unit tests for trap handler to
- // generate the same signals/exceptions for invalid memory accesses as
- // we would get with WebAssembly memory.
- V8_EXPORT_PRIVATE void* TryAllocateBackingStoreForTesting(
- Heap* heap, size_t size, void** allocation_base,
- size_t* allocation_length);
-
- // Free memory allocated with TryAllocateBackingStoreForTesting.
- V8_EXPORT_PRIVATE void FreeBackingStoreForTesting(base::AddressRegion memory,
- void* buffer_start);
-
- // Decreases the amount of reserved address space.
- void ReleaseReservation(size_t num_bytes);
-
- V8_EXPORT_PRIVATE bool IsWasmMemory(const void* buffer_start);
-
- bool IsWasmSharedMemory(const void* buffer_start);
-
- // Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
- // buffer is not tracked.
- V8_EXPORT_PRIVATE const AllocationData* FindAllocationData(
- const void* buffer_start);
-
- // Free Memory allocated by the Wasm memory tracker
- bool FreeWasmMemory(Isolate* isolate, const void* buffer_start);
-
- void MarkWasmMemoryNotGrowable(Handle<JSArrayBuffer> buffer);
-
- bool IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer);
-
- // When WebAssembly.Memory is transferred over PostMessage, register the
- // allocation as shared and track the memory objects that will need
- // updating if memory is resized.
- void RegisterWasmMemoryAsShared(Handle<WasmMemoryObject> object,
- Isolate* isolate);
-
- // This method is called when the underlying backing store is grown, but
- // instances that share the backing_store have not yet been updated.
- void SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
- size_t new_size);
-
- // Interrupt handler for GROW_SHARED_MEMORY interrupt. Update memory objects
- // and instances that share the memory objects after a Grow call.
- void UpdateSharedMemoryInstances(Isolate* isolate);
-
- // Due to timing of when buffers are garbage collected, vs. when isolate
- // object handles are destroyed, it is possible to leak global handles. To
- // avoid this, cleanup any global handles on isolate destruction if any exist.
- void DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate);
-
- // Allocation results are reported to UMA
- //
- // See wasm_memory_allocation_result in counters.h
- enum class AllocationStatus {
- kSuccess, // Succeeded on the first try
-
- kSuccessAfterRetry, // Succeeded after garbage collection
-
- kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address
- // space limit
-
- kOtherFailure // Failed for an unknown reason
- };
-
- private:
- // Helper methods to free memory only if not shared by other isolates, memory
- // objects.
- void FreeMemoryIfNotShared_Locked(Isolate* isolate,
- const void* backing_store);
- bool CanFreeSharedMemory_Locked(const void* backing_store);
- void RemoveSharedBufferState_Locked(Isolate* isolate,
- const void* backing_store);
-
- // Registers the allocation as shared, and tracks all the memory objects
- // associates with this allocation across isolates.
- void RegisterSharedWasmMemory_Locked(Handle<WasmMemoryObject> object,
- Isolate* isolate);
-
- // Map the new size after grow to the buffer backing store, so that instances
- // and memory objects that share the WebAssembly.Memory across isolates can
- // be updated..
- void AddBufferToGrowMap_Locked(Handle<JSArrayBuffer> old_buffer,
- size_t new_size);
-
- // Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have memory
- // objects that share this buffer.
- void TriggerSharedGrowInterruptOnAllIsolates_Locked(
- Handle<JSArrayBuffer> old_buffer);
-
- // When isolates hit a stack check, update the memory objects associated with
- // that isolate.
- void UpdateSharedMemoryStateOnInterrupt_Locked(Isolate* isolate,
- void* backing_store,
- size_t new_size);
-
- // Check if all the isolates that share a backing_store have hit a stack
- // check. If a stack check is hit, and the backing store is pending grow,
- // this isolate will have updated memory objects.
- bool AreAllIsolatesUpdated_Locked(const void* backing_store);
-
- // If a grow call is made to a buffer with a pending grow, and all the
- // isolates that share this buffer have not hit a StackCheck, clear the set of
- // already updated instances so they can be updated with the new size on the
- // most recent grow call.
- void ClearUpdatedInstancesOnPendingGrow_Locked(const void* backing_store);
-
- // Helper functions to update memory objects on grow, and maintain state for
- // which isolates hit a stack check.
- void UpdateMemoryObjectsForIsolate_Locked(Isolate* isolate,
- void* backing_store,
- size_t new_size);
- bool MemoryObjectsNeedUpdate_Locked(Isolate* isolate,
- const void* backing_store);
-
- // Destroy global handles to memory objects, and remove backing store from
- // isolates_per_buffer on Free.
- void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
- Isolate* isolate, const void* backing_store);
- void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
- const void* backing_store);
-
- void RemoveIsolateFromBackingStore_Locked(Isolate* isolate,
- const void* backing_store);
-
- // Removes an allocation from the tracker.
- AllocationData ReleaseAllocation_Locked(Isolate* isolate,
- const void* buffer_start);
-
- // Clients use a two-part process. First they "reserve" the address space,
- // which signifies an intent to actually allocate it. This determines whether
- // doing the allocation would put us over our limit. Once there is a
- // reservation, clients can do the allocation and register the result.
- //
- // We should always have:
- // allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit
- std::atomic<size_t> reserved_address_space_{0};
-
- // Used to protect access to the allocated address space counter and
- // allocation map. This is needed because Wasm memories can be freed on
- // another thread by the ArrayBufferTracker.
- base::Mutex mutex_;
-
- size_t allocated_address_space_ = 0;
-
- //////////////////////////////////////////////////////////////////////////////
- // Protected by {mutex_}:
-
- // Track Wasm memory allocation information. This is keyed by the start of the
- // buffer, rather than by the start of the allocation.
- std::unordered_map<const void*, AllocationData> allocations_;
-
- // Maps each buffer to the isolates that share the backing store.
- std::unordered_map<const void*, std::unordered_set<Isolate*>>
- isolates_per_buffer_;
-
- // Maps which isolates have had a grow interrupt handled on the buffer. This
- // is maintained to ensure that the instances are updated with the right size
- // on Grow.
- std::unordered_map<const void*, std::unordered_set<Isolate*>>
- isolates_updated_on_grow_;
-
- // Maps backing stores(void*) to the size of the underlying memory in
- // (size_t). An entry to this map is made on a grow call to the corresponding
- // backing store. On consecutive grow calls to the same backing store,
- // the size entry is updated. This entry is made right after the mprotect
- // call to change the protections on a backing_store, so the memory objects
- // have not been updated yet. The backing store entry in this map is erased
- // when all the memory objects, or instances that share this backing store
- // have their bounds updated.
- std::unordered_map<void*, size_t> grow_update_map_;
-
- // End of fields protected by {mutex_}.
- //////////////////////////////////////////////////////////////////////////////
-
- DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
-};
-
-// Attempts to allocate an array buffer with guard regions suitable for trap
-// handling. If address space is not available, it will return a buffer with
-// mini-guards that will require bounds checks.
-V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*,
- size_t size);
-
-// Attempts to allocate a SharedArrayBuffer with guard regions suitable for
-// trap handling. If address space is not available, it will try to reserve
-// up to the maximum for that memory. If all else fails, it will return a
-// buffer with mini-guards of initial size.
-V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(
- Isolate*, size_t initial_size, size_t max_size);
-
-Handle<JSArrayBuffer> SetupArrayBuffer(
- Isolate*, void* backing_store, size_t size, bool is_external,
- SharedFlag shared = SharedFlag::kNotShared);
-
-V8_EXPORT_PRIVATE void DetachMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- bool free_memory);
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_WASM_MEMORY_H_
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index d3874e1a34..0bbc104070 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -71,15 +71,15 @@ uint32_t WasmFunctionBuilder::AddLocal(ValueType type) {
}
void WasmFunctionBuilder::EmitGetLocal(uint32_t local_index) {
- EmitWithU32V(kExprGetLocal, local_index);
+ EmitWithU32V(kExprLocalGet, local_index);
}
void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) {
- EmitWithU32V(kExprSetLocal, local_index);
+ EmitWithU32V(kExprLocalSet, local_index);
}
void WasmFunctionBuilder::EmitTeeLocal(uint32_t local_index) {
- EmitWithU32V(kExprTeeLocal, local_index);
+ EmitWithU32V(kExprLocalTee, local_index);
}
void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
@@ -505,7 +505,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_f64(global.init.val.f64_const);
break;
case WasmInitExpr::kGlobalIndex:
- buffer->write_u8(kExprGetGlobal);
+ buffer->write_u8(kExprGlobalGet);
buffer->write_u32v(global.init.val.global_index);
break;
case WasmInitExpr::kRefNullConst:
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 5a10368a8b..033f12ae24 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -22,6 +22,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
+#include "src/wasm/wasm-text.h"
namespace v8 {
namespace internal {
@@ -58,6 +59,57 @@ int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig,
return result;
}
+// static
+int GetWasmFunctionOffset(const WasmModule* module, uint32_t func_index) {
+ const std::vector<WasmFunction>& functions = module->functions;
+ if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
+ DCHECK_GE(kMaxInt, functions[func_index].code.offset());
+ return static_cast<int>(functions[func_index].code.offset());
+}
+
+// static
+int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset) {
+ const std::vector<WasmFunction>& functions = module->functions;
+
+ // Binary search for a function containing the given position.
+ int left = 0; // inclusive
+ int right = static_cast<int>(functions.size()); // exclusive
+  if (right == 0) return -1;
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ if (functions[mid].code.offset() <= byte_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+  // If the found function does not contain the given position, return -1.
+ const WasmFunction& func = functions[left];
+ if (byte_offset < func.code.offset() ||
+ byte_offset >= func.code.end_offset()) {
+ return -1;
+ }
+
+ return left;
+}
+
+// static
+v8::debug::WasmDisassembly DisassembleWasmFunction(
+ const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ int func_index) {
+ if (func_index < 0 ||
+ static_cast<uint32_t>(func_index) >= module->functions.size())
+ return {};
+
+ std::ostringstream disassembly_os;
+ v8::debug::WasmDisassembly::OffsetTable offset_table;
+
+ PrintWasmText(module, wire_bytes, static_cast<uint32_t>(func_index),
+ disassembly_os, &offset_table);
+
+ return {disassembly_os.str(), std::move(offset_table)};
+}
+
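For illustration: the lookup above is an upper-bound binary search over function start offsets. A minimal, self-contained sketch of the same containment logic, using std::upper_bound over a sorted offset table (the FunctionRange type and data layout here are illustrative, not V8's):

#include <algorithm>
#include <cstdint>
#include <vector>

struct FunctionRange {
  uint32_t start;  // byte offset of the function body in the module
  uint32_t end;    // one past the last byte of the function body
};

// Returns the index of the function containing byte_offset, or -1.
// Assumes |functions| is sorted by |start|, as module functions are.
int ContainingFunction(const std::vector<FunctionRange>& functions,
                       uint32_t byte_offset) {
  if (functions.empty()) return -1;
  // Find the first function starting *after* byte_offset, then step back.
  auto it = std::upper_bound(
      functions.begin(), functions.end(), byte_offset,
      [](uint32_t offset, const FunctionRange& f) { return offset < f.start; });
  if (it == functions.begin()) return -1;  // offset precedes all functions
  --it;
  if (byte_offset >= it->end) return -1;   // offset falls in a gap
  return static_cast<int>(it - functions.begin());
}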
void WasmModule::AddFunctionNameForTesting(int function_index,
WireBytesRef name) {
if (!function_names) {
@@ -475,21 +527,19 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
// Make a copy of the payload data in the section.
size_t size = section.payload.length();
- void* memory =
- size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
-
- if (size && !memory) {
+ MaybeHandle<JSArrayBuffer> result =
+ isolate->factory()->NewJSArrayBufferAndBackingStore(
+ size, InitializedFlag::kUninitialized);
+ Handle<JSArrayBuffer> array_buffer;
+ if (!result.ToHandle(&array_buffer)) {
thrower->RangeError("out of memory allocating custom section data");
return Handle<JSArray>();
}
- Handle<JSArrayBuffer> buffer =
- isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
- constexpr bool is_external = false;
- JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
- memcpy(memory, wire_bytes.begin() + section.payload.offset(),
+ memcpy(array_buffer->backing_store(),
+ wire_bytes.begin() + section.payload.offset(),
section.payload.length());
- matching_sections.push_back(buffer);
+ matching_sections.push_back(array_buffer);
}
int num_custom_sections = static_cast<int>(matching_sections.size());
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 69c57725de..79c3b23a33 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -16,9 +16,13 @@
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
+
+namespace debug {
+struct WasmDisassembly;
+}
+
namespace internal {
-class WasmDebugInfo;
class WasmModuleObject;
namespace wasm {
@@ -240,6 +244,25 @@ V8_EXPORT_PRIVATE int MaxNumExportWrappers(const WasmModule* module);
int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig,
bool is_import);
+// Return the byte offset of the function identified by the given index.
+// The offset will be relative to the start of the module bytes.
+// Returns -1 if the function index is invalid.
+int GetWasmFunctionOffset(const WasmModule* module, uint32_t func_index);
+
+// Returns the function containing the given byte offset.
+// Returns -1 if the byte offset is not contained in any function of this
+// module.
+int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset);
+
+// Compute the disassembly of a wasm function.
+// Returns the disassembly string and a list of <byte_offset, line, column>
+// entries, mapping wasm byte offsets to line and column in the disassembly.
+// The list is guaranteed to be ordered by the byte_offset.
+// Returns an empty string and empty vector if the function index is invalid.
+V8_EXPORT_PRIVATE debug::WasmDisassembly DisassembleWasmFunction(
+ const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ int func_index);
+
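For illustration, a hedged sketch of how these free functions compose, mirroring the pattern the breakpoint code in wasm-objects.cc uses (module-relative position -> function index plus in-function offset); the DecomposePosition helper and FunctionOffset struct are illustrative, not part of V8:

// Illustrative only: decompose a module-relative byte offset into a
// (function index, offset within that function) pair.
#include "src/wasm/wasm-module.h"

namespace v8 {
namespace internal {
namespace wasm {

struct FunctionOffset {
  int func_index;      // -1 if the position is not inside any function
  int offset_in_func;  // valid only if func_index >= 0
};

inline FunctionOffset DecomposePosition(const WasmModule* module,
                                        uint32_t position) {
  int func_index = GetContainingWasmFunction(module, position);
  if (func_index < 0) return {-1, 0};
  int func_start = GetWasmFunctionOffset(module, func_index);
  return {func_index, static_cast<int>(position) - func_start};
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8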
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
@@ -290,15 +313,6 @@ struct WasmFunctionName {
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
-// Get the debug info associated with the given wasm object.
-// If no debug info exists yet, it is created automatically.
-Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
- Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
- ModuleOrigin origin, Handle<Script> asm_js_script,
- Vector<const byte> asm_offset_table);
-
V8_EXPORT_PRIVATE bool IsWasmCodegenAllowed(Isolate* isolate,
Handle<Context> context);
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 66d3a2716e..a7f74381ae 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -88,12 +88,8 @@ ACCESSORS(WasmModuleObject, managed_native_module, Managed<wasm::NativeModule>,
kNativeModuleOffset)
ACCESSORS(WasmModuleObject, export_wrappers, FixedArray, kExportWrappersOffset)
ACCESSORS(WasmModuleObject, script, Script, kScriptOffset)
-ACCESSORS(WasmModuleObject, weak_instance_list, WeakArrayList,
- kWeakInstanceListOffset)
OPTIONAL_ACCESSORS(WasmModuleObject, asm_js_offset_table, ByteArray,
kAsmJsOffsetTableOffset)
-OPTIONAL_ACCESSORS(WasmModuleObject, breakpoint_infos, FixedArray,
- kBreakPointInfosOffset)
wasm::NativeModule* WasmModuleObject::native_module() const {
return managed_native_module().raw();
}
@@ -102,13 +98,9 @@ WasmModuleObject::shared_native_module() const {
return managed_native_module().get();
}
const wasm::WasmModule* WasmModuleObject::module() const {
- // TODO(clemensh): Remove this helper (inline in callers).
+ // TODO(clemensb): Remove this helper (inline in callers).
return native_module()->module();
}
-void WasmModuleObject::reset_breakpoint_infos() {
- WRITE_FIELD(*this, kBreakPointInfosOffset,
- GetReadOnlyRoots().undefined_value());
-}
bool WasmModuleObject::is_asm_js() {
bool asm_js = is_asmjs_module(module());
DCHECK_EQ(asm_js, script().IsUserJavaScript());
@@ -309,6 +301,10 @@ ACCESSORS(WasmExceptionObject, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
ACCESSORS(WasmExceptionObject, exception_tag, HeapObject, kExceptionTagOffset)
+// WasmExceptionPackage
+OBJECT_CONSTRUCTORS_IMPL(WasmExceptionPackage, JSReceiver)
+CAST_ACCESSOR(WasmExceptionPackage)
+
// WasmExportedFunction
WasmExportedFunction::WasmExportedFunction(Address ptr) : JSFunction(ptr) {
SLOW_DCHECK(IsWasmExportedFunction(*this));
@@ -382,6 +378,8 @@ ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
+ACCESSORS(WasmDebugInfo, interpreter_reference_stack, Cell,
+ kInterpreterReferenceStackOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
kCWasmEntriesOffset)
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index d9417943a8..14e682ce23 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -25,10 +25,8 @@
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-text.h"
#define TRACE(...) \
do { \
@@ -244,37 +242,40 @@ Handle<WasmModuleObject> WasmModuleObject::New(
isolate->factory()->NewJSObject(isolate->wasm_module_constructor()));
module_object->set_export_wrappers(*export_wrappers);
if (script->type() == Script::TYPE_WASM) {
- script->set_wasm_module_object(*module_object);
+ script->set_wasm_breakpoint_infos(
+ ReadOnlyRoots(isolate).empty_fixed_array());
+ script->set_wasm_managed_native_module(*managed_native_module);
+ script->set_wasm_weak_instance_list(
+ ReadOnlyRoots(isolate).empty_weak_array_list());
}
module_object->set_script(*script);
- module_object->set_weak_instance_list(
- ReadOnlyRoots(isolate).empty_weak_array_list());
module_object->set_managed_native_module(*managed_native_module);
return module_object;
}
-bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
- int* position,
+// static
+bool WasmModuleObject::SetBreakPoint(Handle<Script> script, int* position,
Handle<BreakPoint> break_point) {
- Isolate* isolate = module_object->GetIsolate();
+ Isolate* isolate = script->GetIsolate();
// Find the function for this breakpoint.
- int func_index = module_object->GetContainingFunction(*position);
+ const WasmModule* module = script->wasm_native_module()->module();
+ int func_index = GetContainingWasmFunction(module, *position);
if (func_index < 0) return false;
- const WasmFunction& func = module_object->module()->functions[func_index];
+ const WasmFunction& func = module->functions[func_index];
int offset_in_func = *position - func.code.offset();
// According to the current design, we should only be called with valid
// breakable positions.
- DCHECK(IsBreakablePosition(module_object->native_module(), func_index,
+ DCHECK(IsBreakablePosition(script->wasm_native_module(), func_index,
offset_in_func));
// Insert new break point into the breakpoint info list of the script.
- WasmModuleObject::AddBreakpoint(module_object, *position, break_point);
+ WasmModuleObject::AddBreakpointToInfo(script, *position, break_point);
- // Iterate over all instances of this module and tell them to set this new
- // breakpoint. We do this using the weak list of all instances.
- Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(),
+ // Iterate over all instances and tell them to set this new breakpoint.
+ // We do this using the weak list of all instances from the script.
+ Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(),
isolate);
for (int i = 0; i < weak_instance_list->length(); ++i) {
MaybeObject maybe_instance = weak_instance_list->Get(i);
@@ -291,6 +292,42 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
return true;
}
+// static
+bool WasmModuleObject::ClearBreakPoint(Handle<Script> script, int position,
+ Handle<BreakPoint> break_point) {
+ Isolate* isolate = script->GetIsolate();
+
+ // Find the function for this breakpoint.
+ const WasmModule* module = script->wasm_native_module()->module();
+ int func_index = GetContainingWasmFunction(module, position);
+ if (func_index < 0) return false;
+ const WasmFunction& func = module->functions[func_index];
+ int offset_in_func = position - func.code.offset();
+
+ if (!WasmModuleObject::RemoveBreakpointFromInfo(script, position,
+ break_point)) {
+ return false;
+ }
+
+ // Iterate over all instances and tell them to remove this breakpoint.
+ // We do this using the weak list of all instances from the script.
+ Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(),
+ isolate);
+ for (int i = 0; i < weak_instance_list->length(); ++i) {
+ MaybeObject maybe_instance = weak_instance_list->Get(i);
+ if (maybe_instance->IsWeak()) {
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(maybe_instance->GetHeapObjectAssumeWeak()),
+ isolate);
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ WasmDebugInfo::ClearBreakpoint(debug_info, func_index, offset_in_func);
+ }
+ }
+
+ return true;
+}
+
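SetBreakPoint and ClearBreakPoint share the same broadcast shape: resolve the position to a function, update the per-script breakpoint info, then walk the script's weak instance list and forward the change to each live instance. A simplified stand-alone model of that shape, with std::weak_ptr standing in for the WeakArrayList and illustrative types throughout:

#include <memory>
#include <vector>

struct Instance {
  void SetBreakpoint(int /*func_index*/, int /*offset*/) {}    // per-instance work
  void ClearBreakpoint(int /*func_index*/, int /*offset*/) {}  // per-instance work
};

// Stand-in for the script's weak instance list: dead entries are skipped.
using WeakInstanceList = std::vector<std::weak_ptr<Instance>>;

template <typename Fn>
void BroadcastToLiveInstances(const WeakInstanceList& instances, Fn fn) {
  for (const auto& weak : instances) {
    if (auto instance = weak.lock()) fn(*instance);  // only live instances
  }
}

// Usage, assuming the position was already resolved to a function:
// BroadcastToLiveInstances(list, [&](Instance& i) {
//   i.ClearBreakpoint(func_index, offset_in_func);
// });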
namespace {
int GetBreakpointPos(Isolate* isolate, Object break_point_info_or_undef) {
@@ -323,17 +360,17 @@ int FindBreakpointInfoInsertPos(Isolate* isolate,
} // namespace
-void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
- int position,
- Handle<BreakPoint> break_point) {
- Isolate* isolate = module_object->GetIsolate();
+// static
+void WasmModuleObject::AddBreakpointToInfo(Handle<Script> script, int position,
+ Handle<BreakPoint> break_point) {
+ Isolate* isolate = script->GetIsolate();
Handle<FixedArray> breakpoint_infos;
- if (module_object->has_breakpoint_infos()) {
- breakpoint_infos = handle(module_object->breakpoint_infos(), isolate);
+ if (script->has_wasm_breakpoint_infos()) {
+ breakpoint_infos = handle(script->wasm_breakpoint_infos(), isolate);
} else {
breakpoint_infos =
isolate->factory()->NewFixedArray(4, AllocationType::kOld);
- module_object->set_breakpoint_infos(*breakpoint_infos);
+ script->set_wasm_breakpoint_infos(*breakpoint_infos);
}
int insert_pos =
@@ -357,7 +394,7 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
if (need_realloc) {
new_breakpoint_infos = isolate->factory()->NewFixedArray(
2 * breakpoint_infos->length(), AllocationType::kOld);
- module_object->set_breakpoint_infos(*new_breakpoint_infos);
+ script->set_wasm_breakpoint_infos(*new_breakpoint_infos);
// Copy over the entries [0, insert_pos).
for (int i = 0; i < insert_pos; ++i)
new_breakpoint_infos->set(i, breakpoint_infos->get(i));
@@ -379,16 +416,45 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
new_breakpoint_infos->set(insert_pos, *breakpoint_info);
}
+// static
+bool WasmModuleObject::RemoveBreakpointFromInfo(
+ Handle<Script> script, int position, Handle<BreakPoint> break_point) {
+ if (!script->has_wasm_breakpoint_infos()) return false;
+
+ Isolate* isolate = script->GetIsolate();
+ Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
+
+ int pos = FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+
+ // Does a BreakPointInfo object already exist for this position?
+ if (pos == breakpoint_infos->length()) return false;
+
+ Handle<BreakPointInfo> info(BreakPointInfo::cast(breakpoint_infos->get(pos)),
+ isolate);
+ BreakPointInfo::ClearBreakPoint(isolate, info, break_point);
+
+ // Check if there are no more breakpoints at this location.
+ if (info->GetBreakPointCount(isolate) == 0) {
+ // Update array by moving breakpoints up one position.
+ for (int i = pos; i < breakpoint_infos->length() - 1; i++) {
+ Object entry = breakpoint_infos->get(i + 1);
+ breakpoint_infos->set(i, entry);
+ if (entry.IsUndefined(isolate)) break;
+ }
+ // Make sure last array element is empty as a result.
+ breakpoint_infos->set_undefined(breakpoint_infos->length() - 1);
+ }
+ return true;
+}
+
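The removal path compacts a FixedArray that is intentionally over-allocated (AddBreakpointToInfo grows it by doubling), so unused tail slots stay undefined. A small stand-alone model of the shift-and-clear-tail step, with -1 standing in for the undefined sentinel:

#include <vector>

constexpr int kEmpty = -1;  // stands in for the undefined sentinel

// Remove the entry at |pos| by shifting the rest up and clearing the tail.
// Assumes |infos| is non-empty and |pos| < infos.size().
void RemoveAt(std::vector<int>& infos, size_t pos) {
  for (size_t i = pos; i + 1 < infos.size(); ++i) {
    infos[i] = infos[i + 1];
    if (infos[i] == kEmpty) break;  // rest of the tail is already empty
  }
  infos.back() = kEmpty;  // the last slot is always freed by the shift
}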
void WasmModuleObject::SetBreakpointsOnNewInstance(
- Handle<WasmModuleObject> module_object,
- Handle<WasmInstanceObject> instance) {
- if (!module_object->has_breakpoint_infos()) return;
- Isolate* isolate = module_object->GetIsolate();
+ Handle<Script> script, Handle<WasmInstanceObject> instance) {
+ if (!script->has_wasm_breakpoint_infos()) return;
+ Isolate* isolate = script->GetIsolate();
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
- Handle<FixedArray> breakpoint_infos(module_object->breakpoint_infos(),
- isolate);
+ Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
// If the array exists, it should not be empty.
DCHECK_LT(0, breakpoint_infos->length());
@@ -404,9 +470,10 @@ void WasmModuleObject::SetBreakpointsOnNewInstance(
int position = breakpoint_info->source_position();
// Find the function for this breakpoint, and set the breakpoint.
- int func_index = module_object->GetContainingFunction(position);
+ const WasmModule* module = script->wasm_native_module()->module();
+ int func_index = GetContainingWasmFunction(module, position);
DCHECK_LE(0, func_index);
- const WasmFunction& func = module_object->module()->functions[func_index];
+ const WasmFunction& func = module->functions[func_index];
int offset_in_func = position - func.code.offset();
WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
}
@@ -497,7 +564,7 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object,
if (module->origin == wasm::kWasmOrigin) {
// for non-asm.js modules, we just add the function's start offset
// to make a module-relative position.
- return byte_offset + module_object->GetFunctionOffset(func_index);
+ return byte_offset + GetWasmFunctionOffset(module, func_index);
}
// asm.js modules have an additional offset table that must be searched.
@@ -529,31 +596,15 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object,
return offset_table->get_int(kOTESize * left + idx);
}
-v8::debug::WasmDisassembly WasmModuleObject::DisassembleFunction(
- int func_index) {
- DisallowHeapAllocation no_gc;
-
- if (func_index < 0 ||
- static_cast<uint32_t>(func_index) >= module()->functions.size())
- return {};
-
- wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
-
- std::ostringstream disassembly_os;
- v8::debug::WasmDisassembly::OffsetTable offset_table;
-
- PrintWasmText(module(), wire_bytes, static_cast<uint32_t>(func_index),
- disassembly_os, &offset_table);
-
- return {disassembly_os.str(), std::move(offset_table)};
-}
-
+// static
bool WasmModuleObject::GetPossibleBreakpoints(
- const v8::debug::Location& start, const v8::debug::Location& end,
+ wasm::NativeModule* native_module, const v8::debug::Location& start,
+ const v8::debug::Location& end,
std::vector<v8::debug::BreakLocation>* locations) {
DisallowHeapAllocation no_gc;
- const std::vector<WasmFunction>& functions = module()->functions;
+ const std::vector<WasmFunction>& functions =
+ native_module->module()->functions;
if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
(!end.IsEmpty() &&
(end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
@@ -595,7 +646,7 @@ bool WasmModuleObject::GetPossibleBreakpoints(
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
- const byte* module_start = native_module()->wire_bytes().begin();
+ const byte* module_start = native_module->wire_bytes().begin();
for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
++func_idx) {
@@ -620,12 +671,12 @@ bool WasmModuleObject::GetPossibleBreakpoints(
return true;
}
+// static
MaybeHandle<FixedArray> WasmModuleObject::CheckBreakPoints(
- Isolate* isolate, Handle<WasmModuleObject> module_object, int position) {
- if (!module_object->has_breakpoint_infos()) return {};
+ Isolate* isolate, Handle<Script> script, int position) {
+ if (!script->has_wasm_breakpoint_infos()) return {};
- Handle<FixedArray> breakpoint_infos(module_object->breakpoint_infos(),
- isolate);
+ Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
int insert_pos =
FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
if (insert_pos >= breakpoint_infos->length()) return {};
@@ -709,60 +760,6 @@ Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
return Vector<const uint8_t>::cast(name);
}
-int WasmModuleObject::GetFunctionOffset(uint32_t func_index) {
- const std::vector<WasmFunction>& functions = module()->functions;
- if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
- DCHECK_GE(kMaxInt, functions[func_index].code.offset());
- return static_cast<int>(functions[func_index].code.offset());
-}
-
-int WasmModuleObject::GetContainingFunction(uint32_t byte_offset) {
- const std::vector<WasmFunction>& functions = module()->functions;
-
- // Binary search for a function containing the given position.
- int left = 0; // inclusive
- int right = static_cast<int>(functions.size()); // exclusive
- if (right == 0) return false;
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- if (functions[mid].code.offset() <= byte_offset) {
- left = mid;
- } else {
- right = mid;
- }
- }
- // If the found function does not contains the given position, return -1.
- const WasmFunction& func = functions[left];
- if (byte_offset < func.code.offset() ||
- byte_offset >= func.code.end_offset()) {
- return -1;
- }
-
- return left;
-}
-
-bool WasmModuleObject::GetPositionInfo(uint32_t position,
- Script::PositionInfo* info) {
- if (script().source_mapping_url().IsString()) {
- if (module()->functions.size() == 0) return false;
- info->line = 0;
- info->column = position;
- info->line_start = module()->functions[0].code.offset();
- info->line_end = module()->functions.back().code.end_offset();
- return true;
- }
- int func_index = GetContainingFunction(position);
- if (func_index < 0) return false;
-
- const WasmFunction& function = module()->functions[func_index];
-
- info->line = func_index;
- info->column = position - function.code.offset();
- info->line_start = function.code.offset();
- info->line_end = function.code.end_offset();
- return true;
-}
-
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
wasm::ValueType type,
uint32_t initial, bool has_maximum,
@@ -1217,66 +1214,17 @@ void WasmIndirectFunctionTable::Resize(Isolate* isolate,
}
namespace {
-bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer,
- size_t new_size) {
- if (new_size > old_buffer->allocation_length()) return false;
- void* old_mem_start = old_buffer->backing_store();
- size_t old_size = old_buffer->byte_length();
- if (old_size != new_size) {
- DCHECK_NOT_NULL(old_mem_start);
- DCHECK_GE(new_size, old_size);
- // If adjusting permissions fails, propagate error back to return
- // failure to grow.
- if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start, new_size,
- PageAllocator::kReadWrite)) {
- return false;
- }
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
- }
- return true;
-}
-MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> old_buffer,
- size_t new_size) {
- CHECK_EQ(0, new_size % wasm::kWasmPageSize);
- // Reusing the backing store from externalized buffers causes problems with
- // Blink's array buffers. The connection between the two is lost, which can
- // lead to Blink not knowing about the other reference to the buffer and
- // freeing it too early.
- if (old_buffer->is_external() || new_size > old_buffer->allocation_length()) {
- // We couldn't reuse the old backing store, so create a new one and copy the
- // old contents in.
- Handle<JSArrayBuffer> new_buffer;
- if (!wasm::NewArrayBuffer(isolate, new_size).ToHandle(&new_buffer)) {
- return {};
- }
- void* old_mem_start = old_buffer->backing_store();
- size_t old_size = old_buffer->byte_length();
- if (old_size == 0) return new_buffer;
- memcpy(new_buffer->backing_store(), old_mem_start, old_size);
- DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
- constexpr bool free_memory = true;
- i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
- return new_buffer;
- } else {
- if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) return {};
- // NOTE: We must allocate a new array buffer here because the spec
- // assumes that ArrayBuffers do not change size.
- void* backing_store = old_buffer->backing_store();
- bool is_external = old_buffer->is_external();
- // Disconnect buffer early so GC won't free it.
- i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
- Handle<JSArrayBuffer> new_buffer =
- wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
- return new_buffer;
- }
-}
-
-// May GC, because SetSpecializationMemInfoFrom may GC
void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
+ bool is_wasm_module = instance->module()->origin == wasm::kWasmOrigin;
+ bool use_trap_handler =
+ instance->module_object().native_module()->use_trap_handler();
+ // Wasm modules compiled to use the trap handler don't have bounds checks,
+ // so they must have a memory that has guard regions.
+ CHECK_IMPLIES(is_wasm_module && use_trap_handler,
+ buffer->GetBackingStore()->has_guard_regions());
+
instance->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()),
buffer->byte_length());
#if DEBUG
@@ -1294,7 +1242,6 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
}
#endif
}
-
} // namespace
Handle<WasmMemoryObject> WasmMemoryObject::New(
@@ -1302,44 +1249,54 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
uint32_t maximum) {
Handle<JSArrayBuffer> buffer;
if (!maybe_buffer.ToHandle(&buffer)) {
- // If no buffer was provided, create a 0-length one.
- buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false);
+ // If no buffer was provided, create a zero-length one.
+ auto backing_store =
+ BackingStore::AllocateWasmMemory(isolate, 0, 0, SharedFlag::kNotShared);
+ buffer = isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
}
- // TODO(kschimpf): Do we need to add an argument that defines the
- // style of memory the user prefers (with/without trap handling), so
- // that the memory will match the style of the compiled wasm module.
- // See issue v8:7143
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor(), isolate);
- auto memory_obj = Handle<WasmMemoryObject>::cast(
+ auto memory_object = Handle<WasmMemoryObject>::cast(
isolate->factory()->NewJSObject(memory_ctor, AllocationType::kOld));
- memory_obj->set_array_buffer(*buffer);
- memory_obj->set_maximum_pages(maximum);
+ memory_object->set_array_buffer(*buffer);
+ memory_object->set_maximum_pages(maximum);
- return memory_obj;
+ if (buffer->is_shared()) {
+ auto backing_store = buffer->GetBackingStore();
+ backing_store->AttachSharedWasmMemoryObject(isolate, memory_object);
+ }
+
+ return memory_object;
}
MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
uint32_t initial,
uint32_t maximum,
- bool is_shared_memory) {
- Handle<JSArrayBuffer> buffer;
- size_t size = static_cast<size_t>(i::wasm::kWasmPageSize) *
- static_cast<size_t>(initial);
- if (is_shared_memory) {
- size_t max_size = static_cast<size_t>(i::wasm::kWasmPageSize) *
- static_cast<size_t>(maximum);
- if (!i::wasm::NewSharedArrayBuffer(isolate, size, max_size)
- .ToHandle(&buffer)) {
- return {};
- }
- } else {
- if (!i::wasm::NewArrayBuffer(isolate, size).ToHandle(&buffer)) {
- return {};
- }
+ SharedFlag shared) {
+ auto heuristic_maximum = maximum;
+#ifdef V8_TARGET_ARCH_32_BIT
+ // TODO(wasm): use a better heuristic for reserving more than the initial
+ // number of pages on 32-bit systems. Being too greedy in reserving capacity
+ // limits the number of memories that can be allocated, causing OOMs in many
+ // tests. For now, on 32-bit we never reserve more than initial, unless the
+ // memory is shared.
+ if (shared == SharedFlag::kNotShared || !FLAG_wasm_grow_shared_memory) {
+ heuristic_maximum = initial;
}
+#endif
+
+ auto backing_store = BackingStore::AllocateWasmMemory(
+ isolate, initial, heuristic_maximum, shared);
+
+ if (!backing_store) return {};
+
+ Handle<JSArrayBuffer> buffer =
+ (shared == SharedFlag::kShared)
+ ? isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store))
+ : isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
+
return New(isolate, buffer, maximum);
}
@@ -1383,11 +1340,11 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
uint32_t pages) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
- if (old_buffer->is_shared() && !FLAG_wasm_grow_shared_memory) return -1;
- auto* memory_tracker = isolate->wasm_engine()->memory_tracker();
- if (!memory_tracker->IsWasmMemoryGrowable(old_buffer)) return -1;
+ // Any buffer used as an asmjs memory cannot be detached, and
+ // therefore this memory cannot be grown.
+ if (old_buffer->is_asmjs_memory()) return -1;
- // Checks for maximum memory size, compute new size.
+ // Checks for maximum memory size.
uint32_t maximum_pages = wasm::max_mem_pages();
if (memory_object->has_maximum_pages()) {
maximum_pages = std::min(
@@ -1402,47 +1359,49 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
(pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit
return -1;
}
- size_t new_size =
- static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
+ std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
+ if (!backing_store) return -1;
+
+ // Compute new size.
+ size_t new_pages = old_pages + pages;
+ size_t new_byte_length = new_pages * wasm::kWasmPageSize;
- // Memory is grown, but the memory objects and instances are not yet updated.
- // Handle this in the interrupt handler so that it's safe for all the isolates
- // that share this buffer to be updated safely.
- Handle<JSArrayBuffer> new_buffer;
+ // Try to handle shared memory first.
if (old_buffer->is_shared()) {
- // Adjust protections for the buffer.
- if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) {
- return -1;
- }
- void* backing_store = old_buffer->backing_store();
- if (memory_tracker->IsWasmSharedMemory(backing_store)) {
- // This memory is shared between different isolates.
- DCHECK(old_buffer->is_shared());
- // Update pending grow state, and trigger a grow interrupt on all the
- // isolates that share this buffer.
- memory_tracker->SetPendingUpdateOnGrow(old_buffer, new_size);
- // Handle interrupts for this isolate so that the instances with this
- // isolate are updated.
- isolate->stack_guard()->HandleInterrupts();
- // Failure to allocate, or adjust pemissions already handled here, and
- // updates to instances handled in the interrupt handler safe to return.
- return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
+ if (FLAG_wasm_grow_shared_memory) {
+ // Shared memories can only be grown in place; no copying.
+ if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) {
+ BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store,
+ new_pages);
+ // Broadcasting the update should update this memory object too.
+ CHECK_NE(*old_buffer, memory_object->array_buffer());
+ CHECK_EQ(new_byte_length, memory_object->array_buffer().byte_length());
+ return static_cast<int32_t>(old_pages); // success
+ }
}
- // SharedArrayBuffer, but not shared across isolates. Setup a new buffer
- // with updated permissions and update the instances.
- new_buffer =
- wasm::SetupArrayBuffer(isolate, backing_store, new_size,
- old_buffer->is_external(), SharedFlag::kShared);
+ return -1;
+ }
+
+ // Try to grow non-shared memory in-place.
+ if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) {
+ // Detach old and create a new one with the grown backing store.
+ old_buffer->Detach(true);
+ Handle<JSArrayBuffer> new_buffer =
+ isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
memory_object->update_instances(isolate, new_buffer);
- } else {
- if (!MemoryGrowBuffer(isolate, old_buffer, new_size)
- .ToHandle(&new_buffer)) {
- return -1;
- }
+ return static_cast<int32_t>(old_pages); // success
}
- // Update instances if any.
+ // Try allocating a new backing store and copying.
+ std::unique_ptr<BackingStore> new_backing_store =
+ backing_store->CopyWasmMemory(isolate, new_pages);
+ if (!new_backing_store) return -1;
+
+ // Detach old and create a new one with the new backing store.
+ old_buffer->Detach(true);
+ Handle<JSArrayBuffer> new_buffer =
+ isolate->factory()->NewJSArrayBuffer(std::move(new_backing_store));
memory_object->update_instances(isolate, new_buffer);
- return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
+ return static_cast<int32_t>(old_pages); // success
}
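A condensed model of the grow paths above, under the simplifying assumption that the backing store exposes in-place growth and a copy operation: shared memories may only grow in place (the result is then broadcast to all isolates sharing the store), while non-shared memories try in place first and otherwise allocate-and-copy. The Fake* types below are illustrative, not V8's:

#include <cstdint>
#include <memory>

struct FakeBackingStore {
  uint32_t pages = 0;
  uint32_t reserved_pages = 0;  // reservation that allows in-place growth

  bool GrowInPlace(uint32_t delta) {
    if (pages + delta > reserved_pages) return false;
    pages += delta;
    return true;
  }
  std::shared_ptr<FakeBackingStore> Copy(uint32_t new_pages) {
    auto copy = std::make_shared<FakeBackingStore>();
    copy->pages = copy->reserved_pages = new_pages;
    return copy;  // contents copy elided in this sketch
  }
};

struct FakeMemory {
  bool shared = false;
  bool grow_shared_allowed = true;
  std::shared_ptr<FakeBackingStore> store;
};

// Returns the old page count on success, -1 on failure (mirrors Grow()).
int32_t GrowPages(FakeMemory& mem, uint32_t delta, uint32_t max_pages) {
  uint32_t old_pages = mem.store->pages;
  if (old_pages + delta > max_pages) return -1;  // exceeds the declared maximum
  if (mem.shared) {
    // Shared memories may only grow in place; the result is then broadcast
    // to every isolate sharing the store (broadcast omitted here).
    if (!mem.grow_shared_allowed) return -1;
    return mem.store->GrowInPlace(delta) ? static_cast<int32_t>(old_pages) : -1;
  }
  if (mem.store->GrowInPlace(delta)) {
    // Real code detaches the old JSArrayBuffer and re-wraps the grown store.
    return static_cast<int32_t>(old_pages);
  }
  // Fall back to allocate-and-copy, then swap in the new store.
  std::shared_ptr<FakeBackingStore> copy = mem.store->Copy(old_pages + delta);
  if (!copy) return -1;
  mem.store = std::move(copy);
  return static_cast<int32_t>(old_pages);
}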
// static
@@ -1476,18 +1435,15 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_tagged_buffer(*tagged_buffer);
} else {
DCHECK(maybe_tagged_buffer.is_null());
- Handle<JSArrayBuffer> untagged_buffer;
uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
+
+ Handle<JSArrayBuffer> untagged_buffer;
if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) {
- // If no buffer was provided, create one long enough for the given type.
- untagged_buffer = isolate->factory()->NewJSArrayBuffer(
- SharedFlag::kNotShared, AllocationType::kOld);
-
- const bool initialize = true;
- if (!JSArrayBuffer::SetupAllocatingData(untagged_buffer, isolate,
- type_size, initialize)) {
- return {};
- }
+ MaybeHandle<JSArrayBuffer> result =
+ isolate->factory()->NewJSArrayBufferAndBackingStore(
+ offset + type_size, InitializedFlag::kZeroInitialized);
+
+ if (!result.ToHandle(&untagged_buffer)) return {};
}
// Check that the offset is in bounds.
@@ -1725,13 +1681,16 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_jump_table_start(
module_object->native_module()->jump_table_start());
- // Insert the new instance into the modules weak list of instances.
+ // Insert the new instance into the script's weak list of instances. This list
+ // is used for breakpoints affecting all instances belonging to the script.
// TODO(mstarzinger): Allow to reuse holes in the {WeakArrayList} below.
- Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(),
- isolate);
- weak_instance_list = WeakArrayList::AddToEnd(
- isolate, weak_instance_list, MaybeObjectHandle::Weak(instance));
- module_object->set_weak_instance_list(*weak_instance_list);
+ if (module_object->script().type() == Script::TYPE_WASM) {
+ Handle<WeakArrayList> weak_instance_list(
+ module_object->script().wasm_weak_instance_list(), isolate);
+ weak_instance_list = WeakArrayList::AddToEnd(
+ isolate, weak_instance_list, MaybeObjectHandle::Weak(instance));
+ module_object->script().set_wasm_weak_instance_list(*weak_instance_list);
+ }
InitDataSegmentArrays(instance, module_object);
InitElemSegmentArrays(instance, module_object);
@@ -2040,7 +1999,7 @@ bool WasmCapiFunction::IsSignatureEqual(const wasm::FunctionSig* sig) const {
}
// static
-Handle<JSReceiver> WasmExceptionPackage::New(
+Handle<WasmExceptionPackage> WasmExceptionPackage::New(
Isolate* isolate, Handle<WasmExceptionTag> exception_tag, int size) {
Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
MessageTemplate::kWasmExceptionError);
@@ -2055,37 +2014,31 @@ Handle<JSReceiver> WasmExceptionPackage::New(
values, StoreOrigin::kMaybeKeyed,
Just(ShouldThrow::kThrowOnError))
.is_null());
- return Handle<JSReceiver>::cast(exception);
+ return Handle<WasmExceptionPackage>::cast(exception);
}
// static
Handle<Object> WasmExceptionPackage::GetExceptionTag(
- Isolate* isolate, Handle<Object> exception_object) {
- if (exception_object->IsJSReceiver()) {
- Handle<JSReceiver> exception = Handle<JSReceiver>::cast(exception_object);
- Handle<Object> tag;
- if (JSReceiver::GetProperty(isolate, exception,
- isolate->factory()->wasm_exception_tag_symbol())
- .ToHandle(&tag)) {
- return tag;
- }
+ Isolate* isolate, Handle<WasmExceptionPackage> exception_package) {
+ Handle<Object> tag;
+ if (JSReceiver::GetProperty(isolate, exception_package,
+ isolate->factory()->wasm_exception_tag_symbol())
+ .ToHandle(&tag)) {
+ return tag;
}
return ReadOnlyRoots(isolate).undefined_value_handle();
}
// static
Handle<Object> WasmExceptionPackage::GetExceptionValues(
- Isolate* isolate, Handle<Object> exception_object) {
- if (exception_object->IsJSReceiver()) {
- Handle<JSReceiver> exception = Handle<JSReceiver>::cast(exception_object);
- Handle<Object> values;
- if (JSReceiver::GetProperty(
- isolate, exception,
- isolate->factory()->wasm_exception_values_symbol())
- .ToHandle(&values)) {
- DCHECK(values->IsFixedArray());
- return values;
- }
+ Isolate* isolate, Handle<WasmExceptionPackage> exception_package) {
+ Handle<Object> values;
+ if (JSReceiver::GetProperty(
+ isolate, exception_package,
+ isolate->factory()->wasm_exception_values_symbol())
+ .ToHandle(&values)) {
+ DCHECK(values->IsFixedArray());
+ return values;
}
return ReadOnlyRoots(isolate).undefined_value_handle();
}
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index c198a9bc63..23c13c4329 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -5,13 +5,13 @@
#ifndef V8_WASM_WASM_OBJECTS_H_
#define V8_WASM_WASM_OBJECTS_H_
+#include <memory>
+
#include "src/base/bits.h"
#include "src/codegen/signature.h"
#include "src/debug/debug.h"
-#include "src/debug/interface-types.h"
#include "src/heap/heap.h"
#include "src/objects/objects.h"
-#include "src/objects/script.h"
#include "src/wasm/value-type.h"
// Has to be the last include (doesn't have include guards)
@@ -47,6 +47,8 @@ class WasmJSFunction;
class WasmModuleObject;
class WasmIndirectFunctionTable;
+enum class SharedFlag : uint8_t;
+
template <class CppType>
class Managed;
@@ -124,14 +126,11 @@ class WasmModuleObject : public JSObject {
DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>)
DECL_ACCESSORS(export_wrappers, FixedArray)
DECL_ACCESSORS(script, Script)
- DECL_ACCESSORS(weak_instance_list, WeakArrayList)
DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
- DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
inline wasm::NativeModule* native_module() const;
inline const std::shared_ptr<wasm::NativeModule>& shared_native_module()
const;
inline const wasm::WasmModule* module() const;
- inline void reset_breakpoint_infos();
// Dispatched behavior.
DECL_PRINTER(WasmModuleObject)
@@ -153,23 +152,28 @@ class WasmModuleObject : public JSObject {
Handle<Script> script, Handle<FixedArray> export_wrappers,
size_t code_size_estimate);
+ // TODO(mstarzinger): The below breakpoint handling methods taking a {Script}
+ // instead of a {WasmModuleObject} as first argument should be moved onto a
+ // separate {WasmScript} class; the implementation should then move to
+ // wasm-debug.cc.
+
// Set a breakpoint on the given byte position inside the given module.
// This will affect all live and future instances of the module.
// The passed position might be modified to point to the next breakable
// location inside the same function.
// If it points outside a function, or behind the last breakable location,
// this function returns false and does not set any breakpoint.
- V8_EXPORT_PRIVATE static bool SetBreakPoint(Handle<WasmModuleObject>,
- int* position,
+ V8_EXPORT_PRIVATE static bool SetBreakPoint(Handle<Script>, int* position,
Handle<BreakPoint> break_point);
+ // Remove a previously set breakpoint at the given byte position inside the
+ // given module. If this breakpoint is not found, this function returns false.
+ V8_EXPORT_PRIVATE static bool ClearBreakPoint(Handle<Script>, int position,
+ Handle<BreakPoint> break_point);
+
// Check whether this module was generated from asm.js source.
inline bool is_asm_js();
- static void AddBreakpoint(Handle<WasmModuleObject>, int position,
- Handle<BreakPoint> break_point);
-
- static void SetBreakpointsOnNewInstance(Handle<WasmModuleObject>,
+ static void SetBreakpointsOnNewInstance(Handle<Script>,
Handle<WasmInstanceObject>);
// Get the module name, if set. Returns an empty handle otherwise.
@@ -195,34 +199,12 @@ class WasmModuleObject : public JSObject {
// Does not allocate, hence gc-safe.
Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
- // Return the byte offset of the function identified by the given index.
- // The offset will be relative to the start of the module bytes.
- // Returns -1 if the function index is invalid.
- int GetFunctionOffset(uint32_t func_index);
-
- // Returns the function containing the given byte offset.
- // Returns -1 if the byte offset is not contained in any function of this
- // module.
- int GetContainingFunction(uint32_t byte_offset);
-
- // Translate from byte offset in the module to function number and byte offset
- // within that function, encoded as line and column in the position info.
- // Returns true if the position is valid inside this module, false otherwise.
- bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
-
// Get the source position from a given function index and byte offset,
// for either asm.js or pure Wasm modules.
static int GetSourcePosition(Handle<WasmModuleObject>, uint32_t func_index,
uint32_t byte_offset,
bool is_at_number_conversion);
- // Compute the disassembly of a wasm function.
- // Returns the disassembly string and a list of <byte_offset, line, column>
- // entries, mapping wasm byte offsets to line and column in the disassembly.
- // The list is guaranteed to be ordered by the byte_offset.
- // Returns an empty string and empty vector if the function index is invalid.
- V8_EXPORT_PRIVATE debug::WasmDisassembly DisassembleFunction(int func_index);
-
// Extract a portion of the wire bytes as UTF-8 string.
// Returns a null handle if the respective bytes do not form a valid UTF-8
// string.
@@ -233,17 +215,24 @@ class WasmModuleObject : public JSObject {
wasm::WireBytesRef ref);
// Get a list of all possible breakpoints within a given range of this module.
- V8_EXPORT_PRIVATE bool GetPossibleBreakpoints(
- const debug::Location& start, const debug::Location& end,
- std::vector<debug::BreakLocation>* locations);
+ V8_EXPORT_PRIVATE static bool GetPossibleBreakpoints(
+ wasm::NativeModule* native_module, const debug::Location& start,
+ const debug::Location& end, std::vector<debug::BreakLocation>* locations);
// Return an empty handle if no breakpoint is hit at that location, or a
// FixedArray with all hit breakpoint objects.
- static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
- Handle<WasmModuleObject>,
+ static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*, Handle<Script>,
int position);
OBJECT_CONSTRUCTORS(WasmModuleObject, JSObject);
+
+ private:
+ // Helper functions that update the breakpoint info list.
+ static void AddBreakpointToInfo(Handle<Script>, int position,
+ Handle<BreakPoint> break_point);
+
+ static bool RemoveBreakpointFromInfo(Handle<Script>, int position,
+ Handle<BreakPoint> break_point);
};
// Representation of a WebAssembly.Table JavaScript-level object.
@@ -354,9 +343,10 @@ class WasmMemoryObject : public JSObject {
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
- V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(
- Isolate* isolate, uint32_t initial, uint32_t maximum,
- bool is_shared_memory);
+ V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(Isolate* isolate,
+ uint32_t initial,
+ uint32_t maximum,
+ SharedFlag shared);
void update_instances(Isolate* isolate, Handle<JSArrayBuffer> buffer);
@@ -645,20 +635,22 @@ class WasmExceptionObject : public JSObject {
// A Wasm exception that has been thrown out of Wasm code.
class WasmExceptionPackage : public JSReceiver {
public:
- // TODO(mstarzinger): Ideally this interface would use {WasmExceptionPackage}
- // instead of {JSReceiver} throughout. For now a type-check implies doing a
- // property lookup however, which would result in casts being handlified.
- static Handle<JSReceiver> New(Isolate* isolate,
- Handle<WasmExceptionTag> exception_tag,
- int encoded_size);
+ static Handle<WasmExceptionPackage> New(
+ Isolate* isolate, Handle<WasmExceptionTag> exception_tag,
+ int encoded_size);
// The below getters return {undefined} in case the given exception package
// does not carry the requested values (i.e. is of a different type).
- static Handle<Object> GetExceptionTag(Isolate*, Handle<Object> exception);
- static Handle<Object> GetExceptionValues(Isolate*, Handle<Object> exception);
+ static Handle<Object> GetExceptionTag(
+ Isolate* isolate, Handle<WasmExceptionPackage> exception_package);
+ static Handle<Object> GetExceptionValues(
+ Isolate* isolate, Handle<WasmExceptionPackage> exception_package);
// Determines the size of the array holding all encoded exception values.
static uint32_t GetEncodedSize(const wasm::WasmException* exception);
+
+ DECL_CAST(WasmExceptionPackage)
+ OBJECT_CONSTRUCTORS(WasmExceptionPackage, JSReceiver);
};
// A Wasm function that is wrapped and exported to JavaScript.
@@ -801,7 +793,7 @@ class WasmExportedFunctionData : public Struct {
DECL_PRINTER(WasmExportedFunctionData)
DECL_VERIFIER(WasmExportedFunctionData)
-// Layout description.
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(
HeapObject::kHeaderSize,
TORQUE_GENERATED_WASM_EXPORTED_FUNCTION_DATA_FIELDS)
@@ -828,7 +820,7 @@ class WasmJSFunctionData : public Struct {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WASM_JSFUNCTION_DATA_FIELDS)
+ TORQUE_GENERATED_WASM_JS_FUNCTION_DATA_FIELDS)
OBJECT_CONSTRUCTORS(WasmJSFunctionData, Struct);
};
@@ -838,6 +830,7 @@ class WasmDebugInfo : public Struct {
NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
DECL_ACCESSORS(interpreter_handle, Object) // Foreign or undefined
+ DECL_ACCESSORS(interpreter_reference_stack, Cell)
DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
@@ -848,7 +841,7 @@ class WasmDebugInfo : public Struct {
DECL_PRINTER(WasmDebugInfo)
DECL_VERIFIER(WasmDebugInfo)
-// Layout description.
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_WASM_DEBUG_INFO_FIELDS)
@@ -867,6 +860,11 @@ class WasmDebugInfo : public Struct {
V8_EXPORT_PRIVATE static void SetBreakpoint(Handle<WasmDebugInfo>,
int func_index, int offset);
+ // Clear a previously set breakpoint in the given function at the given byte
+ // offset within that function.
+ V8_EXPORT_PRIVATE static void ClearBreakpoint(Handle<WasmDebugInfo>,
+ int func_index, int offset);
+
// Make a set of functions always execute in the interpreter without setting
// breakpoints.
V8_EXPORT_PRIVATE static void RedirectToInterpreter(Handle<WasmDebugInfo>,
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 879da1445b..3bd76ae43b 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -147,11 +147,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(Drop, "drop")
CASE_OP(Select, "select")
CASE_OP(SelectWithType, "select")
- CASE_OP(GetLocal, "local.get")
- CASE_OP(SetLocal, "local.set")
- CASE_OP(TeeLocal, "local.tee")
- CASE_OP(GetGlobal, "global.get")
- CASE_OP(SetGlobal, "global.set")
+ CASE_OP(LocalGet, "local.get")
+ CASE_OP(LocalSet, "local.set")
+ CASE_OP(LocalTee, "local.tee")
+ CASE_OP(GlobalGet, "global.get")
+ CASE_OP(GlobalSet, "global.set")
CASE_OP(TableGet, "table.get")
CASE_OP(TableSet, "table.set")
CASE_ALL_OP(Const, "const")
@@ -222,6 +222,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Splat, "splat")
CASE_SIMD_OP(Neg, "neg")
CASE_F64x2_OP(Neg, "neg")
+ CASE_F64x2_OP(Sqrt, "sqrt")
+ CASE_F32x4_OP(Sqrt, "sqrt")
CASE_I64x2_OP(Neg, "neg")
CASE_SIMD_OP(Eq, "eq")
CASE_F64x2_OP(Eq, "eq")
@@ -272,7 +274,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_F32x4_OP(ReplaceLane, "replace_lane")
CASE_I64x2_OP(ExtractLane, "extract_lane")
CASE_I64x2_OP(ReplaceLane, "replace_lane")
- CASE_SIMDI_OP(ExtractLane, "extract_lane")
+ CASE_I32x4_OP(ExtractLane, "extract_lane")
+ CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane")
+ CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI, Min, "min")
CASE_SIGN_OP(I64x2, Min, "min")
@@ -302,6 +306,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Xor, "xor")
CASE_S128_OP(Not, "not")
CASE_S128_OP(Select, "select")
+ CASE_S8x16_OP(Swizzle, "swizzle")
CASE_S8x16_OP(Shuffle, "shuffle")
CASE_S1x2_OP(AnyTrue, "any_true")
CASE_S1x2_OP(AllTrue, "all_true")
@@ -311,6 +316,10 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x8_OP(AllTrue, "all_true")
CASE_S1x16_OP(AnyTrue, "any_true")
CASE_S1x16_OP(AllTrue, "all_true")
+ CASE_F64x2_OP(Qfma, "qfma")
+ CASE_F64x2_OP(Qfms, "qfms")
+ CASE_F32x4_OP(Qfma, "qfma")
+ CASE_F32x4_OP(Qfms, "qfms")
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
@@ -489,7 +498,7 @@ constexpr const FunctionSig* kCachedSigs[] = {
// gcc 4.7 - 4.9 has a bug which causes the constexpr attribute to get lost when
// passing functions (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892). Hence
// encapsulate these constexpr functions in functors.
-// TODO(clemensh): Remove this once we require gcc >= 5.0.
+// TODO(clemensb): Remove this once we require gcc >= 5.0.
struct GetShortOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 0b19d7452c..f37f7f0520 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -48,11 +48,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(Drop, 0x1a, _) \
V(Select, 0x1b, _) \
V(SelectWithType, 0x1c, _) \
- V(GetLocal, 0x20, _) \
- V(SetLocal, 0x21, _) \
- V(TeeLocal, 0x22, _) \
- V(GetGlobal, 0x23, _) \
- V(SetGlobal, 0x24, _) \
+ V(LocalGet, 0x20, _) \
+ V(LocalSet, 0x21, _) \
+ V(LocalTee, 0x22, _) \
+ V(GlobalGet, 0x23, _) \
+ V(GlobalSet, 0x24, _) \
V(TableGet, 0x25, _) \
V(TableSet, 0x26, _) \
V(I32Const, 0x41, _) \
@@ -396,8 +396,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I64x2MaxU, 0xfd91, s_ss) \
V(F32x4Abs, 0xfd95, s_s) \
V(F32x4Neg, 0xfd96, s_s) \
- V(F32x4RecipApprox, 0xfd98, s_s) \
- V(F32x4RecipSqrtApprox, 0xfd99, s_s) \
+ V(F32x4Sqrt, 0xfd97, s_s) \
+ V(F32x4Qfma, 0xfd98, s_sss) \
+ V(F32x4Qfms, 0xfd99, s_sss) \
V(F32x4Add, 0xfd9a, s_ss) \
V(F32x4Sub, 0xfd9b, s_ss) \
V(F32x4Mul, 0xfd9c, s_ss) \
@@ -406,6 +407,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F32x4Max, 0xfd9f, s_ss) \
V(F64x2Abs, 0xfda0, s_s) \
V(F64x2Neg, 0xfda1, s_s) \
+ V(F64x2Sqrt, 0xfda2, s_s) \
+ V(F64x2Qfma, 0xfda3, s_sss) \
+ V(F64x2Qfms, 0xfda4, s_sss) \
V(F64x2Add, 0xfda5, s_ss) \
V(F64x2Sub, 0xfda6, s_ss) \
V(F64x2Mul, 0xfda7, s_ss) \
@@ -416,6 +420,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I32x4UConvertF32x4, 0xfdac, s_s) \
V(F32x4SConvertI32x4, 0xfdaf, s_s) \
V(F32x4UConvertI32x4, 0xfdb0, s_s) \
+ V(S8x16Swizzle, 0xfdc0, s_ss) \
V(I8x16SConvertI16x8, 0xfdc6, s_ss) \
V(I8x16UConvertI16x8, 0xfdc7, s_ss) \
V(I16x8SConvertI32x4, 0xfdc8, s_ss) \
@@ -430,11 +435,15 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I32x4UConvertI16x8High, 0xfdd1, s_s) \
V(I16x8AddHoriz, 0xfdbd, s_ss) \
V(I32x4AddHoriz, 0xfdbe, s_ss) \
- V(F32x4AddHoriz, 0xfdbf, s_ss)
+ V(F32x4AddHoriz, 0xfdbf, s_ss) \
+ V(F32x4RecipApprox, 0xfde0, s_s) \
+ V(F32x4RecipSqrtApprox, 0xfde1, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
- V(I8x16ExtractLane, 0xfd05, _) \
- V(I16x8ExtractLane, 0xfd09, _) \
+ V(I8x16ExtractLaneS, 0xfd05, _) \
+ V(I8x16ExtractLaneU, 0xfd06, _) \
+ V(I16x8ExtractLaneS, 0xfd09, _) \
+ V(I16x8ExtractLaneU, 0xfd0a, _) \
V(I32x4ExtractLane, 0xfd0d, _) \
V(I64x2ExtractLane, 0xfd10, _) \
V(F32x4ExtractLane, 0xfd13, _) \
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 81460b9fe2..f1fa76b98a 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -289,9 +289,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
Vector<WasmCode* const> code_table_;
bool write_called_;
- // Reverse lookup tables for embedded addresses.
- std::map<Address, uint32_t> wasm_stub_targets_lookup_;
-
DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
@@ -301,11 +298,6 @@ NativeModuleSerializer::NativeModuleSerializer(
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
- for (uint32_t i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
- Address addr = native_module_->runtime_stub_entry(
- static_cast<WasmCode::RuntimeStubId>(i));
- wasm_stub_targets_lookup_.insert(std::make_pair(addr, i));
- }
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
@@ -367,7 +359,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->WriteVector(code->source_positions());
writer->WriteVector(Vector<byte>::cast(code->protected_instructions()));
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_PPC
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390X
// On platforms that don't support misaligned word stores, copy to an aligned
// buffer if necessary so we can relocate the serialized code.
std::unique_ptr<byte[]> aligned_buffer;
@@ -400,10 +392,9 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::WASM_STUB_CALL: {
- Address orig_target = orig_iter.rinfo()->wasm_stub_call_address();
- auto stub_iter = wasm_stub_targets_lookup_.find(orig_target);
- DCHECK(stub_iter != wasm_stub_targets_lookup_.end());
- uint32_t tag = stub_iter->second;
+ Address target = orig_iter.rinfo()->wasm_stub_call_address();
+ uint32_t tag = native_module_->GetRuntimeStubId(target);
+ DCHECK_GT(WasmCode::kRuntimeStubCount, tag);
SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::EXTERNAL_REFERENCE: {
@@ -550,6 +541,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ auto jump_tables_ref =
+ native_module_->FindJumpTablesForCode(code->instruction_start());
for (RelocIterator iter(code->instructions(), code->reloc_info(),
code->constant_pool(), mask);
!iter.done(); iter.next()) {
@@ -557,15 +550,16 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
switch (mode) {
case RelocInfo::WASM_CALL: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
- Address target = native_module_->GetCallTargetForFunction(tag);
+ Address target =
+ native_module_->GetNearCallTargetForFunction(tag, jump_tables_ref);
iter.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
case RelocInfo::WASM_STUB_CALL: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
DCHECK_LT(tag, WasmCode::kRuntimeStubCount);
- Address target = native_module_->runtime_stub_entry(
- static_cast<WasmCode::RuntimeStubId>(tag));
+ Address target = native_module_->GetNearRuntimeStubEntry(
+ static_cast<WasmCode::RuntimeStubId>(tag), jump_tables_ref);
iter.rinfo()->set_wasm_stub_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
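With the per-serializer lookup table gone, stub call targets round-trip through a small integer tag: the serializer rewrites each stub call address to its RuntimeStubId (via GetRuntimeStubId), and the deserializer maps the tag back to a near stub entry for the target module's own jump tables. A minimal model of that round trip, with illustrative types and an arbitrary stub count:

#include <array>
#include <cassert>
#include <cstdint>

using Address = std::uintptr_t;
constexpr uint32_t kStubCount = 4;  // illustrative, not V8's kRuntimeStubCount

// Deserializer side: tag -> stub entry address (per-module jump table).
struct StubTable {
  std::array<Address, kStubCount> entries;
  Address EntryFor(uint32_t tag) const {
    assert(tag < kStubCount);
    return entries[tag];
  }
};

// Serializer side: stub call address -> tag. In the patched code this is
// NativeModule::GetRuntimeStubId(target); a linear scan stands in here.
uint32_t TagFor(const StubTable& table, Address target) {
  for (uint32_t tag = 0; tag < kStubCount; ++tag) {
    if (table.entries[tag] == target) return tag;
  }
  assert(false && "unknown stub target");
  return 0;
}

// What gets serialized is the tag; on deserialization it becomes an address
// valid for the deserializing module's own jump tables.
Address RoundTrip(const StubTable& writer_side, const StubTable& reader_side,
                  Address original_target) {
  uint32_t tag = TagFor(writer_side, original_target);
  return reader_side.EntryFor(tag);
}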
@@ -628,7 +622,6 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
auto shared_native_module = isolate->wasm_engine()->NewNativeModule(
isolate, enabled_features, std::move(decode_result.value()));
shared_native_module->SetWireBytes(OwnedVector<uint8_t>::Of(wire_bytes_vec));
- shared_native_module->SetRuntimeStubs(isolate);
Handle<FixedArray> export_wrappers;
CompileJsToWasmWrappers(isolate, shared_native_module->module(),
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 44abd71445..fedd37ccd3 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -154,9 +154,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
- case kExprGetLocal:
- case kExprSetLocal:
- case kExprTeeLocal: {
+ case kExprLocalGet:
+ case kExprLocalSet:
+ case kExprLocalTee: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
@@ -166,8 +166,8 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
- case kExprGetGlobal:
- case kExprSetGlobal: {
+ case kExprGlobalGet:
+ case kExprGlobalSet: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
@@ -304,8 +304,10 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
break;
}
- case kExprI8x16ExtractLane:
- case kExprI16x8ExtractLane:
+ case kExprI8x16ExtractLaneS:
+ case kExprI8x16ExtractLaneU:
+ case kExprI16x8ExtractLaneS:
+ case kExprI16x8ExtractLaneU:
case kExprI32x4ExtractLane:
case kExprI64x2ExtractLane:
case kExprF32x4ExtractLane: