author    Michaël Zasso <targos@protonmail.com>  2017-12-05 16:41:55 +0100
committer Michaël Zasso <targos@protonmail.com>  2017-12-06 12:52:07 +0100
commit    1854ba04e9a68f062beb299dd6e1479279b26363 (patch)
tree      d5b2df9b8c1deb6388f7a728fca8e1c98c779abe /deps/v8/src/wasm
parent    b52c23b75f96e1c9d2c7b3a7e5619170d0a0d8e1 (diff)
deps: update V8 to 6.3.292.46
PR-URL: https://github.com/nodejs/node/pull/16271
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--  deps/v8/src/wasm/compilation-manager.cc | 29
-rw-r--r--  deps/v8/src/wasm/compilation-manager.h | 12
-rw-r--r--  deps/v8/src/wasm/decoder.h | 27
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 707
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc | 400
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.h | 12
-rw-r--r--  deps/v8/src/wasm/local-decl-encoder.cc | 20
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 49
-rw-r--r--  deps/v8/src/wasm/memory-tracing.h | 28
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 1441
-rw-r--r--  deps/v8/src/wasm/module-compiler.h | 429
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 158
-rw-r--r--  deps/v8/src/wasm/module-decoder.h | 39
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc | 234
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.h | 135
-rw-r--r--  deps/v8/src/wasm/wasm-code-specialization.cc | 118
-rw-r--r--  deps/v8/src/wasm/wasm-code-specialization.h | 10
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 102
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 5
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.h | 3
-rw-r--r--  deps/v8/src/wasm/wasm-heap.cc | 101
-rw-r--r--  deps/v8/src/wasm/wasm-heap.h | 66
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 156
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 140
-rw-r--r--  deps/v8/src/wasm/wasm-js.h | 3
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc | 134
-rw-r--r--  deps/v8/src/wasm/wasm-memory.h | 32
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 14
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h | 3
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc | 865
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 195
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 210
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 382
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 253
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 38
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 40
-rw-r--r--  deps/v8/src/wasm/wasm-result.h | 9
-rw-r--r--  deps/v8/src/wasm/wasm-text.cc | 46
38 files changed, 3953 insertions, 2692 deletions
diff --git a/deps/v8/src/wasm/compilation-manager.cc b/deps/v8/src/wasm/compilation-manager.cc
index 01e0755e14..a19a228f1f 100644
--- a/deps/v8/src/wasm/compilation-manager.cc
+++ b/deps/v8/src/wasm/compilation-manager.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/wasm/compilation-manager.h"
+#include "src/base/template-utils.h"
#include "src/objects-inl.h"
@@ -10,19 +11,37 @@ namespace v8 {
namespace internal {
namespace wasm {
-void CompilationManager::StartAsyncCompileJob(
+AsyncCompileJob* CompilationManager::CreateAsyncCompileJob(
Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
Handle<Context> context, Handle<JSPromise> promise) {
std::shared_ptr<AsyncCompileJob> job(new AsyncCompileJob(
isolate, std::move(bytes_copy), length, context, promise));
jobs_.insert({job.get(), job});
+ return job.get();
+}
+
+void CompilationManager::StartAsyncCompileJob(
+ Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
+ Handle<Context> context, Handle<JSPromise> promise) {
+ AsyncCompileJob* job = CreateAsyncCompileJob(isolate, std::move(bytes_copy),
+ length, context, promise);
job->Start();
}
-void CompilationManager::RemoveJob(AsyncCompileJob* job) {
- size_t num_removed = jobs_.erase(job);
- USE(num_removed);
- DCHECK_EQ(1, num_removed);
+std::shared_ptr<StreamingDecoder> CompilationManager::StartStreamingCompilation(
+ Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise) {
+ AsyncCompileJob* job = CreateAsyncCompileJob(
+ isolate, std::unique_ptr<byte[]>(nullptr), 0, context, promise);
+ return job->CreateStreamingDecoder();
+}
+
+std::shared_ptr<AsyncCompileJob> CompilationManager::RemoveJob(
+ AsyncCompileJob* job) {
+ auto item = jobs_.find(job);
+ DCHECK(item != jobs_.end());
+ std::shared_ptr<AsyncCompileJob> result = std::move(item->second);
+ jobs_.erase(item);
+ return result;
}
void CompilationManager::TearDown() { jobs_.clear(); }
diff --git a/deps/v8/src/wasm/compilation-manager.h b/deps/v8/src/wasm/compilation-manager.h
index 85b6fd5ce2..e359b11c26 100644
--- a/deps/v8/src/wasm/compilation-manager.h
+++ b/deps/v8/src/wasm/compilation-manager.h
@@ -26,12 +26,20 @@ class CompilationManager {
std::unique_ptr<byte[]> bytes_copy, size_t length,
Handle<Context> context, Handle<JSPromise> promise);
- // Removes {job} from the list of active compile jobs. This will delete {job}.
- void RemoveJob(AsyncCompileJob* job);
+ std::shared_ptr<StreamingDecoder> StartStreamingCompilation(
+ Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise);
+
+ // Removes {job} from the list of active compile jobs.
+ std::shared_ptr<AsyncCompileJob> RemoveJob(AsyncCompileJob* job);
void TearDown();
private:
+ AsyncCompileJob* CreateAsyncCompileJob(Isolate* isolate,
+ std::unique_ptr<byte[]> bytes_copy,
+ size_t length, Handle<Context> context,
+ Handle<JSPromise> promise);
+
// We use an AsyncCompileJob as the key for itself so that we can delete the
// job from the map when it is finished.
std::unordered_map<AsyncCompileJob*, std::shared_ptr<AsyncCompileJob>> jobs_;
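Note on the data structure above: the map keys each AsyncCompileJob by its own raw pointer while the shared_ptr value owns it, and RemoveJob now returns that shared_ptr so the caller can keep the job alive beyond its removal from the manager. A minimal standalone C++ sketch of the same ownership pattern, with hypothetical names (Job, JobRegistry) rather than the real V8 classes:

#include <cassert>
#include <memory>
#include <unordered_map>

struct Job { /* ... */ };

class JobRegistry {
 public:
  // The registry takes ownership; the raw pointer doubles as the map key.
  Job* Add(std::unique_ptr<Job> job) {
    Job* raw = job.get();
    jobs_.insert({raw, std::shared_ptr<Job>(std::move(job))});
    return raw;
  }
  // Removing hands the shared_ptr back, so the caller can keep the job alive
  // for as long as it still needs it (e.g. to finish an async callback).
  std::shared_ptr<Job> Remove(Job* job) {
    auto it = jobs_.find(job);
    assert(it != jobs_.end());
    std::shared_ptr<Job> result = std::move(it->second);
    jobs_.erase(it);
    return result;
  }

 private:
  std::unordered_map<Job*, std::shared_ptr<Job>> jobs_;
};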
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 9be5b1aedc..87373100f5 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -33,6 +33,12 @@ namespace wasm {
#define TRACE_IF(...)
#endif
+// A {DecodeResult} only stores the failure / success status, but no data. Thus
+// we use {nullptr_t} as data value, such that the only valid data stored in
+// this type is a nullptr.
+// Storing {void} would require template specialization.
+using DecodeResult = Result<std::nullptr_t>;
+
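As the comment above says, a DecodeResult carries only success or failure. A minimal standalone sketch of how such a status-only Result<std::nullptr_t> might be produced, using a stripped-down Result type for illustration (an assumption, not V8's actual wasm-result.h definition):

#include <cstddef>
#include <string>

template <typename T>
struct Result {
  T val{};                 // unused when T is std::nullptr_t
  std::string error_msg;   // empty means success
  bool ok() const { return error_msg.empty(); }
};

using DecodeResult = Result<std::nullptr_t>;

// Checks the "\0asm" magic at the start of a wasm module.
DecodeResult ValidateMagic(const unsigned char* bytes, size_t len) {
  DecodeResult result;  // carries no payload, only success / failure
  if (len < 4 || bytes[0] != 0x00 || bytes[1] != 0x61 ||
      bytes[2] != 0x73 || bytes[3] != 0x6d) {
    result.error_msg = "invalid wasm magic";
  }
  return result;
}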
// A helper utility to decode bytes, integers, fields, varints, etc, from
// a buffer of bytes.
class Decoder {
@@ -355,6 +361,27 @@ class Decoder {
}
};
+// Reference to a string in the wire bytes.
+class WireBytesRef {
+ public:
+ WireBytesRef() : WireBytesRef(0, 0) {}
+ WireBytesRef(uint32_t offset, uint32_t length)
+ : offset_(offset), length_(length) {
+ DCHECK_IMPLIES(offset_ == 0, length_ == 0);
+ DCHECK_LE(offset_, offset_ + length_); // no uint32_t overflow.
+ }
+
+ uint32_t offset() const { return offset_; }
+ uint32_t length() const { return length_; }
+ uint32_t end_offset() const { return offset_ + length_; }
+ bool is_empty() const { return length_ == 0; }
+ bool is_set() const { return offset_ != 0; }
+
+ private:
+ uint32_t offset_;
+ uint32_t length_;
+};
+
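A WireBytesRef identifies a string in the module's wire bytes by an (offset, length) pair instead of a raw pointer, so it stays valid wherever the byte buffer ends up living. A short sketch of resolving such a reference against a buffer; the helper name GetNameString and the simplified struct are hypothetical, not V8 API:

#include <cstdint>
#include <string>
#include <vector>

struct WireBytesRefSketch {
  uint32_t offset = 0;
  uint32_t length = 0;
  bool is_set() const { return offset != 0; }
};

std::string GetNameString(const std::vector<uint8_t>& wire_bytes,
                          WireBytesRefSketch ref) {
  if (!ref.is_set()) return "<unknown>";
  // The decoder guarantees offset + length does not overflow and stays
  // within the wire bytes (see the DCHECKs in WireBytesRef above).
  return std::string(
      reinterpret_cast<const char*>(wire_bytes.data()) + ref.offset,
      ref.length);
}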
#undef TRACE
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 1a7278c78e..de17401752 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -31,36 +31,16 @@ struct WasmException;
#define TRACE(...)
#endif
-// Return the evaluation of `condition` if validate==true, DCHECK
-// and always return true otherwise.
+// Return the evaluation of `condition` if validate==true, DCHECK that it's
+// true and always return true otherwise.
#define VALIDATE(condition) \
(validate ? (condition) : [&] { \
DCHECK(condition); \
return true; \
}())
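In other words, when the decoder is instantiated with validate == false the condition is only asserted in debug builds and the expression constant-folds to true, so the error paths disappear from the generated code. A minimal sketch of the same pattern without macros (assert stands in for DCHECK here):

#include <cassert>

template <bool validate>
bool CheckCondition(bool condition) {
  if (validate) return condition;  // validating decoder: really evaluate it
  assert(condition);               // non-validating: assumed to hold
  return true;                     // always take the happy path
}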
-// Return the evaluation of `condition` if validate==true, DCHECK that it's
-// false and always return false otherwise.
-#define CHECK_ERROR(condition) \
- (validate ? (condition) : [&] { \
- DCHECK(!(condition)); \
- return false; \
- }())
-
-// Use this macro to check a condition if checked == true, and DCHECK the
-// condition otherwise.
-// TODO(clemensh): Rename all "checked" to "validate" and replace
-// "CHECKED_COND" with "CHECK_ERROR".
-#define CHECKED_COND(cond) \
- (checked ? (cond) : ([&] { \
- DCHECK(cond); \
- return true; \
- })())
-
#define CHECK_PROTOTYPE_OPCODE(flag) \
- if (this->module_ != nullptr && this->module_->is_asm_js()) { \
- this->error("Opcode not supported for asmjs modules"); \
- } \
+ DCHECK(!this->module_ || !this->module_->is_asm_js()); \
if (!FLAG_experimental_wasm_##flag) { \
this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
break; \
@@ -70,75 +50,106 @@ struct WasmException;
(this->errorf(this->pc_, "%s: %s", WasmOpcodes::OpcodeName(opcode), \
(message)))
+#define ATOMIC_OP_LIST(V) \
+ V(I32AtomicLoad, Uint32) \
+ V(I32AtomicAdd, Uint32) \
+ V(I32AtomicSub, Uint32) \
+ V(I32AtomicAnd, Uint32) \
+ V(I32AtomicOr, Uint32) \
+ V(I32AtomicXor, Uint32) \
+ V(I32AtomicExchange, Uint32) \
+ V(I32AtomicLoad8U, Uint8) \
+ V(I32AtomicAdd8U, Uint8) \
+ V(I32AtomicSub8U, Uint8) \
+ V(I32AtomicAnd8U, Uint8) \
+ V(I32AtomicOr8U, Uint8) \
+ V(I32AtomicXor8U, Uint8) \
+ V(I32AtomicExchange8U, Uint8) \
+ V(I32AtomicLoad16U, Uint16) \
+ V(I32AtomicAdd16U, Uint16) \
+ V(I32AtomicSub16U, Uint16) \
+ V(I32AtomicAnd16U, Uint16) \
+ V(I32AtomicOr16U, Uint16) \
+ V(I32AtomicXor16U, Uint16) \
+ V(I32AtomicExchange16U, Uint16) \
+ V(I32AtomicCompareExchange, Uint32) \
+ V(I32AtomicCompareExchange8U, Uint8) \
+ V(I32AtomicCompareExchange16U, Uint16)
+
+#define ATOMIC_STORE_OP_LIST(V) \
+ V(I32AtomicStore, Uint32) \
+ V(I32AtomicStore8U, Uint8) \
+ V(I32AtomicStore16U, Uint16)
+
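These are X-macro lists: each entry pairs an atomic opcode with the machine type it accesses, and the lists are later instantiated with different V macros (see the CASE_ATOMIC_OP / CASE_ATOMIC_STORE_OP cases in DecodeAtomicOpcode further down) to generate switch cases. A small self-contained sketch of the pattern with made-up names:

#define MY_OP_LIST(V) \
  V(Add, Uint32)      \
  V(Sub, Uint32)      \
  V(Load8U, Uint8)

enum class MyOp {
#define DECLARE_ENUM(Name, Type) k##Name,
  MY_OP_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

const char* MyOpName(MyOp op) {
  switch (op) {
#define NAME_CASE(Name, Type) \
  case MyOp::k##Name:         \
    return #Name;
    MY_OP_LIST(NAME_CASE)
#undef NAME_CASE
  }
  return "unknown";
}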
template <typename T>
Vector<T> vec2vec(std::vector<T>& vec) {
return Vector<T>(vec.data(), vec.size());
}
// Helpers for decoding different kinds of operands which follow bytecodes.
-template <bool checked>
+template <bool validate>
struct LocalIndexOperand {
uint32_t index;
ValueType type = kWasmStmt;
unsigned length;
inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<checked>(pc + 1, &length, "local index");
+ index = decoder->read_u32v<validate>(pc + 1, &length, "local index");
}
};
-template <bool checked>
+template <bool validate>
struct ExceptionIndexOperand {
uint32_t index;
const WasmException* exception = nullptr;
unsigned length;
inline ExceptionIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<checked>(pc + 1, &length, "exception index");
+ index = decoder->read_u32v<validate>(pc + 1, &length, "exception index");
}
};
-template <bool checked>
+template <bool validate>
struct ImmI32Operand {
int32_t value;
unsigned length;
inline ImmI32Operand(Decoder* decoder, const byte* pc) {
- value = decoder->read_i32v<checked>(pc + 1, &length, "immi32");
+ value = decoder->read_i32v<validate>(pc + 1, &length, "immi32");
}
};
-template <bool checked>
+template <bool validate>
struct ImmI64Operand {
int64_t value;
unsigned length;
inline ImmI64Operand(Decoder* decoder, const byte* pc) {
- value = decoder->read_i64v<checked>(pc + 1, &length, "immi64");
+ value = decoder->read_i64v<validate>(pc + 1, &length, "immi64");
}
};
-template <bool checked>
+template <bool validate>
struct ImmF32Operand {
float value;
unsigned length = 4;
inline ImmF32Operand(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
- uint32_t tmp = decoder->read_u32<checked>(pc + 1, "immf32");
+ uint32_t tmp = decoder->read_u32<validate>(pc + 1, "immf32");
memcpy(&value, &tmp, sizeof(value));
}
};
-template <bool checked>
+template <bool validate>
struct ImmF64Operand {
double value;
unsigned length = 8;
inline ImmF64Operand(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
- uint64_t tmp = decoder->read_u64<checked>(pc + 1, "immf64");
+ uint64_t tmp = decoder->read_u64<validate>(pc + 1, "immf64");
memcpy(&value, &tmp, sizeof(value));
}
};
-template <bool checked>
+template <bool validate>
struct GlobalIndexOperand {
uint32_t index;
ValueType type = kWasmStmt;
@@ -146,35 +157,36 @@ struct GlobalIndexOperand {
unsigned length;
inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<checked>(pc + 1, &length, "global index");
+ index = decoder->read_u32v<validate>(pc + 1, &length, "global index");
}
};
-template <bool checked>
+template <bool validate>
struct BlockTypeOperand {
uint32_t arity = 0;
const byte* types = nullptr; // pointer to encoded types for the block.
unsigned length = 1;
inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
- uint8_t val = decoder->read_u8<checked>(pc + 1, "block type");
+ uint8_t val = decoder->read_u8<validate>(pc + 1, "block type");
ValueType type = kWasmStmt;
if (decode_local_type(val, &type)) {
arity = type == kWasmStmt ? 0 : 1;
types = pc + 1;
} else {
// Handle multi-value blocks.
- if (!CHECKED_COND(FLAG_experimental_wasm_mv)) {
+ if (!VALIDATE(FLAG_experimental_wasm_mv)) {
decoder->error(pc + 1, "invalid block arity > 1");
return;
}
- if (!CHECKED_COND(val == kMultivalBlock)) {
+ if (!VALIDATE(val == kMultivalBlock)) {
decoder->error(pc + 1, "invalid block type");
return;
}
// Decode and check the types vector of the block.
unsigned len = 0;
- uint32_t count = decoder->read_u32v<checked>(pc + 2, &len, "block arity");
+ uint32_t count =
+ decoder->read_u32v<validate>(pc + 2, &len, "block arity");
// {count} is encoded as {arity-2}, so that a {0} count here corresponds
// to a block with 2 values. This makes invalid/redundant encodings
// impossible.
@@ -184,9 +196,9 @@ struct BlockTypeOperand {
for (uint32_t i = 0; i < arity; i++) {
uint32_t offset = 1 + 1 + len + i;
- val = decoder->read_u8<checked>(pc + offset, "block type");
+ val = decoder->read_u8<validate>(pc + offset, "block type");
decode_local_type(val, &type);
- if (!CHECKED_COND(type != kWasmStmt)) {
+ if (!VALIDATE(type != kWasmStmt)) {
decoder->error(pc + offset, "invalid block type");
return;
}
@@ -232,16 +244,16 @@ struct BlockTypeOperand {
}
};
-template <bool checked>
+template <bool validate>
struct BreakDepthOperand {
uint32_t depth;
unsigned length;
inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
- depth = decoder->read_u32v<checked>(pc + 1, &length, "break depth");
+ depth = decoder->read_u32v<validate>(pc + 1, &length, "break depth");
}
};
-template <bool checked>
+template <bool validate>
struct CallIndirectOperand {
uint32_t table_index;
uint32_t index;
@@ -249,9 +261,9 @@ struct CallIndirectOperand {
unsigned length;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len = 0;
- index = decoder->read_u32v<checked>(pc + 1, &len, "signature index");
- table_index = decoder->read_u8<checked>(pc + 1 + len, "table index");
- if (!CHECKED_COND(table_index == 0)) {
+ index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
+ table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
+ if (!VALIDATE(table_index == 0)) {
decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
table_index);
}
@@ -259,44 +271,44 @@ struct CallIndirectOperand {
}
};
-template <bool checked>
+template <bool validate>
struct CallFunctionOperand {
uint32_t index;
FunctionSig* sig = nullptr;
unsigned length;
inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<checked>(pc + 1, &length, "function index");
+ index = decoder->read_u32v<validate>(pc + 1, &length, "function index");
}
};
-template <bool checked>
+template <bool validate>
struct MemoryIndexOperand {
uint32_t index;
unsigned length = 1;
inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->read_u8<checked>(pc + 1, "memory index");
- if (!CHECKED_COND(index == 0)) {
+ index = decoder->read_u8<validate>(pc + 1, "memory index");
+ if (!VALIDATE(index == 0)) {
decoder->errorf(pc + 1, "expected memory index 0, found %u", index);
}
}
};
-template <bool checked>
+template <bool validate>
struct BranchTableOperand {
uint32_t table_count;
const byte* start;
const byte* table;
inline BranchTableOperand(Decoder* decoder, const byte* pc) {
- DCHECK_EQ(kExprBrTable, decoder->read_u8<checked>(pc, "opcode"));
+ DCHECK_EQ(kExprBrTable, decoder->read_u8<validate>(pc, "opcode"));
start = pc + 1;
unsigned len = 0;
- table_count = decoder->read_u32v<checked>(pc + 1, &len, "table count");
+ table_count = decoder->read_u32v<validate>(pc + 1, &len, "table count");
table = pc + 1 + len;
}
};
// A helper to iterate over a branch table.
-template <bool checked>
+template <bool validate>
class BranchTableIterator {
public:
unsigned cur_index() { return index_; }
@@ -306,7 +318,7 @@ class BranchTableIterator {
index_++;
unsigned length;
uint32_t result =
- decoder_->read_u32v<checked>(pc_, &length, "branch table entry");
+ decoder_->read_u32v<validate>(pc_, &length, "branch table entry");
pc_ += length;
return result;
}
@@ -319,7 +331,7 @@ class BranchTableIterator {
const byte* pc() { return pc_; }
BranchTableIterator(Decoder* decoder,
- const BranchTableOperand<checked>& operand)
+ const BranchTableOperand<validate>& operand)
: decoder_(decoder),
start_(operand.start),
pc_(operand.table),
@@ -334,7 +346,7 @@ class BranchTableIterator {
uint32_t table_count_; // the count of entries, not including default.
};
-template <bool checked>
+template <bool validate>
struct MemoryAccessOperand {
uint32_t alignment;
uint32_t offset;
@@ -343,80 +355,74 @@ struct MemoryAccessOperand {
uint32_t max_alignment) {
unsigned alignment_length;
alignment =
- decoder->read_u32v<checked>(pc + 1, &alignment_length, "alignment");
- if (!CHECKED_COND(alignment <= max_alignment)) {
+ decoder->read_u32v<validate>(pc + 1, &alignment_length, "alignment");
+ if (!VALIDATE(alignment <= max_alignment)) {
decoder->errorf(pc + 1,
"invalid alignment; expected maximum alignment is %u, "
"actual alignment is %u",
max_alignment, alignment);
}
unsigned offset_length;
- offset = decoder->read_u32v<checked>(pc + 1 + alignment_length,
- &offset_length, "offset");
+ offset = decoder->read_u32v<validate>(pc + 1 + alignment_length,
+ &offset_length, "offset");
length = alignment_length + offset_length;
}
};
// Operand for SIMD lane operations.
-template <bool checked>
+template <bool validate>
struct SimdLaneOperand {
uint8_t lane;
unsigned length = 1;
inline SimdLaneOperand(Decoder* decoder, const byte* pc) {
- lane = decoder->read_u8<checked>(pc + 2, "lane");
+ lane = decoder->read_u8<validate>(pc + 2, "lane");
}
};
// Operand for SIMD shift operations.
-template <bool checked>
+template <bool validate>
struct SimdShiftOperand {
uint8_t shift;
unsigned length = 1;
inline SimdShiftOperand(Decoder* decoder, const byte* pc) {
- shift = decoder->read_u8<checked>(pc + 2, "shift");
+ shift = decoder->read_u8<validate>(pc + 2, "shift");
}
};
// Operand for SIMD S8x16 shuffle operations.
-template <bool checked>
+template <bool validate>
struct Simd8x16ShuffleOperand {
uint8_t shuffle[kSimd128Size];
inline Simd8x16ShuffleOperand(Decoder* decoder, const byte* pc) {
for (uint32_t i = 0; i < kSimd128Size; ++i) {
- shuffle[i] = decoder->read_u8<checked>(pc + 2 + i, "shuffle");
+ shuffle[i] = decoder->read_u8<validate>(pc + 2 + i, "shuffle");
}
}
};
// An entry on the value stack.
-template <typename Interface>
-struct AbstractValue {
+struct ValueBase {
const byte* pc;
ValueType type;
- typename Interface::IValue interface_data;
// Named constructors.
- static AbstractValue Unreachable(const byte* pc) {
- return {pc, kWasmVar, Interface::IValue::Unreachable()};
- }
+ static ValueBase Unreachable(const byte* pc) { return {pc, kWasmVar}; }
- static AbstractValue New(const byte* pc, ValueType type) {
- return {pc, type, Interface::IValue::New()};
- }
+ static ValueBase New(const byte* pc, ValueType type) { return {pc, type}; }
};
-template <typename Interface>
-struct AbstractMerge {
+template <typename Value>
+struct Merge {
uint32_t arity;
union {
- AbstractValue<Interface>* array;
- AbstractValue<Interface> first;
+ Value* array;
+ Value first;
} vals; // Either multiple values or a single value.
- AbstractValue<Interface>& operator[](size_t i) {
+ Value& operator[](uint32_t i) {
DCHECK_GT(arity, i);
return arity == 1 ? vals.first : vals.array[i];
}
@@ -432,16 +438,15 @@ enum ControlKind {
};
// An entry on the control stack (i.e. if, block, loop, or try).
-template <typename Interface>
-struct AbstractControl {
+template <typename Value>
+struct ControlBase {
const byte* pc;
ControlKind kind;
- size_t stack_depth; // stack height at the beginning of the construct.
- typename Interface::IControl interface_data;
- bool unreachable; // The current block has been ended.
+ uint32_t stack_depth; // stack height at the beginning of the construct.
+ bool unreachable; // The current block has been ended.
// Values merged into the end of this control construct.
- AbstractMerge<Interface> merge;
+ Merge<Value> merge;
inline bool is_if() const { return is_onearmed_if() || is_if_else(); }
inline bool is_onearmed_if() const { return kind == kControlIf; }
@@ -453,26 +458,59 @@ struct AbstractControl {
inline bool is_try_catch() const { return kind == kControlTryCatch; }
// Named constructors.
- static AbstractControl Block(const byte* pc, size_t stack_depth) {
- return {pc, kControlBlock, stack_depth, Interface::IControl::Block(), false,
- {}};
+ static ControlBase Block(const byte* pc, size_t stack_depth) {
+ return {pc, kControlBlock, static_cast<uint32_t>(stack_depth), false, {}};
}
- static AbstractControl If(const byte* pc, size_t stack_depth) {
- return {pc, kControlIf, stack_depth, Interface::IControl::If(), false, {}};
+ static ControlBase If(const byte* pc, size_t stack_depth) {
+ return {pc, kControlIf, static_cast<uint32_t>(stack_depth), false, {}};
}
- static AbstractControl Loop(const byte* pc, size_t stack_depth) {
- return {pc, kControlLoop, stack_depth, Interface::IControl::Loop(), false,
- {}};
+ static ControlBase Loop(const byte* pc, size_t stack_depth) {
+ return {pc, kControlLoop, static_cast<uint32_t>(stack_depth), false, {}};
}
- static AbstractControl Try(const byte* pc, size_t stack_depth) {
- return {pc, kControlTry, stack_depth, Interface::IControl::Try(),
- false, {}};
+ static ControlBase Try(const byte* pc, size_t stack_depth) {
+ return {pc, kControlTry, static_cast<uint32_t>(stack_depth), false, {}};
}
};
+#define CONCRETE_NAMED_CONSTRUCTOR(concrete_type, abstract_type, name) \
+ template <typename... Args> \
+ static concrete_type name(Args&&... args) { \
+ concrete_type val; \
+ static_cast<abstract_type&>(val) = \
+ abstract_type::name(std::forward<Args>(args)...); \
+ return val; \
+ }
+
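The macro above generates a forwarding named constructor: it default-constructs the derived struct, then assigns only its base-class part from the base's named constructor, leaving the interface-specific fields default-initialized. Roughly what one expansion could look like, sketched with a simplified Base instead of the real ValueBase:

#include <utility>

struct Base {
  const unsigned char* pc = nullptr;
  static Base New(const unsigned char* p) {
    Base b;
    b.pc = p;
    return b;
  }
};

struct ValueSketch : Base {
  void* node = nullptr;  // hypothetical interface-specific field

  // Approximate expansion of CONCRETE_NAMED_CONSTRUCTOR(ValueSketch, Base, New):
  template <typename... Args>
  static ValueSketch New(Args&&... args) {
    ValueSketch val;
    static_cast<Base&>(val) = Base::New(std::forward<Args>(args)...);
    return val;
  }
};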
+// Provide the default named constructors, which default-initialize the
+// ConcreteType and then initialize the fields of ValueBase correctly.
+// Use like this:
+// struct Value : public ValueWithNamedConstructors<Value> { int new_field; };
+template <typename ConcreteType>
+struct ValueWithNamedConstructors : public ValueBase {
+ // Named constructors.
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ValueBase, Unreachable)
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ValueBase, New)
+};
+
+// Provide the default named constructors, which default-initialize the
+// ConcreteType and then initialize the fields of ControlBase correctly.
+// Use like this:
+// struct Control : public ControlWithNamedConstructors<Control, Value> {
+// int my_uninitialized_field;
+// char* other_field = nullptr;
+// };
+template <typename ConcreteType, typename Value>
+struct ControlWithNamedConstructors : public ControlBase<Value> {
+ // Named constructors.
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Block)
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, If)
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Loop)
+ CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Try)
+};
+
// This is the list of callback functions that an interface for the
// WasmFullDecoder should implement.
// F(Name, args...)
@@ -508,8 +546,8 @@ struct AbstractControl {
F(Unreachable) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
Value* result) \
- F(BreakTo, Control* block) \
- F(BrIf, const Value& cond, Control* block) \
+ F(BreakTo, uint32_t depth) \
+ F(BrIf, const Value& cond, uint32_t depth) \
F(BrTable, const BranchTableOperand<validate>& operand, const Value& key) \
F(Else, Control* if_block) \
F(LoadMem, ValueType type, MachineType mem_type, \
@@ -532,9 +570,12 @@ struct AbstractControl {
const Value& input, Value* result) \
F(Simd8x16ShuffleOp, const Simd8x16ShuffleOperand<validate>& operand, \
const Value& input0, const Value& input1, Value* result) \
- F(Throw, const ExceptionIndexOperand<validate>&) \
- F(Catch, const ExceptionIndexOperand<validate>& operand, Control* block) \
- F(AtomicOp, WasmOpcode opcode, Vector<Value> args, Value* result)
+ F(Throw, const ExceptionIndexOperand<validate>&, Control* block, \
+ const Vector<Value>& args) \
+ F(CatchException, const ExceptionIndexOperand<validate>& operand, \
+ Control* block, Vector<Value> caught_values) \
+ F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
+ const MemoryAccessOperand<validate>& operand, Value* result)
// Generic Wasm bytecode decoder with utilities for decoding operands,
// lengths, etc.
@@ -552,8 +593,10 @@ class WasmDecoder : public Decoder {
ZoneVector<ValueType>* local_types_;
- size_t total_locals() const {
- return local_types_ == nullptr ? 0 : local_types_->size();
+ uint32_t total_locals() const {
+ return local_types_ == nullptr
+ ? 0
+ : static_cast<uint32_t>(local_types_->size());
}
static bool DecodeLocals(Decoder* decoder, const FunctionSig* sig,
@@ -595,8 +638,11 @@ class WasmDecoder : public Decoder {
type = kWasmF64;
break;
case kLocalS128:
- type = kWasmS128;
- break;
+ if (FLAG_experimental_wasm_simd) {
+ type = kWasmS128;
+ break;
+ }
+ // else fall through to default.
default:
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
@@ -608,10 +654,12 @@ class WasmDecoder : public Decoder {
}
static BitVector* AnalyzeLoopAssignment(Decoder* decoder, const byte* pc,
- int locals_count, Zone* zone) {
+ uint32_t locals_count, Zone* zone) {
if (pc >= decoder->end()) return nullptr;
if (*pc != kExprLoop) return nullptr;
+ // The locals_count is augmented by 2 so that 'locals_count - 2'
+ // can be used to track mem_size, and 'locals_count - 1' to track mem_start.
BitVector* assigned = new (zone) BitVector(locals_count, zone);
int depth = 0;
// Iteratively process all AST nodes nested inside the loop.
@@ -637,6 +685,14 @@ class WasmDecoder : public Decoder {
length = 1 + operand.length;
break;
}
+ case kExprGrowMemory:
+ case kExprCallFunction:
+ case kExprCallIndirect:
+ // Add mem_size and mem_start to the assigned set.
+ assigned->Add(locals_count - 2); // mem_size
+ assigned->Add(locals_count - 1); // mem_start
+ length = OpcodeLength(decoder, pc);
+ break;
case kExprEnd:
depth--;
break;
@@ -651,46 +707,43 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, LocalIndexOperand<validate>& operand) {
- if (VALIDATE(operand.index < total_locals())) {
- if (local_types_) {
- operand.type = local_types_->at(operand.index);
- } else {
- operand.type = kWasmStmt;
- }
- return true;
+ if (!VALIDATE(operand.index < total_locals())) {
+ errorf(pc + 1, "invalid local index: %u", operand.index);
+ return false;
}
- errorf(pc + 1, "invalid local index: %u", operand.index);
- return false;
+ operand.type = local_types_ ? local_types_->at(operand.index) : kWasmStmt;
+ return true;
}
inline bool Validate(const byte* pc,
ExceptionIndexOperand<validate>& operand) {
- if (module_ != nullptr && operand.index < module_->exceptions.size()) {
- operand.exception = &module_->exceptions[operand.index];
- return true;
+ if (!VALIDATE(module_ != nullptr &&
+ operand.index < module_->exceptions.size())) {
+ errorf(pc + 1, "Invalid exception index: %u", operand.index);
+ return false;
}
- errorf(pc + 1, "Invalid exception index: %u", operand.index);
- return false;
+ operand.exception = &module_->exceptions[operand.index];
+ return true;
}
inline bool Validate(const byte* pc, GlobalIndexOperand<validate>& operand) {
- if (VALIDATE(module_ != nullptr &&
- operand.index < module_->globals.size())) {
- operand.global = &module_->globals[operand.index];
- operand.type = operand.global->type;
- return true;
+ if (!VALIDATE(module_ != nullptr &&
+ operand.index < module_->globals.size())) {
+ errorf(pc + 1, "invalid global index: %u", operand.index);
+ return false;
}
- errorf(pc + 1, "invalid global index: %u", operand.index);
- return false;
+ operand.global = &module_->globals[operand.index];
+ operand.type = operand.global->type;
+ return true;
}
inline bool Complete(const byte* pc, CallFunctionOperand<validate>& operand) {
- if (VALIDATE(module_ != nullptr &&
- operand.index < module_->functions.size())) {
- operand.sig = module_->functions[operand.index].sig;
- return true;
+ if (!VALIDATE(module_ != nullptr &&
+ operand.index < module_->functions.size())) {
+ return false;
}
- return false;
+ operand.sig = module_->functions[operand.index].sig;
+ return true;
}
inline bool Validate(const byte* pc, CallFunctionOperand<validate>& operand) {
@@ -702,38 +755,38 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, CallIndirectOperand<validate>& operand) {
- if (VALIDATE(module_ != nullptr &&
- operand.index < module_->signatures.size())) {
- operand.sig = module_->signatures[operand.index];
- return true;
+ if (!VALIDATE(module_ != nullptr &&
+ operand.index < module_->signatures.size())) {
+ return false;
}
- return false;
+ operand.sig = module_->signatures[operand.index];
+ return true;
}
inline bool Validate(const byte* pc, CallIndirectOperand<validate>& operand) {
- if (CHECK_ERROR(module_ == nullptr || module_->function_tables.empty())) {
+ if (!VALIDATE(module_ != nullptr && !module_->function_tables.empty())) {
error("function table has to exist to execute call_indirect");
return false;
}
- if (Complete(pc, operand)) {
- return true;
+ if (!Complete(pc, operand)) {
+ errorf(pc + 1, "invalid signature index: #%u", operand.index);
+ return false;
}
- errorf(pc + 1, "invalid signature index: #%u", operand.index);
- return false;
+ return true;
}
inline bool Validate(const byte* pc, BreakDepthOperand<validate>& operand,
size_t control_depth) {
- if (VALIDATE(operand.depth < control_depth)) {
- return true;
+ if (!VALIDATE(operand.depth < control_depth)) {
+ errorf(pc + 1, "invalid break depth: %u", operand.depth);
+ return false;
}
- errorf(pc + 1, "invalid break depth: %u", operand.depth);
- return false;
+ return true;
}
bool Validate(const byte* pc, BranchTableOperand<validate>& operand,
size_t block_depth) {
- if (CHECK_ERROR(operand.table_count >= kV8MaxWasmFunctionSize)) {
+ if (!VALIDATE(operand.table_count < kV8MaxWasmFunctionSize)) {
errorf(pc + 1, "invalid table count (> max function size): %u",
operand.table_count);
return false;
@@ -763,7 +816,7 @@ class WasmDecoder : public Decoder {
UNREACHABLE();
break;
}
- if (CHECK_ERROR(operand.lane < 0 || operand.lane >= num_lanes)) {
+ if (!VALIDATE(operand.lane >= 0 && operand.lane < num_lanes)) {
error(pc_ + 2, "invalid lane index");
return false;
} else {
@@ -794,7 +847,7 @@ class WasmDecoder : public Decoder {
UNREACHABLE();
break;
}
- if (CHECK_ERROR(operand.shift < 0 || operand.shift >= max_shift)) {
+ if (!VALIDATE(operand.shift >= 0 && operand.shift < max_shift)) {
error(pc_ + 2, "invalid shift amount");
return false;
} else {
@@ -808,12 +861,11 @@ class WasmDecoder : public Decoder {
for (uint32_t i = 0; i < kSimd128Size; ++i)
max_lane = std::max(max_lane, operand.shuffle[i]);
// Shuffle indices must be in [0..31] for a 16 lane shuffle.
- if (CHECK_ERROR(max_lane > 2 * kSimd128Size)) {
+ if (!VALIDATE(max_lane <= 2 * kSimd128Size)) {
error(pc_ + 2, "invalid shuffle mask");
return false;
- } else {
- return true;
}
+ return true;
}
static unsigned OpcodeLength(Decoder* decoder, const byte* pc) {
@@ -917,6 +969,23 @@ class WasmDecoder : public Decoder {
return 2;
}
}
+ case kAtomicPrefix: {
+ byte atomic_index = decoder->read_u8<validate>(pc + 1, "atomic_index");
+ WasmOpcode opcode =
+ static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
+ switch (opcode) {
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ {
+ MemoryAccessOperand<validate> operand(decoder, pc + 1, UINT32_MAX);
+ return 2 + operand.length;
+ }
+ default:
+ decoder->error(pc, "invalid Atomics opcode");
+ return 2;
+ }
+ }
default:
return 1;
}
@@ -993,9 +1062,9 @@ class WasmDecoder : public Decoder {
template <bool validate, typename Interface>
class WasmFullDecoder : public WasmDecoder<validate> {
- using Value = AbstractValue<Interface>;
- using Control = AbstractControl<Interface>;
- using MergeValues = AbstractMerge<Interface>;
+ using Value = typename Interface::Value;
+ using Control = typename Interface::Control;
+ using MergeValues = Merge<Value>;
// All Value and Control types should be trivially copyable for
// performance. We push and pop them, and store them in local variables.
@@ -1118,9 +1187,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return &stack_[stack_.size() - depth - 1];
}
- inline const Value& GetMergeValueFromStack(Control* c, size_t i) {
+ inline Value& GetMergeValueFromStack(Control* c, uint32_t i) {
DCHECK_GT(c->merge.arity, i);
- DCHECK_GE(stack_.size(), c->merge.arity);
+ DCHECK_GE(stack_.size(), c->stack_depth + c->merge.arity);
return stack_[stack_.size() - c->merge.arity + i];
}
@@ -1137,9 +1206,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool last_end_found_;
bool CheckHasMemory() {
- if (VALIDATE(this->module_->has_memory)) return true;
- this->error(this->pc_ - 1, "memory instruction with no memory");
- return false;
+ if (!VALIDATE(this->module_->has_memory)) {
+ this->error(this->pc_ - 1, "memory instruction with no memory");
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckHasSharedMemory() {
+ if (!VALIDATE(this->module_->has_shared_memory)) {
+ this->error(this->pc_ - 1, "Atomic opcodes used without shared memory");
+ return false;
+ }
+ return true;
}
// Decodes the body of a function.
@@ -1203,16 +1282,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ExceptionIndexOperand<true> operand(this, this->pc_);
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
- if (operand.exception->sig->parameter_count() > 0) {
- // TODO(kschimpf): Fix to pull values off stack and build throw.
- OPCODE_ERROR(opcode, "can't handle exceptions with values yet");
- break;
- }
- interface_.Throw(this, operand);
- // TODO(titzer): Throw should end control, but currently we build a
- // (reachable) runtime call instead of connecting it directly to
- // end.
- // EndControl();
+ std::vector<Value> args;
+ PopArgs(operand.exception->ToFunctionSig(), &args);
+ interface_.Throw(this, operand, &control_.back(), vec2vec(args));
break;
}
case kExprTry: {
@@ -1232,26 +1304,31 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_, operand)) break;
- if (CHECK_ERROR(control_.empty())) {
+ if (!VALIDATE(!control_.empty())) {
this->error("catch does not match any try");
break;
}
Control* c = &control_.back();
- if (CHECK_ERROR(!c->is_try())) {
+ if (!VALIDATE(c->is_try())) {
this->error("catch does not match any try");
break;
}
- if (CHECK_ERROR(c->is_try_catch())) {
+ if (!VALIDATE(c->is_incomplete_try())) {
OPCODE_ERROR(opcode, "multiple catch blocks not implemented");
break;
}
c->kind = kControlTryCatch;
FallThruTo(c);
stack_.resize(c->stack_depth);
-
- interface_.Catch(this, operand, c);
+ const WasmExceptionSig* sig = operand.exception->sig;
+ for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
+ Push(sig->GetParam(i));
+ }
+ Vector<Value> values(stack_.data() + c->stack_depth,
+ sig->parameter_count());
+ interface_.CatchException(this, operand, c, values);
break;
}
case kExprCatchAll: {
@@ -1280,12 +1357,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprElse: {
- if (CHECK_ERROR(control_.empty())) {
+ if (!VALIDATE(!control_.empty())) {
this->error("else does not match any if");
break;
}
Control* c = &control_.back();
- if (CHECK_ERROR(!c->is_if())) {
+ if (!VALIDATE(c->is_if())) {
this->error(this->pc_, "else does not match an if");
break;
}
@@ -1300,7 +1377,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprEnd: {
- if (CHECK_ERROR(control_.empty())) {
+ if (!VALIDATE(!control_.empty())) {
this->error("end does not match any if, try, or block");
return;
}
@@ -1308,21 +1385,20 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (c->is_loop()) {
// A loop just leaves the values on the stack.
TypeCheckFallThru(c);
- if (c->unreachable) PushEndValues(c);
PopControl(c);
break;
}
if (c->is_onearmed_if()) {
// End the true branch of a one-armed if.
- if (CHECK_ERROR(!c->unreachable &&
- stack_.size() != c->stack_depth)) {
+ if (!VALIDATE(c->unreachable ||
+ stack_.size() == c->stack_depth)) {
this->error("end of if expected empty stack");
stack_.resize(c->stack_depth);
}
- if (CHECK_ERROR(c->merge.arity > 0)) {
+ if (!VALIDATE(c->merge.arity == 0)) {
this->error("non-void one-armed if");
}
- } else if (CHECK_ERROR(c->is_incomplete_try())) {
+ } else if (!VALIDATE(!c->is_incomplete_try())) {
this->error(this->pc_, "missing catch in try");
break;
}
@@ -1331,21 +1407,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
- if (CHECK_ERROR(this->pc_ + 1 != this->end_)) {
+ if (!VALIDATE(this->pc_ + 1 == this->end_)) {
this->error(this->pc_ + 1, "trailing code after function end");
break;
}
last_end_found_ = true;
- if (c->unreachable) {
- TypeCheckFallThru(c);
- } else {
- // The result of the block is the return value.
- TRACE(" @%-8d #xx:%-20s|", startrel(this->pc_),
- "(implicit) return");
- DoReturn();
- TRACE("\n");
- }
+ // The result of the block is the return value.
+ TRACE(" @%-8d #xx:%-20s|", startrel(this->pc_),
+ "(implicit) return");
+ DoReturn();
+ TRACE("\n");
}
+
PopControl(c);
break;
}
@@ -1359,9 +1432,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprBr: {
BreakDepthOperand<validate> operand(this, this->pc_);
- if (VALIDATE(this->Validate(this->pc_, operand, control_.size()) &&
- TypeCheckBreak(operand.depth))) {
- interface_.BreakTo(this, control_at(operand.depth));
+ if (this->Validate(this->pc_, operand, control_.size()) &&
+ TypeCheckBreak(operand.depth)) {
+ interface_.BreakTo(this, operand.depth);
}
len = 1 + operand.length;
EndControl();
@@ -1370,10 +1443,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprBrIf: {
BreakDepthOperand<validate> operand(this, this->pc_);
auto cond = Pop(0, kWasmI32);
- if (VALIDATE(this->ok() &&
- this->Validate(this->pc_, operand, control_.size()) &&
- TypeCheckBreak(operand.depth))) {
- interface_.BrIf(this, cond, control_at(operand.depth));
+ if (this->Validate(this->pc_, operand, control_.size()) &&
+ TypeCheckBreak(operand.depth)) {
+ interface_.BrIf(this, cond, operand.depth);
}
len = 1 + operand.length;
break;
@@ -1383,42 +1455,29 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchTableIterator<validate> iterator(this, operand);
if (!this->Validate(this->pc_, operand, control_.size())) break;
auto key = Pop(0, kWasmI32);
- MergeValues* merge = nullptr;
+ uint32_t br_arity = 0;
while (iterator.has_next()) {
const uint32_t i = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
- if (CHECK_ERROR(target >= control_.size())) {
+ if (!VALIDATE(target < control_.size())) {
this->error(pos, "improper branch in br_table");
break;
}
// Check that label types match up.
- static MergeValues loop_dummy = {0, {nullptr}};
Control* c = control_at(target);
- MergeValues* current = c->is_loop() ? &loop_dummy : &c->merge;
+ uint32_t arity = c->is_loop() ? 0 : c->merge.arity;
if (i == 0) {
- merge = current;
- } else if (CHECK_ERROR(merge->arity != current->arity)) {
+ br_arity = arity;
+ } else if (!VALIDATE(br_arity == arity)) {
this->errorf(pos,
"inconsistent arity in br_table target %d"
" (previous was %u, this one %u)",
- i, merge->arity, current->arity);
- } else if (control_at(0)->unreachable) {
- for (uint32_t j = 0; VALIDATE(this->ok()) && j < merge->arity;
- ++j) {
- if (CHECK_ERROR((*merge)[j].type != (*current)[j].type)) {
- this->errorf(pos,
- "type error in br_table target %d operand %d"
- " (previous expected %s, this one %s)",
- i, j, WasmOpcodes::TypeName((*merge)[j].type),
- WasmOpcodes::TypeName((*current)[j].type));
- }
- }
+ i, br_arity, arity);
}
- bool valid = TypeCheckBreak(target);
- if (CHECK_ERROR(!valid)) break;
+ if (!VALIDATE(TypeCheckBreak(target))) break;
}
- if (CHECK_ERROR(this->failed())) break;
+ if (!VALIDATE(this->ok())) break;
if (operand.table_count > 0) {
interface_.BrTable(this, operand, key);
@@ -1427,11 +1486,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchTableIterator<validate> iterator(this, operand);
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
- if (CHECK_ERROR(target >= control_.size())) {
+ if (!VALIDATE(target < control_.size())) {
this->error(pos, "improper branch in br_table");
break;
}
- interface_.BreakTo(this, control_at(target));
+ interface_.BreakTo(this, target);
}
len = 1 + iterator.length();
EndControl();
@@ -1515,7 +1574,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
GlobalIndexOperand<validate> operand(this, this->pc_);
len = 1 + operand.length;
if (!this->Validate(this->pc_, operand)) break;
- if (CHECK_ERROR(!operand.global->mutability)) {
+ if (!VALIDATE(operand.global->mutability)) {
this->errorf(this->pc_, "immutable global #%u cannot be assigned",
operand.index);
break;
@@ -1598,7 +1657,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryIndexOperand<validate> operand(this, this->pc_);
len = 1 + operand.length;
DCHECK_NOT_NULL(this->module_);
- if (CHECK_ERROR(!this->module_->is_wasm())) {
+ if (!VALIDATE(this->module_->is_wasm())) {
this->error("grow_memory is not supported for asmjs modules");
break;
}
@@ -1651,6 +1710,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kAtomicPrefix: {
CHECK_PROTOTYPE_OPCODE(threads);
+ if (!CheckHasSharedMemory()) break;
len++;
byte atomic_index =
this->template read_u8<validate>(this->pc_ + 1, "atomic index");
@@ -1715,14 +1775,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PrintF("[%d]", operand.value);
break;
}
- case kExprGetLocal: {
+ case kExprGetLocal:
+ case kExprSetLocal:
+ case kExprTeeLocal: {
LocalIndexOperand<validate> operand(this, val.pc);
PrintF("[%u]", operand.index);
break;
}
- case kExprSetLocal: // fallthru
- case kExprTeeLocal: {
- LocalIndexOperand<validate> operand(this, val.pc);
+ case kExprGetGlobal:
+ case kExprSetGlobal: {
+ GlobalIndexOperand<validate> operand(this, val.pc);
PrintF("[%u]", operand.index);
break;
}
@@ -1932,7 +1994,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
default: {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
- if (CHECK_ERROR(sig == nullptr)) {
+ if (!VALIDATE(sig != nullptr)) {
this->error("invalid simd opcode");
break;
}
@@ -1948,15 +2010,43 @@ class WasmFullDecoder : public WasmDecoder<validate> {
unsigned DecodeAtomicOpcode(WasmOpcode opcode) {
unsigned len = 0;
+ ValueType ret_type;
FunctionSig* sig = WasmOpcodes::AtomicSignature(opcode);
if (sig != nullptr) {
+ MachineType memtype;
+ switch (opcode) {
+#define CASE_ATOMIC_STORE_OP(Name, Type) \
+ case kExpr##Name: { \
+ memtype = MachineType::Type(); \
+ ret_type = MachineRepresentation::kNone; \
+ break; \
+ }
+ ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
+#undef CASE_ATOMIC_OP
+#define CASE_ATOMIC_OP(Name, Type) \
+ case kExpr##Name: { \
+ memtype = MachineType::Type(); \
+ ret_type = GetReturnType(sig); \
+ break; \
+ }
+ ATOMIC_OP_LIST(CASE_ATOMIC_OP)
+#undef CASE_ATOMIC_OP
+ default:
+ this->error("invalid atomic opcode");
+ break;
+ }
// TODO(clemensh): Better memory management here.
std::vector<Value> args(sig->parameter_count());
+ MemoryAccessOperand<validate> operand(
+ this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
+ len += operand.length;
for (int i = static_cast<int>(sig->parameter_count() - 1); i >= 0; --i) {
args[i] = Pop(i, sig->GetParam(i));
}
- auto* result = Push(GetReturnType(sig));
- interface_.AtomicOp(this, opcode, vec2vec(args), result);
+ auto result = ret_type == MachineRepresentation::kNone
+ ? nullptr
+ : Push(GetReturnType(sig));
+ interface_.AtomicOp(this, opcode, vec2vec(args), operand, result);
} else {
this->error("invalid atomic opcode");
}
@@ -1979,7 +2069,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
inline Value* Push(ValueType type) {
- DCHECK(type != kWasmStmt);
+ DCHECK_NE(kWasmStmt, type);
stack_.push_back(Value::New(this->pc_, type));
return &stack_.back();
}
@@ -2009,8 +2099,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value Pop(int index, ValueType expected) {
auto val = Pop();
- if (CHECK_ERROR(val.type != expected && val.type != kWasmVar &&
- expected != kWasmVar)) {
+ if (!VALIDATE(val.type == expected || val.type == kWasmVar ||
+ expected == kWasmVar)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
SafeOpcodeNameAt(this->pc_), index,
WasmOpcodes::TypeName(expected), SafeOpcodeNameAt(val.pc),
@@ -2021,10 +2111,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value Pop() {
DCHECK(!control_.empty());
- size_t limit = control_.back().stack_depth;
+ uint32_t limit = control_.back().stack_depth;
if (stack_.size() <= limit) {
// Popping past the current control start in reachable code.
- if (CHECK_ERROR(!control_.back().unreachable)) {
+ if (!VALIDATE(control_.back().unreachable)) {
this->errorf(this->pc_, "%s found empty stack",
SafeOpcodeNameAt(this->pc_));
}
@@ -2037,26 +2127,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
- bool TypeCheckBreak(unsigned depth) {
- DCHECK(validate); // Only call this for validation.
- Control* c = control_at(depth);
- if (c->is_loop()) {
- // This is the inner loop block, which does not have a value.
- return true;
- }
- size_t expected = control_.back().stack_depth + c->merge.arity;
- if (stack_.size() < expected && !control_.back().unreachable) {
- this->errorf(
- this->pc_,
- "expected at least %u values on the stack for br to @%d, found %d",
- c->merge.arity, startrel(c->pc),
- static_cast<int>(stack_.size() - c->stack_depth));
- return false;
- }
-
- return TypeCheckMergeValues(c);
- }
-
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!TypeCheckFallThru(c)) return;
@@ -2066,17 +2136,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TypeCheckMergeValues(Control* c) {
- // Typecheck the values left on the stack.
- size_t avail = stack_.size() - c->stack_depth;
- size_t start = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
- for (size_t i = start; i < c->merge.arity; ++i) {
+ DCHECK_GE(stack_.size(), c->stack_depth + c->merge.arity);
+ // Typecheck the topmost {c->merge.arity} values on the stack.
+ for (uint32_t i = 0; i < c->merge.arity; ++i) {
auto& val = GetMergeValueFromStack(c, i);
auto& old = c->merge[i];
- if (val.type != old.type && val.type != kWasmVar) {
- this->errorf(
- this->pc_, "type error in merge[%zu] (expected %s, got %s)", i,
- WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
- return false;
+ if (val.type != old.type) {
+ // If {val.type} is polymorphic, which results from unreachable, make
+ // it more specific by using the merge value's expected type.
+ // If it is not polymorphic, this is a type error.
+ if (!VALIDATE(val.type == kWasmVar)) {
+ this->errorf(
+ this->pc_, "type error in merge[%u] (expected %s, got %s)", i,
+ WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
+ return false;
+ }
+ val.type = old.type;
}
}
@@ -2086,19 +2161,59 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool TypeCheckFallThru(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!validate) return true;
- // Fallthru must match arity exactly.
- size_t expected = c->stack_depth + c->merge.arity;
- if (stack_.size() != expected &&
- (stack_.size() > expected || !c->unreachable)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for fallthru to @%d",
- c->merge.arity, startrel(c->pc));
+ uint32_t expected = c->merge.arity;
+ DCHECK_GE(stack_.size(), c->stack_depth);
+ uint32_t actual = static_cast<uint32_t>(stack_.size()) - c->stack_depth;
+ // Fallthrus must match the arity of the control exactly.
+ if (!InsertUnreachablesIfNecessary(expected, actual) || actual > expected) {
+ this->errorf(
+ this->pc_,
+ "expected %u elements on the stack for fallthru to @%d, found %u",
+ expected, startrel(c->pc), actual);
return false;
}
return TypeCheckMergeValues(c);
}
+ bool TypeCheckBreak(unsigned depth) {
+ Control* c = control_at(depth);
+ if (c->is_loop()) {
+ // This is the inner loop block, which does not have a value.
+ return true;
+ }
+ // Breaks must have at least the number of values expected; can have more.
+ uint32_t expected = c->merge.arity;
+ DCHECK_GE(stack_.size(), control_.back().stack_depth);
+ uint32_t actual =
+ static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
+ if (!InsertUnreachablesIfNecessary(expected, actual)) {
+ this->errorf(this->pc_,
+ "expected %u elements on the stack for br to @%d, found %u",
+ expected, startrel(c->pc), actual);
+ return false;
+ }
+ return TypeCheckMergeValues(c);
+ }
+
+ inline bool InsertUnreachablesIfNecessary(uint32_t expected,
+ uint32_t actual) {
+ if (V8_LIKELY(actual >= expected)) {
+ return true; // enough actual values are there.
+ }
+ if (!VALIDATE(control_.back().unreachable)) {
+ // There aren't enough values on the stack.
+ return false;
+ }
+ // A slow path. When the actual number of values on the stack is less
+ // than the expected number of values and the current control is
+ // unreachable, insert unreachable values below the actual values.
+ // This simplifies {TypeCheckMergeValues}.
+ auto pos = stack_.begin() + (stack_.size() - actual);
+ stack_.insert(pos, (expected - actual), Value::Unreachable(this->pc_));
+ return true;
+ }
+
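The helper above pads an unreachable stack from below so that the merge type check can always look at the topmost {expected} slots. A tiny standalone illustration of that insert position, using strings in place of decoder values:

#include <string>
#include <vector>

int main() {
  // One actual value ("x") on an unreachable stack, but three are expected.
  std::vector<std::string> stack = {"bottom", "x"};
  unsigned expected = 3, actual = 1;
  // Insert the placeholders *below* the actual values, as the helper does.
  auto pos = stack.begin() + (stack.size() - actual);
  stack.insert(pos, expected - actual, std::string("<unreachable>"));
  // stack is now: bottom, <unreachable>, <unreachable>, x
  return 0;
}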
virtual void onFirstError() {
this->end_ = this->pc_; // Terminate decoding loop.
TRACE(" !%s\n", this->error_msg_.c_str());
@@ -2127,28 +2242,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
};
-template <bool decoder_validate, typename Interface>
-class InterfaceTemplate {
+class EmptyInterface {
public:
- constexpr static bool validate = decoder_validate;
- using Decoder = WasmFullDecoder<validate, Interface>;
- using Control = AbstractControl<Interface>;
- using Value = AbstractValue<Interface>;
- using MergeValues = AbstractMerge<Interface>;
-};
-
-class EmptyInterface : public InterfaceTemplate<true, EmptyInterface> {
- public:
- struct IValue {
- static IValue Unreachable() { return {}; }
- static IValue New() { return {}; }
- };
- struct IControl {
- static IControl Block() { return {}; }
- static IControl If() { return {}; }
- static IControl Loop() { return {}; }
- static IControl Try() { return {}; }
- };
+ constexpr static bool validate = true;
+ using Value = ValueBase;
+ using Control = ControlBase<Value>;
+ using Decoder = WasmFullDecoder<validate, EmptyInterface>;
#define DEFINE_EMPTY_CALLBACK(name, ...) \
void name(Decoder* decoder, ##__VA_ARGS__) {}
@@ -2156,12 +2255,10 @@ class EmptyInterface : public InterfaceTemplate<true, EmptyInterface> {
#undef DEFINE_EMPTY_CALLBACK
};
-#undef CHECKED_COND
-#undef VALIDATE
-#undef CHECK_ERROR
#undef TRACE
+#undef VALIDATE
#undef CHECK_PROTOTYPE_OPCODE
-#undef PROTOTYPE_NOT_FUNCTIONAL
+#undef OPCODE_ERROR
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index bb3fc544bb..bcd57fe616 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -27,6 +27,11 @@ namespace wasm {
namespace {
+template <typename T>
+Vector<T> vec2vec(ZoneVector<T>& vec) {
+ return Vector<T>(vec.data(), vec.size());
+}
+
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
// It maintains a control state that tracks whether the environment
@@ -37,6 +42,8 @@ struct SsaEnv {
State state;
TFNode* control;
TFNode* effect;
+ TFNode* mem_size;
+ TFNode* mem_start;
TFNode** locals;
bool go() { return state >= kReached; }
@@ -45,6 +52,8 @@ struct SsaEnv {
locals = nullptr;
control = nullptr;
effect = nullptr;
+ mem_size = nullptr;
+ mem_start = nullptr;
}
void SetNotMerged() {
if (state == kMerged) state = kReached;
@@ -60,14 +69,12 @@ struct SsaEnv {
constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
-class WasmGraphBuildingInterface
- : public InterfaceTemplate<true, WasmGraphBuildingInterface> {
+class WasmGraphBuildingInterface {
public:
- struct IValue {
- TFNode* node;
+ using Decoder = WasmFullDecoder<true, WasmGraphBuildingInterface>;
- static IValue Unreachable() { return {nullptr}; }
- static IValue New() { return {nullptr}; }
+ struct Value : public ValueWithNamedConstructors<Value> {
+ TFNode* node;
};
struct TryInfo : public ZoneObject {
@@ -77,16 +84,11 @@ class WasmGraphBuildingInterface
explicit TryInfo(SsaEnv* c) : catch_env(c), exception(nullptr) {}
};
- struct IControl {
+ struct Control : public ControlWithNamedConstructors<Control, Value> {
SsaEnv* end_env; // end environment for the construct.
SsaEnv* false_env; // false environment (only for if).
TryInfo* try_info; // information used for compiling try statements.
int32_t previous_catch; // previous Control (on the stack) with a catch.
-
- static IControl Block() { return {}; }
- static IControl If() { return {}; }
- static IControl Loop() { return {}; }
- static IControl Try() { return {}; }
};
explicit WasmGraphBuildingInterface(TFBuilder* builder) : builder_(builder) {}
@@ -94,37 +96,61 @@ class WasmGraphBuildingInterface
void StartFunction(Decoder* decoder) {
SsaEnv* ssa_env =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- uint32_t env_count = decoder->NumLocals();
+ uint32_t num_locals = decoder->NumLocals();
+ // The '+ 2' here is to accommodate the mem_size and mem_start nodes.
+ uint32_t env_count = num_locals + 2;
size_t size = sizeof(TFNode*) * env_count;
ssa_env->state = SsaEnv::kReached;
ssa_env->locals =
size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
: nullptr;
- TFNode* start =
- builder_->Start(static_cast<int>(decoder->sig_->parameter_count() + 1));
- // Initialize local variables.
+ // The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
+ // wasm_context parameter.
+ TFNode* start = builder_->Start(
+ static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
+ // Initialize the wasm_context (the parameter at index 0).
+ builder_->set_wasm_context(
+ builder_->Param(compiler::kWasmContextParameterIndex));
+ // Initialize local variables. Parameters are shifted by 1 because of the
+ // wasm_context.
uint32_t index = 0;
for (; index < decoder->sig_->parameter_count(); ++index) {
- ssa_env->locals[index] = builder_->Param(index);
+ ssa_env->locals[index] = builder_->Param(index + 1);
}
- while (index < env_count) {
+ while (index < num_locals) {
ValueType type = decoder->GetLocalType(index);
TFNode* node = DefaultValue(type);
- while (index < env_count && decoder->GetLocalType(index) == type) {
+ while (index < num_locals && decoder->GetLocalType(index) == type) {
// Do a whole run of like-typed locals at a time.
ssa_env->locals[index++] = node;
}
}
- ssa_env->control = start;
ssa_env->effect = start;
+ ssa_env->control = start;
+ // Initialize effect and control before loading the context.
+ builder_->set_effect_ptr(&ssa_env->effect);
+ builder_->set_control_ptr(&ssa_env->control);
+ // Always load mem_size and mem_start from the WasmContext into the ssa.
+ LoadContextIntoSsa(ssa_env);
SetEnv(ssa_env);
}
+ // Reload the wasm context variables from the WasmContext structure attached
+ // to the memory object into the Ssa Environment. This does not automatically
+ // set the mem_size_ and mem_start_ pointers in WasmGraphBuilder.
+ void LoadContextIntoSsa(SsaEnv* ssa_env) {
+ if (!ssa_env || !ssa_env->go()) return;
+ DCHECK_NOT_NULL(builder_->Effect());
+ DCHECK_NOT_NULL(builder_->Control());
+ ssa_env->mem_size = builder_->LoadMemSize();
+ ssa_env->mem_start = builder_->LoadMemStart();
+ }
+
void StartFunctionBody(Decoder* decoder, Control* block) {
SsaEnv* break_env = ssa_env_;
SetEnv(Steal(decoder->zone(), break_env));
- block->interface_data.end_env = break_env;
+ block->end_env = break_env;
}
void FinishFunction(Decoder* decoder) {
@@ -133,13 +159,13 @@ class WasmGraphBuildingInterface
void Block(Decoder* decoder, Control* block) {
// The break environment is the outer environment.
- block->interface_data.end_env = ssa_env_;
+ block->end_env = ssa_env_;
SetEnv(Steal(decoder->zone(), ssa_env_));
}
void Loop(Decoder* decoder, Control* block) {
SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_);
- block->interface_data.end_env = finish_try_env;
+ block->end_env = finish_try_env;
// The continue environment is the inner environment.
SetEnv(PrepareForLoop(decoder, finish_try_env));
ssa_env_->SetNotMerged();
@@ -147,39 +173,41 @@ class WasmGraphBuildingInterface
void Try(Decoder* decoder, Control* block) {
SsaEnv* outer_env = ssa_env_;
+ SsaEnv* catch_env = Split(decoder, outer_env);
+ // Mark the catch environment as unreachable, since it is only accessible
+ // through catch unwinding (i.e. landing pads).
+ catch_env->state = SsaEnv::kUnreachable;
SsaEnv* try_env = Steal(decoder->zone(), outer_env);
- SsaEnv* catch_env = UnreachableEnv(decoder->zone());
SetEnv(try_env);
TryInfo* try_info = new (decoder->zone()) TryInfo(catch_env);
- block->interface_data.end_env = outer_env;
- block->interface_data.try_info = try_info;
- block->interface_data.previous_catch = current_catch_;
+ block->end_env = outer_env;
+ block->try_info = try_info;
+ block->previous_catch = current_catch_;
current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
}
void If(Decoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- BUILD(BranchNoHint, cond.interface_data.node, &if_true, &if_false);
+ BUILD(BranchNoHint, cond.node, &if_true, &if_false);
SsaEnv* end_env = ssa_env_;
SsaEnv* false_env = Split(decoder, ssa_env_);
false_env->control = if_false;
SsaEnv* true_env = Steal(decoder->zone(), ssa_env_);
true_env->control = if_true;
- if_block->interface_data.end_env = end_env;
- if_block->interface_data.false_env = false_env;
+ if_block->end_env = end_env;
+ if_block->false_env = false_env;
SetEnv(true_env);
}
void FallThruTo(Decoder* decoder, Control* c) {
MergeValuesInto(decoder, c);
- SetEnv(c->interface_data.end_env);
+ SetEnv(c->end_env);
}
void PopControl(Decoder* decoder, Control& block) {
if (block.is_onearmed_if()) {
- Goto(decoder, block.interface_data.false_env,
- block.interface_data.end_env);
+ Goto(decoder, block.false_env, block.end_env);
}
}
@@ -187,38 +215,36 @@ class WasmGraphBuildingInterface
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& value, Value* result) {
- result->interface_data.node =
- BUILD(Unop, opcode, value.interface_data.node, decoder->position());
+ result->node = BUILD(Unop, opcode, value.node, decoder->position());
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& lhs, const Value& rhs, Value* result) {
- result->interface_data.node =
- BUILD(Binop, opcode, lhs.interface_data.node, rhs.interface_data.node,
- decoder->position());
+ result->node =
+ BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
- result->interface_data.node = builder_->Int32Constant(value);
+ result->node = builder_->Int32Constant(value);
}
void I64Const(Decoder* decoder, Value* result, int64_t value) {
- result->interface_data.node = builder_->Int64Constant(value);
+ result->node = builder_->Int64Constant(value);
}
void F32Const(Decoder* decoder, Value* result, float value) {
- result->interface_data.node = builder_->Float32Constant(value);
+ result->node = builder_->Float32Constant(value);
}
void F64Const(Decoder* decoder, Value* result, double value) {
- result->interface_data.node = builder_->Float64Constant(value);
+ result->node = builder_->Float64Constant(value);
}
void DoReturn(Decoder* decoder, Vector<Value> values) {
size_t num_values = values.size();
TFNode** buffer = GetNodes(values);
for (size_t i = 0; i < num_values; ++i) {
- buffer[i] = values[i].interface_data.node;
+ buffer[i] = values[i].node;
}
BUILD(Return, static_cast<unsigned>(values.size()), buffer);
}
@@ -226,30 +252,30 @@ class WasmGraphBuildingInterface
void GetLocal(Decoder* decoder, Value* result,
const LocalIndexOperand<true>& operand) {
if (!ssa_env_->locals) return; // unreachable
- result->interface_data.node = ssa_env_->locals[operand.index];
+ result->node = ssa_env_->locals[operand.index];
}
void SetLocal(Decoder* decoder, const Value& value,
const LocalIndexOperand<true>& operand) {
if (!ssa_env_->locals) return; // unreachable
- ssa_env_->locals[operand.index] = value.interface_data.node;
+ ssa_env_->locals[operand.index] = value.node;
}
void TeeLocal(Decoder* decoder, const Value& value, Value* result,
const LocalIndexOperand<true>& operand) {
- result->interface_data.node = value.interface_data.node;
+ result->node = value.node;
if (!ssa_env_->locals) return; // unreachable
- ssa_env_->locals[operand.index] = value.interface_data.node;
+ ssa_env_->locals[operand.index] = value.node;
}
void GetGlobal(Decoder* decoder, Value* result,
const GlobalIndexOperand<true>& operand) {
- result->interface_data.node = BUILD(GetGlobal, operand.index);
+ result->node = BUILD(GetGlobal, operand.index);
}
void SetGlobal(Decoder* decoder, const Value& value,
const GlobalIndexOperand<true>& operand) {
- BUILD(SetGlobal, operand.index, value.interface_data.node);
+ BUILD(SetGlobal, operand.index, value.node);
}
void Unreachable(Decoder* decoder) {
@@ -259,30 +285,30 @@ class WasmGraphBuildingInterface
void Select(Decoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
TFNode* controls[2];
- BUILD(BranchNoHint, cond.interface_data.node, &controls[0], &controls[1]);
+ BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
TFNode* merge = BUILD(Merge, 2, controls);
- TFNode* vals[2] = {tval.interface_data.node, fval.interface_data.node};
+ TFNode* vals[2] = {tval.node, fval.node};
TFNode* phi = BUILD(Phi, tval.type, 2, vals, merge);
- result->interface_data.node = phi;
+ result->node = phi;
ssa_env_->control = merge;
}
- void BreakTo(Decoder* decoder, Control* block) {
- if (block->is_loop()) {
- Goto(decoder, ssa_env_, block->interface_data.end_env);
+ void BreakTo(Decoder* decoder, uint32_t depth) {
+ Control* target = decoder->control_at(depth);
+ if (target->is_loop()) {
+ Goto(decoder, ssa_env_, target->end_env);
} else {
- MergeValuesInto(decoder, block);
+ MergeValuesInto(decoder, target);
}
}
- void BrIf(Decoder* decoder, const Value& cond, Control* block) {
+ void BrIf(Decoder* decoder, const Value& cond, uint32_t depth) {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder, fenv);
fenv->SetNotMerged();
- BUILD(BranchNoHint, cond.interface_data.node, &tenv->control,
- &fenv->control);
+ BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
ssa_env_ = tenv;
- BreakTo(decoder, block);
+ BreakTo(decoder, depth);
ssa_env_ = fenv;
}
@@ -290,8 +316,7 @@ class WasmGraphBuildingInterface
const Value& key) {
SsaEnv* break_env = ssa_env_;
// Build branches to the various blocks based on the table.
- TFNode* sw =
- BUILD(Switch, operand.table_count + 1, key.interface_data.node);
+ TFNode* sw = BUILD(Switch, operand.table_count + 1, key.node);
SsaEnv* copy = Steal(decoder->zone(), break_env);
ssa_env_ = copy;
@@ -302,38 +327,38 @@ class WasmGraphBuildingInterface
ssa_env_ = Split(decoder, copy);
ssa_env_->control = (i == operand.table_count) ? BUILD(IfDefault, sw)
: BUILD(IfValue, i, sw);
- BreakTo(decoder, decoder->control_at(target));
+ BreakTo(decoder, target);
}
DCHECK(decoder->ok());
ssa_env_ = break_env;
}
void Else(Decoder* decoder, Control* if_block) {
- SetEnv(if_block->interface_data.false_env);
+ SetEnv(if_block->false_env);
}
void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
const MemoryAccessOperand<true>& operand, const Value& index,
Value* result) {
- result->interface_data.node =
- BUILD(LoadMem, type, mem_type, index.interface_data.node,
- operand.offset, operand.alignment, decoder->position());
+ result->node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
+ operand.alignment, decoder->position());
}
void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
const MemoryAccessOperand<true>& operand, const Value& index,
const Value& value) {
- BUILD(StoreMem, mem_type, index.interface_data.node, operand.offset,
- operand.alignment, value.interface_data.node, decoder->position(),
- type);
+ BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
+ value.node, decoder->position(), type);
}
void CurrentMemoryPages(Decoder* decoder, Value* result) {
- result->interface_data.node = BUILD(CurrentMemoryPages);
+ result->node = BUILD(CurrentMemoryPages);
}
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
- result->interface_data.node = BUILD(GrowMemory, value.interface_data.node);
+ result->node = BUILD(GrowMemory, value.node);
+ // Reload mem_size and mem_start after growing memory.
+ LoadContextIntoSsa(ssa_env_);
}
void CallDirect(Decoder* decoder, const CallFunctionOperand<true>& operand,
@@ -344,40 +369,36 @@ class WasmGraphBuildingInterface
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<true>& operand,
const Value args[], Value returns[]) {
- DoCall(decoder, index.interface_data.node, operand, args, returns, true);
+ DoCall(decoder, index.node, operand, args, returns, true);
}
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
TFNode** inputs = GetNodes(args);
TFNode* node = BUILD(SimdOp, opcode, inputs);
- if (result) result->interface_data.node = node;
+ if (result) result->node = node;
}
void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
const SimdLaneOperand<true> operand, Vector<Value> inputs,
Value* result) {
TFNode** nodes = GetNodes(inputs);
- result->interface_data.node =
- BUILD(SimdLaneOp, opcode, operand.lane, nodes);
+ result->node = BUILD(SimdLaneOp, opcode, operand.lane, nodes);
}
void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
const SimdShiftOperand<true> operand, const Value& input,
Value* result) {
- TFNode* inputs[] = {input.interface_data.node};
- result->interface_data.node =
- BUILD(SimdShiftOp, opcode, operand.shift, inputs);
+ TFNode* inputs[] = {input.node};
+ result->node = BUILD(SimdShiftOp, opcode, operand.shift, inputs);
}
void Simd8x16ShuffleOp(Decoder* decoder,
const Simd8x16ShuffleOperand<true>& operand,
const Value& input0, const Value& input1,
Value* result) {
- TFNode* input_nodes[] = {input0.interface_data.node,
- input1.interface_data.node};
- result->interface_data.node =
- BUILD(Simd8x16ShuffleOp, operand.shuffle, input_nodes);
+ TFNode* input_nodes[] = {input0.node, input1.node};
+ result->node = BUILD(Simd8x16ShuffleOp, operand.shuffle, input_nodes);
}
TFNode* GetExceptionTag(Decoder* decoder,
@@ -387,47 +408,84 @@ class WasmGraphBuildingInterface
return BUILD(Int32Constant, operand.index);
}
- void Throw(Decoder* decoder, const ExceptionIndexOperand<true>& operand) {
- BUILD(Throw, GetExceptionTag(decoder, operand));
+ void Throw(Decoder* decoder, const ExceptionIndexOperand<true>& operand,
+ Control* block, const Vector<Value>& value_args) {
+ int count = value_args.length();
+ ZoneVector<TFNode*> args(count, decoder->zone());
+ for (int i = 0; i < count; ++i) {
+ args[i] = value_args[i].node;
+ }
+ BUILD(Throw, operand.index, operand.exception, vec2vec(args));
+ Unreachable(decoder);
+ EndControl(decoder, block);
}
- void Catch(Decoder* decoder, const ExceptionIndexOperand<true>& operand,
- Control* block) {
+ void CatchException(Decoder* decoder,
+ const ExceptionIndexOperand<true>& operand,
+ Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
- current_catch_ = block->interface_data.previous_catch;
- SsaEnv* catch_env = block->interface_data.try_info->catch_env;
+ current_catch_ = block->previous_catch;
+ SsaEnv* catch_env = block->try_info->catch_env;
SetEnv(catch_env);
- // Get the exception and see if wanted exception.
- TFNode* exception_as_i32 = BUILD(
- Catch, block->interface_data.try_info->exception, decoder->position());
- TFNode* exception_tag = GetExceptionTag(decoder, operand);
- TFNode* compare_i32 = BUILD(Binop, kExprI32Eq, exception_as_i32,
- exception_tag, decoder->position());
- TFNode* if_true = nullptr;
- TFNode* if_false = nullptr;
- BUILD(BranchNoHint, compare_i32, &if_true, &if_false);
- SsaEnv* false_env = Split(decoder, catch_env);
- false_env->control = if_false;
- SsaEnv* true_env = Steal(decoder->zone(), catch_env);
- true_env->control = if_true;
- block->interface_data.try_info->catch_env = false_env;
+ TFNode* compare_i32 = nullptr;
+ if (block->try_info->exception == nullptr) {
+ // Catch is not applicable: there are no possible throws in the try
+ // block. Create dummy code so that the body of the catch still
+ // compiles. Note: This only happens because the current
+ // implementation only builds a landing pad if some node in the
+ // try block can (possibly) throw.
+ //
+ // TODO(kschimpf): Always generate a landing pad for a try block.
+ compare_i32 = BUILD(Int32Constant, 0);
+ } else {
+ // Get the exception and check whether it is the wanted exception.
+ TFNode* caught_tag = BUILD(GetExceptionRuntimeId);
+ TFNode* exception_tag =
+ BUILD(ConvertExceptionTagToRuntimeId, operand.index);
+ compare_i32 = BUILD(Binop, kExprI32Eq, caught_tag, exception_tag);
+ }
- // Generate code to re-throw the exception.
- DCHECK_NOT_NULL(block->interface_data.try_info->catch_env);
- SetEnv(false_env);
+ TFNode* if_catch = nullptr;
+ TFNode* if_no_catch = nullptr;
+ BUILD(BranchNoHint, compare_i32, &if_catch, &if_no_catch);
+
+ SsaEnv* if_no_catch_env = Split(decoder, ssa_env_);
+ if_no_catch_env->control = if_no_catch;
+ SsaEnv* if_catch_env = Steal(decoder->zone(), ssa_env_);
+ if_catch_env->control = if_catch;
+
+ // TODO(kschimpf): Generalize to allow more catches. Will force
+ // moving no_catch code to END opcode.
+ SetEnv(if_no_catch_env);
BUILD(Rethrow);
- FallThruTo(decoder, block);
+ Unreachable(decoder);
+ EndControl(decoder, block);
- SetEnv(true_env);
- // TODO(kschimpf): Add code to pop caught exception from isolate.
+ SetEnv(if_catch_env);
+
+ if (block->try_info->exception == nullptr) {
+ // No caught value; make up filler nodes so that the catch block still
+ // compiles.
+ for (Value& value : values) {
+ value.node = DefaultValue(value.type);
+ }
+ } else {
+ // TODO(kschimpf): Can't use BUILD() here, GetExceptionValues() returns
+ // TFNode** rather than TFNode*. Fix to add landing pads.
+ TFNode** caught_values = builder_->GetExceptionValues(operand.exception);
+ for (size_t i = 0, e = values.size(); i < e; ++i) {
+ values[i].node = caught_values[i];
+ }
+ }
}
void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
- Value* result) {
+ const MemoryAccessOperand<true>& operand, Value* result) {
TFNode** inputs = GetNodes(args);
- TFNode* node = BUILD(AtomicOp, opcode, inputs, decoder->position());
- if (result) result->interface_data.node = node;
+ TFNode* node = BUILD(AtomicOp, opcode, inputs, operand.alignment,
+ operand.offset, decoder->position());
+ if (result) result->node = node;
}
private:
@@ -439,13 +497,13 @@ class WasmGraphBuildingInterface
TryInfo* current_try_info(Decoder* decoder) {
return decoder->control_at(decoder->control_depth() - 1 - current_catch_)
- ->interface_data.try_info;
+ ->try_info;
}
TFNode** GetNodes(Value* values, size_t count) {
TFNode** nodes = builder_->Buffer(count);
for (size_t i = 0; i < count; ++i) {
- nodes[i] = values[i].interface_data.node;
+ nodes[i] = values[i].node;
}
return nodes;
}
@@ -483,8 +541,13 @@ class WasmGraphBuildingInterface
}
#endif
ssa_env_ = env;
+ // TODO(wasm): Create a WasmEnv class with control, effect, mem_size and
+ // mem_start. SsaEnv can inherit from it. This way WasmEnv can be passed
+ // directly to WasmGraphBuilder instead of always copying four pointers.
builder_->set_control_ptr(&env->control);
builder_->set_effect_ptr(&env->effect);
+ builder_->set_mem_size(&env->mem_size);
+ builder_->set_mem_start(&env->mem_start);
}
TFNode* CheckForException(Decoder* decoder, TFNode* node) {
@@ -542,24 +605,21 @@ class WasmGraphBuildingInterface
void MergeValuesInto(Decoder* decoder, Control* c) {
if (!ssa_env_->go()) return;
- SsaEnv* target = c->interface_data.end_env;
+ SsaEnv* target = c->end_env;
const bool first = target->state == SsaEnv::kUnreachable;
Goto(decoder, ssa_env_, target);
- size_t avail = decoder->stack_size() - decoder->control_at(0)->stack_depth;
- size_t start = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
- for (size_t i = start; i < c->merge.arity; ++i) {
+ uint32_t avail =
+ decoder->stack_size() - decoder->control_at(0)->stack_depth;
+ uint32_t start = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
+ for (uint32_t i = start; i < c->merge.arity; ++i) {
auto& val = decoder->GetMergeValueFromStack(c, i);
auto& old = c->merge[i];
- DCHECK_NOT_NULL(val.interface_data.node);
- // TODO(clemensh): Remove first.
- DCHECK_EQ(first, old.interface_data.node == nullptr);
+ DCHECK_NOT_NULL(val.node);
DCHECK(val.type == old.type || val.type == kWasmVar);
- old.interface_data.node =
- first ? val.interface_data.node
- : CreateOrMergeIntoPhi(old.type, target->control,
- old.interface_data.node,
- val.interface_data.node);
+ old.node = first ? val.node
+ : CreateOrMergeIntoPhi(old.type, target->control,
+ old.node, val.node);
}
}
@@ -572,6 +632,8 @@ class WasmGraphBuildingInterface
to->locals = from->locals;
to->control = from->control;
to->effect = from->effect;
+ to->mem_size = from->mem_size;
+ to->mem_start = from->mem_start;
break;
}
case SsaEnv::kReached: { // Create a new merge.
@@ -595,6 +657,17 @@ class WasmGraphBuildingInterface
builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
}
}
+ // Merge mem_size and mem_start.
+ if (to->mem_size != from->mem_size) {
+ TFNode* vals[] = {to->mem_size, from->mem_size};
+ to->mem_size =
+ builder_->Phi(MachineRepresentation::kWord32, 2, vals, merge);
+ }
+ if (to->mem_start != from->mem_start) {
+ TFNode* vals[] = {to->mem_start, from->mem_start};
+ to->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 2,
+ vals, merge);
+ }
break;
}
case SsaEnv::kMerged: {
@@ -615,21 +688,16 @@ class WasmGraphBuildingInterface
}
// Merge locals.
for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
- TFNode* tnode = to->locals[i];
- TFNode* fnode = from->locals[i];
- if (builder_->IsPhiWithMerge(tnode, merge)) {
- builder_->AppendToPhi(tnode, fnode);
- } else if (tnode != fnode) {
- uint32_t count = builder_->InputCount(merge);
- TFNode** vals = builder_->Buffer(count);
- for (uint32_t j = 0; j < count - 1; j++) {
- vals[j] = tnode;
- }
- vals[count - 1] = fnode;
- to->locals[i] =
- builder_->Phi(decoder->GetLocalType(i), count, vals, merge);
- }
+ to->locals[i] = CreateOrMergeIntoPhi(decoder->GetLocalType(i), merge,
+ to->locals[i], from->locals[i]);
}
+ // Merge mem_size and mem_start.
+ to->mem_size =
+ CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
+ to->mem_size, from->mem_size);
+ to->mem_start =
+ CreateOrMergeIntoPhi(MachineType::PointerRepresentation(), merge,
+ to->mem_start, from->mem_start);
break;
}
default:
@@ -659,17 +727,29 @@ class WasmGraphBuildingInterface
env->control = builder_->Loop(env->control);
env->effect = builder_->EffectPhi(1, &env->effect, env->control);
builder_->Terminate(env->effect, env->control);
+ // The '+ 2' here is so that mem_size and mem_start can be marked as assigned.
BitVector* assigned = WasmDecoder<true>::AnalyzeLoopAssignment(
- decoder, decoder->pc(), static_cast<int>(decoder->total_locals()),
- decoder->zone());
+ decoder, decoder->pc(), decoder->total_locals() + 2, decoder->zone());
if (decoder->failed()) return env;
if (assigned != nullptr) {
// Only introduce phis for variables assigned in this loop.
+ int mem_size_index = decoder->total_locals();
+ int mem_start_index = decoder->total_locals() + 1;
for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
&env->locals[i], env->control);
}
+ // Introduce phis for mem_size and mem_start if necessary.
+ if (assigned->Contains(mem_size_index)) {
+ env->mem_size = builder_->Phi(MachineRepresentation::kWord32, 1,
+ &env->mem_size, env->control);
+ }
+ if (assigned->Contains(mem_start_index)) {
+ env->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 1,
+ &env->mem_start, env->control);
+ }
+
SsaEnv* loop_body_env = Split(decoder, env);
builder_->StackCheck(decoder->position(), &(loop_body_env->effect),
&(loop_body_env->control));
@@ -682,6 +762,12 @@ class WasmGraphBuildingInterface
&env->locals[i], env->control);
}
+ // Conservatively introduce phis for mem_size and mem_start.
+ env->mem_size = builder_->Phi(MachineRepresentation::kWord32, 1,
+ &env->mem_size, env->control);
+ env->mem_start = builder_->Phi(MachineType::PointerRepresentation(), 1,
+ &env->mem_start, env->control);
+
SsaEnv* loop_body_env = Split(decoder, env);
builder_->StackCheck(decoder->position(), &loop_body_env->effect,
&loop_body_env->control);
@@ -693,7 +779,8 @@ class WasmGraphBuildingInterface
DCHECK_NOT_NULL(from);
SsaEnv* result =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- size_t size = sizeof(TFNode*) * decoder->NumLocals();
+ // The '+ 2' here is to accommodate the mem_size and mem_start nodes.
+ size_t size = sizeof(TFNode*) * (decoder->NumLocals() + 2);
result->control = from->control;
result->effect = from->effect;
@@ -703,9 +790,13 @@ class WasmGraphBuildingInterface
size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
: nullptr;
memcpy(result->locals, from->locals, size);
+ result->mem_size = from->mem_size;
+ result->mem_start = from->mem_start;
} else {
result->state = SsaEnv::kUnreachable;
result->locals = nullptr;
+ result->mem_size = nullptr;
+ result->mem_start = nullptr;
}
return result;
@@ -721,6 +812,8 @@ class WasmGraphBuildingInterface
result->locals = from->locals;
result->control = from->control;
result->effect = from->effect;
+ result->mem_size = from->mem_size;
+ result->mem_start = from->mem_start;
from->Kill(SsaEnv::kUnreachable);
return result;
}
@@ -732,6 +825,8 @@ class WasmGraphBuildingInterface
result->control = nullptr;
result->effect = nullptr;
result->locals = nullptr;
+ result->mem_size = nullptr;
+ result->mem_start = nullptr;
return result;
}
@@ -745,7 +840,7 @@ class WasmGraphBuildingInterface
TFNode** return_nodes = nullptr;
arg_nodes[0] = index_node;
for (int i = 0; i < param_count; ++i) {
- arg_nodes[i + 1] = args[i].interface_data.node;
+ arg_nodes[i + 1] = args[i].node;
}
if (is_indirect) {
builder_->CallIndirect(operand.index, arg_nodes, &return_nodes,
@@ -756,8 +851,11 @@ class WasmGraphBuildingInterface
}
int return_count = static_cast<int>(operand.sig->return_count());
for (int i = 0; i < return_count; ++i) {
- returns[i].interface_data.node = return_nodes[i];
+ returns[i].node = return_nodes[i];
}
+ // The invoked function could have used grow_memory, so we need to
+ // reload mem_size and mem_start.
+ LoadContextIntoSsa(ssa_env_);
}
};
@@ -798,11 +896,7 @@ DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
const wasm::WasmModule* module,
FunctionBody& body, bool is_wasm,
Counters* counters) {
- auto size_histogram = is_wasm ? counters->wasm_wasm_function_size_bytes()
- : counters->wasm_asm_function_size_bytes();
- // TODO(bradnelson): Improve histogram handling of ptrdiff_t.
- CHECK((body.end - body.start) >= 0);
- size_histogram->AddSample(static_cast<int>(body.end - body.start));
+ CHECK_LE(0, body.end - body.start);
auto time_counter = is_wasm ? counters->wasm_decode_wasm_function_time()
: counters->wasm_decode_asm_function_time();
TimedHistogramScope wasm_decode_function_time_scope(time_counter);
@@ -909,7 +1003,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
os << RawOpcodeName(opcode) << ",";
- for (size_t j = 1; j < length; ++j) {
+ for (unsigned j = 1; j < length; ++j) {
os << " 0x" << AsHex(i.pc()[j], 2) << ",";
}
@@ -979,10 +1073,10 @@ BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
Decoder decoder(start, end);
return WasmDecoder<true>::AnalyzeLoopAssignment(
- &decoder, start, static_cast<int>(num_locals), zone);
+ &decoder, start, static_cast<uint32_t>(num_locals), zone);
}
-#undef TRACE
+#undef BUILD
} // namespace wasm
} // namespace internal
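
The hunks above replace the hand-rolled phi-merge loop in Goto with calls to a CreateOrMergeIntoPhi helper. As a non-authoritative reading of the deleted lines, that helper behaves roughly like the sketch below; the builder_ method names (IsPhiWithMerge, AppendToPhi, InputCount, Buffer, Phi) are copied from the removed code, while the function itself is an illustrative reconstruction and not part of this patch.

// Illustrative sketch only: merge `fnode` into `tnode` at the given merge
// node, mirroring the inline loop removed above.
TFNode* CreateOrMergeIntoPhi(ValueType type, TFNode* merge, TFNode* tnode,
                             TFNode* fnode) {
  if (builder_->IsPhiWithMerge(tnode, merge)) {
    // tnode is already a phi anchored at this merge: append one more input.
    builder_->AppendToPhi(tnode, fnode);
    return tnode;
  }
  if (tnode == fnode) return tnode;  // Identical values need no phi.
  // Otherwise create a fresh phi whose first (count - 1) inputs are tnode.
  uint32_t count = builder_->InputCount(merge);
  TFNode** vals = builder_->Buffer(count);
  for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
  vals[count - 1] = fnode;
  return builder_->Phi(type, count, vals, merge);
}
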
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 0c44b87ec4..a244e294c8 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -41,12 +41,6 @@ static inline FunctionBody FunctionBodyForTesting(const byte* start,
return {nullptr, 0, start, end};
}
-// A {DecodeResult} only stores the failure / success status, but no data. Thus
-// we use {nullptr_t} as data value, such that the only valid data stored in
-// this type is a nullptr.
-// Storing {void} would require template specialization.
-using DecodeResult = Result<std::nullptr_t>;
-
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const wasm::WasmModule* module,
FunctionBody& body);
@@ -195,6 +189,12 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
}
bool has_next() { return pc_ < end_; }
+
+ WasmOpcode prefixed_opcode() {
+ byte prefix = read_u8<false>(pc_, "expected prefix");
+ byte index = read_u8<false>(pc_ + 1, "expected index");
+ return static_cast<WasmOpcode>(prefix << 8 | index);
+ }
};
} // namespace wasm
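
The new prefixed_opcode() helper above combines a prefix byte and an index byte into a single 16-bit opcode value. A minimal standalone illustration of that encoding follows; the byte values are hypothetical and not taken from wasm-opcodes.h.

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t prefix = 0xfd;  // hypothetical prefix byte
  uint8_t index = 0x01;   // hypothetical opcode index within that prefix space
  // Same combination as prefixed_opcode(): prefix in the high byte, index low.
  uint16_t opcode = static_cast<uint16_t>(prefix << 8 | index);
  std::printf("combined opcode: 0x%04x\n", opcode);  // prints 0xfd01
  return 0;
}
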
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index 0f3da2f383..ab179f3a9d 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -6,19 +6,9 @@
#include "src/wasm/leb-helper.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
void LocalDeclEncoder::Prepend(Zone* zone, const byte** start,
const byte** end) const {
@@ -60,3 +50,7 @@ size_t LocalDeclEncoder::Size() const {
for (auto p : local_decls) size += 1 + LEBHelper::sizeof_u32v(p.first);
return size;
}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
new file mode 100644
index 0000000000..d6e7891fc0
--- /dev/null
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -0,0 +1,49 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/memory-tracing.h"
+
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace tracing {
+
+void TraceMemoryOperation(ExecutionEngine engine, bool is_store,
+ MachineRepresentation rep, uint32_t addr,
+ int func_index, int position, uint8_t* mem_start) {
+ EmbeddedVector<char, 64> value;
+ switch (rep) {
+#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
+ case MachineRepresentation::rep: \
+ SNPrintF(value, str ":" format, \
+ ReadLittleEndianValue<ctype1>(mem_start + addr), \
+ ReadLittleEndianValue<ctype2>(mem_start + addr)); \
+ break;
+ TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
+ TRACE_TYPE(kWord16, "i16", "%d / %04x", uint16_t, uint16_t)
+ TRACE_TYPE(kWord32, "i32", "%d / %08x", uint32_t, uint32_t)
+ TRACE_TYPE(kWord64, "i64", "%" PRId64 " / %016" PRIx64, uint64_t, uint64_t)
+ TRACE_TYPE(kFloat32, "f32", "%f / %08x", float, uint32_t)
+ TRACE_TYPE(kFloat64, "f64", "%f / %016" PRIx64, double, uint64_t)
+#undef TRACE_TYPE
+ default:
+ SNPrintF(value, "???");
+ }
+ char eng_c = '?';
+ switch (engine) {
+ case kWasmCompiled:
+ eng_c = 'C';
+ break;
+ case kWasmInterpreted:
+ eng_c = 'I';
+ break;
+ }
+ printf("%c %8d+0x%-6x %s @%08x %s\n", eng_c, func_index, position,
+ is_store ? "store" : "read ", addr, value.start());
+}
+
+} // namespace tracing
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
new file mode 100644
index 0000000000..7d7bc288c0
--- /dev/null
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MEMORY_TRACING_H
+#define V8_MEMORY_TRACING_H
+
+#include <cstdint>
+
+#include "src/machine-type.h"
+
+namespace v8 {
+namespace internal {
+namespace tracing {
+
+enum ExecutionEngine { kWasmCompiled, kWasmInterpreted };
+
+// Callback for tracing a memory operation for debugging.
+// Triggered by --wasm-trace-memory.
+void TraceMemoryOperation(ExecutionEngine, bool is_store, MachineRepresentation,
+ uint32_t addr, int func_index, int position,
+ uint8_t* mem_start);
+
+} // namespace tracing
+} // namespace internal
+} // namespace v8
+
+#endif /* !V8_MEMORY_TRACING_H */
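
For orientation, here is a hedged sketch of how this new tracing hook might be called from V8-internal code. The wrapper ExampleTraceLoad and its arguments are made up for illustration; only the TraceMemoryOperation signature is taken from the header above.

#include "src/wasm/memory-tracing.h"

namespace v8 {
namespace internal {

// Hypothetical helper: report a 32-bit read at `addr` when --wasm-trace-memory
// is enabled; mirrors the parameters declared in memory-tracing.h above.
void ExampleTraceLoad(uint8_t* mem_start, uint32_t addr, int func_index,
                      int position) {
  tracing::TraceMemoryOperation(tracing::kWasmInterpreted, /*is_store=*/false,
                                MachineRepresentation::kWord32, addr,
                                func_index, position, mem_start);
}

}  // namespace internal
}  // namespace v8
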
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index e1523e17d9..e42c139ce1 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -2,20 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <src/wasm/module-compiler.h>
+#include "src/wasm/module-compiler.h"
#include <atomic>
+#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
+#include "src/base/template-utils.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/code-stubs.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/property-descriptor.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-memory.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#define TRACE(...) \
@@ -33,12 +38,866 @@
if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
} while (false)
+#define TRACE_STREAMING(...) \
+ do { \
+ if (FLAG_trace_wasm_streaming) PrintF(__VA_ARGS__); \
+ } while (false)
static const int kInvalidSigIndex = -1;
namespace v8 {
namespace internal {
namespace wasm {
+// A class compiling an entire module.
+class ModuleCompiler {
+ public:
+ ModuleCompiler(Isolate* isolate, WasmModule* module,
+ Handle<Code> centry_stub);
+
+ // The actual runnable task that performs compilations in the background.
+ class CompilationTask : public CancelableTask {
+ public:
+ ModuleCompiler* compiler_;
+ explicit CompilationTask(ModuleCompiler* compiler)
+ : CancelableTask(&compiler->background_task_manager_),
+ compiler_(compiler) {}
+
+ void RunInternal() override {
+ while (compiler_->executed_units_.CanAcceptWork() &&
+ compiler_->FetchAndExecuteCompilationUnit()) {
+ }
+
+ compiler_->OnBackgroundTaskStopped();
+ }
+ };
+
+ // The CompilationUnitBuilder builds compilation units and stores them in an
+ // internal buffer. The buffer is moved into the working queue of the
+ // ModuleCompiler when {Commit} is called.
+ class CompilationUnitBuilder {
+ public:
+ explicit CompilationUnitBuilder(ModuleCompiler* compiler)
+ : compiler_(compiler) {}
+
+ ~CompilationUnitBuilder() { DCHECK(units_.empty()); }
+
+ void AddUnit(compiler::ModuleEnv* module_env, const WasmFunction* function,
+ uint32_t buffer_offset, Vector<const uint8_t> bytes,
+ WasmName name) {
+ units_.emplace_back(new compiler::WasmCompilationUnit(
+ compiler_->isolate_, module_env,
+ wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
+ bytes.end()},
+ name, function->func_index, compiler_->centry_stub_,
+ compiler_->counters()));
+ }
+
+ void Commit() {
+ {
+ base::LockGuard<base::Mutex> guard(
+ &compiler_->compilation_units_mutex_);
+ compiler_->compilation_units_.insert(
+ compiler_->compilation_units_.end(),
+ std::make_move_iterator(units_.begin()),
+ std::make_move_iterator(units_.end()));
+ }
+ units_.clear();
+ }
+
+ void Clear() { units_.clear(); }
+
+ private:
+ ModuleCompiler* compiler_;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> units_;
+ };
+
+ class CodeGenerationSchedule {
+ public:
+ explicit CodeGenerationSchedule(
+ base::RandomNumberGenerator* random_number_generator,
+ size_t max_memory = 0);
+
+ void Schedule(std::unique_ptr<compiler::WasmCompilationUnit>&& item);
+
+ bool IsEmpty() const { return schedule_.empty(); }
+
+ std::unique_ptr<compiler::WasmCompilationUnit> GetNext();
+
+ bool CanAcceptWork() const;
+
+ bool ShouldIncreaseWorkload() const;
+
+ void EnableThrottling() { throttle_ = true; }
+
+ private:
+ size_t GetRandomIndexInSchedule();
+
+ base::RandomNumberGenerator* random_number_generator_ = nullptr;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> schedule_;
+ const size_t max_memory_;
+ bool throttle_ = false;
+ base::AtomicNumber<size_t> allocated_memory_{0};
+ };
+
+ Counters* counters() const { return async_counters_.get(); }
+
+ // Run by each compilation task and by the main thread (i.e. in both
+ // foreground and background threads). The no_finisher_callback is called
+ // within the result_mutex_ lock when no finishing task is running, i.e. when
+ // the finisher_is_running_ flag is not set.
+ bool FetchAndExecuteCompilationUnit(
+ std::function<void()> no_finisher_callback = nullptr);
+
+ void OnBackgroundTaskStopped();
+
+ void EnableThrottling() { executed_units_.EnableThrottling(); }
+
+ bool CanAcceptWork() const { return executed_units_.CanAcceptWork(); }
+
+ bool ShouldIncreaseWorkload() const {
+ return executed_units_.ShouldIncreaseWorkload();
+ }
+
+ size_t InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
+ const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env);
+
+ void RestartCompilationTasks();
+
+ size_t FinishCompilationUnits(std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ bool IsFinisherRunning() const { return finisher_is_running_; }
+
+ void SetFinisherIsRunning(bool value);
+
+ MaybeHandle<Code> FinishCompilationUnit(ErrorThrower* thrower,
+ int* func_index);
+
+ void CompileInParallel(const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ void CompileSequentially(const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ void ValidateSequentially(const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ ErrorThrower* thrower);
+
+ static MaybeHandle<WasmModuleObject> CompileToModuleObject(
+ Isolate* isolate, ErrorThrower* thrower,
+ std::unique_ptr<WasmModule> module, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
+
+ private:
+ MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
+ ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
+
+ Isolate* isolate_;
+ WasmModule* module_;
+ const std::shared_ptr<Counters> async_counters_;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>
+ compilation_units_;
+ base::Mutex compilation_units_mutex_;
+ CodeGenerationSchedule executed_units_;
+ base::Mutex result_mutex_;
+ const size_t num_background_tasks_;
+ // This flag should only be set while holding result_mutex_.
+ bool finisher_is_running_ = false;
+ CancelableTaskManager background_task_manager_;
+ size_t stopped_compilation_tasks_ = 0;
+ base::Mutex tasks_mutex_;
+ Handle<Code> centry_stub_;
+};
+
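
The CompilationUnitBuilder above accumulates units without holding a lock and only takes compilation_units_mutex_ when Commit() splices them into the shared queue. Below is a standalone sketch of that pattern; Unit, SharedQueue, and UnitBuilder are illustrative stand-ins, not V8 classes.

#include <memory>
#include <mutex>
#include <vector>

struct Unit {};  // stand-in for compiler::WasmCompilationUnit

struct SharedQueue {
  std::mutex mutex;
  std::vector<std::unique_ptr<Unit>> units;
};

class UnitBuilder {
 public:
  explicit UnitBuilder(SharedQueue* queue) : queue_(queue) {}

  // Lock-free accumulation into a local buffer.
  void AddUnit() { local_.push_back(std::make_unique<Unit>()); }

  // Move everything into the shared queue, holding its lock only briefly.
  void Commit() {
    {
      std::lock_guard<std::mutex> guard(queue_->mutex);
      queue_->units.insert(queue_->units.end(),
                           std::make_move_iterator(local_.begin()),
                           std::make_move_iterator(local_.end()));
    }
    local_.clear();
  }

 private:
  SharedQueue* queue_;
  std::vector<std::unique_ptr<Unit>> local_;
};
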
+class JSToWasmWrapperCache {
+ public:
+ void SetContextAddress(Address context_address) {
+ // Prevent having different context addresses in the cache.
+ DCHECK(code_cache_.empty());
+ context_address_ = context_address;
+ }
+
+ Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
+ wasm::WasmModule* module,
+ Handle<Code> wasm_code,
+ uint32_t index) {
+ const wasm::WasmFunction* func = &module->functions[index];
+ int cached_idx = sig_map_.Find(func->sig);
+ if (cached_idx >= 0) {
+ Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
+ // Now patch the call to wasm code.
+ for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
+ DCHECK(!it.done());
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION ||
+ target->kind() == Code::WASM_TO_JS_FUNCTION ||
+ target->builtin_index() == Builtins::kIllegal ||
+ target->builtin_index() == Builtins::kWasmCompileLazy) {
+ it.rinfo()->set_target_address(isolate,
+ wasm_code->instruction_start());
+ break;
+ }
+ }
+ return code;
+ }
+
+ Handle<Code> code = compiler::CompileJSToWasmWrapper(
+ isolate, module, wasm_code, index, context_address_);
+ uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
+ DCHECK_EQ(code_cache_.size(), new_cache_idx);
+ USE(new_cache_idx);
+ code_cache_.push_back(code);
+ return code;
+ }
+
+ private:
+ // sig_map_ maps signatures to an index in code_cache_.
+ wasm::SignatureMap sig_map_;
+ std::vector<Handle<Code>> code_cache_;
+ Address context_address_ = nullptr;
+};
+
+// A helper class to simplify instantiating a module from a compiled module.
+// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
+// etc.
+class InstanceBuilder {
+ public:
+ InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory,
+ WeakCallbackInfo<void>::Callback instance_finalizer_callback);
+
+ // Build an instance, in all of its glory.
+ MaybeHandle<WasmInstanceObject> Build();
+
+ private:
+ // Represents the initialized state of a table.
+ struct TableInstance {
+ Handle<WasmTableObject> table_object; // WebAssembly.Table instance
+ Handle<FixedArray> js_wrappers; // JSFunctions exported
+ Handle<FixedArray> function_table; // internal code array
+ Handle<FixedArray> signature_table; // internal sig array
+ };
+
+ // A pre-evaluated value to use in import binding.
+ struct SanitizedImport {
+ Handle<String> module_name;
+ Handle<String> import_name;
+ Handle<Object> value;
+ };
+
+ Isolate* isolate_;
+ WasmModule* const module_;
+ const std::shared_ptr<Counters> async_counters_;
+ ErrorThrower* thrower_;
+ Handle<WasmModuleObject> module_object_;
+ MaybeHandle<JSReceiver> ffi_;
+ MaybeHandle<JSArrayBuffer> memory_;
+ Handle<JSArrayBuffer> globals_;
+ Handle<WasmCompiledModule> compiled_module_;
+ std::vector<TableInstance> table_instances_;
+ std::vector<Handle<JSFunction>> js_wrappers_;
+ JSToWasmWrapperCache js_to_wasm_cache_;
+ WeakCallbackInfo<void>::Callback instance_finalizer_callback_;
+ std::vector<SanitizedImport> sanitized_imports_;
+
+ const std::shared_ptr<Counters>& async_counters() const {
+ return async_counters_;
+ }
+ Counters* counters() const { return async_counters().get(); }
+
+// Helper routines to print out errors with imports.
+#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
+ void Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name, Handle<String> import_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
+ index, module_name->ToCString().get(), \
+ import_name->ToCString().get(), error); \
+ } \
+ \
+ MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
+ module_name->ToCString().get(), error); \
+ return MaybeHandle<Object>(); \
+ }
+
+ ERROR_THROWER_WITH_MESSAGE(LinkError)
+ ERROR_THROWER_WITH_MESSAGE(TypeError)
+
+ // Look up an import value in the {ffi_} object.
+ MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
+ Handle<String> import_name);
+
+ // Look up an import value in the {ffi_} object specifically for linking an
+ // asm.js module. This only performs non-observable lookups, which allows
+ // falling back to JavaScript proper (and hence re-executing all lookups) if
+ // module instantiation fails.
+ MaybeHandle<Object> LookupImportAsm(uint32_t index,
+ Handle<String> import_name);
+
+ uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
+
+ // Load data segments into the memory.
+ void LoadDataSegments(Address mem_addr, size_t mem_size);
+
+ void WriteGlobalValue(WasmGlobal& global, Handle<Object> value);
+
+ void SanitizeImports();
+
+ Handle<FixedArray> SetupWasmToJSImportsTable(
+ Handle<WasmInstanceObject> instance);
+
+ // Process the imports, including functions, tables, globals, and memory, in
+ // order, loading them from the {ffi_} object. Returns the number of imported
+ // functions.
+ int ProcessImports(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance);
+
+ template <typename T>
+ T* GetRawGlobalPtr(WasmGlobal& global);
+
+ // Process initialization of globals.
+ void InitGlobals();
+
+ // Allocate memory for a module instance as a new JSArrayBuffer.
+ Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
+
+ bool NeedsWrappers() const;
+
+ // Process the exports, creating wrappers for functions, tables, memories,
+ // and globals.
+ void ProcessExports(Handle<WasmInstanceObject> instance,
+ Handle<WasmCompiledModule> compiled_module);
+
+ void InitializeTables(Handle<WasmInstanceObject> instance,
+ CodeSpecialization* code_specialization);
+
+ void LoadTableSegments(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance);
+};
+
+// TODO(titzer): move to wasm-objects.cc
+static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ DisallowHeapAllocation no_gc;
+ JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
+ WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ // If a link to shared memory instances exists, update the list of memory
+ // instances before the instance is destroyed.
+ WasmCompiledModule* compiled_module = owner->compiled_module();
+ TRACE("Finalizing %d {\n", compiled_module->instance_id());
+ DCHECK(compiled_module->has_weak_wasm_module());
+ WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
+
+ if (trap_handler::UseTrapHandler()) {
+ Handle<FixedArray> code_table = compiled_module->code_table();
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ int index = code->trap_handler_index()->value();
+ if (index >= 0) {
+ trap_handler::ReleaseHandlerData(index);
+ code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
+ }
+ }
+ }
+
+ // Since the order of finalizers is not guaranteed, it can be the case
+ // that {instance->compiled_module()->module()}, which is a
+ // {Managed<WasmModule>}, has been collected earlier in this GC cycle.
+ // Weak references to this instance won't be cleared until
+ // the next GC cycle, so we need to manually break some links (such as
+ // the weak references from {WasmMemoryObject::instances}).
+ if (owner->has_memory_object()) {
+ Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
+ Handle<WasmInstanceObject> instance(owner, isolate);
+ WasmMemoryObject::RemoveInstance(isolate, memory, instance);
+ }
+
+ // weak_wasm_module may have been cleared, meaning the module object
+ // was GC-ed. In that case, there won't be any new instances created,
+ // and we don't need to maintain the links between instances.
+ if (!weak_wasm_module->cleared()) {
+ WasmModuleObject* wasm_module =
+ WasmModuleObject::cast(weak_wasm_module->value());
+ WasmCompiledModule* current_template = wasm_module->compiled_module();
+
+ TRACE("chain before {\n");
+ TRACE_CHAIN(current_template);
+ TRACE("}\n");
+
+ DCHECK(!current_template->has_weak_prev_instance());
+ WeakCell* next = compiled_module->maybe_ptr_to_weak_next_instance();
+ WeakCell* prev = compiled_module->maybe_ptr_to_weak_prev_instance();
+
+ if (current_template == compiled_module) {
+ if (next == nullptr) {
+ WasmCompiledModule::Reset(isolate, compiled_module);
+ } else {
+ WasmCompiledModule* next_compiled_module =
+ WasmCompiledModule::cast(next->value());
+ WasmModuleObject::cast(wasm_module)
+ ->set_compiled_module(next_compiled_module);
+ DCHECK_NULL(prev);
+ next_compiled_module->reset_weak_prev_instance();
+ }
+ } else {
+ DCHECK(!(prev == nullptr && next == nullptr));
+ // the only reason prev or next would be cleared is if the
+ // respective objects got collected, but if that happened,
+ // we would have relinked the list.
+ if (prev != nullptr) {
+ DCHECK(!prev->cleared());
+ if (next == nullptr) {
+ WasmCompiledModule::cast(prev->value())->reset_weak_next_instance();
+ } else {
+ WasmCompiledModule::cast(prev->value())
+ ->set_ptr_to_weak_next_instance(next);
+ }
+ }
+ if (next != nullptr) {
+ DCHECK(!next->cleared());
+ if (prev == nullptr) {
+ WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
+ } else {
+ WasmCompiledModule::cast(next->value())
+ ->set_ptr_to_weak_prev_instance(prev);
+ }
+ }
+ }
+ TRACE("chain after {\n");
+ TRACE_CHAIN(wasm_module->compiled_module());
+ TRACE("}\n");
+ }
+ compiled_module->reset_weak_owning_instance();
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ TRACE("}\n");
+}
+
+bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
+ if (bytes.start() == nullptr || bytes.length() == 0) return false;
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), true, kWasmOrigin);
+ return result.ok();
+}
+
+MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kAsmJsOrigin);
+ if (result.failed()) {
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return ModuleCompiler::CompileToModuleObject(
+ isolate, thrower, std::move(result.val), bytes, asm_js_script,
+ asm_js_offset_table_bytes);
+}
+
+MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
+ ErrorThrower* thrower,
+ const ModuleWireBytes& bytes) {
+ // TODO(titzer): only make a copy of the bytes if SharedArrayBuffer
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
+
+ ModuleResult result = SyncDecodeWasmModule(
+ isolate, bytes_copy.start(), bytes_copy.end(), false, kWasmOrigin);
+ if (result.failed()) {
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return ModuleCompiler::CompileToModuleObject(
+ isolate, thrower, std::move(result.val), bytes_copy, Handle<Script>(),
+ Vector<const byte>());
+}
+
+MaybeHandle<WasmInstanceObject> SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory) {
+ InstanceBuilder builder(isolate, thrower, module_object, imports, memory,
+ &InstanceFinalizer);
+ return builder.Build();
+}
+
+MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory) {
+ MaybeHandle<WasmModuleObject> module = SyncCompile(isolate, thrower, bytes);
+ DCHECK_EQ(thrower->error(), module.is_null());
+ if (module.is_null()) return {};
+
+ return SyncInstantiate(isolate, thrower, module.ToHandleChecked(),
+ Handle<JSReceiver>::null(),
+ Handle<JSArrayBuffer>::null());
+}
+
+void RejectPromise(Isolate* isolate, Handle<Context> context,
+ ErrorThrower& thrower, Handle<JSPromise> promise) {
+ Local<Promise::Resolver> resolver =
+ Utils::PromiseToLocal(promise).As<Promise::Resolver>();
+ auto maybe = resolver->Reject(Utils::ToLocal(context),
+ Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
+}
+
+void ResolvePromise(Isolate* isolate, Handle<Context> context,
+ Handle<JSPromise> promise, Handle<Object> result) {
+ Local<Promise::Resolver> resolver =
+ Utils::PromiseToLocal(promise).As<Promise::Resolver>();
+ auto maybe =
+ resolver->Resolve(Utils::ToLocal(context), Utils::ToLocal(result));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
+}
+
+void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports) {
+ ErrorThrower thrower(isolate, nullptr);
+ MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+ isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
+ if (thrower.error()) {
+ RejectPromise(isolate, handle(isolate->context()), thrower, promise);
+ return;
+ }
+ ResolvePromise(isolate, handle(isolate->context()), promise,
+ instance_object.ToHandleChecked());
+}
+
+void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes) {
+ if (!FLAG_wasm_async_compilation) {
+ ErrorThrower thrower(isolate, "WasmCompile");
+ // Compile the module.
+ MaybeHandle<WasmModuleObject> module_object =
+ SyncCompile(isolate, &thrower, bytes);
+ if (thrower.error()) {
+ RejectPromise(isolate, handle(isolate->context()), thrower, promise);
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ ResolvePromise(isolate, handle(isolate->context()), promise, module);
+ return;
+ }
+
+ if (FLAG_wasm_test_streaming) {
+ std::shared_ptr<StreamingDecoder> streaming_decoder =
+ isolate->wasm_compilation_manager()->StartStreamingCompilation(
+ isolate, handle(isolate->context()), promise);
+ streaming_decoder->OnBytesReceived(bytes.module_bytes());
+ streaming_decoder->Finish();
+ return;
+ }
+ // Make a copy of the wire bytes in case the user program changes them
+ // during asynchronous compilation.
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ isolate->wasm_compilation_manager()->StartAsyncCompileJob(
+ isolate, std::move(copy), bytes.length(), handle(isolate->context()),
+ promise);
+}
+
+Handle<Code> CompileLazy(Isolate* isolate) {
+ HistogramTimerScope lazy_time_scope(
+ isolate->counters()->wasm_lazy_compilation_time());
+
+ // Find the wasm frame which triggered the lazy compile, to get the wasm
+ // instance.
+ StackFrameIterator it(isolate);
+ // First frame: C entry stub.
+ DCHECK(!it.done());
+ DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
+ it.Advance();
+ // Second frame: WasmCompileLazy builtin.
+ DCHECK(!it.done());
+ Handle<Code> lazy_compile_code(it.frame()->LookupCode(), isolate);
+ DCHECK_EQ(Builtins::kWasmCompileLazy, lazy_compile_code->builtin_index());
+ Handle<WasmInstanceObject> instance;
+ Handle<FixedArray> exp_deopt_data;
+ int func_index = -1;
+ if (lazy_compile_code->deoptimization_data()->length() > 0) {
+ // Then it's an indirect call or via JS->wasm wrapper.
+ DCHECK_LE(2, lazy_compile_code->deoptimization_data()->length());
+ exp_deopt_data = handle(lazy_compile_code->deoptimization_data(), isolate);
+ auto* weak_cell = WeakCell::cast(exp_deopt_data->get(0));
+ instance = handle(WasmInstanceObject::cast(weak_cell->value()), isolate);
+ func_index = Smi::ToInt(exp_deopt_data->get(1));
+ }
+ it.Advance();
+ // Third frame: The calling wasm code or js-to-wasm wrapper.
+ DCHECK(!it.done());
+ DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
+ Handle<Code> caller_code = handle(it.frame()->LookupCode(), isolate);
+ if (it.frame()->is_js_to_wasm()) {
+ DCHECK(!instance.is_null());
+ } else if (instance.is_null()) {
+ // Then this is a direct call (otherwise we would have attached the instance
+ // via deopt data to the lazy compile stub). Just use the instance of the
+ // caller.
+ instance =
+ handle(WasmInstanceObject::GetOwningInstance(*caller_code), isolate);
+ }
+ int offset =
+ static_cast<int>(it.frame()->pc() - caller_code->instruction_start());
+ // Only patch the caller code if this is *not* an indirect call.
+ // exp_deopt_data will be null if the called function is not exported at all,
+ // and its length will be <= 2 if all entries in tables were already patched.
+ // Note that this check is conservative: If the first call to an exported
+ // function is direct, we will just patch the export tables, and only on the
+ // second call we will patch the caller.
+ bool patch_caller = caller_code->kind() == Code::JS_TO_WASM_FUNCTION ||
+ exp_deopt_data.is_null() || exp_deopt_data->length() <= 2;
+
+ Handle<Code> compiled_code = WasmCompiledModule::CompileLazy(
+ isolate, instance, caller_code, offset, func_index, patch_caller);
+ if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
+ // See EnsureExportedLazyDeoptData: exp_deopt_data[2...(len-1)] are pairs of
+ // <export_table, index> followed by undefined values.
+ // Use this information here to patch all export tables.
+ DCHECK_EQ(0, exp_deopt_data->length() % 2);
+ for (int idx = 2, end = exp_deopt_data->length(); idx < end; idx += 2) {
+ if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
+ FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
+ int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
+ DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
+ exp_table->set(exp_index, *compiled_code);
+ }
+ // After processing, remove the list of exported entries, such that we don't
+ // do the patching redundantly.
+ Handle<FixedArray> new_deopt_data =
+ isolate->factory()->CopyFixedArrayUpTo(exp_deopt_data, 2, TENURED);
+ lazy_compile_code->set_deoptimization_data(*new_deopt_data);
+ }
+
+ return compiled_code;
+}
+
+compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ DisallowHeapAllocation no_gc;
+ WasmModule* module = compiled_module->module();
+
+ std::vector<GlobalHandleAddress> function_tables;
+ std::vector<GlobalHandleAddress> signature_tables;
+ std::vector<SignatureMap*> signature_maps;
+
+ int num_function_tables = static_cast<int>(module->function_tables.size());
+ for (int i = 0; i < num_function_tables; ++i) {
+ FixedArray* ft = compiled_module->ptr_to_function_tables();
+ FixedArray* st = compiled_module->ptr_to_signature_tables();
+
+ // TODO(clemensh): defer these handles for concurrent compilation.
+ function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
+ signature_tables.push_back(WasmCompiledModule::GetTableValue(st, i));
+ signature_maps.push_back(&module->function_tables[i].map);
+ }
+
+ std::vector<Handle<Code>> empty_code;
+
+ compiler::ModuleEnv result = {
+ module, // --
+ function_tables, // --
+ signature_tables, // --
+ signature_maps, // --
+ empty_code, // --
+ BUILTIN_CODE(isolate, WasmCompileLazy), // --
+ reinterpret_cast<uintptr_t>( // --
+ compiled_module->GetGlobalsStartOrNull()) // --
+ };
+ return result;
+}
+
+void LazyCompilationOrchestrator::CompileFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+ if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
+ Code::WASM_FUNCTION) {
+ return;
+ }
+
+ compiler::ModuleEnv module_env =
+ CreateModuleEnvFromCompiledModule(isolate, compiled_module);
+
+ const uint8_t* module_start = compiled_module->module_bytes()->GetChars();
+
+ const WasmFunction* func = &module_env.module->functions[func_index];
+ FunctionBody body{func->sig, func->code.offset(),
+ module_start + func->code.offset(),
+ module_start + func->code.end_offset()};
+ // TODO(wasm): Refactor this to only get the name if it is really needed for
+ // tracing / debugging.
+ std::string func_name;
+ {
+ WasmName name = Vector<const char>::cast(
+ compiled_module->GetRawFunctionName(func_index));
+ // Copy to std::string, because the underlying string object might move on
+ // the heap.
+ func_name.assign(name.start(), static_cast<size_t>(name.length()));
+ }
+ ErrorThrower thrower(isolate, "WasmLazyCompile");
+ compiler::WasmCompilationUnit unit(isolate, &module_env, body,
+ CStrVector(func_name.c_str()), func_index,
+ CEntryStub(isolate, 1).GetCode());
+ unit.ExecuteCompilation();
+ MaybeHandle<Code> maybe_code = unit.FinishCompilation(&thrower);
+
+ // If there is a pending error, something really went wrong. The module was
+ // verified before starting execution with lazy compilation.
+ // This might be OOM, but then we cannot continue execution anyway.
+ // TODO(clemensh): According to the spec, we can actually skip validation at
+ // module creation time, and return a function that always traps here.
+ CHECK(!thrower.error());
+ Handle<Code> code = maybe_code.ToHandleChecked();
+
+ Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
+ Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
+ // TODO(wasm): Introduce constants for the indexes in wasm deopt data.
+ deopt_data->set(0, *weak_instance);
+ deopt_data->set(1, Smi::FromInt(func_index));
+ code->set_deoptimization_data(*deopt_data);
+
+ DCHECK_EQ(Builtins::kWasmCompileLazy,
+ Code::cast(compiled_module->code_table()->get(func_index))
+ ->builtin_index());
+ compiled_module->code_table()->set(func_index, *code);
+
+ // Now specialize the generated code for this instance.
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+ code_specialization.RelocateDirectCalls(instance);
+ code_specialization.ApplyToWasmCode(*code, SKIP_ICACHE_FLUSH);
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ auto counters = isolate->counters();
+ counters->wasm_lazily_compiled_functions()->Increment();
+ counters->wasm_generated_code_size()->Increment(code->body_size());
+ counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
+}
+
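// Illustrative sketch (not part of this patch): one possible shape for the
// constants that the TODO above asks for, assuming the deopt data layout used
// in this function, i.e. slot 0 holds the WeakCell of the owning instance and
// slot 1 the Smi-encoded function index.
//
//   constexpr int kWasmDeoptDataInstanceSlot = 0;    // WeakCell(instance)
//   constexpr int kWasmDeoptDataFuncIndexSlot = 1;   // Smi(func_index)
//   constexpr int kWasmDeoptDataFirstExportSlot = 2; // <table, index> pairs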
+int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
+ int offset) {
+ DCHECK(!iterator.done());
+ int byte_pos;
+ do {
+ byte_pos = iterator.source_position().ScriptOffset();
+ iterator.Advance();
+ } while (!iterator.done() && iterator.code_offset() <= offset);
+ return byte_pos;
+}
+
+Handle<Code> LazyCompilationOrchestrator::CompileLazy(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
+ int call_offset, int exported_func_index, bool patch_caller) {
+ struct NonCompiledFunction {
+ int offset;
+ int func_index;
+ };
+ std::vector<NonCompiledFunction> non_compiled_functions;
+ int func_to_return_idx = exported_func_index;
+ Decoder decoder(nullptr, nullptr);
+ bool is_js_to_wasm = caller->kind() == Code::JS_TO_WASM_FUNCTION;
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+
+ if (is_js_to_wasm) {
+ non_compiled_functions.push_back({0, exported_func_index});
+ } else if (patch_caller) {
+ DisallowHeapAllocation no_gc;
+ SeqOneByteString* module_bytes = compiled_module->module_bytes();
+ SourcePositionTableIterator source_pos_iterator(
+ caller->SourcePositionTable());
+ DCHECK_EQ(2, caller->deoptimization_data()->length());
+ int caller_func_index = Smi::ToInt(caller->deoptimization_data()->get(1));
+ const byte* func_bytes =
+ module_bytes->GetChars() +
+ compiled_module->module()->functions[caller_func_index].code.offset();
+ for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
+ it.next()) {
+ Code* callee =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
+ // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
+ // (depending on the bool) against limits of T and then static_casts.
+ size_t offset_l = it.rinfo()->pc() - caller->instruction_start();
+ DCHECK_GE(kMaxInt, offset_l);
+ int offset = static_cast<int>(offset_l);
+ int byte_pos =
+ AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
+ int called_func_index =
+ ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
+ non_compiled_functions.push_back({offset, called_func_index});
+      // The call offset points one instruction past the call. Remember the
+      // last called function before that offset.
+ if (offset < call_offset) func_to_return_idx = called_func_index;
+ }
+ }
+
+ // TODO(clemensh): compile all functions in non_compiled_functions in
+ // background, wait for func_to_return_idx.
+ CompileFunction(isolate, instance, func_to_return_idx);
+
+ if (is_js_to_wasm || patch_caller) {
+ DisallowHeapAllocation no_gc;
+ // Now patch the code object with all functions which are now compiled.
+ int idx = 0;
+ for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
+ it.next()) {
+ Code* callee =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
+ DCHECK_GT(non_compiled_functions.size(), idx);
+ int called_func_index = non_compiled_functions[idx].func_index;
+ // Check that the callee agrees with our assumed called_func_index.
+ DCHECK_IMPLIES(callee->deoptimization_data()->length() > 0,
+ Smi::ToInt(callee->deoptimization_data()->get(1)) ==
+ called_func_index);
+ if (is_js_to_wasm) {
+ DCHECK_EQ(func_to_return_idx, called_func_index);
+ } else {
+ DCHECK_EQ(non_compiled_functions[idx].offset,
+ it.rinfo()->pc() - caller->instruction_start());
+ }
+ ++idx;
+ Handle<Code> callee_compiled(
+ Code::cast(compiled_module->code_table()->get(called_func_index)));
+ if (callee_compiled->builtin_index() == Builtins::kWasmCompileLazy) {
+ DCHECK_NE(func_to_return_idx, called_func_index);
+ continue;
+ }
+ DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
+ it.rinfo()->set_target_address(isolate,
+ callee_compiled->instruction_start());
+ }
+ DCHECK_EQ(non_compiled_functions.size(), idx);
+ }
+
+ Code* ret =
+ Code::cast(compiled_module->code_table()->get(func_to_return_idx));
+ DCHECK_EQ(Code::WASM_FUNCTION, ret->kind());
+ return handle(ret, isolate);
+}
+
ModuleCompiler::CodeGenerationSchedule::CodeGenerationSchedule(
base::RandomNumberGenerator* random_number_generator, size_t max_memory)
: random_number_generator_(random_number_generator),
@@ -82,11 +941,10 @@ size_t ModuleCompiler::CodeGenerationSchedule::GetRandomIndexInSchedule() {
return index;
}
-ModuleCompiler::ModuleCompiler(Isolate* isolate,
- std::unique_ptr<WasmModule> module,
+ModuleCompiler::ModuleCompiler(Isolate* isolate, WasmModule* module,
Handle<Code> centry_stub)
: isolate_(isolate),
- module_(std::move(module)),
+ module_(module),
async_counters_(isolate->async_counters()),
executed_units_(
isolate->random_number_generator(),
@@ -101,18 +959,6 @@ ModuleCompiler::ModuleCompiler(Isolate* isolate,
centry_stub_(centry_stub) {}
// The actual runnable task that performs compilations in the background.
-ModuleCompiler::CompilationTask::CompilationTask(ModuleCompiler* compiler)
- : CancelableTask(&compiler->background_task_manager_),
- compiler_(compiler) {}
-
-void ModuleCompiler::CompilationTask::RunInternal() {
- while (compiler_->executed_units_.CanAcceptWork() &&
- compiler_->FetchAndExecuteCompilationUnit()) {
- }
-
- compiler_->OnBackgroundTaskStopped();
-}
-
void ModuleCompiler::OnBackgroundTaskStopped() {
base::LockGuard<base::Mutex> guard(&tasks_mutex_);
++stopped_compilation_tasks_;
@@ -323,16 +1169,16 @@ void ModuleCompiler::ValidateSequentially(const ModuleWireBytes& wire_bytes,
}
}
+// static
MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObject(
- ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
+ Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
-
- TimedHistogramScope wasm_compile_module_time_scope(
- module_->is_wasm() ? counters()->wasm_compile_wasm_module_time()
- : counters()->wasm_compile_asm_module_time());
- return CompileToModuleObjectInternal(
- isolate_, thrower, wire_bytes, asm_js_script, asm_js_offset_table_bytes);
+ Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
+ ModuleCompiler compiler(isolate, module.get(), centry_stub);
+ return compiler.CompileToModuleObjectInternal(thrower, std::move(module),
+ wire_bytes, asm_js_script,
+ asm_js_offset_table_bytes);
}
namespace {
@@ -491,11 +1337,11 @@ bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
using WasmInstanceMap =
IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
-Handle<Code> UnwrapOrCompileImportWrapper(
+Handle<Code> UnwrapExportOrCompileImportWrapper(
Isolate* isolate, int index, FunctionSig* sig, Handle<JSReceiver> target,
- Handle<String> module_name, MaybeHandle<String> import_name,
- ModuleOrigin origin, WasmInstanceMap* imported_instances) {
- WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
+ ModuleOrigin origin, WasmInstanceMap* imported_instances,
+ Handle<FixedArray> js_imports_table, Handle<WasmInstanceObject> instance) {
+ WasmFunction* other_func = GetWasmFunctionForExport(isolate, target);
if (other_func) {
if (!sig->Equals(other_func->sig)) return Handle<Code>::null();
// Signature matched. Unwrap the import wrapper and return the raw wasm
@@ -504,12 +1350,34 @@ Handle<Code> UnwrapOrCompileImportWrapper(
Handle<WasmInstanceObject> imported_instance(
Handle<WasmExportedFunction>::cast(target)->instance(), isolate);
imported_instances->Set(imported_instance, imported_instance);
- return UnwrapImportWrapper(target);
+ Handle<Code> wasm_code =
+ UnwrapExportWrapper(Handle<JSFunction>::cast(target));
+ // Create a WasmToWasm wrapper to replace the current wasm context with
+ // the imported_instance one, in order to access the right memory.
+ // If the imported instance does not have memory, avoid the wrapper.
+ // TODO(wasm): Avoid the wrapper also if instance memory and imported
+ // instance share the same memory object.
+ bool needs_wasm_to_wasm_wrapper = imported_instance->has_memory_object();
+ if (!needs_wasm_to_wasm_wrapper) return wasm_code;
+ Address new_wasm_context =
+ reinterpret_cast<Address>(imported_instance->wasm_context());
+ Handle<Code> wrapper_code = compiler::CompileWasmToWasmWrapper(
+ isolate, wasm_code, sig, index, new_wasm_context);
+ // Set the deoptimization data for the WasmToWasm wrapper.
+ // TODO(wasm): Remove the deoptimization data when we will use tail calls
+ // for WasmToWasm wrappers.
+ Factory* factory = isolate->factory();
+ Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
+ Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
+ deopt_data->set(0, *weak_link);
+ deopt_data->set(1, Smi::FromInt(index));
+ wrapper_code->set_deoptimization_data(*deopt_data);
+ return wrapper_code;
}
// No wasm function or being debugged. Compile a new wrapper for the new
// signature.
- return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
- module_name, import_name, origin);
+ return compiler::CompileWasmToJSWrapper(isolate, target, sig, index, origin,
+ js_imports_table);
}
double MonotonicallyIncreasingTimeInMs() {
@@ -553,8 +1421,6 @@ std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
signature_maps, // --
empty_code, // --
illegal_builtin, // --
- 0, // --
- 0, // --
0 // --
};
return std::unique_ptr<compiler::ModuleEnv>(new compiler::ModuleEnv(result));
@@ -581,12 +1447,21 @@ void ReopenHandles(Isolate* isolate, const std::vector<Handle<T>>& vec) {
} // namespace
MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
+ ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
- Factory* factory = isolate->factory();
+ TimedHistogramScope wasm_compile_module_time_scope(
+ module_->is_wasm() ? counters()->wasm_compile_wasm_module_time()
+ : counters()->wasm_compile_asm_module_time());
+  // The {module} parameter is passed in to transfer ownership of the WasmModule
+ // to this function. The WasmModule itself existed already as an instance
+ // variable of the ModuleCompiler. We check here that the parameter and the
+ // instance variable actually point to the same object.
+ DCHECK_EQ(module.get(), module_);
// Check whether lazy compilation is enabled for this module.
- bool lazy_compile = compile_lazy(module_.get());
+ bool lazy_compile = compile_lazy(module_);
+
+ Factory* factory = isolate_->factory();
// If lazy compile: Initialize the code table with the lazy compile builtin.
// Otherwise: Initialize with the illegal builtin. All call sites will be
@@ -595,7 +1470,7 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
? BUILTIN_CODE(isolate_, WasmCompileLazy)
: BUILTIN_CODE(isolate_, Illegal);
- auto env = CreateDefaultModuleEnv(isolate, module_.get(), init_builtin);
+ auto env = CreateDefaultModuleEnv(isolate_, module_, init_builtin);
// The {code_table} array contains import wrappers and functions (which
// are both included in {functions.size()}, and export wrappers).
@@ -675,8 +1550,7 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
// The {module_wrapper} will take ownership of the {WasmModule} object,
// and it will be destroyed when the GC reclaims the wrapper object.
Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::New(isolate_, module_.release());
- WasmModule* module = module_wrapper->get();
+ WasmModuleWrapper::From(isolate_, module.release());
// Create the shared module data.
// TODO(clemensh): For the same module (same bytes / same hash), we should
@@ -705,12 +1579,12 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
// Compile JS->wasm wrappers for exported functions.
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
- for (auto exp : module->export_table) {
+ for (auto exp : module_->export_table) {
if (exp.kind != kExternalFunction) continue;
Handle<Code> wasm_code = EnsureExportedLazyDeoptData(
isolate_, Handle<WasmInstanceObject>::null(), code_table, exp.index);
Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
- isolate_, module, wasm_code, exp.index);
+ isolate_, module_, wasm_code, exp.index);
export_wrappers->set(wrapper_index, *wrapper_code);
RecordStats(*wrapper_code, counters());
++wrapper_index;
@@ -718,38 +1592,6 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
return WasmModuleObject::New(isolate_, compiled_module);
}
-Handle<Code> JSToWasmWrapperCache::CloneOrCompileJSToWasmWrapper(
- Isolate* isolate, wasm::WasmModule* module, Handle<Code> wasm_code,
- uint32_t index) {
- const wasm::WasmFunction* func = &module->functions[index];
- int cached_idx = sig_map_.Find(func->sig);
- if (cached_idx >= 0) {
- Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
- // Now patch the call to wasm code.
- for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
- DCHECK(!it.done());
- Code* target =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() == Code::WASM_FUNCTION ||
- target->kind() == Code::WASM_TO_JS_FUNCTION ||
- target->builtin_index() == Builtins::kIllegal ||
- target->builtin_index() == Builtins::kWasmCompileLazy) {
- it.rinfo()->set_target_address(isolate, wasm_code->instruction_start());
- break;
- }
- }
- return code;
- }
-
- Handle<Code> code =
- compiler::CompileJSToWasmWrapper(isolate, module, wasm_code, index);
- uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
- DCHECK_EQ(code_cache_.size(), new_cache_idx);
- USE(new_cache_idx);
- code_cache_.push_back(code);
- return code;
-}
-
InstanceBuilder::InstanceBuilder(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> ffi,
@@ -957,7 +1799,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Set externally passed ArrayBuffer non neuterable.
memory->set_is_neuterable(false);
- DCHECK_IMPLIES(EnableGuardRegions(),
+ DCHECK_IMPLIES(trap_handler::UseTrapHandler(),
module_->is_asm_js() || memory->has_guard_region());
} else if (initial_pages > 0) {
memory_ = AllocateMemory(initial_pages);
@@ -997,29 +1839,38 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Initialize memory.
//--------------------------------------------------------------------------
- uint32_t mem_size = 0;
Address mem_start = nullptr;
-
- // Stash old values of mem_start, and mem_size before
- // SetSpecializationMemInfoFrom, to patch memory references
- uint32_t old_mem_size = compiled_module_->GetEmbeddedMemSizeOrZero();
- Address old_mem_start = compiled_module_->GetEmbeddedMemStartOrNull();
+ uint32_t mem_size = 0;
if (!memory_.is_null()) {
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
mem_start = static_cast<Address>(memory->backing_store());
CHECK(memory->byte_length()->ToUint32(&mem_size));
LoadDataSegments(mem_start, mem_size);
-
// Just like with globals, we need to keep both the JSArrayBuffer
// and save the start pointer.
instance->set_memory_buffer(*memory);
- WasmCompiledModule::SetSpecializationMemInfoFrom(factory, compiled_module_,
- memory);
}
- // We might get instantiated again with the same memory. No patching
- // needed in this case.
- code_specialization.RelocateMemoryReferences(old_mem_start, old_mem_size,
- mem_start, mem_size);
+
+ //--------------------------------------------------------------------------
+ // Create a memory object to have a WasmContext.
+ //--------------------------------------------------------------------------
+ if (module_->has_memory) {
+ if (!instance->has_memory_object()) {
+ Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
+ isolate_,
+ instance->has_memory_buffer() ? handle(instance->memory_buffer())
+ : Handle<JSArrayBuffer>::null(),
+ module_->maximum_pages != 0 ? module_->maximum_pages : -1);
+ instance->set_memory_object(*memory_object);
+ }
+
+ code_specialization.RelocateWasmContextReferences(
+ reinterpret_cast<Address>(instance->wasm_context()));
+ // Store the wasm_context address in the JSToWasmWrapperCache so that it can
+ // be used to compile JSToWasmWrappers.
+ js_to_wasm_cache_.SetContextAddress(
+ reinterpret_cast<Address>(instance->wasm_context()));
+ }
//--------------------------------------------------------------------------
// Set up the runtime support for the new instance.
@@ -1074,6 +1925,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
FlushICache(isolate_, code_table);
+ FlushICache(isolate_, wrapper_table);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
@@ -1167,7 +2019,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
DCHECK(isolate_->has_pending_exception());
// It's unfortunate that the new instance is already linked in the
// chain. However, we need to set up everything before executing the
- // startup function, such that stack trace information can be generated
+      // startup function, such that stack trace information can be generated
// correctly already in the start function.
return {};
}
@@ -1346,6 +2198,27 @@ void InstanceBuilder::SanitizeImports() {
}
}
+Handle<FixedArray> InstanceBuilder::SetupWasmToJSImportsTable(
+ Handle<WasmInstanceObject> instance) {
+  // The js_imports_table is laid out so that index 0 holds
+  // isolate->native_context and, for every import i, slot 3*i+1 holds the
+  // JSReceiver, slot 3*i+2 the function's global proxy, and slot 3*i+3 the
+  // function's context. Hence the fixed array's size is 3 * import_table.size() + 1.
+ int size = static_cast<int>(module_->import_table.size());
+ CHECK_LE(size, (kMaxInt - 1) / 3);
+ Handle<FixedArray> func_table =
+ isolate_->factory()->NewFixedArray(3 * size + 1, TENURED);
+ Handle<FixedArray> js_imports_table =
+ isolate_->global_handles()->Create(*func_table);
+ GlobalHandles::MakeWeak(
+ reinterpret_cast<Object**>(js_imports_table.location()),
+ js_imports_table.location(), &FunctionTableFinalizer,
+ v8::WeakCallbackType::kFinalizer);
+ instance->set_js_imports_table(*func_table);
+ js_imports_table->set(0, *isolate_->native_context());
+ return js_imports_table;
+}
+
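// Illustrative sketch (not part of this patch): slot helpers matching the
// layout documented above, where slot 0 holds the native context and each
// import i occupies slots 3*i+1 to 3*i+3 (receiver, global proxy, context).
// The helper names are made up for illustration only.
//
//   constexpr int kJSImportsNativeContextSlot = 0;
//   int ReceiverSlot(int i) { return 3 * i + 1; }
//   int GlobalProxySlot(int i) { return 3 * i + 2; }
//   int FunctionContextSlot(int i) { return 3 * i + 3; }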
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
// functions.
@@ -1353,6 +2226,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<WasmInstanceObject> instance) {
int num_imported_functions = 0;
int num_imported_tables = 0;
+ Handle<FixedArray> js_imports_table = SetupWasmToJSImportsTable(instance);
WasmInstanceMap imported_wasm_instances(isolate_->heap());
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
for (int index = 0; index < static_cast<int>(module_->import_table.size());
@@ -1372,17 +2246,17 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
return -1;
}
- Handle<Code> import_wrapper = UnwrapOrCompileImportWrapper(
+ Handle<Code> import_code = UnwrapExportOrCompileImportWrapper(
isolate_, index, module_->functions[import.index].sig,
- Handle<JSReceiver>::cast(value), module_name, import_name,
- module_->origin(), &imported_wasm_instances);
- if (import_wrapper.is_null()) {
+ Handle<JSReceiver>::cast(value), module_->origin(),
+ &imported_wasm_instances, js_imports_table, instance);
+ if (import_code.is_null()) {
ReportLinkError("imported function does not match the expected type",
index, module_name, import_name);
return -1;
}
- code_table->set(num_imported_functions, *import_wrapper);
- RecordStats(*import_wrapper, counters());
+ code_table->set(num_imported_functions, *import_code);
+ RecordStats(*import_code, counters());
num_imported_functions++;
break;
}
@@ -1440,8 +2314,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
for (int i = 0; i < table_size; ++i) {
Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
if (!val->IsJSFunction()) continue;
- WasmFunction* function =
- GetWasmFunctionForImportWrapper(isolate_, val);
+ WasmFunction* function = GetWasmFunctionForExport(isolate_, val);
if (function == nullptr) {
thrower_->LinkError("table import %d[%d] is not a wasm function",
index, i);
@@ -1449,7 +2322,8 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
}
int sig_index = table.map.FindOrInsert(function->sig);
table_instance.signature_table->set(i, Smi::FromInt(sig_index));
- table_instance.function_table->set(i, *UnwrapImportWrapper(val));
+ table_instance.function_table->set(
+ i, *UnwrapExportWrapper(Handle<JSFunction>::cast(val)));
}
num_imported_tables++;
@@ -1492,6 +2366,14 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
return -1;
}
}
+ if (module_->has_shared_memory != buffer->is_shared()) {
+ thrower_->LinkError(
+ "mismatch in shared state of memory, declared = %d, imported = "
+ "%d",
+ module_->has_shared_memory, buffer->is_shared());
+ return -1;
+ }
+
break;
}
case kExternalGlobal: {
@@ -1597,7 +2479,7 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions = EnableGuardRegions();
+ const bool enable_guard_regions = trap_handler::UseTrapHandler();
Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
isolate_, num_pages * WasmModule::kPageSize, enable_guard_regions);
@@ -1722,23 +2604,12 @@ void InstanceBuilder::ProcessExports(
break;
}
case kExternalMemory: {
- // Export the memory as a WebAssembly.Memory object.
- Handle<WasmMemoryObject> memory_object;
- if (!instance->has_memory_object()) {
- // If there was no imported WebAssembly.Memory object, create one.
- memory_object = WasmMemoryObject::New(
- isolate_,
- (instance->has_memory_buffer())
- ? handle(instance->memory_buffer())
- : Handle<JSArrayBuffer>::null(),
- (module_->maximum_pages != 0) ? module_->maximum_pages : -1);
- instance->set_memory_object(*memory_object);
- } else {
- memory_object =
- Handle<WasmMemoryObject>(instance->memory_object(), isolate_);
- }
-
- desc.set_value(memory_object);
+ // Export the memory as a WebAssembly.Memory object. A WasmMemoryObject
+ // should already be available if the module has memory, since we always
+ // create or import it when building an WasmInstanceObject.
+ DCHECK(instance->has_memory_object());
+ desc.set_value(
+ Handle<WasmMemoryObject>(instance->memory_object(), isolate_));
break;
}
case kExternalGlobal: {
@@ -2006,19 +2877,80 @@ void AsyncCompileJob::Start() {
DoAsync<DecodeModule>(); // --
}
+void AsyncCompileJob::Abort() {
+ background_task_manager_.CancelAndWait();
+ if (num_pending_foreground_tasks_ == 0) {
+ // No task is pending, we can just remove the AsyncCompileJob.
+ isolate_->wasm_compilation_manager()->RemoveJob(this);
+ } else {
+ // There is still a compilation task in the task queue. We enter the
+ // AbortCompilation state and wait for this compilation task to abort the
+ // AsyncCompileJob.
+ NextStep<AbortCompilation>();
+ }
+}
+
+class AsyncStreamingProcessor final : public StreamingProcessor {
+ public:
+ explicit AsyncStreamingProcessor(AsyncCompileJob* job);
+
+ bool ProcessModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) override;
+
+ bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
+ uint32_t offset) override;
+
+ bool ProcessCodeSectionHeader(size_t functions_count,
+ uint32_t offset) override;
+
+ bool ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t offset) override;
+
+ void OnFinishedChunk() override;
+
+ void OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
+ size_t length) override;
+
+ void OnError(DecodeResult result) override;
+
+ void OnAbort() override;
+
+ private:
+  // Finishes the AsyncCompileJob with an error.
+ void FinishAsyncCompileJobWithError(ResultBase result);
+
+ ModuleDecoder decoder_;
+ AsyncCompileJob* job_;
+ std::unique_ptr<ModuleCompiler::CompilationUnitBuilder>
+ compilation_unit_builder_;
+ uint32_t next_function_ = 0;
+};
+
+std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
+ DCHECK_NULL(stream_);
+ stream_.reset(
+ new StreamingDecoder(base::make_unique<AsyncStreamingProcessor>(this)));
+ return stream_;
+}
+
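// Illustrative sketch (not part of this patch): how a caller is assumed to
// drive the streaming pipeline wired up here. The StreamingDecoder method
// names OnBytesReceived() and Finish() are assumptions, not taken from this
// diff; each received chunk ends up in the AsyncStreamingProcessor callbacks
// declared above, and finishing the stream eventually reaches
// OnFinishedStream() and from there the FinishCompile step.
//
//   std::shared_ptr<StreamingDecoder> stream = job->CreateStreamingDecoder();
//   for (const auto& chunk : chunks) stream->OnBytesReceived(chunk);
//   stream->Finish();  // no more bytes; compilation completes asynchronously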
AsyncCompileJob::~AsyncCompileJob() {
background_task_manager_.CancelAndWait();
for (auto d : deferred_handles_) delete d;
}
void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
+ if (stream_) stream_->NotifyError();
+ // {job} keeps the {this} pointer alive.
+ std::shared_ptr<AsyncCompileJob> job =
+ isolate_->wasm_compilation_manager()->RemoveJob(this);
RejectPromise(isolate_, context_, thrower, module_promise_);
- isolate_->wasm_compilation_manager()->RemoveJob(this);
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
+ // {job} keeps the {this} pointer alive.
+ std::shared_ptr<AsyncCompileJob> job =
+ isolate_->wasm_compilation_manager()->RemoveJob(this);
ResolvePromise(isolate_, context_, module_promise_, result);
- isolate_->wasm_compilation_manager()->RemoveJob(this);
}
// A closure to run a compilation step (either as foreground or background
@@ -2032,7 +2964,9 @@ class AsyncCompileJob::CompileStep {
void Run(bool on_foreground) {
if (on_foreground) {
- DCHECK_EQ(1, job_->num_pending_foreground_tasks_--);
+ HandleScope scope(job_->isolate_);
+ --job_->num_pending_foreground_tasks_;
+ DCHECK_EQ(0, job_->num_pending_foreground_tasks_);
SaveContext saved_context(job_->isolate_);
job_->isolate_->set_context(*job_->context_);
RunInForeground();
@@ -2070,16 +3004,19 @@ class AsyncCompileJob::CompileTask : public CancelableTask {
};
void AsyncCompileJob::StartForegroundTask() {
- DCHECK_EQ(0, num_pending_foreground_tasks_++);
-
- V8::GetCurrentPlatform()->CallOnForegroundThread(
- reinterpret_cast<v8::Isolate*>(isolate_), new CompileTask(this, true));
+ ++num_pending_foreground_tasks_;
+ DCHECK_EQ(1, num_pending_foreground_tasks_);
+
+ v8::Platform* platform = V8::GetCurrentPlatform();
+ // TODO(ahaas): This is a CHECK to debug issue 764313.
+ CHECK(platform);
+ platform->CallOnForegroundThread(reinterpret_cast<v8::Isolate*>(isolate_),
+ new CompileTask(this, true));
}
-template <typename State, typename... Args>
+template <typename Step, typename... Args>
void AsyncCompileJob::DoSync(Args&&... args) {
- step_.reset(new State(std::forward<Args>(args)...));
- step_->job_ = this;
+ NextStep<Step>(std::forward<Args>(args)...);
StartForegroundTask();
}
@@ -2088,16 +3025,30 @@ void AsyncCompileJob::StartBackgroundTask() {
new CompileTask(this, false), v8::Platform::kShortRunningTask);
}
-template <typename State, typename... Args>
+void AsyncCompileJob::RestartBackgroundTasks() {
+ size_t num_restarts = stopped_tasks_.Value();
+ stopped_tasks_.Decrement(num_restarts);
+
+ for (size_t i = 0; i < num_restarts; ++i) {
+ StartBackgroundTask();
+ }
+}
+
+template <typename Step, typename... Args>
void AsyncCompileJob::DoAsync(Args&&... args) {
- step_.reset(new State(std::forward<Args>(args)...));
- step_->job_ = this;
+ NextStep<Step>(std::forward<Args>(args)...);
size_t end = step_->NumberOfBackgroundTasks();
for (size_t i = 0; i < end; ++i) {
StartBackgroundTask();
}
}
+template <typename Step, typename... Args>
+void AsyncCompileJob::NextStep(Args&&... args) {
+ step_.reset(new Step(std::forward<Args>(args)...));
+ step_->job_ = this;
+}
+
//==========================================================================
// Step 1: (async) Decode the module.
//==========================================================================
@@ -2121,7 +3072,8 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
job_->DoSync<DecodeFail>(std::move(result));
} else {
// Decode passed.
- job_->DoSync<PrepareAndStartCompile>(std::move(result.val));
+ job_->module_ = std::move(result.val);
+ job_->DoSync<PrepareAndStartCompile>(job_->module_.get(), true);
}
}
};
@@ -2137,7 +3089,6 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
ModuleResult result_;
void RunInForeground() override {
TRACE_COMPILE("(1b) Decoding failed.\n");
- HandleScope scope(job_->isolate_);
ErrorThrower thrower(job_->isolate_, "AsyncCompile");
thrower.CompileFailed("Wasm decoding failed", result_);
// {job_} is deleted in AsyncCompileFailed, therefore the {return}.
@@ -2150,20 +3101,21 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
//==========================================================================
class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
public:
- explicit PrepareAndStartCompile(std::unique_ptr<WasmModule> module)
- : module_(std::move(module)) {}
+ explicit PrepareAndStartCompile(WasmModule* module, bool start_compilation)
+ : module_(module), start_compilation_(start_compilation) {}
private:
- std::unique_ptr<WasmModule> module_;
+ WasmModule* module_;
+ bool start_compilation_;
+
void RunInForeground() override {
TRACE_COMPILE("(2) Prepare and start compile...\n");
Isolate* isolate = job_->isolate_;
- HandleScope scope(isolate);
Factory* factory = isolate->factory();
Handle<Code> illegal_builtin = BUILTIN_CODE(isolate, Illegal);
job_->module_env_ =
- CreateDefaultModuleEnv(isolate, module_.get(), illegal_builtin);
+ CreateDefaultModuleEnv(isolate, module_, illegal_builtin);
// The {code_table} array contains import wrappers and functions (which
// are both included in {functions.size()}.
@@ -2181,9 +3133,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
}
// Transfer ownership of the {WasmModule} to the {ModuleCompiler}, but
// keep a pointer.
- WasmModule* module = module_.get();
Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
-
{
// Now reopen the handles in a deferred scope in order to use
// them in the concurrent steps.
@@ -2202,13 +3152,12 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
job_->deferred_handles_.push_back(deferred.Detach());
}
- job_->compiler_.reset(
- new ModuleCompiler(isolate, std::move(module_), centry_stub));
+ job_->compiler_.reset(new ModuleCompiler(isolate, module_, centry_stub));
job_->compiler_->EnableThrottling();
- DCHECK_LE(module->num_imported_functions, module->functions.size());
+ DCHECK_LE(module_->num_imported_functions, module_->functions.size());
size_t num_functions =
- module->functions.size() - module->num_imported_functions;
+ module_->functions.size() - module_->num_imported_functions;
if (num_functions == 0) {
// Degenerate case of an empty module.
job_->DoSync<FinishCompile>();
@@ -2222,10 +3171,20 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
V8::GetCurrentPlatform()
->NumberOfAvailableBackgroundThreads())));
- job_->outstanding_units_ = job_->compiler_->InitializeCompilationUnits(
- module->functions, job_->wire_bytes_, job_->module_env_.get());
- job_->DoAsync<ExecuteAndFinishCompilationUnits>(num_background_tasks);
+ if (start_compilation_) {
+ // TODO(ahaas): Try to remove the {start_compilation_} check when
+ // streaming decoding is done in the background. If
+ // InitializeCompilationUnits always returns 0 for streaming compilation,
+ // then DoAsync would do the same as NextStep already.
+ job_->outstanding_units_ = job_->compiler_->InitializeCompilationUnits(
+ module_->functions, job_->wire_bytes_, job_->module_env_.get());
+
+ job_->DoAsync<ExecuteAndFinishCompilationUnits>(num_background_tasks);
+ } else {
+ job_->stopped_tasks_ = num_background_tasks;
+ job_->NextStep<ExecuteAndFinishCompilationUnits>(num_background_tasks);
+ }
}
};
@@ -2253,16 +3212,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
break;
}
}
- stopped_tasks_.Increment(1);
- }
-
- void RestartCompilationTasks() {
- size_t num_restarts = stopped_tasks_.Value();
- stopped_tasks_.Decrement(num_restarts);
-
- for (size_t i = 0; i < num_restarts; ++i) {
- job_->StartBackgroundTask();
- }
+ job_->stopped_tasks_.Increment(1);
}
void RunInForeground() override {
@@ -2272,7 +3222,6 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
job_->compiler_->SetFinisherIsRunning(false);
return;
}
- HandleScope scope(job_->isolate_);
ErrorThrower thrower(job_->isolate_, "AsyncCompile");
// We execute for 1 ms and then reschedule the task, same as the GC.
@@ -2280,7 +3229,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
while (true) {
if (!finished_ && job_->compiler_->ShouldIncreaseWorkload()) {
- RestartCompilationTasks();
+ job_->RestartBackgroundTasks();
}
int func_index = -1;
@@ -2299,7 +3248,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
// FinishCompilationUnits task again.
break;
} else {
- DCHECK(func_index >= 0);
+ DCHECK_LE(0, func_index);
job_->code_table_->set(func_index, *result.ToHandleChecked());
--job_->outstanding_units_;
}
@@ -2323,14 +3272,13 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
if (job_->outstanding_units_ == 0) {
// Make sure all compilation tasks stopped running.
job_->background_task_manager_.CancelAndWait();
- job_->DoSync<FinishCompile>();
+ if (job_->DecrementAndCheckFinisherCount()) job_->DoSync<FinishCompile>();
}
}
private:
std::atomic<bool> failed_{false};
std::atomic<bool> finished_{false};
- base::AtomicNumber<size_t> stopped_tasks_{0};
};
//==========================================================================
@@ -2339,7 +3287,6 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
class AsyncCompileJob::FinishCompile : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("(5b) Finish compile...\n");
- HandleScope scope(job_->isolate_);
// At this point, compilation has completed. Update the code table.
for (int i = FLAG_skip_compiling_wasm_funcs,
e = job_->code_table_->length();
@@ -2367,8 +3314,8 @@ class AsyncCompileJob::FinishCompile : public CompileStep {
// The {module_wrapper} will take ownership of the {WasmModule} object,
// and it will be destroyed when the GC reclaims the wrapper object.
- Handle<WasmModuleWrapper> module_wrapper = WasmModuleWrapper::New(
- job_->isolate_, job_->compiler_->ReleaseModule().release());
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::From(job_->isolate_, job_->module_.release());
// Create the shared module data.
// TODO(clemensh): For the same module (same bytes / same hash), we should
@@ -2403,10 +3350,11 @@ class AsyncCompileJob::FinishCompile : public CompileStep {
// Step 6 (sync): Compile JS->wasm wrappers.
//==========================================================================
class AsyncCompileJob::CompileWrappers : public CompileStep {
+ // TODO(wasm): Compile all wrappers here, including the start function wrapper
+ // and the wrappers for the function table elements.
void RunInForeground() override {
TRACE_COMPILE("(6) Compile wrappers...\n");
// Compile JS->wasm wrappers for exported functions.
- HandleScope scope(job_->isolate_);
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
WasmModule* module = job_->compiled_module_->module();
@@ -2432,7 +3380,6 @@ class AsyncCompileJob::CompileWrappers : public CompileStep {
class AsyncCompileJob::FinishModule : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("(7) Finish module...\n");
- HandleScope scope(job_->isolate_);
Handle<WasmModuleObject> result =
WasmModuleObject::New(job_->isolate_, job_->compiled_module_);
// {job_} is deleted in AsyncCompileSucceeded, therefore the {return}.
@@ -2440,8 +3387,166 @@ class AsyncCompileJob::FinishModule : public CompileStep {
}
};
-#undef TRACE
+class AsyncCompileJob::AbortCompilation : public CompileStep {
+ void RunInForeground() override {
+ TRACE_COMPILE("Abort asynchronous compilation ...\n");
+ job_->isolate_->wasm_compilation_manager()->RemoveJob(job_);
+ }
+};
+
+AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
+ : job_(job), compilation_unit_builder_(nullptr) {}
+
+void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
+ // Make sure all background tasks stopped executing before we change the state
+ // of the AsyncCompileJob to DecodeFail.
+ job_->background_task_manager_.CancelAndWait();
+
+  // Create a ModuleResult from the result we got as parameter. Since this is
+  // an error result, we don't have to provide a real wasm module to it.
+ ModuleResult result(nullptr);
+ result.MoveErrorFrom(error);
+
+ // Check if there is already a ModuleCompiler, in which case we have to clean
+ // it up as well.
+ if (job_->compiler_) {
+ // If {IsFinisherRunning} is true, then there is already a foreground task
+ // in the task queue to execute the DecodeFail step. We do not have to start
+ // a new task ourselves with DoSync.
+ if (job_->compiler_->IsFinisherRunning()) {
+ job_->NextStep<AsyncCompileJob::DecodeFail>(std::move(result));
+ } else {
+ job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
+ }
+
+ compilation_unit_builder_->Clear();
+ } else {
+ job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
+ }
+}
+
+// Process the module header.
+bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) {
+ TRACE_STREAMING("Process module header...\n");
+ decoder_.StartDecoding(job_->isolate());
+ decoder_.DecodeModuleHeader(bytes, offset);
+ if (!decoder_.ok()) {
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ return false;
+ }
+ return true;
+}
+
+// Process all sections except for the code section.
+bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
+ Vector<const uint8_t> bytes,
+ uint32_t offset) {
+ TRACE_STREAMING("Process section %d ...\n", section_code);
+ if (section_code == SectionCode::kUnknownSectionCode) {
+ // No need to decode unknown sections, even the names section. If decoding
+  // of the unknown section fails, compilation should succeed anyway, and
+ // even decoding the names section is unnecessary because the result comes
+ // too late for streaming compilation.
+ return true;
+ }
+ constexpr bool verify_functions = false;
+ decoder_.DecodeSection(section_code, bytes, offset, verify_functions);
+ if (!decoder_.ok()) {
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ return false;
+ }
+ return true;
+}
+
+// Start the code section.
+bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count,
+ uint32_t offset) {
+ TRACE_STREAMING("Start the code section with %zu functions...\n",
+ functions_count);
+ if (!decoder_.CheckFunctionsCount(static_cast<uint32_t>(functions_count),
+ offset)) {
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ return false;
+ }
+ job_->NextStep<AsyncCompileJob::PrepareAndStartCompile>(decoder_.module(),
+ false);
+ // Execute the PrepareAndStartCompile step immediately and not in a separate
+ // task. The step expects to be run on a separate foreground thread though, so
+  // we have to increment {num_pending_foreground_tasks_} to look like one.
+ ++job_->num_pending_foreground_tasks_;
+ DCHECK_EQ(1, job_->num_pending_foreground_tasks_);
+ constexpr bool on_foreground = true;
+ job_->step_->Run(on_foreground);
+
+ job_->outstanding_units_ = functions_count;
+ // Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
+ // AsyncStreamingProcessor have to finish.
+ job_->outstanding_finishers_.SetValue(2);
+ next_function_ = decoder_.module()->num_imported_functions +
+ FLAG_skip_compiling_wasm_funcs;
+ compilation_unit_builder_.reset(
+ new ModuleCompiler::CompilationUnitBuilder(job_->compiler_.get()));
+ return true;
+}
+
+// Process a function body.
+bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t offset) {
+ TRACE_STREAMING("Process function body %d ...\n", next_function_);
+
+ decoder_.DecodeFunctionBody(
+ next_function_, static_cast<uint32_t>(bytes.length()), offset, false);
+ if (next_function_ >= decoder_.module()->num_imported_functions +
+ FLAG_skip_compiling_wasm_funcs) {
+ const WasmFunction* func = &decoder_.module()->functions[next_function_];
+ WasmName name = {nullptr, 0};
+ compilation_unit_builder_->AddUnit(job_->module_env_.get(), func, offset,
+ bytes, name);
+ }
+ ++next_function_;
+ return true;
+}
+
+void AsyncStreamingProcessor::OnFinishedChunk() {
+ // TRACE_STREAMING("FinishChunk...\n");
+ if (compilation_unit_builder_) {
+ compilation_unit_builder_->Commit();
+ job_->RestartBackgroundTasks();
+ }
+}
+
+// Finish the processing of the stream.
+void AsyncStreamingProcessor::OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
+ size_t length) {
+ TRACE_STREAMING("Finish stream...\n");
+ job_->bytes_copy_ = std::move(bytes);
+ job_->wire_bytes_ = ModuleWireBytes(job_->bytes_copy_.get(),
+ job_->bytes_copy_.get() + length);
+ ModuleResult result = decoder_.FinishDecoding(false);
+ DCHECK(result.ok());
+ job_->module_ = std::move(result.val);
+ if (job_->DecrementAndCheckFinisherCount())
+ job_->DoSync<AsyncCompileJob::FinishCompile>();
+}
+
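// Illustrative sketch (not part of this patch): a minimal guess at what
// DecrementAndCheckFinisherCount() does, given that outstanding_finishers_
// starts at 2 for streaming compilation and each of the two finishers (the
// AsyncCompileJob and the AsyncStreamingProcessor) calls it once; only the
// last caller proceeds to the FinishCompile step.
//
//   bool AsyncCompileJob::DecrementAndCheckFinisherCount() {
//     return outstanding_finishers_.Decrement(1) == 0;
//   }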
+// Report an error detected in the StreamingDecoder.
+void AsyncStreamingProcessor::OnError(DecodeResult result) {
+ TRACE_STREAMING("Stream error...\n");
+ FinishAsyncCompileJobWithError(std::move(result));
+}
+
+void AsyncStreamingProcessor::OnAbort() {
+ TRACE_STREAMING("Abort stream...\n");
+ job_->Abort();
+}
} // namespace wasm
} // namespace internal
} // namespace v8
+
+#undef TRACE
+#undef TRACE_COMPILE
+#undef TRACE_STREAMING
+#undef TRACE_CHAIN
+#undef ERROR_THROWER_WITH_MESSAGE
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 084b6833fd..42ea037d03 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -8,311 +8,72 @@
#include <functional>
#include "src/base/atomic-utils.h"
-#include "src/base/utils/random-number-generator.h"
#include "src/cancelable-task.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/isolate.h"
-#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
namespace wasm {
-// A class compiling an entire module.
-class ModuleCompiler {
- public:
- // The ModuleCompiler takes ownership of the {WasmModule}.
- // In {CompileToModuleObject}, it will transfer ownership to the generated
- // {WasmModuleWrapper}. If this method is not called, ownership may be
- // reclaimed by explicitely releasing the {module_} field.
- ModuleCompiler(Isolate* isolate, std::unique_ptr<WasmModule> module,
- Handle<Code> centry_stub);
-
- // The actual runnable task that performs compilations in the background.
- class CompilationTask : public CancelableTask {
- public:
- ModuleCompiler* compiler_;
- explicit CompilationTask(ModuleCompiler* helper);
-
- void RunInternal() override;
- };
-
- // The CompilationUnitBuilder builds compilation units and stores them in an
- // internal buffer. The buffer is moved into the working queue of the
- // ModuleCompiler when {Commit} is called.
- class CompilationUnitBuilder {
- public:
- explicit CompilationUnitBuilder(ModuleCompiler* compiler)
- : compiler_(compiler) {}
-
- ~CompilationUnitBuilder() { DCHECK(units_.empty()); }
-
- void AddUnit(compiler::ModuleEnv* module_env, const WasmFunction* function,
- uint32_t buffer_offset, Vector<const uint8_t> bytes,
- WasmName name) {
- units_.emplace_back(new compiler::WasmCompilationUnit(
- compiler_->isolate_, module_env,
- wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
- bytes.end()},
- name, function->func_index, compiler_->centry_stub_,
- compiler_->async_counters()));
- }
-
- void Commit() {
- {
- base::LockGuard<base::Mutex> guard(
- &compiler_->compilation_units_mutex_);
- compiler_->compilation_units_.insert(
- compiler_->compilation_units_.end(),
- std::make_move_iterator(units_.begin()),
- std::make_move_iterator(units_.end()));
- }
- units_.clear();
- }
-
- private:
- ModuleCompiler* compiler_;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> units_;
- };
-
- class CodeGenerationSchedule {
- public:
- explicit CodeGenerationSchedule(
- base::RandomNumberGenerator* random_number_generator,
- size_t max_memory = 0);
-
- void Schedule(std::unique_ptr<compiler::WasmCompilationUnit>&& item);
-
- bool IsEmpty() const { return schedule_.empty(); }
-
- std::unique_ptr<compiler::WasmCompilationUnit> GetNext();
-
- bool CanAcceptWork() const;
-
- bool ShouldIncreaseWorkload() const;
-
- void EnableThrottling() { throttle_ = true; }
-
- private:
- size_t GetRandomIndexInSchedule();
-
- base::RandomNumberGenerator* random_number_generator_ = nullptr;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> schedule_;
- const size_t max_memory_;
- bool throttle_ = false;
- base::AtomicNumber<size_t> allocated_memory_{0};
- };
-
- const std::shared_ptr<Counters>& async_counters() const {
- return async_counters_;
- }
- Counters* counters() const { return async_counters().get(); }
-
- // Run by each compilation task and by the main thread (i.e. in both
- // foreground and background threads). The no_finisher_callback is called
- // within the result_mutex_ lock when no finishing task is running, i.e. when
- // the finisher_is_running_ flag is not set.
- bool FetchAndExecuteCompilationUnit(
- std::function<void()> no_finisher_callback = nullptr);
-
- void OnBackgroundTaskStopped();
-
- void EnableThrottling() { executed_units_.EnableThrottling(); }
-
- bool CanAcceptWork() const { return executed_units_.CanAcceptWork(); }
-
- bool ShouldIncreaseWorkload() const {
- return executed_units_.ShouldIncreaseWorkload();
- }
-
- size_t InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
- const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env);
-
- void RestartCompilationTasks();
+class ModuleCompiler;
+
+V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate,
+ const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
+
+V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
+ Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports);
+
+// Triggered by the WasmCompileLazy builtin.
+// Walks the stack (top three frames) to determine the wasm instance involved
+// and which function to compile.
+// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
+// patching the call site or indirect function tables.
+// Returns either the Code object that has been lazily compiled, or Illegal if
+// an error occurred. In the latter case, a pending exception has been set,
+// which will be triggered when returning from the runtime function, i.e. the
+// Illegal builtin will never be called.
+Handle<Code> CompileLazy(Isolate* isolate);
+
+// This class orchestrates the lazy compilation of wasm functions. It is
+// triggered by the WasmCompileLazy builtin.
+// It contains the logic for compiling and specializing wasm functions, and
+// patching the calling wasm code.
+// Once we support concurrent lazy compilation, this class will contain the
+// logic to actually orchestrate parallel execution of wasm compilation jobs.
+// TODO(clemensh): Implement concurrent lazy compilation.
+class LazyCompilationOrchestrator {
+ void CompileFunction(Isolate*, Handle<WasmInstanceObject>, int func_index);
- size_t FinishCompilationUnits(std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
-
- void SetFinisherIsRunning(bool value);
-
- MaybeHandle<Code> FinishCompilationUnit(ErrorThrower* thrower,
- int* func_index);
-
- void CompileInParallel(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
-
- void CompileSequentially(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
-
- void ValidateSequentially(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- ErrorThrower* thrower);
-
- MaybeHandle<WasmModuleObject> CompileToModuleObject(
- ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
-
- std::unique_ptr<WasmModule> ReleaseModule() { return std::move(module_); }
-
- private:
- MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
- Isolate* isolate, ErrorThrower* thrower,
- const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
-
- Isolate* isolate_;
- std::unique_ptr<WasmModule> module_;
- const std::shared_ptr<Counters> async_counters_;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>
- compilation_units_;
- base::Mutex compilation_units_mutex_;
- CodeGenerationSchedule executed_units_;
- base::Mutex result_mutex_;
- const size_t num_background_tasks_;
- // This flag should only be set while holding result_mutex_.
- bool finisher_is_running_ = false;
- CancelableTaskManager background_task_manager_;
- size_t stopped_compilation_tasks_ = 0;
- base::Mutex tasks_mutex_;
- Handle<Code> centry_stub_;
-};
-
-class JSToWasmWrapperCache {
public:
- Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
- wasm::WasmModule* module,
- Handle<Code> wasm_code,
- uint32_t index);
-
- private:
- // sig_map_ maps signatures to an index in code_cache_.
- wasm::SignatureMap sig_map_;
- std::vector<Handle<Code>> code_cache_;
-};
-
-// A helper class to simplify instantiating a module from a compiled module.
-// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
-// etc.
-class InstanceBuilder {
- public:
- InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory,
- WeakCallbackInfo<void>::Callback instance_finalizer_callback);
-
- // Build an instance, in all of its glory.
- MaybeHandle<WasmInstanceObject> Build();
-
- private:
- // Represents the initialized state of a table.
- struct TableInstance {
- Handle<WasmTableObject> table_object; // WebAssembly.Table instance
- Handle<FixedArray> js_wrappers; // JSFunctions exported
- Handle<FixedArray> function_table; // internal code array
- Handle<FixedArray> signature_table; // internal sig array
- };
-
- // A pre-evaluated value to use in import binding.
- struct SanitizedImport {
- Handle<String> module_name;
- Handle<String> import_name;
- Handle<Object> value;
- };
-
- Isolate* isolate_;
- WasmModule* const module_;
- const std::shared_ptr<Counters> async_counters_;
- ErrorThrower* thrower_;
- Handle<WasmModuleObject> module_object_;
- MaybeHandle<JSReceiver> ffi_;
- MaybeHandle<JSArrayBuffer> memory_;
- Handle<JSArrayBuffer> globals_;
- Handle<WasmCompiledModule> compiled_module_;
- std::vector<TableInstance> table_instances_;
- std::vector<Handle<JSFunction>> js_wrappers_;
- JSToWasmWrapperCache js_to_wasm_cache_;
- WeakCallbackInfo<void>::Callback instance_finalizer_callback_;
- std::vector<SanitizedImport> sanitized_imports_;
-
- const std::shared_ptr<Counters>& async_counters() const {
- return async_counters_;
- }
- Counters* counters() const { return async_counters().get(); }
-
-// Helper routines to print out errors with imports.
-#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
- void Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name, Handle<String> import_name) { \
- thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
- index, module_name->ToCString().get(), \
- import_name->ToCString().get(), error); \
- } \
- \
- MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name) { \
- thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
- module_name->ToCString().get(), error); \
- return MaybeHandle<Object>(); \
- }
-
- ERROR_THROWER_WITH_MESSAGE(LinkError)
- ERROR_THROWER_WITH_MESSAGE(TypeError)
-
- // Look up an import value in the {ffi_} object.
- MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
- Handle<String> import_name);
-
- // Look up an import value in the {ffi_} object specifically for linking an
- // asm.js module. This only performs non-observable lookups, which allows
- // falling back to JavaScript proper (and hence re-executing all lookups) if
- // module instantiation fails.
- MaybeHandle<Object> LookupImportAsm(uint32_t index,
- Handle<String> import_name);
-
- uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
-
- // Load data segments into the memory.
- void LoadDataSegments(Address mem_addr, size_t mem_size);
-
- void WriteGlobalValue(WasmGlobal& global, Handle<Object> value);
-
- void SanitizeImports();
- // Process the imports, including functions, tables, globals, and memory, in
- // order, loading them from the {ffi_} object. Returns the number of imported
- // functions.
- int ProcessImports(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance);
-
- template <typename T>
- T* GetRawGlobalPtr(WasmGlobal& global);
-
- // Process initialization of globals.
- void InitGlobals();
-
- // Allocate memory for a module instance as a new JSArrayBuffer.
- Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
-
- bool NeedsWrappers() const;
-
- // Process the exports, creating wrappers for functions, tables, memories,
- // and globals.
- void ProcessExports(Handle<WasmInstanceObject> instance,
- Handle<WasmCompiledModule> compiled_module);
-
- void InitializeTables(Handle<WasmInstanceObject> instance,
- CodeSpecialization* code_specialization);
-
- void LoadTableSegments(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance);
+ Handle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
+ Handle<Code> caller, int call_offset,
+ int exported_func_index, bool patch_caller);
};
// Encapsulates all the state and steps of an asynchronous compilation.
@@ -330,6 +91,10 @@ class AsyncCompileJob {
void Start();
+ std::shared_ptr<StreamingDecoder> CreateStreamingDecoder();
+
+ void Abort();
+
~AsyncCompileJob();
private:
@@ -346,6 +111,41 @@ class AsyncCompileJob {
class FinishCompile;
class CompileWrappers;
class FinishModule;
+ class AbortCompilation;
+
+ const std::shared_ptr<Counters>& async_counters() const {
+ return async_counters_;
+ }
+ Counters* counters() const { return async_counters().get(); }
+
+ void AsyncCompileFailed(ErrorThrower& thrower);
+
+ void AsyncCompileSucceeded(Handle<Object> result);
+
+ void StartForegroundTask();
+
+ void StartBackgroundTask();
+
+ void RestartBackgroundTasks();
+
+ // Switches to the compilation step {Step} and starts a foreground task to
+ // execute it.
+ template <typename Step, typename... Args>
+ void DoSync(Args&&... args);
+
+ // Switches to the compilation step {Step} and starts a background task to
+ // execute it.
+ template <typename Step, typename... Args>
+ void DoAsync(Args&&... args);
+
+ // Switches to the compilation step {Step} but does not start a task to
+ // execute it.
+ template <typename Step, typename... Args>
+ void NextStep(Args&&... args);
+
+ Isolate* isolate() { return isolate_; }
+
+ friend class AsyncStreamingProcessor;
Isolate* isolate_;
const std::shared_ptr<Counters> async_counters_;
@@ -355,6 +155,7 @@ class AsyncCompileJob {
Handle<JSPromise> module_promise_;
std::unique_ptr<ModuleCompiler> compiler_;
std::unique_ptr<compiler::ModuleEnv> module_env_;
+ std::unique_ptr<WasmModule> module_;
std::vector<DeferredHandles*> deferred_handles_;
Handle<WasmModuleObject> module_object_;
@@ -364,29 +165,29 @@ class AsyncCompileJob {
size_t outstanding_units_ = 0;
std::unique_ptr<CompileStep> step_;
CancelableTaskManager background_task_manager_;
-#if DEBUG
- // Counts the number of pending foreground tasks.
- int32_t num_pending_foreground_tasks_ = 0;
-#endif
-
- const std::shared_ptr<Counters>& async_counters() const {
- return async_counters_;
+ // The number of background tasks which stopped executing within a step.
+ base::AtomicNumber<size_t> stopped_tasks_{0};
+
+  // For async compilation the AsyncCompileJob is the only finisher. For
+  // streaming compilation, the AsyncStreamingProcessor also has to finish
+  // before compilation can be finished.
+ base::AtomicNumber<int32_t> outstanding_finishers_{1};
+
+ // Decrements the number of outstanding finishers. The last caller of this
+  // function should finish the asynchronous compilation; see the comment on
+ // {outstanding_finishers_}.
+ V8_WARN_UNUSED_RESULT bool DecrementAndCheckFinisherCount() {
+ return outstanding_finishers_.Decrement(1) == 0;
}
- Counters* counters() const { return async_counters().get(); }
-
- void AsyncCompileFailed(ErrorThrower& thrower);
-
- void AsyncCompileSucceeded(Handle<Object> result);
-
- template <typename Task, typename... Args>
- void DoSync(Args&&... args);
- void StartForegroundTask();
-
- void StartBackgroundTask();
+ // Counts the number of pending foreground tasks.
+ int32_t num_pending_foreground_tasks_ = 0;
- template <typename Task, typename... Args>
- void DoAsync(Args&&... args);
+ // The AsyncCompileJob owns the StreamingDecoder because the StreamingDecoder
+ // contains data which is needed by the AsyncCompileJob for streaming
+ // compilation. The AsyncCompileJob does not actively use the
+ // StreamingDecoder.
+ std::shared_ptr<StreamingDecoder> stream_;
};
} // namespace wasm
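
The {outstanding_finishers_} counter introduced above implements a last-one-out pattern: each party that must complete before the job can be finalized (the AsyncCompileJob itself, plus the AsyncStreamingProcessor when streaming) holds one slot, and whichever caller decrements the counter to zero performs the finish step. A minimal standalone sketch of that pattern, using std::atomic in place of V8's base::AtomicNumber (the names below are illustrative, not V8 API):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Each finisher calls DecrementAndCheck() exactly once; only the caller that
// drops the count to zero runs the finish step.
class FinisherCount {
 public:
  explicit FinisherCount(int finishers) : outstanding_(finishers) {}
  bool DecrementAndCheck() { return outstanding_.fetch_sub(1) == 1; }

 private:
  std::atomic<int> outstanding_;
};

int main() {
  FinisherCount finishers(2);  // e.g. compile job + streaming processor
  std::vector<std::thread> workers;
  for (int i = 0; i < 2; ++i) {
    workers.emplace_back([&finishers, i] {
      if (finishers.DecrementAndCheck()) {
        std::printf("finisher %d completes the compilation\n", i);
      }
    });
  }
  for (auto& t : workers) t.join();
  return 0;
}
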
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 5280af7374..d7a0156a7b 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -237,11 +237,17 @@ class WasmSectionIterator {
}
};
+} // namespace
+
// The main logic for decoding the bytes of a module.
-class ModuleDecoder : public Decoder {
+class ModuleDecoderImpl : public Decoder {
public:
- ModuleDecoder(const byte* module_start, const byte* module_end,
- ModuleOrigin origin)
+ explicit ModuleDecoderImpl(ModuleOrigin origin)
+ : Decoder(nullptr, nullptr),
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {}
+
+ ModuleDecoderImpl(const byte* module_start, const byte* module_end,
+ ModuleOrigin origin)
: Decoder(module_start, module_end),
origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
if (end_ < start_) {
@@ -265,13 +271,9 @@ class ModuleDecoder : public Decoder {
}
// Files are named `HASH.{ok,failed}.wasm`.
size_t hash = base::hash_range(start_, end_);
- char buf[32] = {'\0'};
-#if V8_OS_WIN && _MSC_VER < 1900
-#define snprintf sprintf_s
-#endif
- snprintf(buf, sizeof(buf) - 1, "%016zx.%s.wasm", hash,
- result.ok() ? "ok" : "failed");
- std::string name(buf);
+ EmbeddedVector<char, 32> buf;
+ SNPrintF(buf, "%016zx.%s.wasm", hash, result.ok() ? "ok" : "failed");
+ std::string name(buf.start());
if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
if (fwrite(start_, end_ - start_, 1, wasm_file) != 1) {
OFStream os(stderr);
@@ -316,12 +318,16 @@ class ModuleDecoder : public Decoder {
BYTES(kWasmVersion), BYTES(magic_version));
}
}
+#undef BYTES
}
void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset, bool verify_functions = true) {
if (failed()) return;
Reset(bytes, offset);
+ TRACE("Section: %s\n", SectionName(section_code));
+ TRACE("Decode Section %p - %p\n", static_cast<const void*>(bytes.begin()),
+ static_cast<const void*>(bytes.end()));
// Check if the section is out-of-order.
if (section_code < next_section_) {
@@ -477,7 +483,8 @@ class ModuleDecoder : public Decoder {
consume_resizable_limits(
"memory", "pages", FLAG_wasm_max_mem_pages,
&module_->initial_pages, &module_->has_maximum_pages,
- kSpecMaxWasmMemoryPages, &module_->maximum_pages);
+ kSpecMaxWasmMemoryPages, &module_->maximum_pages,
+ &module_->has_shared_memory);
break;
}
case kExternalGlobal: {
@@ -545,7 +552,7 @@ class ModuleDecoder : public Decoder {
consume_resizable_limits(
"memory", "pages", FLAG_wasm_max_mem_pages, &module_->initial_pages,
&module_->has_maximum_pages, kSpecMaxWasmMemoryPages,
- &module_->maximum_pages);
+ &module_->maximum_pages, &module_->has_shared_memory);
}
}
@@ -561,6 +568,7 @@ class ModuleDecoder : public Decoder {
WasmGlobal* global = &module_->globals.back();
DecodeGlobalInModule(module_.get(), i + imported_globals, global);
}
+ if (ok()) CalculateGlobalOffsets(module_.get());
}
void DecodeExportSection() {
@@ -701,26 +709,42 @@ class ModuleDecoder : public Decoder {
}
void DecodeCodeSection(bool verify_functions) {
- const byte* pos = pc_;
+ uint32_t pos = pc_offset();
uint32_t functions_count = consume_u32v("functions count");
- if (functions_count != module_->num_declared_functions) {
- errorf(pos, "function body count %u mismatch (%u expected)",
- functions_count, module_->num_declared_functions);
- }
- for (uint32_t i = 0; i < functions_count; ++i) {
+ CheckFunctionsCount(functions_count, pos);
+ for (uint32_t i = 0; ok() && i < functions_count; ++i) {
uint32_t size = consume_u32v("body size");
uint32_t offset = pc_offset();
consume_bytes(size, "function body");
if (failed()) break;
- WasmFunction* function =
- &module_->functions[i + module_->num_imported_functions];
- function->code = {offset, size};
- if (verify_functions) {
- ModuleWireBytes bytes(start_, end_);
- VerifyFunctionBody(module_->signature_zone->allocator(),
- i + module_->num_imported_functions, bytes,
- module_.get(), function);
- }
+ DecodeFunctionBody(i, size, offset, verify_functions);
+ }
+ }
+
+ bool CheckFunctionsCount(uint32_t functions_count, uint32_t offset) {
+ if (functions_count != module_->num_declared_functions) {
+ Reset(nullptr, nullptr, offset);
+ errorf(nullptr, "function body count %u mismatch (%u expected)",
+ functions_count, module_->num_declared_functions);
+ return false;
+ }
+ return true;
+ }
+
+ void DecodeFunctionBody(uint32_t index, uint32_t length, uint32_t offset,
+ bool verify_functions) {
+ auto size_histogram = module_->is_wasm()
+ ? GetCounters()->wasm_wasm_function_size_bytes()
+ : GetCounters()->wasm_asm_function_size_bytes();
+ size_histogram->AddSample(length);
+ WasmFunction* function =
+ &module_->functions[index + module_->num_imported_functions];
+ function->code = {offset, length};
+ if (verify_functions) {
+ ModuleWireBytes bytes(start_, end_);
+ VerifyFunctionBody(module_->signature_zone->allocator(),
+ index + module_->num_imported_functions, bytes,
+ module_.get(), function);
}
}
@@ -881,6 +905,8 @@ class ModuleDecoder : public Decoder {
return consume_init_expr(nullptr, kWasmStmt);
}
+ WasmModule* module() { return module_.get(); }
+
bool IsWasm() { return origin_ == kWasmOrigin; }
Counters* GetCounters() {
@@ -902,7 +928,7 @@ class ModuleDecoder : public Decoder {
// We store next_section_ as uint8_t instead of SectionCode so that we can
// increment it. This static_assert should make sure that SectionCode does not
// get bigger than uint8_t accidentally.
- static_assert(sizeof(ModuleDecoder::next_section_) == sizeof(SectionCode),
+ static_assert(sizeof(ModuleDecoderImpl::next_section_) == sizeof(SectionCode),
"type mismatch");
Result<bool> intermediate_result_;
ModuleOrigin origin_;
@@ -1085,9 +1111,30 @@ class ModuleDecoder : public Decoder {
void consume_resizable_limits(const char* name, const char* units,
uint32_t max_initial, uint32_t* initial,
bool* has_max, uint32_t max_maximum,
- uint32_t* maximum) {
- uint32_t flags = consume_u32v("resizable limits flags");
+ uint32_t* maximum,
+ bool* has_shared_memory = nullptr) {
+ uint8_t flags = consume_u8("resizable limits flags");
const byte* pos = pc();
+
+ if (FLAG_experimental_wasm_threads) {
+ bool is_memory = (strcmp(name, "memory") == 0);
+ if (flags & 0xfc || (!is_memory && (flags & 0xfe))) {
+ errorf(pos - 1, "invalid %s limits flags", name);
+ }
+ if (flags == 3) {
+ DCHECK_NOT_NULL(has_shared_memory);
+ *has_shared_memory = true;
+ } else if (flags == 2) {
+ errorf(pos - 1,
+ "%s limits flags should have maximum defined if shared is true",
+ name);
+ }
+ } else {
+ if (flags & 0xfe) {
+ errorf(pos - 1, "invalid %s limits flags", name);
+ }
+ }
+
*initial = consume_u32v("initial size");
*has_max = false;
if (*initial > max_initial) {
@@ -1301,7 +1348,7 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
size_counter->AddSample(static_cast<int>(size));
// Signatures are stored in zone memory, which have the same lifetime
// as the {module}.
- ModuleDecoder decoder(module_start, module_end, origin);
+ ModuleDecoderImpl decoder(module_start, module_end, origin);
ModuleResult result = decoder.DecodeModule(isolate, verify_functions);
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(titzer): this isn't accurate, since it doesn't count the data
@@ -1318,7 +1365,43 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
return result;
}
-} // namespace
+ModuleDecoder::ModuleDecoder() = default;
+ModuleDecoder::~ModuleDecoder() = default;
+
+WasmModule* ModuleDecoder::module() const { return impl_->module(); }
+
+void ModuleDecoder::StartDecoding(Isolate* isolate, ModuleOrigin origin) {
+ DCHECK_NULL(impl_);
+ impl_.reset(new ModuleDecoderImpl(origin));
+ impl_->StartDecoding(isolate);
+}
+
+void ModuleDecoder::DecodeModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) {
+ impl_->DecodeModuleHeader(bytes, offset);
+}
+
+void ModuleDecoder::DecodeSection(SectionCode section_code,
+ Vector<const uint8_t> bytes, uint32_t offset,
+ bool verify_functions) {
+ impl_->DecodeSection(section_code, bytes, offset, verify_functions);
+}
+
+void ModuleDecoder::DecodeFunctionBody(uint32_t index, uint32_t length,
+ uint32_t offset, bool verify_functions) {
+ impl_->DecodeFunctionBody(index, length, offset, verify_functions);
+}
+
+bool ModuleDecoder::CheckFunctionsCount(uint32_t functions_count,
+ uint32_t offset) {
+ return impl_->CheckFunctionsCount(functions_count, offset);
+}
+
+ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
+ return impl_->FinishDecoding(verify_functions);
+}
+
+bool ModuleDecoder::ok() { return impl_->ok(); }
ModuleResult SyncDecodeWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end, bool verify_functions,
@@ -1337,13 +1420,13 @@ ModuleResult AsyncDecodeWasmModule(
FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
const byte* end) {
- ModuleDecoder decoder(start, end, kWasmOrigin);
+ ModuleDecoderImpl decoder(start, end, kWasmOrigin);
return decoder.DecodeFunctionSignature(zone, start);
}
WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
AccountingAllocator allocator;
- ModuleDecoder decoder(start, end, kWasmOrigin);
+ ModuleDecoderImpl decoder(start, end, kWasmOrigin);
return decoder.DecodeInitExpr(start);
}
@@ -1358,9 +1441,14 @@ FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
size_t size = function_end - function_start;
if (function_start > function_end)
return FunctionResult::Error("start > end");
+ auto size_histogram = module->is_wasm()
+ ? counters->wasm_wasm_function_size_bytes()
+ : counters->wasm_asm_function_size_bytes();
+ // TODO(bradnelson): Improve histogram handling of ptrdiff_t.
+ size_histogram->AddSample(static_cast<int>(size));
if (size > kV8MaxWasmFunctionSize)
return FunctionResult::Error("size > maximum function size: %zu", size);
- ModuleDecoder decoder(function_start, function_end, kWasmOrigin);
+ ModuleDecoderImpl decoder(function_start, function_end, kWasmOrigin);
decoder.SetCounters(counters);
return decoder.DecodeSingleFunction(zone, wire_bytes, module,
base::make_unique<WasmFunction>());
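
The shared-memory handling added to consume_resizable_limits treats the limits byte as a two-bit field: bit 0 says a maximum is present, bit 1 (only accepted for memories, and only with --experimental-wasm-threads) marks the memory as shared, and the shared-without-maximum encoding (flags == 2) is rejected. A standalone sketch of that decoding rule, simplified and not the V8 decoder itself:

#include <cstdint>
#include <stdexcept>

struct Limits {
  bool has_maximum = false;
  bool is_shared = false;
};

// Decodes a memory limits flags byte: 0 = no maximum, 1 = maximum,
// 2 = shared without maximum (invalid), 3 = shared with maximum.
Limits DecodeMemoryLimitsFlags(uint8_t flags, bool threads_enabled) {
  const uint8_t valid_mask = threads_enabled ? 0x03 : 0x01;
  if (flags & static_cast<uint8_t>(~valid_mask)) {
    throw std::invalid_argument("invalid memory limits flags");
  }
  if (flags == 2) {  // shared memories must declare a maximum
    throw std::invalid_argument("shared memory must have a maximum");
  }
  Limits result;
  result.has_maximum = (flags & 0x01) != 0;
  result.is_shared = (flags & 0x02) != 0;
  return result;
}
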
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 5239c95e68..b6cd869ae7 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -19,13 +19,18 @@ struct ModuleEnv;
namespace wasm {
-const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x01;
const uint8_t kWasmFunctionTypeForm = 0x60;
const uint8_t kWasmAnyFunctionTypeForm = 0x70;
const uint8_t kResizableMaximumFlag = 1;
const uint8_t kNoMaximumFlag = 0;
+enum MemoryFlags : uint8_t {
+ kNoMaximum = 0,
+ kMaximum = 1,
+ kSharedNoMaximum = 2,
+ kSharedAndMaximum = 3
+};
+
enum SectionCode : int8_t {
kUnknownSectionCode = 0, // code for unknown sections
kTypeSectionCode = 1, // Function signature declarations
@@ -143,6 +148,36 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* module_start,
void DecodeLocalNames(const byte* module_start, const byte* module_end,
LocalNames* result);
+class ModuleDecoderImpl;
+
+class ModuleDecoder {
+ public:
+ ModuleDecoder();
+ ~ModuleDecoder();
+
+ void StartDecoding(Isolate* isolate,
+ ModuleOrigin origin = ModuleOrigin::kWasmOrigin);
+
+ void DecodeModuleHeader(Vector<const uint8_t> bytes, uint32_t offset);
+
+ void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
+ uint32_t offset, bool verify_functions = true);
+
+ bool CheckFunctionsCount(uint32_t functions_count, uint32_t offset);
+
+ void DecodeFunctionBody(uint32_t index, uint32_t size, uint32_t offset,
+ bool verify_functions = true);
+
+ ModuleResult FinishDecoding(bool verify_functions = true);
+
+ WasmModule* module() const;
+
+ bool ok();
+
+ private:
+ std::unique_ptr<ModuleDecoderImpl> impl_;
+};
+
} // namespace wasm
} // namespace internal
} // namespace v8
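
The new ModuleDecoder is a thin PIMPL wrapper: the header only forward-declares ModuleDecoderImpl and stores it behind a std::unique_ptr, and every streaming entry point simply forwards to the implementation, which stays out of the public header. A generic sketch of that pattern with made-up names (not the V8 classes); note that the destructor is defaulted where the impl type is complete, which is what ModuleDecoder::~ModuleDecoder() = default does in the .cc file:

#include <cstddef>
#include <memory>
#include <string>

// --- widget.h: clients only see a forward declaration of the implementation.
class Widget {
 public:
  Widget();
  ~Widget();  // defined in the .cc file, where WidgetImpl is complete
  void Process(const std::string& chunk);
  std::size_t bytes_seen() const;

 private:
  class WidgetImpl;  // forward declaration keeps the header small
  std::unique_ptr<WidgetImpl> impl_;
};

// --- widget.cc: the full implementation is hidden from clients.
class Widget::WidgetImpl {
 public:
  void Process(const std::string& chunk) { bytes_seen_ += chunk.size(); }
  std::size_t bytes_seen() const { return bytes_seen_; }

 private:
  std::size_t bytes_seen_ = 0;
};

Widget::Widget() : impl_(new WidgetImpl()) {}
Widget::~Widget() = default;
void Widget::Process(const std::string& chunk) { impl_->Process(chunk); }
std::size_t Widget::bytes_seen() const { return impl_->bytes_seen(); }
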
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index c0c51c9d76..b48d11c902 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -15,31 +15,25 @@
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
size_t current = 0;
- while (decoder()->ok() && current < bytes.size()) {
+ while (ok() && current < bytes.size()) {
size_t num_bytes =
state_->ReadBytes(this, bytes.SubVector(current, bytes.size()));
current += num_bytes;
+ module_offset_ += num_bytes;
if (state_->is_finished()) {
state_ = state_->Next(this);
}
}
total_size_ += bytes.size();
+ if (ok()) {
+ processor_->OnFinishedChunk();
+ }
}
size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
@@ -50,13 +44,36 @@ size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
return num_bytes;
}
-MaybeHandle<WasmModuleObject> StreamingDecoder::Finish() {
- UNIMPLEMENTED();
- return Handle<WasmModuleObject>::null();
+void StreamingDecoder::Finish() {
+ if (!ok()) {
+ return;
+ }
+
+ if (!state_->is_finishing_allowed()) {
+ // The byte stream ended too early, we report an error.
+ Error("unexpected end of stream");
+ return;
+ }
+
+ std::unique_ptr<uint8_t[]> bytes(new uint8_t[total_size_]);
+ uint8_t* cursor = bytes.get();
+ {
+#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+ uint8_t module_header[]{BYTES(kWasmMagic), BYTES(kWasmVersion)};
+#undef BYTES
+ memcpy(cursor, module_header, arraysize(module_header));
+ cursor += arraysize(module_header);
+ }
+ for (auto&& buffer : section_buffers_) {
+ DCHECK_LE(cursor - bytes.get() + buffer->length(), total_size_);
+ memcpy(cursor, buffer->bytes(), buffer->length());
+ cursor += buffer->length();
+ }
+ processor_->OnFinishedStream(std::move(bytes), total_size_);
}
-bool StreamingDecoder::FinishForTesting() {
- return decoder_.ok() && state_->is_finishing_allowed();
+void StreamingDecoder::Abort() {
+ if (ok()) processor_->OnAbort();
}
// An abstract class to share code among the states which decode VarInts. This
@@ -64,7 +81,8 @@ bool StreamingDecoder::FinishForTesting() {
// code with the decoded value.
class StreamingDecoder::DecodeVarInt32 : public DecodingState {
public:
- explicit DecodeVarInt32(size_t max_value) : max_value_(max_value) {}
+ explicit DecodeVarInt32(size_t max_value, const char* field_name)
+ : max_value_(max_value), field_name_(field_name) {}
uint8_t* buffer() override { return byte_buffer_; }
size_t size() const override { return kMaxVarInt32Size; }
@@ -84,6 +102,7 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
// The maximum valid value decoded in this state. {Next} returns an error if
// this value is exceeded.
size_t max_value_;
+ const char* field_name_;
size_t value_ = 0;
size_t bytes_needed_ = 0;
};
@@ -106,30 +125,43 @@ class StreamingDecoder::DecodeModuleHeader : public DecodingState {
class StreamingDecoder::DecodeSectionID : public DecodingState {
public:
+ explicit DecodeSectionID(uint32_t module_offset)
+ : module_offset_(module_offset) {}
+
size_t size() const override { return 1; }
uint8_t* buffer() override { return &id_; }
bool is_finishing_allowed() const override { return true; }
uint8_t id() const { return id_; }
+ uint32_t module_offset() const { return module_offset_; }
+
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
private:
uint8_t id_ = 0;
+ // The start offset of this section in the module.
+ uint32_t module_offset_;
};
class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
public:
- explicit DecodeSectionLength(uint8_t id)
- : DecodeVarInt32(kV8MaxWasmModuleSize), section_id_(id) {}
+ explicit DecodeSectionLength(uint8_t id, uint32_t module_offset)
+ : DecodeVarInt32(kV8MaxWasmModuleSize, "section length"),
+ section_id_(id),
+ module_offset_(module_offset) {}
uint8_t section_id() const { return section_id_; }
+ uint32_t module_offset() const { return module_offset_; }
+
std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) override;
private:
uint8_t section_id_;
+ // The start offset of this section in the module.
+ uint32_t module_offset_;
};
class StreamingDecoder::DecodeSectionPayload : public DecodingState {
@@ -144,6 +176,8 @@ class StreamingDecoder::DecodeSectionPayload : public DecodingState {
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ SectionBuffer* section_buffer() const { return section_buffer_; }
+
private:
SectionBuffer* section_buffer_;
};
@@ -151,7 +185,8 @@ class StreamingDecoder::DecodeSectionPayload : public DecodingState {
class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
public:
explicit DecodeNumberOfFunctions(SectionBuffer* section_buffer)
- : DecodeVarInt32(kV8MaxWasmFunctions), section_buffer_(section_buffer) {}
+ : DecodeVarInt32(kV8MaxWasmFunctions, "functions count"),
+ section_buffer_(section_buffer) {}
SectionBuffer* section_buffer() const { return section_buffer_; }
@@ -167,7 +202,7 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
explicit DecodeFunctionLength(SectionBuffer* section_buffer,
size_t buffer_offset,
size_t num_remaining_functions)
- : DecodeVarInt32(kV8MaxWasmFunctionSize),
+ : DecodeVarInt32(kV8MaxWasmFunctionSize, "body size"),
section_buffer_(section_buffer),
buffer_offset_(buffer_offset),
// We are reading a new function, so one function less is remaining.
@@ -192,18 +227,21 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
public:
explicit DecodeFunctionBody(SectionBuffer* section_buffer,
size_t buffer_offset, size_t function_length,
- size_t num_remaining_functions)
+ size_t num_remaining_functions,
+ uint32_t module_offset)
: section_buffer_(section_buffer),
buffer_offset_(buffer_offset),
size_(function_length),
- num_remaining_functions_(num_remaining_functions) {}
+ num_remaining_functions_(num_remaining_functions),
+ module_offset_(module_offset) {}
+ size_t buffer_offset() const { return buffer_offset_; }
size_t size() const override { return size_; }
uint8_t* buffer() override {
return section_buffer_->bytes() + buffer_offset_;
}
size_t num_remaining_functions() const { return num_remaining_functions_; }
- size_t buffer_offset() const { return buffer_offset_; }
+ uint32_t module_offset() const { return module_offset_; }
SectionBuffer* section_buffer() const { return section_buffer_; }
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
@@ -213,23 +251,24 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
size_t buffer_offset_;
size_t size_;
size_t num_remaining_functions_;
+ uint32_t module_offset_;
};
size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
StreamingDecoder* streaming, Vector<const uint8_t> bytes) {
size_t bytes_read = std::min(bytes.size(), remaining());
memcpy(buffer() + offset(), &bytes.first(), bytes_read);
- streaming->decoder()->Reset(buffer(), buffer() + offset() + bytes_read);
- value_ = streaming->decoder()->consume_i32v();
+ Decoder decoder(buffer(), buffer() + offset() + bytes_read,
+ streaming->module_offset());
+ value_ = decoder.consume_u32v(field_name_);
// The number of bytes we actually needed to read.
- DCHECK_GT(streaming->decoder()->pc(), buffer());
- bytes_needed_ = static_cast<size_t>(streaming->decoder()->pc() - buffer());
-
- if (streaming->decoder()->failed()) {
- if (offset() + bytes_read < size()) {
- // We did not decode a full buffer, so we ignore errors. Maybe the
- // decoding will succeed when we have more bytes.
- streaming->decoder()->Reset(nullptr, nullptr);
+ DCHECK_GT(decoder.pc(), buffer());
+ bytes_needed_ = static_cast<size_t>(decoder.pc() - buffer());
+
+ if (decoder.failed()) {
+ if (offset() + bytes_read == size()) {
+ // We only report an error if we read all bytes.
+ streaming->Error(decoder.toResult(nullptr));
}
set_offset(offset() + bytes_read);
return bytes_read;
@@ -244,72 +283,70 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
- if (streaming->decoder()->failed()) {
+ if (!streaming->ok()) {
return nullptr;
}
if (value() > max_value_) {
- streaming->decoder()->errorf(buffer(), "size > maximum function size: %zu",
- value());
- return nullptr;
+ std::ostringstream oss;
+ oss << "function size > maximum function size: " << value() << " < "
+ << max_value_;
+ return streaming->Error(oss.str());
}
return NextWithValue(streaming);
}
-#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
-// Decode the module header. The error state of the decoder stores the result.
-void StreamingDecoder::DecodeModuleHeader::CheckHeader(Decoder* decoder) {
- // TODO(ahaas): Share code with the module-decoder.
- decoder->Reset(buffer(), buffer() + size());
- uint32_t magic_word = decoder->consume_u32("wasm magic");
- if (magic_word != kWasmMagic) {
- decoder->errorf(buffer(),
- "expected magic word %02x %02x %02x %02x, "
- "found %02x %02x %02x %02x",
- BYTES(kWasmMagic), BYTES(magic_word));
- }
- uint32_t magic_version = decoder->consume_u32("wasm version");
- if (magic_version != kWasmVersion) {
- decoder->errorf(buffer(),
- "expected version %02x %02x %02x %02x, "
- "found %02x %02x %02x %02x",
- BYTES(kWasmVersion), BYTES(magic_version));
- }
-}
-#undef BYTES
-
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
- CheckHeader(streaming->decoder());
- return base::make_unique<DecodeSectionID>();
+ streaming->ProcessModuleHeader();
+ if (streaming->ok()) {
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ }
+ return nullptr;
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
- return base::make_unique<DecodeSectionLength>(id());
+ return base::make_unique<DecodeSectionLength>(id(), module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionLength::NextWithValue(
StreamingDecoder* streaming) {
SectionBuffer* buf = streaming->CreateNewBuffer(
- section_id(), value(),
+ module_offset(), section_id(), value(),
Vector<const uint8_t>(buffer(), static_cast<int>(bytes_needed())));
+ if (!buf) return nullptr;
if (value() == 0) {
- // There is no payload, we go to the next section immediately.
- return base::make_unique<DecodeSectionID>();
- } else if (section_id() == SectionCode::kCodeSectionCode) {
- // We reached the code section. All functions of the code section are put
- // into the same SectionBuffer.
- return base::make_unique<DecodeNumberOfFunctions>(buf);
+ if (section_id() == SectionCode::kCodeSectionCode) {
+ return streaming->Error("Code section cannot have size 0");
+ } else {
+ streaming->ProcessSection(buf);
+ if (streaming->ok()) {
+ // There is no payload, we go to the next section immediately.
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ } else {
+ return nullptr;
+ }
+ }
} else {
- return base::make_unique<DecodeSectionPayload>(buf);
+ if (section_id() == SectionCode::kCodeSectionCode) {
+ // We reached the code section. All functions of the code section are put
+ // into the same SectionBuffer.
+ return base::make_unique<DecodeNumberOfFunctions>(buf);
+ } else {
+ return base::make_unique<DecodeSectionPayload>(buf);
+ }
}
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
- return base::make_unique<DecodeSectionID>();
+ streaming->ProcessSection(section_buffer());
+ if (streaming->ok()) {
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ }
+ return nullptr;
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -320,17 +357,18 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
memcpy(section_buffer_->bytes() + section_buffer_->payload_offset(),
buffer(), bytes_needed());
} else {
- streaming->decoder()->error("Invalid code section length");
- return base::make_unique<DecodeSectionID>();
+ return streaming->Error("Invalid code section length");
}
// {value} is the number of functions.
if (value() > 0) {
+ streaming->StartCodeSection(value());
+ if (!streaming->ok()) return nullptr;
return base::make_unique<DecodeFunctionLength>(
section_buffer(), section_buffer()->payload_offset() + bytes_needed(),
value());
} else {
- return base::make_unique<DecodeSectionID>();
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
}
@@ -341,49 +379,47 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
if (section_buffer_->length() >= buffer_offset_ + bytes_needed()) {
memcpy(section_buffer_->bytes() + buffer_offset_, buffer(), bytes_needed());
} else {
- streaming->decoder()->error("Invalid code section length");
- return base::make_unique<DecodeSectionID>();
+ return streaming->Error("Invalid code section length");
}
// {value} is the length of the function.
if (value() == 0) {
- streaming->decoder()->errorf(buffer(), "Invalid function length (0)");
- return nullptr;
+ return streaming->Error("Invalid function length (0)");
} else if (buffer_offset() + bytes_needed() + value() >
section_buffer()->length()) {
- streaming->decoder()->errorf(buffer(), "not enough code section bytes");
+ streaming->Error("not enough code section bytes");
return nullptr;
}
return base::make_unique<DecodeFunctionBody>(
section_buffer(), buffer_offset() + bytes_needed(), value(),
- num_remaining_functions());
+ num_remaining_functions(), streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
- // TODO(ahaas): Start compilation of the function here.
+ streaming->ProcessFunctionBody(
+ Vector<const uint8_t>(buffer(), static_cast<int>(size())),
+ module_offset());
+ if (!streaming->ok()) {
+ return nullptr;
+ }
if (num_remaining_functions() != 0) {
return base::make_unique<DecodeFunctionLength>(
section_buffer(), buffer_offset() + size(), num_remaining_functions());
} else {
if (buffer_offset() + size() != section_buffer()->length()) {
- streaming->decoder()->Reset(
- section_buffer()->bytes(),
- section_buffer()->bytes() + section_buffer()->length());
- streaming->decoder()->errorf(
- section_buffer()->bytes() + buffer_offset() + size(),
- "not all code section bytes were used");
- return nullptr;
+ return streaming->Error("not all code section bytes were used");
}
- return base::make_unique<DecodeSectionID>();
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
}
-StreamingDecoder::StreamingDecoder(Isolate* isolate)
- : isolate_(isolate),
+StreamingDecoder::StreamingDecoder(
+ std::unique_ptr<StreamingProcessor> processor)
+ : processor_(std::move(processor)),
// A module always starts with a module header.
- state_(new DecodeModuleHeader()),
- decoder_(nullptr, nullptr) {
- USE(isolate_);
-}
+ state_(new DecodeModuleHeader()) {}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
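
DecodeVarInt32 above has to handle a LEB128 value that may arrive split across chunks: it stages bytes in a small buffer, retries the decode after each chunk, and only reports an error once the full five-byte buffer has been filled without completing. A self-contained sketch of that retry-until-complete idea in plain C++ (the helper names are made up; this is not the V8 Decoder):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Tries to decode an unsigned LEB128 value from the staged bytes. Returns the
// number of bytes consumed, or 0 if the staged bytes are not yet a complete
// value (a real decoder would error out after 5 bytes without termination).
size_t TryDecodeVarUint32(const std::vector<uint8_t>& staged, uint32_t* out) {
  uint32_t result = 0;
  int shift = 0;
  for (size_t i = 0; i < staged.size() && i < 5; ++i) {
    result |= static_cast<uint32_t>(staged[i] & 0x7f) << shift;
    if ((staged[i] & 0x80) == 0) {
      *out = result;
      return i + 1;  // complete: this many bytes were needed
    }
    shift += 7;
  }
  return 0;  // incomplete: wait for the next chunk
}

int main() {
  // 624485 encodes as e5 8e 26; deliver it split across two chunks.
  std::vector<uint8_t> staging;
  uint32_t value = 0;
  std::vector<std::vector<uint8_t>> chunks = {{0xe5}, {0x8e, 0x26}};
  for (const auto& chunk : chunks) {
    staging.insert(staging.end(), chunk.begin(), chunk.end());
    if (size_t used = TryDecodeVarUint32(staging, &value)) {
      std::printf("decoded %u from %zu bytes\n", static_cast<unsigned>(value), used);
    }
  }
  return 0;
}
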
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 349e013e6c..2bf5f625d5 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -7,30 +7,72 @@
#include <vector>
#include "src/isolate.h"
-#include "src/wasm/decoder.h"
+#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
namespace wasm {
+// This class is an interface for the StreamingDecoder to start the processing
+// of the incoming module bytes.
+class V8_EXPORT_PRIVATE StreamingProcessor {
+ public:
+ virtual ~StreamingProcessor() = default;
+ // Process the first 8 bytes of a WebAssembly module. Returns true if the
+ // processing finished successfully and the decoding should continue.
+ virtual bool ProcessModuleHeader(Vector<const uint8_t> bytes,
+ uint32_t offset) = 0;
+
+ // Process all sections but the code section. Returns true if the processing
+ // finished successfully and the decoding should continue.
+ virtual bool ProcessSection(SectionCode section_code,
+ Vector<const uint8_t> bytes, uint32_t offset) = 0;
+
+ // Process the start of the code section. Returns true if the processing
+ // finished successfully and the decoding should continue.
+ virtual bool ProcessCodeSectionHeader(size_t num_functions,
+ uint32_t offset) = 0;
+
+ // Process a function body. Returns true if the processing finished
+ // successfully and the decoding should continue.
+ virtual bool ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t offset) = 0;
+
+ // Report the end of a chunk.
+ virtual void OnFinishedChunk() = 0;
+ // Report the end of the stream. If the stream was successful, all
+  // received bytes are passed as a parameter. If there has been an error, an
+ // empty array is passed.
+ virtual void OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
+ size_t length) = 0;
+ // Report an error detected in the StreamingDecoder.
+ virtual void OnError(DecodeResult result) = 0;
+  // Report that the stream was aborted.
+ virtual void OnAbort() = 0;
+};
+
// The StreamingDecoder takes a sequence of byte arrays, each received by a call
// of {OnBytesReceived}, and extracts the bytes which belong to section payloads
// and function bodies.
class V8_EXPORT_PRIVATE StreamingDecoder {
public:
- explicit StreamingDecoder(Isolate* isolate);
+ explicit StreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(Vector<const uint8_t> bytes);
- // Finishes the stream and returns compiled WasmModuleObject.
- MaybeHandle<WasmModuleObject> Finish();
+ void Finish();
- // Finishes the streaming and returns true if no error was detected.
- bool FinishForTesting();
+ void Abort();
+
+  // Notify the StreamingDecoder that there has been a compilation error.
+ void NotifyError() { ok_ = false; }
private:
+ // TODO(ahaas): Put the whole private state of the StreamingDecoder into the
+ // cc file (PIMPL design pattern).
+
// The SectionBuffer is the data object for the content of a single section.
// It stores all bytes of the section (including section id and section
// length), and the offset where the actual payload starts.
@@ -39,21 +81,33 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
// id: The section id.
// payload_length: The length of the payload.
// length_bytes: The section length, as it is encoded in the module bytes.
- SectionBuffer(uint8_t id, size_t payload_length,
+ SectionBuffer(uint32_t module_offset, uint8_t id, size_t payload_length,
Vector<const uint8_t> length_bytes)
: // ID + length + payload
+ module_offset_(module_offset),
length_(1 + length_bytes.length() + payload_length),
bytes_(new uint8_t[length_]),
payload_offset_(1 + length_bytes.length()) {
bytes_[0] = id;
memcpy(bytes_.get() + 1, &length_bytes.first(), length_bytes.length());
}
+
+ SectionCode section_code() const {
+ return static_cast<SectionCode>(bytes_[0]);
+ }
+
+ uint32_t module_offset() const { return module_offset_; }
uint8_t* bytes() const { return bytes_.get(); }
size_t length() const { return length_; }
size_t payload_offset() const { return payload_offset_; }
size_t payload_length() const { return length_ - payload_offset_; }
+ Vector<const uint8_t> payload() const {
+ return Vector<const uint8_t>(bytes() + payload_offset(),
+ payload_length());
+ }
private:
+ uint32_t module_offset_;
size_t length_;
std::unique_ptr<uint8_t[]> bytes_;
size_t payload_offset_;
@@ -127,20 +181,75 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
class DecodeFunctionBody;
// Creates a buffer for the next section of the module.
- SectionBuffer* CreateNewBuffer(uint8_t id, size_t length,
+ SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t id,
+ size_t length,
Vector<const uint8_t> length_bytes) {
- section_buffers_.emplace_back(new SectionBuffer(id, length, length_bytes));
+ // Check the order of sections. Unknown sections can appear at any position.
+ if (id != kUnknownSectionCode) {
+ if (id < next_section_id_) {
+ Error("Unexpected section");
+ return nullptr;
+ }
+ next_section_id_ = id + 1;
+ }
+ section_buffers_.emplace_back(
+ new SectionBuffer(module_offset, id, length, length_bytes));
return section_buffers_.back().get();
}
- Decoder* decoder() { return &decoder_; }
+ std::unique_ptr<DecodingState> Error(DecodeResult result) {
+ if (ok_) processor_->OnError(std::move(result));
+ ok_ = false;
+ return std::unique_ptr<DecodingState>(nullptr);
+ }
+
+ std::unique_ptr<DecodingState> Error(std::string message) {
+ DecodeResult result(nullptr);
+ result.error(module_offset_ - 1, std::move(message));
+ return Error(std::move(result));
+ }
+
+ void ProcessModuleHeader() {
+ if (!ok_) return;
+ ok_ &= processor_->ProcessModuleHeader(
+ Vector<const uint8_t>(state_->buffer(),
+ static_cast<int>(state_->size())),
+ 0);
+ }
+
+ void ProcessSection(SectionBuffer* buffer) {
+ if (!ok_) return;
+ ok_ &= processor_->ProcessSection(
+ buffer->section_code(), buffer->payload(),
+ buffer->module_offset() +
+ static_cast<uint32_t>(buffer->payload_offset()));
+ }
+
+ void StartCodeSection(size_t num_functions) {
+ if (!ok_) return;
+ // The offset passed to {ProcessCodeSectionHeader} is an error offset and
+ // not the start offset of a buffer. Therefore we need the -1 here.
+ ok_ &= processor_->ProcessCodeSectionHeader(num_functions,
+ module_offset() - 1);
+ }
+
+ void ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t module_offset) {
+ if (!ok_) return;
+ ok_ &= processor_->ProcessFunctionBody(bytes, module_offset);
+ }
+
+ bool ok() const { return ok_; }
+
+ uint32_t module_offset() const { return module_offset_; }
- Isolate* isolate_;
+ std::unique_ptr<StreamingProcessor> processor_;
+ bool ok_ = true;
std::unique_ptr<DecodingState> state_;
- // The decoder is an instance variable because we use it for error handling.
- Decoder decoder_;
std::vector<std::unique_ptr<SectionBuffer>> section_buffers_;
+ uint32_t module_offset_ = 0;
size_t total_size_ = 0;
+ uint8_t next_section_id_ = kFirstSectionInModule;
DISALLOW_COPY_AND_ASSIGN(StreamingDecoder);
};
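
CreateNewBuffer above also enforces section ordering on the fly: non-custom sections must appear with strictly increasing ids, while unknown (custom) sections may show up anywhere and do not advance the expected id. A small standalone sketch of that check (illustrative only; it throws instead of going through the decoder's Error path):

#include <cstdint>
#include <stdexcept>

// Non-custom sections must appear in increasing id order; custom sections
// (id 0) may appear anywhere and do not advance the expected id.
class SectionOrderChecker {
 public:
  void OnSection(uint8_t id) {
    if (id == kUnknownSectionCode) return;
    if (id < next_section_id_) {
      throw std::runtime_error("unexpected section");
    }
    next_section_id_ = id + 1;
  }

 private:
  static constexpr uint8_t kUnknownSectionCode = 0;
  uint8_t next_section_id_ = 1;  // the first non-custom section id
};

Fed the section ids as they arrive, this rejects, for example, a type section that follows the code section, while still allowing custom sections between any two known sections.
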
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 52e565c0a7..33db8bb7d2 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -9,20 +9,9 @@
#include "src/source-position-table.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -36,10 +25,6 @@ int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
return static_cast<int>(call_idx);
}
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
namespace {
int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
@@ -88,16 +73,10 @@ CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone) {}
CodeSpecialization::~CodeSpecialization() {}
-void CodeSpecialization::RelocateMemoryReferences(Address old_start,
- uint32_t old_size,
- Address new_start,
- uint32_t new_size) {
- DCHECK(old_mem_start == nullptr && old_mem_size == 0 &&
- new_mem_start == nullptr && new_mem_size == 0);
- old_mem_start = old_start;
- old_mem_size = old_size;
- new_mem_start = new_start;
- new_mem_size = new_size;
+void CodeSpecialization::RelocateWasmContextReferences(Address new_context) {
+ DCHECK_NOT_NULL(new_context);
+ DCHECK_NULL(new_wasm_context_address);
+ new_wasm_context_address = new_context;
}
void CodeSpecialization::RelocateGlobals(Address old_start, Address new_start) {
@@ -146,37 +125,51 @@ bool CodeSpecialization::ApplyToWholeInstance(
changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
}
- // Patch all exported functions (if we shall relocate direct calls).
+ // Patch all exported functions (JS_TO_WASM_FUNCTION).
+ int reloc_mode = 0;
+  // We need to patch WASM_CONTEXT_REFERENCE to hold the correct address.
+ if (new_wasm_context_address) {
+ reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE);
+ }
+ // Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
+ // calls, the instance registered for that (relocate_direct_calls_instance)
+ // should match the instance we currently patch (instance).
if (!relocate_direct_calls_instance.is_null()) {
- // If we patch direct calls, the instance registered for that
- // (relocate_direct_calls_instance) should match the instance we currently
- // patch (instance).
- int wrapper_index = 0;
DCHECK_EQ(instance, *relocate_direct_calls_instance);
- for (auto exp : module->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Code* export_wrapper =
- Code::cast(compiled_module->export_wrappers()->get(wrapper_index));
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
- // There must be exactly one call to WASM_FUNCTION or WASM_TO_JS_FUNCTION.
- for (RelocIterator it(export_wrapper,
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET));
- ; it.next()) {
- DCHECK(!it.done());
- // Ignore calls to other builtins like ToNumber.
- if (!IsAtWasmDirectCallTarget(it)) continue;
- Code* new_code = Code::cast(code_table->get(exp.index));
- it.rinfo()->set_target_address(new_code->GetIsolate(),
- new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- break;
+ reloc_mode |= RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ }
+ if (!reloc_mode) return changed;
+ int wrapper_index = 0;
+ for (auto exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Code* export_wrapper =
+ Code::cast(compiled_module->export_wrappers()->get(wrapper_index));
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+ for (RelocIterator it(export_wrapper, reloc_mode); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ switch (mode) {
+ case RelocInfo::WASM_CONTEXT_REFERENCE:
+ it.rinfo()->set_wasm_context_reference(export_wrapper->GetIsolate(),
+ new_wasm_context_address,
+ icache_flush_mode);
+ break;
+ case RelocInfo::CODE_TARGET: {
+ // Ignore calls to other builtins like ToNumber.
+ if (!IsAtWasmDirectCallTarget(it)) continue;
+ Code* new_code = Code::cast(code_table->get(exp.index));
+ it.rinfo()->set_target_address(
+ new_code->GetIsolate(), new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ } break;
+ default:
+ UNREACHABLE();
}
- changed = true;
- ++wrapper_index;
}
- DCHECK_EQ(code_table->length(), func_index);
- DCHECK_EQ(compiled_module->export_wrappers()->length(), wrapper_index);
+ changed = true;
+ ++wrapper_index;
}
+ DCHECK_EQ(code_table->length(), func_index);
+ DCHECK_EQ(compiled_module->export_wrappers()->length(), wrapper_index);
return changed;
}
@@ -185,8 +178,6 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
DisallowHeapAllocation no_gc;
DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
- bool reloc_mem_addr = old_mem_start != new_mem_start;
- bool reloc_mem_size = old_mem_size != new_mem_size;
bool reloc_globals = old_globals_start || new_globals_start;
bool patch_table_size = old_function_table_size || new_function_table_size;
bool reloc_direct_calls = !relocate_direct_calls_instance.is_null();
@@ -196,8 +187,6 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
if (cond) reloc_mode |= RelocInfo::ModeMask(mode);
};
- add_mode(reloc_mem_addr, RelocInfo::WASM_MEMORY_REFERENCE);
- add_mode(reloc_mem_size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
add_mode(reloc_globals, RelocInfo::WASM_GLOBAL_REFERENCE);
add_mode(patch_table_size, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
add_mode(reloc_direct_calls, RelocInfo::CODE_TARGET);
@@ -209,19 +198,6 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
for (RelocIterator it(code, reloc_mode); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
- case RelocInfo::WASM_MEMORY_REFERENCE:
- DCHECK(reloc_mem_addr);
- it.rinfo()->update_wasm_memory_reference(code->GetIsolate(),
- old_mem_start, new_mem_start,
- icache_flush_mode);
- changed = true;
- break;
- case RelocInfo::WASM_MEMORY_SIZE_REFERENCE:
- DCHECK(reloc_mem_size);
- it.rinfo()->update_wasm_memory_size(code->GetIsolate(), old_mem_size,
- new_mem_size, icache_flush_mode);
- changed = true;
- break;
case RelocInfo::WASM_GLOBAL_REFERENCE:
DCHECK(reloc_globals);
it.rinfo()->update_wasm_global_reference(
@@ -281,3 +257,7 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
return changed;
}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-specialization.h b/deps/v8/src/wasm/wasm-code-specialization.h
index abcc941c5f..4cf422b64f 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.h
+++ b/deps/v8/src/wasm/wasm-code-specialization.h
@@ -28,9 +28,8 @@ class CodeSpecialization {
CodeSpecialization(Isolate*, Zone*);
~CodeSpecialization();
- // Update memory references.
- void RelocateMemoryReferences(Address old_start, uint32_t old_size,
- Address new_start, uint32_t new_size);
+ // Update WasmContext references.
+ void RelocateWasmContextReferences(Address new_context);
// Update references to global variables.
void RelocateGlobals(Address old_start, Address new_start);
// Update function table size.
@@ -49,10 +48,7 @@ class CodeSpecialization {
bool ApplyToWasmCode(Code*, ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
private:
- Address old_mem_start = 0;
- uint32_t old_mem_size = 0;
- Address new_mem_start = 0;
- uint32_t new_mem_size = 0;
+ Address new_wasm_context_address = 0;
Address old_globals_start = 0;
Address new_globals_start = 0;
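
The reworked ApplyToWholeInstance builds one bitmask of the relocation kinds it cares about (the context reference, plus call targets when direct calls are relocated) and then walks each export wrapper's relocation entries once, dispatching per kind. A generic sketch of that select-then-dispatch shape, with invented data types standing in for RelocInfo:

#include <cstdio>
#include <vector>

enum RelocKind : int {
  kContextReference = 1 << 0,
  kCodeTarget = 1 << 1,
};

struct RelocEntry {
  RelocKind kind;
  const char* where;
};

// Walk the entries once; only kinds selected in `mask` are patched.
void Patch(const std::vector<RelocEntry>& entries, int mask) {
  for (const RelocEntry& entry : entries) {
    if ((entry.kind & mask) == 0) continue;
    switch (entry.kind) {
      case kContextReference:
        std::printf("patch context reference at %s\n", entry.where);
        break;
      case kCodeTarget:
        std::printf("patch call target at %s\n", entry.where);
        break;
    }
  }
}

int main() {
  int mask = kContextReference;       // the context address is always patched
  bool relocate_direct_calls = true;  // assumed for this example
  if (relocate_direct_calls) mask |= kCodeTarget;
  Patch({{kContextReference, "wrapper+0x10"}, {kCodeTarget, "wrapper+0x24"}}, mask);
  return 0;
}
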
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 0770940484..79c784a0f7 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -17,20 +17,12 @@
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/zone/accounting-allocator.h"
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
namespace {
@@ -98,10 +90,6 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
return handle(String::cast(func_locals_names->get(local_index)));
}
-// Forward declaration.
-class InterpreterHandle;
-InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info);
-
class InterpreterHandle {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(InterpreterHandle);
Isolate* isolate_;
@@ -150,20 +138,16 @@ class InterpreterHandle {
static uint32_t GetMemSize(WasmDebugInfo* debug_info) {
DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module =
- debug_info->wasm_instance()->compiled_module();
- return compiled_module->has_embedded_mem_size()
- ? compiled_module->embedded_mem_size()
+ return debug_info->wasm_instance()->has_memory_object()
+ ? debug_info->wasm_instance()->wasm_context()->mem_size
: 0;
}
static byte* GetMemStart(WasmDebugInfo* debug_info) {
DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module =
- debug_info->wasm_instance()->compiled_module();
- return reinterpret_cast<byte*>(compiled_module->has_embedded_mem_start()
- ? compiled_module->embedded_mem_start()
- : 0);
+ return debug_info->wasm_instance()->has_memory_object()
+ ? debug_info->wasm_instance()->wasm_context()->mem_start
+ : nullptr;
}
static byte* GetGlobalsStart(WasmDebugInfo* debug_info) {
@@ -326,7 +310,12 @@ class InterpreterHandle {
WasmInterpreterEntryFrame* frame =
WasmInterpreterEntryFrame::cast(it.frame());
Handle<WasmInstanceObject> instance_obj(frame->wasm_instance(), isolate_);
- DCHECK_EQ(this, GetInterpreterHandle(instance_obj->debug_info()));
+ // Check that this is indeed the instance which is connected to this
+ // interpreter.
+ DCHECK_EQ(this, Managed<wasm::InterpreterHandle>::cast(
+ instance_obj->debug_info()->get(
+ WasmDebugInfo::kInterpreterHandleIndex))
+ ->get());
return instance_obj;
}
@@ -565,29 +554,35 @@ class InterpreterHandle {
}
};
-InterpreterHandle* GetOrCreateInterpreterHandle(
+} // namespace
+
+} // namespace wasm
+
+namespace {
+
+wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
Handle<Object> handle(debug_info->get(WasmDebugInfo::kInterpreterHandleIndex),
isolate);
if (handle->IsUndefined(isolate)) {
- InterpreterHandle* cpp_handle = new InterpreterHandle(isolate, *debug_info);
- handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
+ handle = Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate,
+ *debug_info);
debug_info->set(WasmDebugInfo::kInterpreterHandleIndex, *handle);
}
- return Handle<Managed<InterpreterHandle>>::cast(handle)->get();
+ return Handle<Managed<wasm::InterpreterHandle>>::cast(handle)->get();
}
-InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
+wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandleIndex);
DCHECK(!handle_obj->IsUndefined(debug_info->GetIsolate()));
- return Managed<InterpreterHandle>::cast(handle_obj)->get();
+ return Managed<wasm::InterpreterHandle>::cast(handle_obj)->get();
}
-InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
+wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandleIndex);
if (handle_obj->IsUndefined(debug_info->GetIsolate())) return nullptr;
- return Managed<InterpreterHandle>::cast(handle_obj)->get();
+ return Managed<wasm::InterpreterHandle>::cast(handle_obj)->get();
}
int GetNumFunctions(WasmInstanceObject* instance) {
@@ -657,14 +652,14 @@ Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
return debug_info;
}
-WasmInterpreter* WasmDebugInfo::SetupForTesting(
+wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
Handle<WasmInstanceObject> instance_obj) {
Handle<WasmDebugInfo> debug_info = WasmDebugInfo::New(instance_obj);
Isolate* isolate = instance_obj->GetIsolate();
- InterpreterHandle* cpp_handle = new InterpreterHandle(isolate, *debug_info);
- Handle<Object> handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
- debug_info->set(kInterpreterHandleIndex, *handle);
- return cpp_handle->interpreter();
+ auto interp_handle =
+ Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate, *debug_info);
+ debug_info->set(kInterpreterHandleIndex, *interp_handle);
+ return interp_handle->get()->interpreter();
}
bool WasmDebugInfo::IsWasmDebugInfo(Object* object) {
@@ -691,9 +686,9 @@ WasmInstanceObject* WasmDebugInfo::wasm_instance() {
void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
int func_index, int offset) {
Isolate* isolate = debug_info->GetIsolate();
- InterpreterHandle* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
+ auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
RedirectToInterpreter(debug_info, Vector<int>(&func_index, 1));
- const WasmFunction* func = &handle->module()->functions[func_index];
+ const wasm::WasmFunction* func = &handle->module()->functions[func_index];
handle->interpreter()->SetBreakpoint(func, offset, true);
}
@@ -753,12 +748,12 @@ void WasmDebugInfo::Unwind(Address frame_pointer) {
}
uint64_t WasmDebugInfo::NumInterpretedCalls() {
- auto handle = GetInterpreterHandleOrNull(this);
+ auto* handle = GetInterpreterHandleOrNull(this);
return handle ? handle->NumInterpretedCalls() : 0;
}
void WasmDebugInfo::UpdateMemory(JSArrayBuffer* new_memory) {
- InterpreterHandle* interp_handle = GetInterpreterHandleOrNull(this);
+ auto* interp_handle = GetInterpreterHandleOrNull(this);
if (!interp_handle) return;
interp_handle->UpdateMemory(new_memory);
}
@@ -766,14 +761,14 @@ void WasmDebugInfo::UpdateMemory(JSArrayBuffer* new_memory) {
// static
Handle<JSObject> WasmDebugInfo::GetScopeDetails(
Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- InterpreterHandle* interp_handle = GetInterpreterHandle(*debug_info);
+ auto* interp_handle = GetInterpreterHandle(*debug_info);
return interp_handle->GetScopeDetails(frame_pointer, frame_index, debug_info);
}
// static
Handle<JSObject> WasmDebugInfo::GetGlobalScopeObject(
Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- InterpreterHandle* interp_handle = GetInterpreterHandle(*debug_info);
+ auto* interp_handle = GetInterpreterHandle(*debug_info);
auto frame = interp_handle->GetInterpretedFrame(frame_pointer, frame_index);
return interp_handle->GetGlobalScopeObject(frame.get(), debug_info);
}
@@ -781,22 +776,21 @@ Handle<JSObject> WasmDebugInfo::GetGlobalScopeObject(
// static
Handle<JSObject> WasmDebugInfo::GetLocalScopeObject(
Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- InterpreterHandle* interp_handle = GetInterpreterHandle(*debug_info);
+ auto* interp_handle = GetInterpreterHandle(*debug_info);
auto frame = interp_handle->GetInterpretedFrame(frame_pointer, frame_index);
return interp_handle->GetLocalScopeObject(frame.get(), debug_info);
}
// static
Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
- Handle<WasmDebugInfo> debug_info, FunctionSig* sig) {
+ Handle<WasmDebugInfo> debug_info, wasm::FunctionSig* sig) {
Isolate* isolate = debug_info->GetIsolate();
DCHECK_EQ(debug_info->has_c_wasm_entries(),
debug_info->has_c_wasm_entry_map());
if (!debug_info->has_c_wasm_entries()) {
auto entries = isolate->factory()->NewFixedArray(4, TENURED);
debug_info->set_c_wasm_entries(*entries);
- auto managed_map =
- Managed<wasm::SignatureMap>::New(isolate, new wasm::SignatureMap());
+ auto managed_map = Managed<wasm::SignatureMap>::Allocate(isolate);
debug_info->set_c_wasm_entry_map(*managed_map);
}
Handle<FixedArray> entries(debug_info->c_wasm_entries(), isolate);
@@ -810,7 +804,12 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index)->IsUndefined(isolate));
- Handle<Code> new_entry_code = compiler::CompileCWasmEntry(isolate, sig);
+ Address context_address = reinterpret_cast<Address>(
+ debug_info->wasm_instance()->has_memory_object()
+ ? debug_info->wasm_instance()->wasm_context()
+ : nullptr);
+ Handle<Code> new_entry_code =
+ compiler::CompileCWasmEntry(isolate, sig, context_address);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("c-wasm-entry"));
Handle<SharedFunctionInfo> shared =
@@ -826,3 +825,6 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
}
return handle(JSFunction::cast(entries->get(index)));
}
+
+} // namespace internal
+} // namespace v8
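
GetCWasmEntry above memoizes compiled entry stubs per signature: a SignatureMap assigns each signature a dense index, the FixedArray of entries is grown on demand, and the stub is compiled only the first time its slot is found empty. A toy version of that cache, with a string key standing in for FunctionSig and a string value standing in for the compiled Code object:

#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Maps a signature key to a dense index and lazily fills a parallel table of
// entries, mirroring the c_wasm_entries / c_wasm_entry_map pair.
class EntryCache {
 public:
  const std::string& GetOrCreate(const std::string& signature) {
    auto it = index_of_.find(signature);
    std::size_t index;
    if (it == index_of_.end()) {
      index = index_of_.size();
      index_of_.emplace(signature, index);
      entries_.resize(index + 1);  // grow the entry table on demand
    } else {
      index = it->second;
    }
    if (entries_[index].empty()) {
      entries_[index] = "entry for " + signature;  // stands in for the stub
    }
    return entries_[index];
  }

 private:
  std::map<std::string, std::size_t> index_of_;
  std::vector<std::string> entries_;
};
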
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 8c2547bf3e..93a84583b9 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -10,6 +10,7 @@
#include "include/v8config.h"
#include "src/base/bits.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
#include "src/wasm/wasm-external-refs.h"
@@ -223,6 +224,10 @@ void float64_pow_wrapper(double* param0, double* param1) {
WriteDoubleValue(param0, Pow(x, y));
}
+void set_thread_in_wasm_flag() { trap_handler::SetThreadInWasm(); }
+
+void clear_thread_in_wasm_flag() { trap_handler::ClearThreadInWasm(); }
+
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 04337b99ec..e4e88de0db 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -61,6 +61,9 @@ uint32_t word64_popcnt_wrapper(uint64_t* input);
void float64_pow_wrapper(double* param0, double* param1);
+void set_thread_in_wasm_flag();
+void clear_thread_in_wasm_flag();
+
typedef void (*WasmTrapCallbackForTesting)();
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback);
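
The two wrappers added above only forward to the trap handler's per-thread flag, so compiled code and runtime stubs can mark when execution enters or leaves wasm. A minimal standalone sketch of that idea follows; the thread-local flag and the RAII scope are illustrative stand-ins, not the V8 trap_handler API.

// Sketch: a per-thread "in wasm" flag plus an RAII scope that brackets a call
// the way set_thread_in_wasm_flag()/clear_thread_in_wasm_flag() are intended
// to be used. All names below are hypothetical.
#include <cassert>

namespace sketch {

thread_local bool g_thread_in_wasm = false;

void SetThreadInWasm() { g_thread_in_wasm = true; }
void ClearThreadInWasm() { g_thread_in_wasm = false; }
bool IsThreadInWasm() { return g_thread_in_wasm; }

// Sets the flag on entry and clears it on exit, so a fault handler can tell
// wasm out-of-bounds traps apart from unrelated crashes.
class ThreadInWasmScope {
 public:
  ThreadInWasmScope() { SetThreadInWasm(); }
  ~ThreadInWasmScope() { ClearThreadInWasm(); }
};

}  // namespace sketch

int main() {
  assert(!sketch::IsThreadInWasm());
  {
    sketch::ThreadInWasmScope scope;
    assert(sketch::IsThreadInWasm());  // a fault here would be treated as a wasm trap
  }
  assert(!sketch::IsThreadInWasm());
  return 0;
}
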
diff --git a/deps/v8/src/wasm/wasm-heap.cc b/deps/v8/src/wasm/wasm-heap.cc
new file mode 100644
index 0000000000..b7d13b067f
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-heap.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-heap.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
+ ranges_.push_back({start, end});
+}
+
+void DisjointAllocationPool::Merge(DisjointAllocationPool&& other) {
+ auto dest_it = ranges_.begin();
+ auto dest_end = ranges_.end();
+
+ for (auto src_it = other.ranges_.begin(), src_end = other.ranges_.end();
+ src_it != src_end;) {
+ if (dest_it == dest_end) {
+ // everything else coming from src will be inserted
+ // at the back of ranges_ from now on.
+ ranges_.push_back(*src_it);
+ ++src_it;
+ continue;
+ }
+ // Before or adjacent to dest. Insert or merge, and advance
+ // just src.
+ if (dest_it->first >= src_it->second) {
+ if (dest_it->first == src_it->second) {
+ dest_it->first = src_it->first;
+ } else {
+ ranges_.insert(dest_it, {src_it->first, src_it->second});
+ }
+ ++src_it;
+ continue;
+ }
+ // Src is strictly after dest. Skip over this dest.
+ if (dest_it->second < src_it->first) {
+ ++dest_it;
+ continue;
+ }
+ // Src is adjacent from above. Merge and advance
+ // just src, because the next src, if any, is bound to be
+ // strictly above the newly-formed range.
+ DCHECK_EQ(dest_it->second, src_it->first);
+ dest_it->second = src_it->second;
+ ++src_it;
+ // Now that we merged, maybe this new range is adjacent to
+ // the next. Since we assume src to have come from the
+ // same original memory pool, it follows that the next src
+ // must be above or adjacent to the new bubble.
+ auto next_dest = dest_it;
+ ++next_dest;
+ if (next_dest != dest_end && dest_it->second == next_dest->first) {
+ dest_it->second = next_dest->second;
+ ranges_.erase(next_dest);
+ }
+
+ // src_it points now at the next, if any, src
+ DCHECK_IMPLIES(src_it != src_end, src_it->first >= dest_it->second);
+ }
+}
+
+DisjointAllocationPool DisjointAllocationPool::Extract(size_t size,
+ ExtractionMode mode) {
+ DisjointAllocationPool ret;
+ for (auto it = ranges_.begin(), end = ranges_.end(); it != end;) {
+ auto current = it;
+ ++it;
+ DCHECK_LT(current->first, current->second);
+ size_t current_size = reinterpret_cast<size_t>(current->second) -
+ reinterpret_cast<size_t>(current->first);
+ if (size == current_size) {
+ ret.ranges_.push_back(*current);
+ ranges_.erase(current);
+ return ret;
+ }
+ if (size < current_size) {
+ ret.ranges_.push_back({current->first, current->first + size});
+ current->first += size;
+ DCHECK(current->first < current->second);
+ return ret;
+ }
+ if (mode != kContiguous) {
+ size -= current_size;
+ ret.ranges_.push_back(*current);
+ ranges_.erase(current);
+ }
+ }
+ if (size > 0) {
+ Merge(std::move(ret));
+ return {};
+ }
+ return ret;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
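
The Merge() loop above takes ranges previously handed out by Extract() and re-inserts them so the list stays sorted, disjoint, and fully coalesced (adjacent [a,b) and [b,c) collapse into [a,c)). Here is a standalone sketch of the same loop over plain integer ranges; it mirrors the logic for illustration and is not the V8 class itself.

// Sketch: coalescing merge of two sorted, disjoint lists of half-open ranges.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <list>
#include <utility>

using Range = std::pair<std::uintptr_t, std::uintptr_t>;  // [first, second)

void MergeInto(std::list<Range>& dest, std::list<Range>&& src) {
  auto d = dest.begin();
  for (auto s = src.begin(); s != src.end();) {
    if (d == dest.end()) {                 // everything left in src goes at the back
      dest.push_back(*s);
      ++s;
    } else if (d->first >= s->second) {    // src range sits below the dest range
      if (d->first == s->second) d->first = s->first;  // adjacent: extend down
      else dest.insert(d, *s);                         // strictly below: insert
      ++s;
    } else if (d->second < s->first) {     // src range is above: advance dest
      ++d;
    } else {                               // adjacent from above: extend up
      d->second = s->second;
      auto next = std::next(d);
      if (next != dest.end() && d->second == next->first) {  // may close a gap
        d->second = next->second;
        dest.erase(next);
      }
      ++s;
    }
  }
}

int main() {
  std::list<Range> pool = {{0, 100}, {200, 300}};
  MergeInto(pool, {{100, 200}});  // returning the gap collapses the pool to [0,300)
  for (const Range& r : pool) std::cout << '[' << r.first << ',' << r.second << ")\n";
  return 0;
}
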
diff --git a/deps/v8/src/wasm/wasm-heap.h b/deps/v8/src/wasm/wasm-heap.h
new file mode 100644
index 0000000000..60cbfb14ba
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-heap.h
@@ -0,0 +1,66 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_HEAP_H_
+#define V8_WASM_HEAP_H_
+
+#include <list>
+
+#include "src/base/macros.h"
+#include "src/vector.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Sorted, disjoint and non-overlapping memory ranges. A range is of the
+// form [start, end). So there's no [start, end), [end, other_end),
+// because that should have been reduced to [start, other_end).
+using AddressRange = std::pair<Address, Address>;
+class V8_EXPORT_PRIVATE DisjointAllocationPool final {
+ public:
+ enum ExtractionMode : bool { kAny = false, kContiguous = true };
+ DisjointAllocationPool() {}
+
+ explicit DisjointAllocationPool(Address, Address);
+
+ DisjointAllocationPool(DisjointAllocationPool&& other) = default;
+ DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
+
+ // Merge the ranges of the parameter into this object. Ordering is
+ // preserved. The assumption is that the passed parameter is
+ // not intersecting this object - for example, it was obtained
+ // from a previous Allocate{Pool}.
+ void Merge(DisjointAllocationPool&&);
+
+ // Allocate a contiguous range of size {size}. Return an empty pool on
+ // failure.
+ DisjointAllocationPool Allocate(size_t size) {
+ return Extract(size, kContiguous);
+ }
+
+ // Allocate a sub-pool of size {size}. Return an empty pool on failure.
+ DisjointAllocationPool AllocatePool(size_t size) {
+ return Extract(size, kAny);
+ }
+
+ bool IsEmpty() const { return ranges_.empty(); }
+ const std::list<AddressRange>& ranges() const { return ranges_; }
+
+ private:
+ // Extract out a total of {size}. By default, the return may
+ // be more than one range. If kContiguous is passed, the return
+ // will be one range. If the operation fails, this object is
+ // unchanged, and the returned pool satisfies {IsEmpty()}.
+ DisjointAllocationPool Extract(size_t size, ExtractionMode mode);
+
+ std::list<AddressRange> ranges_;
+
+ DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+#endif
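
As a usage sketch of the API declared above: carve a contiguous piece out of a larger reservation and give it back later. This compiles only against the V8 tree, and the helper name and sizes are illustrative.

// Usage sketch (V8-internal types; illustrative only).
#include <utility>

#include "src/wasm/wasm-heap.h"

namespace v8 {
namespace internal {
namespace wasm {

void PoolUsageSketch(Address reservation_start, Address reservation_end) {
  DisjointAllocationPool pool(reservation_start, reservation_end);

  // Take a contiguous 4 KiB piece; the result is empty on failure.
  DisjointAllocationPool code_space = pool.Allocate(4096);
  if (code_space.IsEmpty()) return;  // not enough contiguous space left

  // ... emit code into code_space.ranges().front() ...

  // Return the piece; Merge() re-coalesces it with adjacent free ranges.
  pool.Merge(std::move(code_space));
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8
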
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 8b81d73b4f..4269e18c8f 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -15,10 +15,11 @@
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/memory-tracing.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
@@ -624,24 +625,11 @@ inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
Isolate* isolate = instance->GetIsolate();
int32_t ret = WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
-#ifdef DEBUG
// Ensure the effects of GrowMemory have been observed by the interpreter.
// See {UpdateMemory}. In all cases, we are in agreement with the runtime
// object's view.
- uint32_t cached_size = mem_info->mem_size;
- byte* cached_start = mem_info->mem_start;
- uint32_t instance_size =
- instance->compiled_module()->has_embedded_mem_size()
- ? instance->compiled_module()->embedded_mem_size()
- : 0;
- byte* instance_start =
- instance->compiled_module()->has_embedded_mem_start()
- ? reinterpret_cast<byte*>(
- instance->compiled_module()->embedded_mem_start())
- : nullptr;
- CHECK_EQ(cached_size, instance_size);
- CHECK_EQ(cached_start, instance_start);
-#endif
+ DCHECK_EQ(mem_info->mem_size, instance->wasm_context()->mem_size);
+ DCHECK_EQ(mem_info->mem_start, instance->wasm_context()->mem_start);
return ret;
}
@@ -667,23 +655,24 @@ const char* OpcodeName(uint32_t val) {
Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
Handle<Code> js_wrapper) {
DCHECK_EQ(Code::WASM_TO_JS_FUNCTION, js_wrapper->kind());
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*js_wrapper, mask); !it.done(); it.next()) {
- HeapObject* obj = it.rinfo()->target_object();
- if (!obj->IsCallable()) continue;
-#ifdef DEBUG
- // There should only be this one reference to a callable object.
- for (it.next(); !it.done(); it.next()) {
- HeapObject* other = it.rinfo()->target_object();
- DCHECK(!other->IsCallable());
- }
-#endif
- return handle(obj, isolate);
+ Handle<FixedArray> deopt_data(js_wrapper->deoptimization_data(), isolate);
+ DCHECK_EQ(2, deopt_data->length());
+ intptr_t js_imports_table_loc = static_cast<intptr_t>(
+ HeapNumber::cast(deopt_data->get(0))->value_as_bits());
+ Handle<FixedArray> js_imports_table(
+ reinterpret_cast<FixedArray**>(js_imports_table_loc));
+ int index = 0;
+ CHECK(deopt_data->get(1)->ToInt32(&index));
+ DCHECK_GT(js_imports_table->length(), index);
+ Handle<Object> obj(js_imports_table->get(index), isolate);
+ if (obj->IsCallable()) {
+ return Handle<HeapObject>::cast(obj);
+ } else {
+ // If we did not find a callable object, this is an illegal JS import and
+ // obj must be undefined.
+ DCHECK(obj->IsUndefined(isolate));
+ return Handle<HeapObject>::null();
}
- // If we did not find a callable object, then there must be a reference to
- // the WasmThrowTypeError runtime function.
- // TODO(clemensh): Check that this is the case.
- return Handle<HeapObject>::null();
}
class SideTable;
@@ -1454,7 +1443,8 @@ class ThreadImpl {
}
template <typename ctype, typename mtype>
- bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len) {
+ bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
+ MachineRepresentation rep) {
MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<mtype>(cached_instance_info_->mem_size, operand.offset,
@@ -1467,12 +1457,20 @@ class ThreadImpl {
Push(result);
len = 1 + operand.length;
+
+ if (FLAG_wasm_trace_memory) {
+ tracing::TraceMemoryOperation(
+ tracing::kWasmInterpreted, false, rep, operand.offset + index,
+ code->function->func_index, static_cast<int>(pc),
+ cached_instance_info_->mem_start);
+ }
+
return true;
}
template <typename ctype, typename mtype>
- bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int& len) {
+ bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
+ MachineRepresentation rep) {
MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
WasmValue val = Pop();
@@ -1491,6 +1489,14 @@ class ThreadImpl {
} else if (std::is_same<double, ctype>::value) {
possible_nondeterminism_ |= std::isnan(val.to<double>());
}
+
+ if (FLAG_wasm_trace_memory) {
+ tracing::TraceMemoryOperation(
+ tracing::kWasmInterpreted, true, rep, operand.offset + index,
+ code->function->func_index, static_cast<int>(pc),
+ cached_instance_info_->mem_start);
+ }
+
return true;
}
@@ -1812,43 +1818,47 @@ class ThreadImpl {
break;
}
-#define LOAD_CASE(name, ctype, mtype) \
- case kExpr##name: { \
- if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len)) return; \
- break; \
- }
-
- LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
- LOAD_CASE(I32LoadMem8U, int32_t, uint8_t);
- LOAD_CASE(I32LoadMem16S, int32_t, int16_t);
- LOAD_CASE(I32LoadMem16U, int32_t, uint16_t);
- LOAD_CASE(I64LoadMem8S, int64_t, int8_t);
- LOAD_CASE(I64LoadMem8U, int64_t, uint8_t);
- LOAD_CASE(I64LoadMem16S, int64_t, int16_t);
- LOAD_CASE(I64LoadMem16U, int64_t, uint16_t);
- LOAD_CASE(I64LoadMem32S, int64_t, int32_t);
- LOAD_CASE(I64LoadMem32U, int64_t, uint32_t);
- LOAD_CASE(I32LoadMem, int32_t, int32_t);
- LOAD_CASE(I64LoadMem, int64_t, int64_t);
- LOAD_CASE(F32LoadMem, float, float);
- LOAD_CASE(F64LoadMem, double, double);
+#define LOAD_CASE(name, ctype, mtype, rep) \
+ case kExpr##name: { \
+ if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len, \
+ MachineRepresentation::rep)) \
+ return; \
+ break; \
+ }
+
+ LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
+ LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
+ LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
+ LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
+ LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
+ LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord8);
+ LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
+ LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
+ LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
+ LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
+ LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
+ LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
+ LOAD_CASE(F32LoadMem, float, float, kFloat32);
+ LOAD_CASE(F64LoadMem, double, double, kFloat64);
#undef LOAD_CASE
-#define STORE_CASE(name, ctype, mtype) \
- case kExpr##name: { \
- if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len)) return; \
- break; \
- }
-
- STORE_CASE(I32StoreMem8, int32_t, int8_t);
- STORE_CASE(I32StoreMem16, int32_t, int16_t);
- STORE_CASE(I64StoreMem8, int64_t, int8_t);
- STORE_CASE(I64StoreMem16, int64_t, int16_t);
- STORE_CASE(I64StoreMem32, int64_t, int32_t);
- STORE_CASE(I32StoreMem, int32_t, int32_t);
- STORE_CASE(I64StoreMem, int64_t, int64_t);
- STORE_CASE(F32StoreMem, float, float);
- STORE_CASE(F64StoreMem, double, double);
+#define STORE_CASE(name, ctype, mtype, rep) \
+ case kExpr##name: { \
+ if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len, \
+ MachineRepresentation::rep)) \
+ return; \
+ break; \
+ }
+
+ STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
+ STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
+ STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
+ STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
+ STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
+ STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
+ STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
+ STORE_CASE(F32StoreMem, float, float, kFloat32);
+ STORE_CASE(F64StoreMem, double, double, kFloat64);
#undef STORE_CASE
#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
@@ -2004,6 +2014,7 @@ class ThreadImpl {
return;
PAUSE_IF_BREAK_FLAG(AfterReturn);
}
+#undef PAUSE_IF_BREAK_FLAG
}
state_ = WasmInterpreter::PAUSED;
@@ -2701,6 +2712,11 @@ WasmInterpreter::HeapObjectsScope::~HeapObjectsScope() {
}
#undef TRACE
+#undef FOREACH_INTERNAL_OPCODE
+#undef WASM_CTYPES
+#undef FOREACH_SIMPLE_BINOP
+#undef FOREACH_OTHER_BINOP
+#undef FOREACH_OTHER_UNOP
} // namespace wasm
} // namespace internal
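
The interpreter changes above thread a MachineRepresentation through ExecuteLoad/ExecuteStore purely so the FLAG_wasm_trace_memory path can report what kind of access happened. A standalone sketch of that shape follows; the enum, flag, and trace function are stand-ins, not the V8 tracing API.

// Sketch: a templated load that widens mtype to ctype and reports the access.
#include <cstdint>
#include <cstring>
#include <iostream>

enum class Rep { kWord8, kWord16, kWord32, kWord64, kFloat32, kFloat64 };

bool g_trace_memory = true;  // stands in for FLAG_wasm_trace_memory

void TraceLoad(Rep rep, std::size_t address) {
  std::cout << "load rep=" << static_cast<int>(rep) << " @" << address << "\n";
}

template <typename ctype, typename mtype>
ctype ExecuteLoad(const std::uint8_t* mem, std::size_t offset, Rep rep) {
  mtype raw;                                   // read the narrow memory type
  std::memcpy(&raw, mem + offset, sizeof(mtype));
  if (g_trace_memory) TraceLoad(rep, offset);  // the representation only feeds tracing
  return static_cast<ctype>(raw);              // widen to the value-stack type
}

int main() {
  std::uint8_t mem[8] = {0xff, 0, 0, 0, 0, 0, 0, 0};
  // Mirrors LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8):
  std::int32_t v = ExecuteLoad<std::int32_t, std::int8_t>(mem, 0, Rep::kWord8);
  std::cout << v << "\n";  // prints -1: the byte is sign-extended
  return 0;
}
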
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index ce2e3f1341..6a017365aa 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -14,12 +14,14 @@
#include "src/objects.h"
#include "src/parsing/parse-info.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-api.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
using v8::internal::wasm::ErrorThrower;
@@ -48,11 +50,6 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
- if (args.Length() < 1) {
- thrower->TypeError("Argument 0 must be a WebAssembly.Module");
- return {};
- }
-
i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
if (!arg0->IsWasmModuleObject()) {
thrower->TypeError("Argument 0 must be a WebAssembly.Module");
@@ -66,11 +63,6 @@ i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
- if (args.Length() < 1) {
- thrower->TypeError("Argument 0 must be a buffer source");
- return i::wasm::ModuleWireBytes(nullptr, nullptr);
- }
-
const uint8_t* start = nullptr;
size_t length = 0;
v8::Local<v8::Value> source = args[0];
@@ -122,6 +114,22 @@ void WebAssemblyCompileStreaming(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ // Manually create a promise and reject it.
+ Local<Context> context = isolate->GetCurrentContext();
+ ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(resolver->GetPromise());
+ i::wasm::ScheduledErrorThrower thrower(i_isolate,
+ "WebAssembly.compileStreaming()");
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false),
+ i_isolate->has_scheduled_exception());
+ return;
+ }
+
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
DCHECK_NOT_NULL(i_isolate->wasm_compile_streaming_callback());
i_isolate->wasm_compile_streaming_callback()(args);
@@ -136,6 +144,10 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ }
+
Local<Context> context = isolate->GetCurrentContext();
ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
@@ -180,6 +192,11 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ return;
+ }
+
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
if (thrower.error()) {
@@ -393,16 +410,6 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Promise> module_promise = resolver->GetPromise();
args.GetReturnValue().Set(module_promise);
- if (args.Length() < 1) {
- thrower.TypeError(
- "Argument 0 must be provided and must be either a buffer source or a "
- "WebAssembly.Module object");
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false),
- i_isolate->has_scheduled_exception());
- return;
- }
-
Local<Value> first_arg_value = args[0];
i::Handle<i::Object> first_arg = Utils::OpenHandle(*first_arg_value);
if (!first_arg->IsJSObject()) {
@@ -470,12 +477,12 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
- if (args.Length() < 1 || !args[0]->IsObject()) {
+ if (!args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a table descriptor");
return;
}
Local<Context> context = isolate->GetCurrentContext();
- Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
+ Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
// The descriptor's 'element'.
{
v8::MaybeLocal<v8::Value> maybe =
@@ -523,12 +530,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
- if (args.Length() < 1 || !args[0]->IsObject()) {
+ if (!args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a memory descriptor");
return;
}
Local<Context> context = isolate->GetCurrentContext();
- Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
+ Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
// The descriptor's 'initial'.
int64_t initial = 0;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
@@ -571,7 +578,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
static_cast<size_t>(initial);
i::Handle<i::JSArrayBuffer> buffer = i::wasm::NewArrayBuffer(
- i_isolate, size, i::FLAG_wasm_guard_pages,
+ i_isolate, size, internal::trap_handler::UseTrapHandler(),
is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
if (buffer.is_null()) {
thrower.RangeError("could not allocate memory");
@@ -590,17 +597,16 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
-#define NAME_OF_WasmMemoryObject "WebAssembly.Memory"
-#define NAME_OF_WasmModuleObject "WebAssembly.Module"
-#define NAME_OF_WasmInstanceObject "WebAssembly.Instance"
-#define NAME_OF_WasmTableObject "WebAssembly.Table"
+constexpr const char* kName_WasmMemoryObject = "WebAssembly.Memory";
+constexpr const char* kName_WasmInstanceObject = "WebAssembly.Instance";
+constexpr const char* kName_WasmTableObject = "WebAssembly.Table";
#define EXTRACT_THIS(var, WasmType) \
i::Handle<i::WasmType> var; \
{ \
i::Handle<i::Object> this_arg = Utils::OpenHandle(*args.This()); \
if (!this_arg->Is##WasmType()) { \
- thrower.TypeError("Receiver is not a " NAME_OF_##WasmType); \
+ thrower.TypeError("Receiver is not a %s", kName_##WasmType); \
return; \
} \
var = i::Handle<i::WasmType>::cast(this_arg); \
@@ -639,27 +645,24 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
- int64_t new_size64 = 0;
- if (args.Length() > 0 && !args[0]->IntegerValue(context).To(&new_size64)) {
- return;
- }
+ int64_t grow_by = 0;
+ if (!args[0]->IntegerValue(context).To(&grow_by)) return;
i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
int old_size = old_array->length();
- new_size64 += old_size;
int64_t max_size64 = receiver->maximum_length()->Number();
if (max_size64 < 0 || max_size64 > i::FLAG_wasm_max_table_size) {
max_size64 = i::FLAG_wasm_max_table_size;
}
- if (new_size64 < old_size || new_size64 > max_size64) {
- thrower.RangeError(new_size64 < old_size ? "trying to shrink table"
- : "maximum table size exceeded");
+ if (grow_by < 0 || grow_by > max_size64 - old_size) {
+ thrower.RangeError(grow_by < 0 ? "trying to shrink table"
+ : "maximum table size exceeded");
return;
}
- int new_size = static_cast<int>(new_size64);
- receiver->grow(i_isolate, static_cast<uint32_t>(new_size - old_size));
+ int new_size = static_cast<int>(old_size + grow_by);
+ receiver->Grow(i_isolate, static_cast<uint32_t>(new_size - old_size));
if (new_size != old_size) {
i::Handle<i::FixedArray> new_array =
@@ -685,7 +688,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
EXTRACT_THIS(receiver, WasmTableObject);
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
int64_t i = 0;
- if (args.Length() > 0 && !args[0]->IntegerValue(context).To(&i)) return;
+ if (!args[0]->IntegerValue(context).To(&i)) return;
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
if (i < 0 || i >= array->length()) {
thrower.RangeError("index out of bounds");
@@ -705,11 +708,6 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
- if (args.Length() < 2) {
- thrower.TypeError("Argument 1 must be null or a function");
- return;
- }
-
// Parameter 0.
int64_t index;
if (!args[0]->IntegerValue(context).To(&index)) return;
@@ -724,10 +722,15 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::wasm::TableSet(&thrower, i_isolate, receiver, index,
- value->IsNull(i_isolate)
- ? i::Handle<i::JSFunction>::null()
- : i::Handle<i::JSFunction>::cast(value));
+ if (index < 0 || index >= receiver->functions()->length()) {
+ thrower.RangeError("index out of bounds");
+ return;
+ }
+
+ i::WasmTableObject::Set(i_isolate, receiver, static_cast<int32_t>(index),
+ value->IsNull(i_isolate)
+ ? i::Handle<i::JSFunction>::null()
+ : i::Handle<i::JSFunction>::cast(value));
}
// WebAssembly.Memory.grow(num) -> num
@@ -741,10 +744,8 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
EXTRACT_THIS(receiver, WasmMemoryObject);
int64_t delta_size = 0;
- if (args.Length() < 1 || !args[0]->IntegerValue(context).To(&delta_size)) {
- thrower.TypeError("Argument 0 required, must be numeric value of pages");
- return;
- }
+ if (!args[0]->IntegerValue(context).To(&delta_size)) return;
+
int64_t max_size64 = receiver->maximum_pages();
if (max_size64 < 0 ||
max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_mem_pages)) {
@@ -769,9 +770,17 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("Unable to grow instance memory.");
return;
}
- bool free_memory = (delta_size != 0);
if (!old_buffer->is_shared()) {
- i::wasm::DetachWebAssemblyMemoryBuffer(i_isolate, old_buffer, free_memory);
+ // When delta_size == 0, or guard pages are enabled, the same backing store
+ // is used. To be spec compliant, the buffer associated with the memory
+ // object needs to be detached. Set up a new buffer with the same backing
+ // store, detach the old buffer, and do not free backing store memory.
+ bool free_memory = delta_size != 0 && !old_buffer->has_guard_region();
+ if ((!free_memory && old_size != 0) || new_size64 == 0) {
+ i::WasmMemoryObject::SetupNewBufferWithSameBackingStore(
+ i_isolate, receiver, static_cast<uint32_t>(new_size64));
+ }
+ i::wasm::DetachMemoryBuffer(i_isolate, old_buffer, free_memory);
}
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(ret);
@@ -847,7 +856,7 @@ void InstallGetter(Isolate* isolate, Handle<JSObject> object,
Local<Function>(), attributes);
}
-void WasmJs::Install(Isolate* isolate) {
+void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> context(global->native_context(), isolate);
// Install the JS API once only.
@@ -867,11 +876,11 @@ void WasmJs::Install(Isolate* isolate) {
cons->shared()->set_instance_class_name(*name);
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
- JSObject::AddProperty(global, name, webassembly, attributes);
+
PropertyAttributes ro_attributes =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(),
- v8_str(isolate, "WebAssembly"), ro_attributes);
+ JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(), name,
+ ro_attributes);
InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile, 1);
InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
@@ -883,6 +892,11 @@ void WasmJs::Install(Isolate* isolate) {
WebAssemblyInstantiateStreaming, 1);
}
+ // Expose the API on the global object if configured to do so.
+ if (exposed_on_global_object) {
+ JSObject::AddProperty(global, name, webassembly, attributes);
+ }
+
// Setup Module
Handle<JSFunction> module_constructor =
InstallFunc(isolate, webassembly, "Module", WebAssemblyModule, 1);
@@ -965,5 +979,9 @@ void WasmJs::Install(Isolate* isolate) {
JSObject::AddProperty(webassembly, isolate->factory()->RuntimeError_string(),
runtime_error, attributes);
}
+
+#undef ASSIGN
+#undef EXTRACT_THIS
+
} // namespace internal
} // namespace v8
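
The rewritten Table.grow handler above checks the delta directly instead of first forming old_size + grow_by, which keeps the two error cases (shrinking, exceeding the maximum) explicit and avoids reasoning about overflow of the sum. A standalone sketch of that check, with the same two rejection reasons (plain C++, not the V8 handler):

// Sketch: validate a table-grow request without ever computing old + delta first.
#include <cassert>
#include <cstdint>

// Returns true if growing a table of old_size entries by grow_by stays within max_size.
bool CanGrowTable(std::int64_t old_size, std::int64_t grow_by,
                  std::int64_t max_size) {
  assert(old_size >= 0 && old_size <= max_size);
  if (grow_by < 0) return false;                    // "trying to shrink table"
  if (grow_by > max_size - old_size) return false;  // "maximum table size exceeded"
  return true;
}

int main() {
  assert(CanGrowTable(10, 5, 20));
  assert(!CanGrowTable(10, -1, 20));  // shrinking is rejected
  assert(!CanGrowTable(10, 11, 20));  // 10 + 11 > 20
  return 0;
}
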
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 0ef2219b1f..926bd7647a 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -14,7 +14,8 @@ namespace internal {
// Exposes a WebAssembly API to JavaScript through the V8 API.
class WasmJs {
public:
- V8_EXPORT_PRIVATE static void Install(Isolate* isolate);
+ V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
+ bool exposed_on_global_object);
// WebAssembly.Table.
static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
new file mode 100644
index 0000000000..4ddda98189
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -0,0 +1,134 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-memory.h"
+#include "src/objects-inl.h"
+#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void* TryAllocateBackingStore(Isolate* isolate, size_t size,
+ bool enable_guard_regions, void*& allocation_base,
+ size_t& allocation_length) {
+ // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
+ // systems. It may be safer to fail instead, given that other code might do
+ // things that would be unsafe if they expected guard pages where there
+ // weren't any.
+ if (enable_guard_regions) {
+ // TODO(eholk): On Windows we want to make sure we don't commit the guard
+ // pages yet.
+
+ // We always allocate the largest possible offset into the heap, so the
+ // addressable memory after the guard page can be made inaccessible.
+ allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
+ DCHECK_EQ(0, size % base::OS::CommitPageSize());
+
+ // AllocateGuarded makes the whole region inaccessible by default.
+ allocation_base =
+ isolate->array_buffer_allocator()->Reserve(allocation_length);
+ if (allocation_base == nullptr) {
+ return nullptr;
+ }
+
+ void* memory = allocation_base;
+
+ // Make the part we care about accessible.
+ isolate->array_buffer_allocator()->SetProtection(
+ memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(size);
+
+ return memory;
+ } else {
+ void* memory =
+ size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
+ allocation_base = memory;
+ allocation_length = size;
+ return memory;
+ }
+}
+
+Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
+ size_t allocation_length,
+ void* backing_store, size_t size,
+ bool is_external,
+ bool enable_guard_regions,
+ SharedFlag shared) {
+ Handle<JSArrayBuffer> buffer =
+ isolate->factory()->NewJSArrayBuffer(shared, TENURED);
+ DCHECK_GE(kMaxInt, size);
+ if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
+ JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
+ allocation_length, backing_store, static_cast<int>(size),
+ shared);
+ buffer->set_is_neuterable(false);
+ buffer->set_is_growable(true);
+ buffer->set_has_guard_region(enable_guard_regions);
+ return buffer;
+}
+
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
+ bool enable_guard_regions,
+ SharedFlag shared) {
+ // Check against kMaxInt, since the byte length is stored as int in the
+ // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
+ // line, and we don't want to fail a CHECK then.
+ if (size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
+ size > kMaxInt) {
+ // TODO(titzer): lift restriction on maximum memory allocated here.
+ return Handle<JSArrayBuffer>::null();
+ }
+
+ void* allocation_base = nullptr; // Set by TryAllocateBackingStore
+ size_t allocation_length = 0; // Set by TryAllocateBackingStore
+ // Do not reserve memory until a non-zero size is requested.
+ void* memory =
+ (size == 0) ? nullptr
+ : TryAllocateBackingStore(isolate, size, enable_guard_regions,
+ allocation_base, allocation_length);
+
+ if (size > 0 && memory == nullptr) {
+ return Handle<JSArrayBuffer>::null();
+ }
+
+#if DEBUG
+ // Double check the API allocator actually zero-initialized the memory.
+ const byte* bytes = reinterpret_cast<const byte*>(memory);
+ for (size_t i = 0; i < size; ++i) {
+ DCHECK_EQ(0, bytes[i]);
+ }
+#endif
+
+ constexpr bool is_external = false;
+ return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
+ size, is_external, enable_guard_regions, shared);
+}
+
+void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
+ bool free_memory) {
+ const bool is_external = buffer->is_external();
+ DCHECK(!buffer->is_neuterable());
+ if (!is_external) {
+ buffer->set_is_external(true);
+ isolate->heap()->UnregisterArrayBuffer(*buffer);
+ if (free_memory) {
+ // We need to free the memory before neutering the buffer because
+ // FreeBackingStore reads buffer->allocation_base(), which is nulled out
+ // by Neuter. This means there is a dangling pointer until we neuter the
+ // buffer. Since there is no way for the user to directly call
+ // FreeBackingStore, we can ensure this is safe.
+ buffer->FreeBackingStore();
+ }
+ }
+ buffer->set_is_neuterable(true);
+ buffer->Neuter();
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
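
TryAllocateBackingStore above implements a reserve-then-commit scheme when guard regions are enabled: reserve the whole addressable range (kWasmMaxHeapOffset) through the embedder's array buffer allocator, then make only the first size bytes read-write so any out-of-bounds access faults. A POSIX-only standalone sketch of the same pattern; sizes are illustrative, and V8 itself goes through the v8::ArrayBuffer::Allocator API rather than mmap.

// Sketch: reserve a large inaccessible region, then commit only the usable prefix.
#include <sys/mman.h>

#include <cstddef>
#include <cstdio>

void* ReserveWithGuardRegion(std::size_t reserve_size, std::size_t usable_size) {
  // Reserve address space only; PROT_NONE keeps the whole range inaccessible.
  void* base = mmap(nullptr, reserve_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  // Make just the part the module may legally touch readable and writable.
  if (mprotect(base, usable_size, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reserve_size);
    return nullptr;
  }
  return base;  // accesses past usable_size hit PROT_NONE pages and fault
}

int main() {
  const std::size_t kReserve = static_cast<std::size_t>(1) << 30;  // 1 GiB, illustrative
  const std::size_t kUsable = 64 * 1024;                           // one wasm page
  void* mem = ReserveWithGuardRegion(kReserve, kUsable);
  std::printf("reservation %s\n", mem != nullptr ? "succeeded" : "failed");
  if (mem != nullptr) munmap(mem, kReserve);
  return 0;
}
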
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
new file mode 100644
index 0000000000..1054795f70
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MEMORY_H_
+#define V8_WASM_MEMORY_H_
+
+#include "src/flags.h"
+#include "src/handles.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+Handle<JSArrayBuffer> NewArrayBuffer(
+ Isolate*, size_t size, bool enable_guard_regions,
+ SharedFlag shared = SharedFlag::kNotShared);
+
+Handle<JSArrayBuffer> SetupArrayBuffer(
+ Isolate*, void* allocation_base, size_t allocation_length,
+ void* backing_store, size_t size, bool is_external,
+ bool enable_guard_regions, SharedFlag shared = SharedFlag::kNotShared);
+
+void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
+ bool free_memory);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MEMORY_H_
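
A short in-tree usage sketch of the functions declared above; it compiles only against the V8 source tree, and the flow (allocate one page, later detach without freeing) is illustrative.

// Usage sketch (V8-internal types; illustrative only).
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"

namespace v8 {
namespace internal {
namespace wasm {

Handle<JSArrayBuffer> AllocateOneWasmPage(Isolate* isolate) {
  const size_t size = WasmModule::kPageSize;  // 64 KiB
  Handle<JSArrayBuffer> buffer =
      NewArrayBuffer(isolate, size, /* enable_guard_regions */ true);
  if (buffer.is_null()) return buffer;  // allocation failed
  // ... attach the buffer to a WasmMemoryObject, run code against it ...
  // On grow (or explicit detach) the old buffer is neutered; here the backing
  // store is kept alive because a new buffer reuses it.
  DetachMemoryBuffer(isolate, buffer, /* free_memory */ false);
  return buffer;
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8
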
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 0c2976757f..997496bb29 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -226,7 +226,8 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
start_function_index_(-1),
min_memory_size_(16),
max_memory_size_(0),
- has_max_memory_size_(false) {}
+ has_max_memory_size_(false),
+ has_shared_memory_(false) {}
WasmFunctionBuilder* WasmModuleBuilder::AddFunction(FunctionSig* sig) {
functions_.push_back(new (zone_) WasmFunctionBuilder(this));
@@ -325,6 +326,8 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
max_memory_size_ = value;
}
+void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
+
void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit magic =============================================================
buffer.write_u32(kWasmMagic);
@@ -396,8 +399,13 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
{
size_t start = EmitSection(kMemorySectionCode, buffer);
buffer.write_u8(1); // memory count
- buffer.write_u8(has_max_memory_size_ ? kResizableMaximumFlag
- : kNoMaximumFlag);
+ if (has_shared_memory_) {
+ buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kSharedAndMaximum
+ : MemoryFlags::kSharedNoMaximum);
+ } else {
+ buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kMaximum
+ : MemoryFlags::kNoMaximum);
+ }
buffer.write_u32v(min_memory_size_);
if (has_max_memory_size_) {
buffer.write_u32v(max_memory_size_);
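
The flag byte written above selects between the limits encodings of the memory section. The concrete values below follow the wasm binary format (the shared variants come from the threads proposal) and are shown only for illustration; the builder itself uses the MemoryFlags enum names.

// Sketch: the limits flag byte for the memory section.
#include <cstdint>
#include <cstdio>

std::uint8_t MemoryLimitsFlag(bool has_maximum, bool shared) {
  if (shared) {
    // Shared memories are expected to declare a maximum (0x03); 0x02 is the
    // shared-without-maximum encoding from the threads proposal.
    return has_maximum ? 0x03 : 0x02;
  }
  return has_maximum ? 0x01 : 0x00;  // plain memory, with or without a maximum
}

int main() {
  std::printf("%u %u %u %u\n", unsigned{MemoryLimitsFlag(false, false)},
              unsigned{MemoryLimitsFlag(true, false)},
              unsigned{MemoryLimitsFlag(false, true)},
              unsigned{MemoryLimitsFlag(true, true)});  // prints: 0 1 2 3
  return 0;
}
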
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 2e00318043..898f996cd3 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -10,7 +10,6 @@
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
-#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -236,6 +235,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
void AddExport(Vector<const char> name, WasmFunctionBuilder* builder);
void SetMinMemorySize(uint32_t value);
void SetMaxMemorySize(uint32_t value);
+ void SetHasSharedMemory();
// Writing methods.
void WriteTo(ZoneBuffer& buffer) const;
@@ -295,6 +295,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
+ bool has_shared_memory_;
};
inline FunctionSig* WasmFunctionBuilder::signature() {
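
For context, a short in-tree sketch of how the new setter is used together with the existing memory-size setters; it compiles only inside the V8 tree and the helper name is made up.

// Usage sketch (V8-internal types; illustrative only).
#include "src/wasm/wasm-module-builder.h"

namespace v8 {
namespace internal {
namespace wasm {

void BuildSharedMemoryModule(Zone* zone, ZoneBuffer* buffer) {
  WasmModuleBuilder builder(zone);
  builder.SetMinMemorySize(1);   // pages
  builder.SetMaxMemorySize(4);   // shared memories must declare a maximum
  builder.SetHasSharedMemory();  // selects the kShared* flag in the memory section
  builder.WriteTo(*buffer);      // emits the complete module into the buffer
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8
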
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 4adc9ef375..2c8266592a 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -5,6 +5,8 @@
#include <functional>
#include <memory>
+#include "src/api.h"
+#include "src/assembler-inl.h"
#include "src/code-stubs.h"
#include "src/debug/interface-types.h"
#include "src/frames-inl.h"
@@ -14,30 +16,19 @@
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
-#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-namespace base = v8::base;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
#define TRACE(...) \
do { \
@@ -54,299 +45,61 @@ namespace base = v8::base;
if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
} while (false)
-namespace {
-
-
-void* TryAllocateBackingStore(Isolate* isolate, size_t size,
- bool enable_guard_regions, void*& allocation_base,
- size_t& allocation_length) {
- // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
- // systems. It may be safer to fail instead, given that other code might do
- // things that would be unsafe if they expected guard pages where there
- // weren't any.
- if (enable_guard_regions && kGuardRegionsSupported) {
- // TODO(eholk): On Windows we want to make sure we don't commit the guard
- // pages yet.
-
- // We always allocate the largest possible offset into the heap, so the
- // addressable memory after the guard page can be made inaccessible.
- allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
- DCHECK_EQ(0, size % base::OS::CommitPageSize());
-
- // AllocateGuarded makes the whole region inaccessible by default.
- allocation_base =
- isolate->array_buffer_allocator()->Reserve(allocation_length);
- if (allocation_base == nullptr) {
- return nullptr;
- }
-
- void* memory = allocation_base;
-
- // Make the part we care about accessible.
- isolate->array_buffer_allocator()->SetProtection(
- memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(size);
-
- return memory;
- } else {
- void* memory =
- size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
- allocation_base = memory;
- allocation_length = size;
- return memory;
- }
-}
-
-static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
- DisallowHeapAllocation no_gc;
- JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
- Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- // If a link to shared memory instances exists, update the list of memory
- // instances before the instance is destroyed.
- WasmCompiledModule* compiled_module = owner->compiled_module();
- TRACE("Finalizing %d {\n", compiled_module->instance_id());
- DCHECK(compiled_module->has_weak_wasm_module());
- WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
-
- if (trap_handler::UseTrapHandler()) {
- Handle<FixedArray> code_table = compiled_module->code_table();
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
- int index = code->trap_handler_index()->value();
- if (index >= 0) {
- trap_handler::ReleaseHandlerData(index);
- code->set_trap_handler_index(Smi::FromInt(-1));
- }
- }
- }
-
- // Since the order of finalizers is not guaranteed, it can be the case
- // that {instance->compiled_module()->module()}, which is a
- // {Managed<WasmModule>} has been collected earlier in this GC cycle.
- // Weak references to this instance won't be cleared until
- // the next GC cycle, so we need to manually break some links (such as
- // the weak references from {WasmMemoryObject::instances}.
- if (owner->has_memory_object()) {
- Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
- Handle<WasmInstanceObject> instance(owner, isolate);
- WasmMemoryObject::RemoveInstance(isolate, memory, instance);
- }
-
- // weak_wasm_module may have been cleared, meaning the module object
- // was GC-ed. In that case, there won't be any new instances created,
- // and we don't need to maintain the links between instances.
- if (!weak_wasm_module->cleared()) {
- WasmModuleObject* wasm_module =
- WasmModuleObject::cast(weak_wasm_module->value());
- WasmCompiledModule* current_template = wasm_module->compiled_module();
-
- TRACE("chain before {\n");
- TRACE_CHAIN(current_template);
- TRACE("}\n");
-
- DCHECK(!current_template->has_weak_prev_instance());
- WeakCell* next = compiled_module->maybe_ptr_to_weak_next_instance();
- WeakCell* prev = compiled_module->maybe_ptr_to_weak_prev_instance();
-
- if (current_template == compiled_module) {
- if (next == nullptr) {
- WasmCompiledModule::Reset(isolate, compiled_module);
- } else {
- WasmCompiledModule* next_compiled_module =
- WasmCompiledModule::cast(next->value());
- WasmModuleObject::cast(wasm_module)
- ->set_compiled_module(next_compiled_module);
- DCHECK_NULL(prev);
- next_compiled_module->reset_weak_prev_instance();
- }
- } else {
- DCHECK(!(prev == nullptr && next == nullptr));
- // the only reason prev or next would be cleared is if the
- // respective objects got collected, but if that happened,
- // we would have relinked the list.
- if (prev != nullptr) {
- DCHECK(!prev->cleared());
- if (next == nullptr) {
- WasmCompiledModule::cast(prev->value())->reset_weak_next_instance();
- } else {
- WasmCompiledModule::cast(prev->value())
- ->set_ptr_to_weak_next_instance(next);
- }
- }
- if (next != nullptr) {
- DCHECK(!next->cleared());
- if (prev == nullptr) {
- WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
- } else {
- WasmCompiledModule::cast(next->value())
- ->set_ptr_to_weak_prev_instance(prev);
- }
- }
- }
- TRACE("chain after {\n");
- TRACE_CHAIN(wasm_module->compiled_module());
- TRACE("}\n");
- }
- compiled_module->reset_weak_owning_instance();
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
- TRACE("}\n");
-}
-
-int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
- int offset) {
- DCHECK(!iterator.done());
- int byte_pos;
- do {
- byte_pos = iterator.source_position().ScriptOffset();
- iterator.Advance();
- } while (!iterator.done() && iterator.code_offset() <= offset);
- return byte_pos;
-}
-
-void RecordLazyCodeStats(Code* code, Counters* counters) {
- counters->wasm_lazily_compiled_functions()->Increment();
- counters->wasm_generated_code_size()->Increment(code->body_size());
- counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
-}
-
-compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- DisallowHeapAllocation no_gc;
- WasmModule* module = compiled_module->module();
-
- std::vector<GlobalHandleAddress> function_tables;
- std::vector<GlobalHandleAddress> signature_tables;
- std::vector<SignatureMap*> signature_maps;
-
- int num_function_tables = static_cast<int>(module->function_tables.size());
- for (int i = 0; i < num_function_tables; ++i) {
- FixedArray* ft = compiled_module->ptr_to_function_tables();
- FixedArray* st = compiled_module->ptr_to_signature_tables();
-
- // TODO(clemensh): defer these handles for concurrent compilation.
- function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
- signature_tables.push_back(WasmCompiledModule::GetTableValue(st, i));
- signature_maps.push_back(&module->function_tables[i].map);
- }
-
- std::vector<Handle<Code>> empty_code;
-
- compiler::ModuleEnv result = {
- module, // --
- function_tables, // --
- signature_tables, // --
- signature_maps, // --
- empty_code, // --
- BUILTIN_CODE(isolate, WasmCompileLazy), // --
- reinterpret_cast<uintptr_t>( // --
- compiled_module->GetEmbeddedMemStartOrNull()), // --
- compiled_module->GetEmbeddedMemSizeOrZero(), // --
- reinterpret_cast<uintptr_t>( // --
- compiled_module->GetGlobalsStartOrNull()) // --
- };
- return result;
-}
-
-} // namespace
-
// static
-const WasmExceptionSig wasm::WasmException::empty_sig_(0, 0, nullptr);
-
-Handle<JSArrayBuffer> wasm::SetupArrayBuffer(
- Isolate* isolate, void* allocation_base, size_t allocation_length,
- void* backing_store, size_t size, bool is_external,
- bool enable_guard_regions, SharedFlag shared) {
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer(shared);
- DCHECK_GE(kMaxInt, size);
- if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
- JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
- allocation_length, backing_store, static_cast<int>(size),
- shared);
- buffer->set_is_neuterable(false);
- buffer->set_is_growable(true);
- buffer->set_has_guard_region(enable_guard_regions);
- return buffer;
-}
-
-Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
- bool enable_guard_regions,
- SharedFlag shared) {
- // Check against kMaxInt, since the byte length is stored as int in the
- // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
- // line, and we don't want to fail a CHECK then.
- if (size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
- size > kMaxInt) {
- // TODO(titzer): lift restriction on maximum memory allocated here.
- return Handle<JSArrayBuffer>::null();
- }
-
- enable_guard_regions = enable_guard_regions && kGuardRegionsSupported;
-
- void* allocation_base = nullptr; // Set by TryAllocateBackingStore
- size_t allocation_length = 0; // Set by TryAllocateBackingStore
- // Do not reserve memory till non zero memory is encountered.
- void* memory =
- (size == 0) ? nullptr
- : TryAllocateBackingStore(isolate, size, enable_guard_regions,
- allocation_base, allocation_length);
+const WasmExceptionSig WasmException::empty_sig_(0, 0, nullptr);
- if (size > 0 && memory == nullptr) {
- return Handle<JSArrayBuffer>::null();
- }
+// static
+constexpr const char* WasmException::kRuntimeIdStr;
-#if DEBUG
- // Double check the API allocator actually zero-initialized the memory.
- const byte* bytes = reinterpret_cast<const byte*>(memory);
- for (size_t i = 0; i < size; ++i) {
- DCHECK_EQ(0, bytes[i]);
- }
-#endif
+// static
+constexpr const char* WasmException::kRuntimeValuesStr;
- constexpr bool is_external = false;
- return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, enable_guard_regions, shared);
-}
+void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
+ Handle<FixedArray> code_table) {
+ DisallowHeapAllocation no_gc;
+ std::vector<trap_handler::ProtectedInstructionData> unpacked;
-void wasm::UnpackAndRegisterProtectedInstructions(
- Isolate* isolate, Handle<FixedArray> code_table) {
for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code;
+ Object* maybe_code = code_table->get(i);
// This is sometimes undefined when we're called from cctests.
- if (!code_table->GetValue<Code>(isolate, i).ToHandle(&code)) {
+ if (maybe_code->IsUndefined(isolate)) continue;
+ Code* code = Code::cast(maybe_code);
+
+ if (code->kind() != Code::WASM_FUNCTION) {
continue;
}
- if (code->kind() != Code::WASM_FUNCTION) {
+ if (code->trap_handler_index()->value() != trap_handler::kInvalidIndex) {
+ // This function has already been registered.
continue;
}
- const intptr_t base = reinterpret_cast<intptr_t>(code->entry());
+ byte* base = code->entry();
- Zone zone(isolate->allocator(), "Wasm Module");
- ZoneVector<trap_handler::ProtectedInstructionData> unpacked(&zone);
const int mode_mask =
RelocInfo::ModeMask(RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
trap_handler::ProtectedInstructionData data;
- data.instr_offset = it.rinfo()->data();
- data.landing_offset = reinterpret_cast<intptr_t>(it.rinfo()->pc()) - base;
+ data.instr_offset = static_cast<uint32_t>(it.rinfo()->data());
+ data.landing_offset = static_cast<uint32_t>(it.rinfo()->pc() - base);
+ // Check that no over-/underflow happened.
+ DCHECK_EQ(it.rinfo()->data(), data.instr_offset);
+ DCHECK_EQ(it.rinfo()->pc() - base, data.landing_offset);
unpacked.emplace_back(data);
}
- if (unpacked.size() > 0) {
- int size = code->CodeSize();
- const int index = RegisterHandlerData(reinterpret_cast<void*>(base), size,
- unpacked.size(), &unpacked[0]);
- // TODO(eholk): if index is negative, fail.
- DCHECK(index >= 0);
- code->set_trap_handler_index(Smi::FromInt(index));
- }
+ if (unpacked.empty()) continue;
+
+ int size = code->CodeSize();
+ const int index = RegisterHandlerData(reinterpret_cast<void*>(base), size,
+ unpacked.size(), &unpacked[0]);
+ unpacked.clear();
+ // TODO(eholk): if index is negative, fail.
+ DCHECK_LE(0, index);
+ code->set_trap_handler_index(Smi::FromInt(index));
}
}
-std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
+std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
os << "#" << name.function_->func_index;
if (name.function_->name.is_set()) {
if (name.name_.start()) {
@@ -359,25 +112,11 @@ std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
return os;
}
-WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
- DisallowHeapAllocation no_gc;
- DCHECK(code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_INTERPRETER_ENTRY);
- FixedArray* deopt_data = code->deoptimization_data();
- DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
- deopt_data->length());
- Object* weak_link = deopt_data->get(0);
- DCHECK(weak_link->IsWeakCell());
- WeakCell* cell = WeakCell::cast(weak_link);
- if (cell->cleared()) return nullptr;
- return WasmInstanceObject::cast(cell->value());
-}
-
WasmModule::WasmModule(std::unique_ptr<Zone> owned)
: signature_zone(std::move(owned)) {}
-WasmFunction* wasm::GetWasmFunctionForImportWrapper(Isolate* isolate,
- Handle<Object> target) {
+WasmFunction* GetWasmFunctionForExport(Isolate* isolate,
+ Handle<Object> target) {
if (target->IsJSFunction()) {
Handle<JSFunction> func = Handle<JSFunction>::cast(target);
if (func->code()->kind() == Code::JS_TO_WASM_FUNCTION) {
@@ -390,9 +129,9 @@ WasmFunction* wasm::GetWasmFunctionForImportWrapper(Isolate* isolate,
return nullptr;
}
-Handle<Code> wasm::UnwrapImportWrapper(Handle<Object> import_wrapper) {
- Handle<JSFunction> func = Handle<JSFunction>::cast(import_wrapper);
- Handle<Code> export_wrapper_code = handle(func->code());
+Handle<Code> UnwrapExportWrapper(Handle<JSFunction> export_wrapper) {
+ Handle<Code> export_wrapper_code = handle(export_wrapper->code());
+ DCHECK_EQ(export_wrapper_code->kind(), Code::JS_TO_WASM_FUNCTION);
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
for (RelocIterator it(*export_wrapper_code, mask);; it.next()) {
DCHECK(!it.done());
@@ -415,9 +154,9 @@ Handle<Code> wasm::UnwrapImportWrapper(Handle<Object> import_wrapper) {
UNREACHABLE();
}
-void wasm::UpdateDispatchTables(Isolate* isolate,
- Handle<FixedArray> dispatch_tables, int index,
- WasmFunction* function, Handle<Code> code) {
+void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
+ int index, WasmFunction* function,
+ Handle<Code> code) {
DCHECK_EQ(0, dispatch_tables->length() % 4);
for (int i = 0; i < dispatch_tables->length(); i += 4) {
int table_index = Smi::ToInt(dispatch_tables->get(i + 1));
@@ -441,41 +180,7 @@ void wasm::UpdateDispatchTables(Isolate* isolate,
}
}
-
-void wasm::TableSet(ErrorThrower* thrower, Isolate* isolate,
- Handle<WasmTableObject> table, int64_t index,
- Handle<JSFunction> function) {
- Handle<FixedArray> array(table->functions(), isolate);
-
- if (index < 0 || index >= array->length()) {
- thrower->RangeError("index out of bounds");
- return;
- }
- int index32 = static_cast<int>(index);
-
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
-
- WasmFunction* wasm_function = nullptr;
- Handle<Code> code = Handle<Code>::null();
- Handle<Object> value = handle(isolate->heap()->null_value());
-
- if (!function.is_null()) {
- wasm_function = GetWasmFunctionForImportWrapper(isolate, function);
- code = UnwrapImportWrapper(function);
- value = Handle<Object>::cast(function);
- }
-
- UpdateDispatchTables(isolate, dispatch_tables, index32, wasm_function, code);
- array->set(index32, *value);
-}
-
-Handle<Script> wasm::GetScript(Handle<JSObject> instance) {
- WasmCompiledModule* compiled_module =
- WasmInstanceObject::cast(*instance)->compiled_module();
- return handle(compiled_module->script());
-}
-
-bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
+bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
// separate callback that includes information about the module about to be
// compiled. For the time being, pass an empty string as placeholder for the
@@ -486,75 +191,8 @@ bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
v8::Utils::ToLocal(isolate->factory()->empty_string()));
}
-void wasm::DetachWebAssemblyMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- bool free_memory) {
- const bool is_external = buffer->is_external();
- DCHECK(!buffer->is_neuterable());
- if (!is_external) {
- buffer->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(*buffer);
- if (free_memory) {
- // We need to free the memory before neutering the buffer because
- // FreeBackingStore reads buffer->allocation_base(), which is nulled out
- // by Neuter. This means there is a dangling pointer until we neuter the
- // buffer. Since there is no way for the user to directly call
- // FreeBackingStore, we can ensure this is safe.
- buffer->FreeBackingStore();
- }
- }
- buffer->set_is_neuterable(true);
- buffer->Neuter();
-}
-
-void testing::ValidateInstancesChain(Isolate* isolate,
- Handle<WasmModuleObject> module_obj,
- int instance_count) {
- CHECK_GE(instance_count, 0);
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = module_obj->compiled_module();
- CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
- *module_obj);
- Object* prev = nullptr;
- int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
- WasmCompiledModule* current_instance = compiled_module;
- while (current_instance->has_weak_next_instance()) {
- CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
- current_instance->ptr_to_weak_prev_instance()->value() == prev);
- CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
- CHECK(current_instance->ptr_to_weak_owning_instance()
- ->value()
- ->IsWasmInstanceObject());
- prev = current_instance;
- current_instance = WasmCompiledModule::cast(
- current_instance->ptr_to_weak_next_instance()->value());
- ++found_instances;
- CHECK_LE(found_instances, instance_count);
- }
- CHECK_EQ(found_instances, instance_count);
-}
-
-void testing::ValidateModuleState(Isolate* isolate,
- Handle<WasmModuleObject> module_obj) {
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = module_obj->compiled_module();
- CHECK(compiled_module->has_weak_wasm_module());
- CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
- CHECK(!compiled_module->has_weak_prev_instance());
- CHECK(!compiled_module->has_weak_next_instance());
- CHECK(!compiled_module->has_weak_owning_instance());
-}
-
-void testing::ValidateOrphanedInstance(Isolate* isolate,
- Handle<WasmInstanceObject> instance) {
- DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = instance->compiled_module();
- CHECK(compiled_module->has_weak_wasm_module());
- CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
-}
-
-Handle<JSArray> wasm::GetImports(Isolate* isolate,
- Handle<WasmModuleObject> module_object) {
+Handle<JSArray> GetImports(Isolate* isolate,
+ Handle<WasmModuleObject> module_object) {
Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
isolate);
Factory* factory = isolate->factory();
@@ -623,8 +261,8 @@ Handle<JSArray> wasm::GetImports(Isolate* isolate,
return array_object;
}
-Handle<JSArray> wasm::GetExports(Isolate* isolate,
- Handle<WasmModuleObject> module_object) {
+Handle<JSArray> GetExports(Isolate* isolate,
+ Handle<WasmModuleObject> module_object) {
Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
isolate);
Factory* factory = isolate->factory();
@@ -686,10 +324,9 @@ Handle<JSArray> wasm::GetExports(Isolate* isolate,
return array_object;
}
-Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
- Handle<WasmModuleObject> module_object,
- Handle<String> name,
- ErrorThrower* thrower) {
+Handle<JSArray> GetCustomSections(Isolate* isolate,
+ Handle<WasmModuleObject> module_object,
+ Handle<String> name, ErrorThrower* thrower) {
Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
isolate);
Factory* factory = isolate->factory();
@@ -751,15 +388,15 @@ Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
return array_object;
}
-Handle<FixedArray> wasm::DecodeLocalNames(
+Handle<FixedArray> DecodeLocalNames(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
Handle<SeqOneByteString> wire_bytes(compiled_module->module_bytes(), isolate);
LocalNames decoded_locals;
{
DisallowHeapAllocation no_gc;
- wasm::DecodeLocalNames(wire_bytes->GetChars(),
- wire_bytes->GetChars() + wire_bytes->length(),
- &decoded_locals);
+ DecodeLocalNames(wire_bytes->GetChars(),
+ wire_bytes->GetChars() + wire_bytes->length(),
+ &decoded_locals);
}
Handle<FixedArray> locals_names =
isolate->factory()->NewFixedArray(decoded_locals.max_function_index + 1);
@@ -778,379 +415,7 @@ Handle<FixedArray> wasm::DecodeLocalNames(
return locals_names;
}
-bool wasm::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
- if (bytes.start() == nullptr || bytes.length() == 0) return false;
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), true, kWasmOrigin);
- return result.ok();
-}
-
-MaybeHandle<WasmModuleObject> wasm::SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kAsmJsOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
- }
-
- // Transfer ownership to the {WasmModuleWrapper} generated in
- // {CompileToModuleObject}.
- Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
- ModuleCompiler compiler(isolate, std::move(result.val), centry_stub);
- return compiler.CompileToModuleObject(thrower, bytes, asm_js_script,
- asm_js_offset_table_bytes);
-}
-
-MaybeHandle<WasmModuleObject> wasm::SyncCompile(Isolate* isolate,
- ErrorThrower* thrower,
- const ModuleWireBytes& bytes) {
- if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
- thrower->CompileError("Wasm code generation disallowed in this context");
- return {};
- }
-
- // TODO(titzer): only make a copy of the bytes if SharedArrayBuffer
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
-
- ModuleResult result = SyncDecodeWasmModule(
- isolate, bytes_copy.start(), bytes_copy.end(), false, kWasmOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
- }
-
- // Transfer ownership to the {WasmModuleWrapper} generated in
- // {CompileToModuleObject}.
- Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
- ModuleCompiler compiler(isolate, std::move(result.val), centry_stub);
- return compiler.CompileToModuleObject(thrower, bytes_copy, Handle<Script>(),
- Vector<const byte>());
-}
-
-MaybeHandle<WasmInstanceObject> wasm::SyncInstantiate(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory,
- &InstanceFinalizer);
- return builder.Build();
-}
-
-MaybeHandle<WasmInstanceObject> wasm::SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory) {
- MaybeHandle<WasmModuleObject> module =
- wasm::SyncCompile(isolate, thrower, bytes);
- DCHECK_EQ(thrower->error(), module.is_null());
- if (module.is_null()) return {};
-
- return wasm::SyncInstantiate(isolate, thrower, module.ToHandleChecked(),
- Handle<JSReceiver>::null(),
- Handle<JSArrayBuffer>::null());
-}
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-void RejectPromise(Isolate* isolate, Handle<Context> context,
- ErrorThrower& thrower, Handle<JSPromise> promise) {
- v8::Local<v8::Promise::Resolver> resolver =
- v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
- auto maybe = resolver->Reject(v8::Utils::ToLocal(context),
- v8::Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-void ResolvePromise(Isolate* isolate, Handle<Context> context,
- Handle<JSPromise> promise, Handle<Object> result) {
- v8::Local<v8::Promise::Resolver> resolver =
- v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
- auto maybe = resolver->Resolve(v8::Utils::ToLocal(context),
- v8::Utils::ToLocal(result));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-void wasm::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports) {
- ErrorThrower thrower(isolate, nullptr);
- MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
- isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
- }
- ResolvePromise(isolate, handle(isolate->context()), promise,
- instance_object.ToHandleChecked());
-}
-
-void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes) {
- if (!FLAG_wasm_async_compilation) {
- ErrorThrower thrower(isolate, "WasmCompile");
- // Compile the module.
- MaybeHandle<WasmModuleObject> module_object =
- SyncCompile(isolate, &thrower, bytes);
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
- }
- Handle<WasmModuleObject> module = module_object.ToHandleChecked();
- ResolvePromise(isolate, handle(isolate->context()), promise, module);
- return;
- }
-
- // Make a copy of the wire bytes in case the user program changes them
- // during asynchronous compilation.
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- isolate->wasm_compilation_manager()->StartAsyncCompileJob(
- isolate, std::move(copy), bytes.length(), handle(isolate->context()),
- promise);
-}
-
-Handle<Code> wasm::CompileLazy(Isolate* isolate) {
- HistogramTimerScope lazy_time_scope(
- isolate->counters()->wasm_lazy_compilation_time());
-
- // Find the wasm frame which triggered the lazy compile, to get the wasm
- // instance.
- StackFrameIterator it(isolate);
- // First frame: C entry stub.
- DCHECK(!it.done());
- DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
- it.Advance();
- // Second frame: WasmCompileLazy builtin.
- DCHECK(!it.done());
- Handle<Code> lazy_compile_code(it.frame()->LookupCode(), isolate);
- DCHECK_EQ(Builtins::kWasmCompileLazy, lazy_compile_code->builtin_index());
- Handle<WasmInstanceObject> instance;
- Handle<FixedArray> exp_deopt_data;
- int func_index = -1;
- if (lazy_compile_code->deoptimization_data()->length() > 0) {
- // Then it's an indirect call or via JS->wasm wrapper.
- DCHECK_LE(2, lazy_compile_code->deoptimization_data()->length());
- exp_deopt_data = handle(lazy_compile_code->deoptimization_data(), isolate);
- auto* weak_cell = WeakCell::cast(exp_deopt_data->get(0));
- instance = handle(WasmInstanceObject::cast(weak_cell->value()), isolate);
- func_index = Smi::ToInt(exp_deopt_data->get(1));
- }
- it.Advance();
- // Third frame: The calling wasm code or js-to-wasm wrapper.
- DCHECK(!it.done());
- DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
- Handle<Code> caller_code = handle(it.frame()->LookupCode(), isolate);
- if (it.frame()->is_js_to_wasm()) {
- DCHECK(!instance.is_null());
- } else if (instance.is_null()) {
- // Then this is a direct call (otherwise we would have attached the instance
- // via deopt data to the lazy compile stub). Just use the instance of the
- // caller.
- instance = handle(wasm::GetOwningWasmInstance(*caller_code), isolate);
- }
- int offset =
- static_cast<int>(it.frame()->pc() - caller_code->instruction_start());
-  // Only patch the caller code if this is *not* an indirect call.
- // exp_deopt_data will be null if the called function is not exported at all,
- // and its length will be <= 2 if all entries in tables were already patched.
- // Note that this check is conservative: If the first call to an exported
- // function is direct, we will just patch the export tables, and only on the
- // second call we will patch the caller.
- bool patch_caller = caller_code->kind() == Code::JS_TO_WASM_FUNCTION ||
- exp_deopt_data.is_null() || exp_deopt_data->length() <= 2;
-
- Handle<Code> compiled_code = WasmCompiledModule::CompileLazy(
- isolate, instance, caller_code, offset, func_index, patch_caller);
- if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
- // See EnsureExportedLazyDeoptData: exp_deopt_data[2...(len-1)] are pairs of
- // <export_table, index> followed by undefined values.
- // Use this information here to patch all export tables.
- DCHECK_EQ(0, exp_deopt_data->length() % 2);
- for (int idx = 2, end = exp_deopt_data->length(); idx < end; idx += 2) {
- if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
- FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
- int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
- DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
- exp_table->set(exp_index, *compiled_code);
- }
- // After processing, remove the list of exported entries, such that we don't
- // do the patching redundantly.
- Handle<FixedArray> new_deopt_data =
- isolate->factory()->CopyFixedArrayUpTo(exp_deopt_data, 2, TENURED);
- lazy_compile_code->set_deoptimization_data(*new_deopt_data);
- }
-
- return compiled_code;
-}
-
-void LazyCompilationOrchestrator::CompileFunction(
- Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
- if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
- Code::WASM_FUNCTION) {
- return;
- }
-
- compiler::ModuleEnv module_env =
- CreateModuleEnvFromCompiledModule(isolate, compiled_module);
-
- const uint8_t* module_start = compiled_module->module_bytes()->GetChars();
-
- const WasmFunction* func = &module_env.module->functions[func_index];
- wasm::FunctionBody body{func->sig, func->code.offset(),
- module_start + func->code.offset(),
- module_start + func->code.end_offset()};
- // TODO(wasm): Refactor this to only get the name if it is really needed for
- // tracing / debugging.
- std::string func_name;
- {
- wasm::WasmName name = Vector<const char>::cast(
- compiled_module->GetRawFunctionName(func_index));
- // Copy to std::string, because the underlying string object might move on
- // the heap.
- func_name.assign(name.start(), static_cast<size_t>(name.length()));
- }
- ErrorThrower thrower(isolate, "WasmLazyCompile");
- compiler::WasmCompilationUnit unit(isolate, &module_env, body,
- CStrVector(func_name.c_str()), func_index,
- CEntryStub(isolate, 1).GetCode());
- unit.ExecuteCompilation();
- MaybeHandle<Code> maybe_code = unit.FinishCompilation(&thrower);
-
- // If there is a pending error, something really went wrong. The module was
- // verified before starting execution with lazy compilation.
- // This might be OOM, but then we cannot continue execution anyway.
- // TODO(clemensh): According to the spec, we can actually skip validation at
- // module creation time, and return a function that always traps here.
- CHECK(!thrower.error());
- Handle<Code> code = maybe_code.ToHandleChecked();
-
- Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
- Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
- // TODO(wasm): Introduce constants for the indexes in wasm deopt data.
- deopt_data->set(0, *weak_instance);
- deopt_data->set(1, Smi::FromInt(func_index));
- code->set_deoptimization_data(*deopt_data);
-
- DCHECK_EQ(Builtins::kWasmCompileLazy,
- Code::cast(compiled_module->code_table()->get(func_index))
- ->builtin_index());
- compiled_module->code_table()->set(func_index, *code);
-
- // Now specialize the generated code for this instance.
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
- code_specialization.RelocateDirectCalls(instance);
- code_specialization.ApplyToWasmCode(*code, SKIP_ICACHE_FLUSH);
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- RecordLazyCodeStats(*code, isolate->counters());
-}
-
-Handle<Code> LazyCompilationOrchestrator::CompileLazy(
- Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
- int call_offset, int exported_func_index, bool patch_caller) {
- struct NonCompiledFunction {
- int offset;
- int func_index;
- };
- std::vector<NonCompiledFunction> non_compiled_functions;
- int func_to_return_idx = exported_func_index;
- wasm::Decoder decoder(nullptr, nullptr);
- bool is_js_to_wasm = caller->kind() == Code::JS_TO_WASM_FUNCTION;
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
-
- if (is_js_to_wasm) {
- non_compiled_functions.push_back({0, exported_func_index});
- } else if (patch_caller) {
- DisallowHeapAllocation no_gc;
- SeqOneByteString* module_bytes = compiled_module->module_bytes();
- SourcePositionTableIterator source_pos_iterator(
- caller->SourcePositionTable());
- DCHECK_EQ(2, caller->deoptimization_data()->length());
- int caller_func_index = Smi::ToInt(caller->deoptimization_data()->get(1));
- const byte* func_bytes =
- module_bytes->GetChars() +
- compiled_module->module()->functions[caller_func_index].code.offset();
- for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
- it.next()) {
- Code* callee =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
- // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
- // (depending on the bool) against limits of T and then static_casts.
- size_t offset_l = it.rinfo()->pc() - caller->instruction_start();
- DCHECK_GE(kMaxInt, offset_l);
- int offset = static_cast<int>(offset_l);
- int byte_pos =
- AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
- int called_func_index =
- ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
- non_compiled_functions.push_back({offset, called_func_index});
- // Call offset one instruction after the call. Remember the last called
- // function before that offset.
- if (offset < call_offset) func_to_return_idx = called_func_index;
- }
- }
-
- // TODO(clemensh): compile all functions in non_compiled_functions in
- // background, wait for func_to_return_idx.
- CompileFunction(isolate, instance, func_to_return_idx);
-
- if (is_js_to_wasm || patch_caller) {
- DisallowHeapAllocation no_gc;
- // Now patch the code object with all functions which are now compiled.
- int idx = 0;
- for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
- it.next()) {
- Code* callee =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
- DCHECK_GT(non_compiled_functions.size(), idx);
- int called_func_index = non_compiled_functions[idx].func_index;
- // Check that the callee agrees with our assumed called_func_index.
- DCHECK_IMPLIES(callee->deoptimization_data()->length() > 0,
- Smi::ToInt(callee->deoptimization_data()->get(1)) ==
- called_func_index);
- if (is_js_to_wasm) {
- DCHECK_EQ(func_to_return_idx, called_func_index);
- } else {
- DCHECK_EQ(non_compiled_functions[idx].offset,
- it.rinfo()->pc() - caller->instruction_start());
- }
- ++idx;
- Handle<Code> callee_compiled(
- Code::cast(compiled_module->code_table()->get(called_func_index)));
- if (callee_compiled->builtin_index() == Builtins::kWasmCompileLazy) {
- DCHECK_NE(func_to_return_idx, called_func_index);
- continue;
- }
- DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
- it.rinfo()->set_target_address(isolate,
- callee_compiled->instruction_start());
- }
- DCHECK_EQ(non_compiled_functions.size(), idx);
- }
-
- Code* ret =
- Code::cast(compiled_module->code_table()->get(func_to_return_idx));
- DCHECK_EQ(Code::WASM_FUNCTION, ret->kind());
- return handle(ret, isolate);
-}
-
-const char* wasm::ExternalKindName(WasmExternalKind kind) {
+const char* ExternalKindName(WasmExternalKind kind) {
switch (kind) {
case kExternalFunction:
return "function";
@@ -1163,3 +428,11 @@ const char* wasm::ExternalKindName(WasmExternalKind kind) {
}
return "unknown";
}
+
+#undef TRACE
+#undef TRACE_CHAIN
+#undef TRACE_COMPILE
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
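
The removed wasm::CompileLazy above patches export tables by reading its stub's deoptimization data: slots 2..(len-1) hold <export_table, index> pairs followed by undefined values, and patching stops at the first undefined entry. Below is a standalone sketch of that scan, not part of this patch, with plain integers standing in for heap objects and -1 standing in for undefined:

#include <cstdio>
#include <vector>

int main() {
  // Slots 0 and 1 stand in for the weak instance link and the function
  // index; slots 2.. hold <table_id, export_index> pairs followed by -1
  // ("undefined") entries for pairs that were already patched.
  const int kUndefined = -1;
  std::vector<int> exp_deopt_data = {0, 5, /* pairs: */ 7, 0, 7, 3,
                                     kUndefined, kUndefined};
  for (size_t idx = 2; idx + 1 < exp_deopt_data.size(); idx += 2) {
    if (exp_deopt_data[idx] == kUndefined) break;  // rest already patched
    std::printf("patch export table %d at index %d\n", exp_deopt_data[idx],
                exp_deopt_data[idx + 1]);
  }
  return 0;
}
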
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 63edd5f865..a45d421ee8 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -7,13 +7,13 @@
#include <memory>
-#include "src/api.h"
#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/managed.h"
#include "src/parsing/preparse-data.h"
+#include "src/wasm/decoder.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-opcodes.h"
@@ -41,56 +41,6 @@ enum WasmExternalKind {
kExternalGlobal = 3
};
-// Representation of an initializer expression.
-struct WasmInitExpr {
- enum WasmInitKind {
- kNone,
- kGlobalIndex,
- kI32Const,
- kI64Const,
- kF32Const,
- kF64Const
- } kind;
-
- union {
- int32_t i32_const;
- int64_t i64_const;
- float f32_const;
- double f64_const;
- uint32_t global_index;
- } val;
-
- WasmInitExpr() : kind(kNone) {}
- explicit WasmInitExpr(int32_t v) : kind(kI32Const) { val.i32_const = v; }
- explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
- explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
- explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
- WasmInitExpr(WasmInitKind kind, uint32_t global_index) : kind(kGlobalIndex) {
- val.global_index = global_index;
- }
-};
-
-// Reference to a string in the wire bytes.
-class WireBytesRef {
- public:
- WireBytesRef() : WireBytesRef(0, 0) {}
- WireBytesRef(uint32_t offset, uint32_t length)
- : offset_(offset), length_(length) {
- DCHECK_IMPLIES(offset_ == 0, length_ == 0);
- DCHECK_LE(offset_, offset_ + length_); // no uint32_t overflow.
- }
-
- uint32_t offset() const { return offset_; }
- uint32_t length() const { return length_; }
- uint32_t end_offset() const { return offset_ + length_; }
- bool is_empty() const { return length_ == 0; }
- bool is_set() const { return offset_ != 0; }
-
- private:
- uint32_t offset_;
- uint32_t length_;
-};
-
// Static representation of a wasm function.
struct WasmFunction {
FunctionSig* sig; // signature of the function.
@@ -119,9 +69,14 @@ typedef FunctionSig WasmExceptionSig;
struct WasmException {
explicit WasmException(const WasmExceptionSig* sig = &empty_sig_)
: sig(sig) {}
+ FunctionSig* ToFunctionSig() const { return const_cast<FunctionSig*>(sig); }
const WasmExceptionSig* sig; // type signature of the exception.
+ // Used to hold data on runtime exceptions.
+ static constexpr const char* kRuntimeIdStr = "WasmExceptionRuntimeId";
+ static constexpr const char* kRuntimeValuesStr = "WasmExceptionValues";
+
private:
static const WasmExceptionSig empty_sig_;
};
@@ -184,13 +139,16 @@ struct V8_EXPORT_PRIVATE WasmModule {
static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
+ static constexpr int kInvalidExceptionTag = -1;
+
std::unique_ptr<Zone> signature_zone;
uint32_t initial_pages = 0; // initial size of the memory in 64k pages
uint32_t maximum_pages = 0; // maximum size of the memory in 64k pages
+ bool has_shared_memory = false; // true if memory is a SharedArrayBuffer
bool has_maximum_pages = false; // true if there is a maximum memory size
- bool has_memory = false; // true if the memory was defined or imported
- bool mem_export = false; // true if the memory is exported
- int start_function_index = -1; // start function, >= 0 if any
+ bool has_memory = false; // true if the memory was defined or imported
+ bool mem_export = false; // true if the memory is exported
+ int start_function_index = -1; // start function, >= 0 if any
std::vector<WasmGlobal> globals;
uint32_t globals_size = 0;
@@ -272,12 +230,13 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
function->code.end_offset());
}
+ Vector<const byte> module_bytes() const { return module_bytes_; }
const byte* start() const { return module_bytes_.start(); }
const byte* end() const { return module_bytes_.end(); }
size_t length() const { return module_bytes_.length(); }
private:
- const Vector<const byte> module_bytes_;
+ Vector<const byte> module_bytes_;
};
// A helper for printing out the names of functions.
@@ -295,11 +254,6 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
// If no debug info exists yet, it is created automatically.
Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
-// Get the script of the wasm module. If the origin of the module is asm.js, the
-// returned Script will be a JavaScript Script of Script::TYPE_NORMAL, otherwise
-// it's of type TYPE_WASM.
-Handle<Script> GetScript(Handle<JSObject> instance);
-
V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
ModuleOrigin origin, Handle<Script> asm_js_script,
@@ -321,117 +275,21 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
// function index, the inner one by the local index.
Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmCompiledModule>);
-// Assumed to be called with a code object associated to a wasm module instance.
-// Intended to be called from runtime functions.
-// Returns nullptr on failing to get owning instance.
-WasmInstanceObject* GetOwningWasmInstance(Code* code);
-
-Handle<JSArrayBuffer> NewArrayBuffer(
- Isolate*, size_t size, bool enable_guard_regions,
- SharedFlag shared = SharedFlag::kNotShared);
-
-Handle<JSArrayBuffer> SetupArrayBuffer(
- Isolate*, void* allocation_base, size_t allocation_length,
- void* backing_store, size_t size, bool is_external,
- bool enable_guard_regions, SharedFlag shared = SharedFlag::kNotShared);
-
-void DetachWebAssemblyMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- bool free_memory);
-
+// If the target is an export wrapper, return the {WasmFunction*} corresponding
+// to the wrapped wasm function; in all other cases, return nullptr.
// The returned pointer is owned by the wasm instance target belongs to. The
// result is alive as long as the instance exists.
-WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
- Handle<Object> target);
+WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
-Handle<Code> UnwrapImportWrapper(Handle<Object> import_wrapper);
-
-void TableSet(ErrorThrower* thrower, Isolate* isolate,
- Handle<WasmTableObject> table, int64_t index,
- Handle<JSFunction> function);
+// {export_wrapper} is known to be an export.
+Handle<Code> UnwrapExportWrapper(Handle<JSFunction> export_wrapper);
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
int index, WasmFunction* function, Handle<Code> code);
-//============================================================================
-//== Compilation and instantiation ===========================================
-//============================================================================
-V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate,
- const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
-
-V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
- Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports);
-
-#if V8_TARGET_ARCH_64_BIT
-const bool kGuardRegionsSupported = true;
-#else
-const bool kGuardRegionsSupported = false;
-#endif
-
-inline bool EnableGuardRegions() {
- return FLAG_wasm_guard_pages && kGuardRegionsSupported &&
- !FLAG_experimental_wasm_threads;
-}
-
-inline SharedFlag IsShared(Handle<JSArrayBuffer> buffer) {
- if (!buffer.is_null() && buffer->is_shared()) {
- DCHECK(FLAG_experimental_wasm_threads);
- return SharedFlag::kShared;
- }
- return SharedFlag::kNotShared;
-}
-
void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
Handle<FixedArray> code_table);
-// Triggered by the WasmCompileLazy builtin.
-// Walks the stack (top three frames) to determine the wasm instance involved
-// and which function to compile.
-// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
-// patching the call site or indirect function tables.
-// Returns either the Code object that has been lazily compiled, or Illegal if
-// an error occurred. In the latter case, a pending exception has been set,
-// which will be triggered when returning from the runtime function, i.e. the
-// Illegal builtin will never be called.
-Handle<Code> CompileLazy(Isolate* isolate);
-
-// This class orchestrates the lazy compilation of wasm functions. It is
-// triggered by the WasmCompileLazy builtin.
-// It contains the logic for compiling and specializing wasm functions, and
-// patching the calling wasm code.
-// Once we support concurrent lazy compilation, this class will contain the
-// logic to actually orchestrate parallel execution of wasm compilation jobs.
-// TODO(clemensh): Implement concurrent lazy compilation.
-class LazyCompilationOrchestrator {
- void CompileFunction(Isolate*, Handle<WasmInstanceObject>, int func_index);
-
- public:
- Handle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
- Handle<Code> caller, int call_offset,
- int exported_func_index, bool patch_caller);
-};
-
const char* ExternalKindName(WasmExternalKind);
// TruncatedUserString makes it easy to output names up to a certain length, and
@@ -470,21 +328,6 @@ class TruncatedUserString {
char buffer_[kMaxLen];
};
-namespace testing {
-void ValidateInstancesChain(Isolate* isolate,
- Handle<WasmModuleObject> module_obj,
- int instance_count);
-void ValidateModuleState(Isolate* isolate, Handle<WasmModuleObject> module_obj);
-void ValidateOrphanedInstance(Isolate* isolate,
- Handle<WasmInstanceObject> instance);
-} // namespace testing
-
-void ResolvePromise(Isolate* isolate, Handle<Context> context,
- Handle<JSPromise> promise, Handle<Object> result);
-
-void RejectPromise(Isolate* isolate, Handle<Context> context,
- ErrorThrower& thrower, Handle<JSPromise> promise);
-
} // namespace wasm
} // namespace internal
} // namespace v8
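
The WireBytesRef class removed from this header (the new #include of src/wasm/decoder.h above suggests it now lives there) is just an (offset, length) reference into the module's wire bytes; consumers such as ExtractUtf8StringFromModuleBytes in wasm-objects.cc resolve it against the raw byte stream. A minimal standalone sketch of that resolution, with invented names and with validation assumed to have happened during decoding, as the DCHECKs in the removed class suggest:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Resolve an (offset, length) reference against the wire bytes. The bounds
// are assumed to have been checked by the decoder that produced the ref.
std::string GetRefAsString(const std::vector<uint8_t>& wire_bytes,
                           uint32_t offset, uint32_t length) {
  return std::string(wire_bytes.begin() + offset,
                     wire_bytes.begin() + offset + length);
}

int main() {
  std::vector<uint8_t> wire_bytes = {0x00, 0x61, 0x73, 0x6d,  // "\0asm"
                                     'm', 'a', 'i', 'n'};
  std::printf("%s\n", GetRefAsString(wire_bytes, 4, 4).c_str());  // main
  return 0;
}
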
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
new file mode 100644
index 0000000000..c435fc7913
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -0,0 +1,210 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_OBJECTS_INL_H_
+#define V8_WASM_OBJECTS_INL_H_
+
+#include "src/heap/heap-inl.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Has to be the last include (doesn't have include guards)
+#include "src/objects/object-macros.h"
+
+CAST_ACCESSOR(WasmInstanceObject)
+CAST_ACCESSOR(WasmMemoryObject)
+CAST_ACCESSOR(WasmModuleObject)
+CAST_ACCESSOR(WasmTableObject)
+
+#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
+ bool holder::has_##name() { \
+ return !READ_FIELD(this, offset)->IsUndefined(GetIsolate()); \
+ } \
+ ACCESSORS(holder, name, type, offset)
+
+// WasmModuleObject
+ACCESSORS(WasmModuleObject, compiled_module, WasmCompiledModule,
+ kCompiledModuleOffset)
+
+// WasmTableObject
+ACCESSORS(WasmTableObject, functions, FixedArray, kFunctionsOffset)
+ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
+ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
+
+// WasmMemoryObject
+ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
+SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
+OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakFixedArray,
+ kInstancesOffset)
+ACCESSORS(WasmMemoryObject, wasm_context, Managed<WasmContext>,
+ kWasmContextOffset)
+
+// WasmInstanceObject
+ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
+ kCompiledModuleOffset)
+ACCESSORS(WasmInstanceObject, exports_object, JSObject, kExportsObjectOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, WasmMemoryObject,
+ kMemoryObjectOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, JSArrayBuffer,
+ kMemoryBufferOffset)
+ACCESSORS(WasmInstanceObject, globals_buffer, JSArrayBuffer,
+ kGlobalsBufferOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
+ kDebugInfoOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
+ kFunctionTablesOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, signature_tables, FixedArray,
+ kSignatureTablesOffset)
+ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
+ kDirectlyCalledInstancesOffset)
+ACCESSORS(WasmInstanceObject, js_imports_table, FixedArray,
+ kJsImportsTableOffset)
+
+// WasmSharedModuleData
+ACCESSORS(WasmSharedModuleData, module_bytes, SeqOneByteString,
+ kModuleBytesOffset)
+ACCESSORS(WasmSharedModuleData, script, Script, kScriptOffset)
+OPTIONAL_ACCESSORS(WasmSharedModuleData, asm_js_offset_table, ByteArray,
+ kAsmJsOffsetTableOffset)
+OPTIONAL_ACCESSORS(WasmSharedModuleData, breakpoint_infos, FixedArray,
+ kBreakPointInfosOffset)
+
+OPTIONAL_ACCESSORS(WasmSharedModuleData, lazy_compilation_orchestrator, Foreign,
+ kLazyCompilationOrchestratorOffset)
+
+OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
+OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
+ kCWasmEntriesOffset)
+OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
+ kCWasmEntryMapOffset)
+
+#undef OPTIONAL_ACCESSORS
+
+#define FORWARD_SHARED(type, name) \
+ type WasmCompiledModule::name() { return shared()->name(); }
+FORWARD_SHARED(SeqOneByteString*, module_bytes)
+FORWARD_SHARED(wasm::WasmModule*, module)
+FORWARD_SHARED(Script*, script)
+FORWARD_SHARED(bool, is_asm_js)
+#undef FORWARD_SHARED
+
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
+ Handle<TYPE> WasmCompiledModule::NAME() const { \
+ return handle(ptr_to_##NAME()); \
+ } \
+ \
+ MaybeHandle<TYPE> WasmCompiledModule::maybe_##NAME() const { \
+ if (has_##NAME()) return NAME(); \
+ return MaybeHandle<TYPE>(); \
+ } \
+ \
+ TYPE* WasmCompiledModule::maybe_ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ if (!(TYPE_CHECK)) return nullptr; \
+ return TYPE::cast(obj); \
+ } \
+ \
+ TYPE* WasmCompiledModule::ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ DCHECK(TYPE_CHECK); \
+ return TYPE::cast(obj); \
+ } \
+ \
+ bool WasmCompiledModule::has_##NAME() const { \
+ Object* obj = get(ID); \
+ return TYPE_CHECK; \
+ } \
+ \
+ void WasmCompiledModule::reset_##NAME() { set_undefined(ID); } \
+ \
+ void WasmCompiledModule::set_##NAME(Handle<TYPE> value) { \
+ set_ptr_to_##NAME(*value); \
+ } \
+ void WasmCompiledModule::set_ptr_to_##NAME(TYPE* value) { set(ID, value); }
+
+#define WCM_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
+
+#define WCM_CONST_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), private)
+
+#define WCM_WASM_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj), private)
+
+#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
+ TYPE WasmCompiledModule::NAME() const { \
+ return static_cast<TYPE>(Smi::ToInt(get(kID_##NAME))); \
+ } \
+ \
+ void WasmCompiledModule::set_##NAME(TYPE value) { \
+ set(kID_##NAME, Smi::FromInt(value)); \
+ }
+
+#define WCM_WEAK_LINK(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
+ public) \
+ \
+ Handle<TYPE> WasmCompiledModule::NAME() const { \
+ return handle(TYPE::cast(weak_##NAME()->value())); \
+ }
+
+#define WCM_LARGE_NUMBER(TYPE, NAME) \
+ TYPE WasmCompiledModule::NAME() const { \
+ Object* value = get(kID_##NAME); \
+ DCHECK(value->IsMutableHeapNumber()); \
+ return static_cast<TYPE>(HeapNumber::cast(value)->value()); \
+ } \
+ \
+ void WasmCompiledModule::set_##NAME(TYPE value) { \
+ Object* number = get(kID_##NAME); \
+ DCHECK(number->IsMutableHeapNumber()); \
+ HeapNumber::cast(number)->set_value(static_cast<double>(value)); \
+ } \
+ \
+ void WasmCompiledModule::recreate_##NAME(Handle<WasmCompiledModule> obj, \
+ Factory* factory, TYPE init_val) { \
+ Handle<HeapNumber> number = factory->NewHeapNumber( \
+ static_cast<double>(init_val), MutableMode::MUTABLE, TENURED); \
+ obj->set(kID_##NAME, *number); \
+ } \
+ bool WasmCompiledModule::has_##NAME() const { \
+ return get(kID_##NAME)->IsMutableHeapNumber(); \
+ }
+
+#define DEFINITION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
+WCM_PROPERTY_TABLE(DEFINITION)
+#undef DEFINITION
+
+#undef WCM_CONST_OBJECT
+#undef WCM_LARGE_NUMBER
+#undef WCM_OBJECT_OR_WEAK
+#undef WCM_SMALL_CONST_NUMBER
+#undef WCM_WEAK_LINK
+
+uint32_t WasmTableObject::current_length() { return functions()->length(); }
+
+bool WasmTableObject::has_maximum_length() {
+ return maximum_length()->Number() >= 0;
+}
+
+bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
+
+Address WasmCompiledModule::GetGlobalsStartOrNull() const {
+ return has_globals_start() ? reinterpret_cast<Address>(globals_start())
+ : nullptr;
+}
+
+void WasmCompiledModule::ReplaceCodeTableForTesting(
+ Handle<FixedArray> testing_table) {
+ set_code_table(testing_table);
+}
+
+#include "src/objects/object-macros-undef.h"
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_OBJECTS_INL_H_
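
The new inline header above relies on X-macro style property definitions: a single OPTIONAL_ACCESSORS or WCM_* invocation stamps out a has_ predicate, a getter and a setter per field. A standalone sketch of that pattern, not the V8 macros themselves, with std::optional standing in for slots that may hold undefined:

#include <cassert>
#include <optional>
#include <string>

// Each invocation expands to has_NAME(), NAME() and set_NAME(); callers are
// expected to check has_NAME() before reading, mirroring the DCHECK-based
// contract of the real macros.
#define OPTIONAL_PROPERTY(TYPE, NAME)                      \
  bool has_##NAME() const { return NAME##_.has_value(); } \
  TYPE NAME() const { return *NAME##_; }                  \
  void set_##NAME(TYPE value) { NAME##_ = value; }        \
                                                           \
 private:                                                  \
  std::optional<TYPE> NAME##_;                             \
                                                           \
 public:

class CompiledModuleSketch {
 public:
  OPTIONAL_PROPERTY(int, globals_start)
  OPTIONAL_PROPERTY(std::string, module_bytes)
};
#undef OPTIONAL_PROPERTY

int main() {
  CompiledModuleSketch m;
  assert(!m.has_globals_start());
  m.set_globals_start(64);
  assert(m.has_globals_start() && m.globals_start() == 64);
  return 0;
}
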
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 779a2d8430..012aa6644b 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -11,9 +11,12 @@
#include "src/debug/debug-interface.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-text.h"
#define TRACE(...) \
@@ -21,24 +24,13 @@
if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
} while (false)
-#define TRACE_CHAIN(instance) \
- do { \
- instance->PrintInstancesChain(); \
- } while (false)
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
+namespace v8 {
+namespace internal {
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+// Import a few often used types from the wasm namespace.
+using GlobalHandleAddress = wasm::GlobalHandleAddress;
+using WasmFunction = wasm::WasmFunction;
+using WasmModule = wasm::WasmModule;
namespace {
@@ -151,11 +143,12 @@ bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
DisallowHeapAllocation no_gc;
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
- BodyLocalDecls locals(&tmp);
+ wasm::BodyLocalDecls locals(&tmp);
const byte* module_start = compiled_module->module_bytes()->GetChars();
WasmFunction& func = compiled_module->module()->functions[func_index];
- BytecodeIterator iterator(module_start + func.code.offset(),
- module_start + func.code.end_offset(), &locals);
+ wasm::BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(),
+ &locals);
DCHECK_LT(0, locals.encoded_size);
for (uint32_t offset : iterator.offsets()) {
if (offset > static_cast<uint32_t>(offset_in_func)) break;
@@ -180,6 +173,17 @@ Handle<WasmModuleObject> WasmModuleObject::New(
return module_object;
}
+void WasmModuleObject::ValidateStateForTesting(
+ Isolate* isolate, Handle<WasmModuleObject> module_obj) {
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module = module_obj->compiled_module();
+ CHECK(compiled_module->has_weak_wasm_module());
+ CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
+ CHECK(!compiled_module->has_weak_prev_instance());
+ CHECK(!compiled_module->has_weak_next_instance());
+ CHECK(!compiled_module->has_weak_owning_instance());
+}
+
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
int64_t maximum,
Handle<FixedArray>* js_functions) {
@@ -228,7 +232,7 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
return new_dispatch_tables;
}
-void WasmTableObject::grow(Isolate* isolate, uint32_t count) {
+void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % 4);
uint32_t old_size = functions()->length();
@@ -256,7 +260,8 @@ void WasmTableObject::grow(Isolate* isolate, uint32_t count) {
// Patch the code of the respective instance.
{
DisallowHeapAllocation no_gc;
- CodeSpecialization code_specialization(isolate, &specialization_zone);
+ wasm::CodeSpecialization code_specialization(isolate,
+ &specialization_zone);
WasmInstanceObject* instance =
WasmInstanceObject::cast(dispatch_tables->get(i));
WasmCompiledModule* compiled_module = instance->compiled_module();
@@ -282,15 +287,38 @@ void WasmTableObject::grow(Isolate* isolate, uint32_t count) {
}
}
+void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
+ int32_t index, Handle<JSFunction> function) {
+ Handle<FixedArray> array(table->functions(), isolate);
+
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+
+ WasmFunction* wasm_function = nullptr;
+ Handle<Code> code = Handle<Code>::null();
+ Handle<Object> value = isolate->factory()->null_value();
+
+ if (!function.is_null()) {
+ wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
+ // The verification that {function} is an export was done
+ // by the caller.
+ DCHECK_NOT_NULL(wasm_function);
+ code = wasm::UnwrapExportWrapper(function);
+ value = Handle<Object>::cast(function);
+ }
+
+ UpdateDispatchTables(isolate, dispatch_tables, index, wasm_function, code);
+ array->set(index, *value);
+}
+
namespace {
Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
uint32_t pages, uint32_t maximum_pages) {
+ if (!old_buffer->is_growable()) return Handle<JSArrayBuffer>::null();
Address old_mem_start = nullptr;
uint32_t old_size = 0;
if (!old_buffer.is_null()) {
- if (!old_buffer->is_growable()) return Handle<JSArrayBuffer>::null();
old_mem_start = static_cast<Address>(old_buffer->backing_store());
CHECK(old_buffer->byte_length()->ToUint32(&old_size));
}
@@ -301,46 +329,51 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
-
- // TODO(gdeepti): Change the protection here instead of allocating a new
- // buffer before guard regions are turned on, see issue #5886.
const bool enable_guard_regions = old_buffer.is_null()
- ? EnableGuardRegions()
+ ? trap_handler::UseTrapHandler()
: old_buffer->has_guard_region();
size_t new_size =
static_cast<size_t>(old_pages + pages) * WasmModule::kPageSize;
- Handle<JSArrayBuffer> new_buffer =
- NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null()) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- memcpy(new_mem_start, old_mem_start, old_size);
- return new_buffer;
+ if (enable_guard_regions && old_size != 0) {
+ DCHECK_NOT_NULL(old_buffer->backing_store());
+ if (new_size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
+ new_size > kMaxInt) {
+ return Handle<JSArrayBuffer>::null();
+ }
+ isolate->array_buffer_allocator()->SetProtection(
+ old_mem_start, new_size,
+ v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(pages * WasmModule::kPageSize);
+ Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(new_size);
+ old_buffer->set_byte_length(*length_obj);
+ return old_buffer;
+ } else {
+ Handle<JSArrayBuffer> new_buffer;
+ new_buffer = wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
+ if (new_buffer.is_null() || old_size == 0) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ memcpy(new_mem_start, old_mem_start, old_size);
+ return new_buffer;
+ }
}
// May GC, because SetSpecializationMemInfoFrom may GC
void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
instance->set_memory_buffer(*buffer);
- WasmCompiledModule::SetSpecializationMemInfoFrom(
- isolate->factory(), handle(instance->compiled_module()), buffer);
if (instance->has_debug_info()) {
instance->debug_info()->UpdateMemory(*buffer);
}
}
-void UncheckedUpdateInstanceMemory(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- Address old_mem_start, uint32_t old_size) {
- DCHECK(instance->has_memory_buffer());
- Handle<JSArrayBuffer> mem_buffer(instance->memory_buffer());
- uint32_t new_size = mem_buffer->byte_length()->Number();
- Address new_mem_start = static_cast<Address>(mem_buffer->backing_store());
+void UpdateWasmContext(WasmContext* wasm_context,
+ Handle<JSArrayBuffer> buffer) {
+ uint32_t new_mem_size = buffer->byte_length()->Number();
+ Address new_mem_start = static_cast<Address>(buffer->backing_store());
DCHECK_NOT_NULL(new_mem_start);
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
- code_specialization.RelocateMemoryReferences(old_mem_start, old_size,
- new_mem_start, new_size);
- code_specialization.ApplyToWholeInstance(*instance);
+ wasm_context->mem_start = new_mem_start;
+ wasm_context->mem_size = new_mem_size;
}
} // namespace
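
GrowMemoryBuffer above performs its bounds check in page units (old_pages > maximum_pages || pages > maximum_pages - old_pages) before computing any byte count, so the later multiplication by the 64 KiB page size cannot overflow. A standalone sketch of just that check, not part of this patch:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kPageSize = 0x10000;  // 64 KiB wasm page

// Returns the grown size in bytes, or 0 if adding `pages` would exceed
// `maximum_pages`. Checking in page units keeps old_pages + pages <=
// maximum_pages, so the multiplication below cannot wrap.
uint64_t ComputeGrownSize(uint32_t old_pages, uint32_t pages,
                          uint32_t maximum_pages) {
  if (old_pages > maximum_pages || pages > maximum_pages - old_pages) return 0;
  return uint64_t{old_pages + pages} * kPageSize;
}

int main() {
  std::printf("%llu\n", static_cast<unsigned long long>(
                            ComputeGrownSize(1, 3, 256)));     // 262144
  std::printf("%llu\n", static_cast<unsigned long long>(
                            ComputeGrownSize(250, 10, 256)));  // 0
  return 0;
}
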
@@ -352,20 +385,28 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
isolate->native_context()->wasm_memory_constructor());
auto memory_obj = Handle<WasmMemoryObject>::cast(
isolate->factory()->NewJSObject(memory_ctor, TENURED));
+ auto wasm_context = Managed<WasmContext>::Allocate(isolate);
if (buffer.is_null()) {
- const bool enable_guard_regions = EnableGuardRegions();
- buffer = SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
- enable_guard_regions);
+ const bool enable_guard_regions = trap_handler::UseTrapHandler();
+ buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
+ enable_guard_regions);
+ wasm_context->get()->mem_size = 0;
+ wasm_context->get()->mem_start = nullptr;
+ } else {
+ CHECK(buffer->byte_length()->ToUint32(&wasm_context->get()->mem_size));
+ wasm_context->get()->mem_start =
+ static_cast<Address>(buffer->backing_store());
}
memory_obj->set_array_buffer(*buffer);
memory_obj->set_maximum_pages(maximum);
+ memory_obj->set_wasm_context(*wasm_context);
return memory_obj;
}
uint32_t WasmMemoryObject::current_pages() {
uint32_t byte_length;
CHECK(array_buffer()->byte_length()->ToUint32(&byte_length));
- return byte_length / wasm::WasmModule::kPageSize;
+ return byte_length / WasmModule::kPageSize;
}
void WasmMemoryObject::AddInstance(Isolate* isolate,
@@ -388,6 +429,32 @@ void WasmMemoryObject::RemoveInstance(Isolate* isolate,
}
}
+void WasmMemoryObject::SetupNewBufferWithSameBackingStore(
+ Isolate* isolate, Handle<WasmMemoryObject> memory_object, uint32_t size) {
+  // In case of Memory.Grow(0), or Memory.Grow(delta) with guard pages enabled,
+  // set up a new buffer and update the memory object and the instances
+  // associated with it, as the current buffer will be detached.
+ Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer());
+ Handle<JSArrayBuffer> new_buffer;
+
+ constexpr bool is_external = false;
+ new_buffer = wasm::SetupArrayBuffer(
+ isolate, old_buffer->allocation_base(), old_buffer->allocation_length(),
+ old_buffer->backing_store(), size * WasmModule::kPageSize, is_external,
+ old_buffer->has_guard_region());
+ if (memory_object->has_instances()) {
+ Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
+ for (int i = 0; i < instances->Length(); i++) {
+ Object* elem = instances->Get(i);
+ if (!elem->IsWasmInstanceObject()) continue;
+ Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem),
+ isolate);
+ SetInstanceMemory(isolate, instance, new_buffer);
+ }
+ }
+ memory_object->set_array_buffer(*new_buffer);
+}
+
// static
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
@@ -399,13 +466,6 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<JSArrayBuffer> new_buffer;
// Return current size if grow by 0.
if (pages == 0) {
- // Even for pages == 0, we need to attach a new JSArrayBuffer with the same
- // backing store and neuter the old one to be spec compliant.
- new_buffer = SetupArrayBuffer(
- isolate, old_buffer->allocation_base(),
- old_buffer->allocation_length(), old_buffer->backing_store(),
- old_size, old_buffer->is_external(), old_buffer->has_guard_region());
- memory_object->set_array_buffer(*new_buffer);
DCHECK_EQ(0, old_size % WasmModule::kPageSize);
return old_size / WasmModule::kPageSize;
}
@@ -420,8 +480,13 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages);
if (new_buffer.is_null()) return -1;
+ // Verify that the values we will change are actually the ones we expect.
+ DCHECK_EQ(memory_object->wasm_context()->get()->mem_size, old_size);
+ DCHECK_EQ(memory_object->wasm_context()->get()->mem_start,
+ static_cast<Address>(old_buffer->backing_store()));
+ UpdateWasmContext(memory_object->wasm_context()->get(), new_buffer);
+
if (memory_object->has_instances()) {
- Address old_mem_start = static_cast<Address>(old_buffer->backing_store());
Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
for (int i = 0; i < instances->Length(); i++) {
Object* elem = instances->Get(i);
@@ -429,10 +494,8 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem),
isolate);
SetInstanceMemory(isolate, instance, new_buffer);
- UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
}
}
-
memory_object->set_array_buffer(*new_buffer);
DCHECK_EQ(0, old_size % WasmModule::kPageSize);
return old_size / WasmModule::kPageSize;
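
With this patch, growing an instance's memory funnels through WasmMemoryObject::Grow, whose contract in the hunks above is: grow-by-zero reports the current size in pages, success returns the previous size in pages, and exceeding the maximum (or a failed allocation) returns -1. A standalone sketch of that contract over a plain byte vector, not V8 code:

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uint32_t kPageSize = 0x10000;  // 64 KiB wasm page

struct MemorySketch {
  std::vector<uint8_t> bytes;
  uint32_t maximum_pages;

  // Mirrors the observable Grow() contract only; buffer sharing, guard
  // regions and wasm context updates are not modelled here.
  int32_t Grow(uint32_t pages) {
    uint32_t old_pages = static_cast<uint32_t>(bytes.size() / kPageSize);
    if (pages == 0) return static_cast<int32_t>(old_pages);
    if (old_pages > maximum_pages || pages > maximum_pages - old_pages)
      return -1;
    bytes.resize(size_t{old_pages + pages} * kPageSize);  // zero-filled
    return static_cast<int32_t>(old_pages);
  }
};

int main() {
  MemorySketch mem{std::vector<uint8_t>(1 * kPageSize), 4};
  int32_t a = mem.Grow(2);  // 1: previous size was one page
  int32_t b = mem.Grow(0);  // 3: grow-by-zero reports the current size
  int32_t c = mem.Grow(5);  // -1: would exceed the maximum of 4 pages
  std::printf("%d %d %d\n", a, b, c);
  return 0;
}
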
@@ -442,6 +505,11 @@ WasmModuleObject* WasmInstanceObject::module_object() {
return *compiled_module()->wasm_module();
}
+WasmContext* WasmInstanceObject::wasm_context() {
+ DCHECK(has_memory_object());
+ return memory_object()->wasm_context()->get();
+}
+
WasmModule* WasmInstanceObject::module() { return compiled_module()->module(); }
Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
@@ -477,28 +545,9 @@ int32_t WasmInstanceObject::GrowMemory(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t pages) {
if (pages == 0) return instance->GetMemorySize();
- if (instance->has_memory_object()) {
- return WasmMemoryObject::Grow(
- isolate, handle(instance->memory_object(), isolate), pages);
- }
-
- // No other instances to grow, grow just the one.
- uint32_t old_size = 0;
- Address old_mem_start = nullptr;
- Handle<JSArrayBuffer> old_buffer;
- if (instance->has_memory_buffer()) {
- old_buffer = handle(instance->memory_buffer(), isolate);
- old_size = old_buffer->byte_length()->Number();
- old_mem_start = static_cast<Address>(old_buffer->backing_store());
- }
- uint32_t maximum_pages = instance->GetMaxMemoryPages();
- Handle<JSArrayBuffer> buffer =
- GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages);
- if (buffer.is_null()) return -1;
- SetInstanceMemory(isolate, instance, buffer);
- UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
- DCHECK_EQ(0, old_size % WasmModule::kPageSize);
- return old_size / WasmModule::kPageSize;
+ DCHECK(instance->has_memory_object());
+ return WasmMemoryObject::Grow(
+ isolate, handle(instance->memory_object(), isolate), pages);
}
uint32_t WasmInstanceObject::GetMaxMemoryPages() {
@@ -518,6 +567,54 @@ uint32_t WasmInstanceObject::GetMaxMemoryPages() {
return FLAG_wasm_max_mem_pages;
}
+WasmInstanceObject* WasmInstanceObject::GetOwningInstance(Code* code) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_INTERPRETER_ENTRY);
+ FixedArray* deopt_data = code->deoptimization_data();
+ DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
+ deopt_data->length());
+ Object* weak_link = deopt_data->get(0);
+ DCHECK(weak_link->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(weak_link);
+ if (cell->cleared()) return nullptr;
+ return WasmInstanceObject::cast(cell->value());
+}
+
+void WasmInstanceObject::ValidateInstancesChainForTesting(
+ Isolate* isolate, Handle<WasmModuleObject> module_obj, int instance_count) {
+ CHECK_GE(instance_count, 0);
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module = module_obj->compiled_module();
+ CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
+ *module_obj);
+ Object* prev = nullptr;
+ int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
+ WasmCompiledModule* current_instance = compiled_module;
+ while (current_instance->has_weak_next_instance()) {
+ CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
+ current_instance->ptr_to_weak_prev_instance()->value() == prev);
+ CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
+ CHECK(current_instance->ptr_to_weak_owning_instance()
+ ->value()
+ ->IsWasmInstanceObject());
+ prev = current_instance;
+ current_instance = WasmCompiledModule::cast(
+ current_instance->ptr_to_weak_next_instance()->value());
+ ++found_instances;
+ CHECK_LE(found_instances, instance_count);
+ }
+ CHECK_EQ(found_instances, instance_count);
+}
+
+void WasmInstanceObject::ValidateOrphanedInstanceForTesting(
+ Isolate* isolate, Handle<WasmInstanceObject> instance) {
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module = instance->compiled_module();
+ CHECK(compiled_module->has_weak_wasm_module());
+ CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
+}
+
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
if (!object->IsJSFunction()) return false;
Handle<JSFunction> js_function(JSFunction::cast(object));
@@ -610,13 +707,13 @@ WasmSharedModuleData* WasmSharedModuleData::cast(Object* object) {
return reinterpret_cast<WasmSharedModuleData*>(object);
}
-wasm::WasmModule* WasmSharedModuleData::module() {
+WasmModule* WasmSharedModuleData::module() {
// We populate the kModuleWrapper field with a Foreign holding the
// address to the address of a WasmModule. This is because we can
// handle both cases when the WasmModule's lifetime is managed through
// a Managed<WasmModule> object, as well as cases when it's managed
// by the embedder. CcTests fall into the latter case.
- return *(reinterpret_cast<wasm::WasmModule**>(
+ return *(reinterpret_cast<WasmModule**>(
Foreign::cast(get(kModuleWrapperIndex))->foreign_address()));
}
@@ -673,8 +770,8 @@ void WasmSharedModuleData::ReinitializeAfterDeserialization(
const byte* end = start + module_bytes->length();
// TODO(titzer): remember the module origin in the compiled_module
// For now, we assume serialized modules did not originate from asm.js.
- ModuleResult result =
- SyncDecodeWasmModule(isolate, start, end, false, kWasmOrigin);
+ wasm::ModuleResult result =
+ SyncDecodeWasmModule(isolate, start, end, false, wasm::kWasmOrigin);
CHECK(result.ok());
CHECK_NOT_NULL(result.val);
// Take ownership of the WasmModule and immediately transfer it to the
@@ -682,8 +779,8 @@ void WasmSharedModuleData::ReinitializeAfterDeserialization(
module = result.val.release();
}
- Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::New(isolate, module);
+ Handle<wasm::WasmModuleWrapper> module_wrapper =
+ wasm::WasmModuleWrapper::From(isolate, module);
shared->set(kModuleWrapperIndex, *module_wrapper);
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
@@ -813,17 +910,16 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
Handle<WasmSharedModuleData> shared) {
if (shared->has_lazy_compilation_orchestrator()) return;
Isolate* isolate = shared->GetIsolate();
- LazyCompilationOrchestrator* orch = new LazyCompilationOrchestrator();
- Handle<Managed<LazyCompilationOrchestrator>> orch_handle =
- Managed<LazyCompilationOrchestrator>::New(isolate, orch);
+ auto orch_handle =
+ Managed<wasm::LazyCompilationOrchestrator>::Allocate(isolate);
shared->set_lazy_compilation_orchestrator(*orch_handle);
}
Handle<WasmCompiledModule> WasmCompiledModule::New(
Isolate* isolate, Handle<WasmSharedModuleData> shared,
Handle<FixedArray> code_table, Handle<FixedArray> export_wrappers,
- const std::vector<wasm::GlobalHandleAddress>& function_tables,
- const std::vector<wasm::GlobalHandleAddress>& signature_tables) {
+ const std::vector<GlobalHandleAddress>& function_tables,
+ const std::vector<GlobalHandleAddress>& signature_tables) {
DCHECK_EQ(function_tables.size(), signature_tables.size());
Handle<FixedArray> ret =
isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
@@ -881,18 +977,10 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
ret->reset_weak_next_instance();
ret->reset_weak_prev_instance();
ret->reset_weak_exported_functions();
- if (ret->has_embedded_mem_start()) {
- WasmCompiledModule::recreate_embedded_mem_start(ret, isolate->factory(),
- ret->embedded_mem_start());
- }
if (ret->has_globals_start()) {
WasmCompiledModule::recreate_globals_start(ret, isolate->factory(),
ret->globals_start());
}
- if (ret->has_embedded_mem_size()) {
- WasmCompiledModule::recreate_embedded_mem_size(ret, isolate->factory(),
- ret->embedded_mem_size());
- }
return ret;
}
@@ -924,20 +1012,10 @@ void WasmCompiledModule::Reset(Isolate* isolate,
Object* undefined = *isolate->factory()->undefined_value();
Object* fct_obj = compiled_module->ptr_to_code_table();
if (fct_obj != nullptr && fct_obj != undefined) {
- uint32_t old_mem_size = compiled_module->GetEmbeddedMemSizeOrZero();
- // We use default_mem_size throughout, as the mem size of an uninstantiated
- // module, because if we can statically prove a memory access is over
- // bounds, we'll codegen a trap. See {WasmGraphBuilder::BoundsCheckMem}
- uint32_t default_mem_size = compiled_module->default_mem_size();
- Address old_mem_start = compiled_module->GetEmbeddedMemStartOrNull();
-
// Patch code to update memory references, global references, and function
// table references.
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
-
- code_specialization.RelocateMemoryReferences(old_mem_start, old_mem_size,
- nullptr, default_mem_size);
+ wasm::CodeSpecialization code_specialization(isolate, &specialization_zone);
if (compiled_module->has_globals_start()) {
Address globals_start =
@@ -998,7 +1076,6 @@ void WasmCompiledModule::Reset(Isolate* isolate,
}
}
}
- compiled_module->ResetSpecializationMemInfoIfNeeded();
}
void WasmCompiledModule::InitId() {
@@ -1009,32 +1086,6 @@ void WasmCompiledModule::InitId() {
#endif
}
-void WasmCompiledModule::ResetSpecializationMemInfoIfNeeded() {
- DisallowHeapAllocation no_gc;
- if (has_embedded_mem_start()) {
- set_embedded_mem_size(default_mem_size());
- set_embedded_mem_start(0);
- }
-}
-
-void WasmCompiledModule::SetSpecializationMemInfoFrom(
- Factory* factory, Handle<WasmCompiledModule> compiled_module,
- Handle<JSArrayBuffer> buffer) {
- DCHECK(!buffer.is_null());
- size_t start_address = reinterpret_cast<size_t>(buffer->backing_store());
- uint32_t size = static_cast<uint32_t>(buffer->byte_length()->Number());
- if (!compiled_module->has_embedded_mem_start()) {
- DCHECK(!compiled_module->has_embedded_mem_size());
- WasmCompiledModule::recreate_embedded_mem_start(compiled_module, factory,
- start_address);
- WasmCompiledModule::recreate_embedded_mem_size(compiled_module, factory,
- size);
- } else {
- compiled_module->set_embedded_mem_start(start_address);
- compiled_module->set_embedded_mem_size(size);
- }
-}
-
void WasmCompiledModule::SetGlobalsStartAddressFrom(
Factory* factory, Handle<WasmCompiledModule> compiled_module,
Handle<JSArrayBuffer> buffer) {
@@ -1050,7 +1101,7 @@ void WasmCompiledModule::SetGlobalsStartAddressFrom(
MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- WireBytesRef ref) {
+ wasm::WireBytesRef ref) {
// TODO(wasm): cache strings from modules if it's a performance win.
Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
isolate);
@@ -1104,7 +1155,15 @@ bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
#define WCM_CHECK_LARGE_NUMBER(TYPE, NAME) \
WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->IsMutableHeapNumber())
WCM_PROPERTY_TABLE(WCM_CHECK)
+#undef WCM_CHECK_TYPE
+#undef WCM_CHECK_OBJECT
+#undef WCM_CHECK_CONST_OBJECT
+#undef WCM_CHECK_WASM_OBJECT
+#undef WCM_CHECK_WEAK_LINK
+#undef WCM_CHECK_SMALL_NUMBER
#undef WCM_CHECK
+#undef WCM_CHECK_SMALL_CONST_NUMBER
+#undef WCM_CHECK_LARGE_NUMBER
// All checks passed.
return true;
@@ -1116,7 +1175,7 @@ void WasmCompiledModule::PrintInstancesChain() {
for (WasmCompiledModule* current = this; current != nullptr;) {
PrintF("->%d", current->instance_id());
if (!current->has_weak_next_instance()) break;
- CHECK(!current->ptr_to_weak_next_instance()->cleared());
+ DCHECK(!current->ptr_to_weak_next_instance()->cleared());
current =
WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
}
@@ -1276,7 +1335,7 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
DCHECK(table_type == Encoded || table_type == Decoded);
if (table_type == Decoded) return offset_table;
- AsmJsOffsetsResult asm_offsets;
+ wasm::AsmJsOffsetsResult asm_offsets;
{
DisallowHeapAllocation no_gc;
const byte* bytes_start = offset_table->GetDataStartAddress();
@@ -1309,10 +1368,11 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
int idx = 0;
std::vector<WasmFunction>& wasm_funs = compiled_module->module()->functions;
for (int func = 0; func < num_functions; ++func) {
- std::vector<AsmJsOffsetEntry>& func_asm_offsets = asm_offsets.val[func];
+ std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
+ asm_offsets.val[func];
if (func_asm_offsets.empty()) continue;
int func_offset = wasm_funs[num_imported_functions + func].code.offset();
- for (AsmJsOffsetEntry& e : func_asm_offsets) {
+ for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
       // Byte offsets must be strictly monotonically increasing:
DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
decoded_table->get_int(idx - kOTESize));
@@ -1329,16 +1389,24 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
} // namespace
-int WasmCompiledModule::GetAsmJsSourcePosition(
+int WasmCompiledModule::GetSourcePosition(
Handle<WasmCompiledModule> compiled_module, uint32_t func_index,
uint32_t byte_offset, bool is_at_number_conversion) {
Isolate* isolate = compiled_module->GetIsolate();
+ const WasmModule* module = compiled_module->module();
+
+ if (!module->is_asm_js()) {
+    // For non-asm.js modules, simply add the function's start offset to
+    // obtain a module-relative position.
+ return byte_offset + compiled_module->GetFunctionOffset(func_index);
+ }
+
+ // asm.js modules have an additional offset table that must be searched.
Handle<ByteArray> offset_table =
GetDecodedAsmJsOffsetTable(compiled_module, isolate);
- DCHECK_LT(func_index, compiled_module->module()->functions.size());
- uint32_t func_code_offset =
- compiled_module->module()->functions[func_index].code.offset();
+ DCHECK_LT(func_index, module->functions.size());
+ uint32_t func_code_offset = module->functions[func_index].code.offset();
uint32_t total_offset = func_code_offset + byte_offset;
// Binary search for the total byte offset.
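// A minimal illustrative sketch of the lookup above (hypothetical types and
// helper name, not actual V8 code): the decoded offset table is sorted by
// byte offset, so the source position can be found with a binary search for
// the last entry at or before total_offset.
#include <algorithm>
#include <cstdint>
#include <vector>

struct DecodedOffsetEntry {
  uint32_t byte_offset;            // Offset into the function body.
  int call_position;               // Source position of the call.
  int number_conversion_position;  // Source position after number conversion.
};

int LookupAsmJsPosition(const std::vector<DecodedOffsetEntry>& table,
                        uint32_t total_offset, bool is_at_number_conversion) {
  // Find the first entry strictly past total_offset...
  auto it = std::upper_bound(table.begin(), table.end(), total_offset,
                             [](uint32_t offset, const DecodedOffsetEntry& e) {
                               return offset < e.byte_offset;
                             });
  if (it == table.begin()) return 0;  // No entry covers this offset.
  // ...and step back to the last entry at or before it.
  const DecodedOffsetEntry& entry = *(it - 1);
  return is_at_number_conversion ? entry.number_conversion_position
                                 : entry.call_position;
}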
@@ -1437,9 +1505,10 @@ bool WasmCompiledModule::GetPossibleBreakpoints(
WasmFunction& func = functions[func_idx];
if (func.code.length() == 0) continue;
- BodyLocalDecls locals(&tmp);
- BytecodeIterator iterator(module_start + func.code.offset(),
- module_start + func.code.end_offset(), &locals);
+ wasm::BodyLocalDecls locals(&tmp);
+ wasm::BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(),
+ &locals);
DCHECK_LT(0u, locals.encoded_size);
for (uint32_t offset : iterator.offsets()) {
uint32_t total_offset = func.code.offset() + offset;
@@ -1512,8 +1581,13 @@ Handle<Code> WasmCompiledModule::CompileLazy(
isolate->set_context(*instance->compiled_module()->native_context());
Object* orch_obj =
instance->compiled_module()->shared()->lazy_compilation_orchestrator();
- LazyCompilationOrchestrator* orch =
- Managed<LazyCompilationOrchestrator>::cast(orch_obj)->get();
+ auto* orch =
+ Managed<wasm::LazyCompilationOrchestrator>::cast(orch_obj)->get();
return orch->CompileLazy(isolate, instance, caller, offset, func_index,
patch_caller);
}
+
+#undef TRACE
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 79d2db865c..86a7913d7a 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -7,13 +7,13 @@
#include "src/debug/debug.h"
#include "src/debug/interface-types.h"
+#include "src/managed.h"
#include "src/objects.h"
#include "src/objects/script.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/decoder.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-module.h"
-#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
// Has to be the last include (doesn't have include guards)
@@ -24,9 +24,11 @@ namespace internal {
namespace wasm {
class InterpretedFrame;
class WasmInterpreter;
-
+struct WasmModule;
+class SignatureMap;
typedef Address GlobalHandleAddress;
-
+using ValueType = MachineRepresentation;
+using FunctionSig = Signature<ValueType>;
} // namespace wasm
class WasmCompiledModule;
@@ -50,6 +52,17 @@ class WasmInstanceObject;
static const int k##name##Offset = \
kSize + (k##name##Index - kFieldCount) * kPointerSize;
+// Wasm context used to store the mem_size and mem_start address of the linear
+// memory. These variables can be accessed at the C++ level at graph build
+// time (e.g., initialized during instance building / changed at runtime by
+// grow_memory). The address of the WasmContext is provided to the wasm entry
+// functions using a RelocatableIntPtrConstant, and is then passed as a
+// parameter to the other wasm functions.
+struct WasmContext {
+ byte* mem_start;
+ uint32_t mem_size;
+};
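// A minimal illustrative sketch (hypothetical helper, not actual V8 code) of
// how generated code conceptually uses the two fields above: every memory
// access is bounds-checked against mem_size before dereferencing mem_start.
#include <cstdint>
#include <cstring>

// Reads a u32 from linear memory; returns false instead of trapping when the
// access would be out of bounds.
inline bool SketchLoadU32(const WasmContext* ctx, uint32_t index,
                          uint32_t* result) {
  if (uint64_t{index} + sizeof(uint32_t) > ctx->mem_size) {
    return false;  // Real generated code traps on out-of-bounds accesses.
  }
  std::memcpy(result, ctx->mem_start + index, sizeof(uint32_t));
  return true;
}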
+
// Representation of a WebAssembly.Module JavaScript-level object.
class WasmModuleObject : public JSObject {
public:
@@ -68,6 +81,9 @@ class WasmModuleObject : public JSObject {
static Handle<WasmModuleObject> New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+
+ static void ValidateStateForTesting(Isolate* isolate,
+ Handle<WasmModuleObject> module);
};
// Representation of a WebAssembly.Table JavaScript-level object.
@@ -92,9 +108,9 @@ class WasmTableObject : public JSObject {
DEF_OFFSET(MaximumLength)
DEF_OFFSET(DispatchTables)
- inline uint32_t current_length() { return functions()->length(); }
- inline bool has_maximum_length() { return maximum_length()->Number() >= 0; }
- void grow(Isolate* isolate, uint32_t count);
+ inline uint32_t current_length();
+ inline bool has_maximum_length();
+ void Grow(Isolate* isolate, uint32_t count);
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
int64_t maximum,
@@ -103,6 +119,9 @@ class WasmTableObject : public JSObject {
Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance, int table_index,
Handle<FixedArray> function_table, Handle<FixedArray> signature_table);
+
+ static void Set(Isolate* isolate, Handle<WasmTableObject> table,
+ int32_t index, Handle<JSFunction> function);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -113,11 +132,13 @@ class WasmMemoryObject : public JSObject {
DECL_ACCESSORS(array_buffer, JSArrayBuffer)
DECL_INT_ACCESSORS(maximum_pages)
DECL_OPTIONAL_ACCESSORS(instances, WeakFixedArray)
+ DECL_ACCESSORS(wasm_context, Managed<WasmContext>)
enum { // --
kArrayBufferIndex,
kMaximumPagesIndex,
kInstancesIndex,
+ kWasmContextIndex,
kFieldCount
};
@@ -125,6 +146,7 @@ class WasmMemoryObject : public JSObject {
DEF_OFFSET(ArrayBuffer)
DEF_OFFSET(MaximumPages)
DEF_OFFSET(Instances)
+ DEF_OFFSET(WasmContext)
// Add an instance to the internal (weak) list. amortized O(n).
static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
@@ -133,13 +155,15 @@ class WasmMemoryObject : public JSObject {
static void RemoveInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> object);
uint32_t current_pages();
- inline bool has_maximum_pages() { return maximum_pages() >= 0; }
+ inline bool has_maximum_pages();
static Handle<WasmMemoryObject> New(Isolate* isolate,
Handle<JSArrayBuffer> buffer,
int32_t maximum);
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
+ static void SetupNewBufferWithSameBackingStore(
+ Isolate* isolate, Handle<WasmMemoryObject> memory_object, uint32_t size);
};
// A WebAssembly.Instance JavaScript-level object.
@@ -158,6 +182,7 @@ class WasmInstanceObject : public JSObject {
// FixedArray of all instances whose code was imported
DECL_OPTIONAL_ACCESSORS(directly_called_instances, FixedArray)
+ DECL_ACCESSORS(js_imports_table, FixedArray)
enum { // --
kCompiledModuleIndex,
@@ -169,6 +194,7 @@ class WasmInstanceObject : public JSObject {
kFunctionTablesIndex,
kSignatureTablesIndex,
kDirectlyCalledInstancesIndex,
+ kJsImportsTableIndex,
kFieldCount
};
@@ -182,8 +208,10 @@ class WasmInstanceObject : public JSObject {
DEF_OFFSET(FunctionTables)
DEF_OFFSET(SignatureTables)
DEF_OFFSET(DirectlyCalledInstances)
+ DEF_OFFSET(JsImportsTable)
WasmModuleObject* module_object();
+ WasmContext* wasm_context();
V8_EXPORT_PRIVATE wasm::WasmModule* module();
// Get the debug info associated with the given wasm object.
@@ -198,15 +226,27 @@ class WasmInstanceObject : public JSObject {
uint32_t pages);
uint32_t GetMaxMemoryPages();
+
+  // Assumed to be called with a code object associated with a wasm module
+  // instance. Intended to be called from runtime functions. Returns nullptr
+  // if the owning instance cannot be determined.
+ static WasmInstanceObject* GetOwningInstance(Code* code);
+
+ static void ValidateInstancesChainForTesting(
+ Isolate* isolate, Handle<WasmModuleObject> module_obj,
+ int instance_count);
+
+ static void ValidateOrphanedInstanceForTesting(
+ Isolate* isolate, Handle<WasmInstanceObject> instance);
};
// A WASM function that is wrapped and exported to JavaScript.
class WasmExportedFunction : public JSFunction {
public:
WasmInstanceObject* instance();
- int function_index();
+ V8_EXPORT_PRIVATE int function_index();
- static WasmExportedFunction* cast(Object* object);
+ V8_EXPORT_PRIVATE static WasmExportedFunction* cast(Object* object);
static bool IsWasmExportedFunction(Object* object);
static Handle<WasmExportedFunction> New(Isolate* isolate,
@@ -214,6 +254,8 @@ class WasmExportedFunction : public JSFunction {
MaybeHandle<String> maybe_name,
int func_index, int arity,
Handle<Code> export_wrapper);
+
+ Handle<Code> GetWasmCode();
};
// Information shared by all WasmCompiledModule objects for the same module.
@@ -274,8 +316,11 @@ class WasmSharedModuleData : public FixedArray {
// with all the information necessary for re-specializing them.
//
// We specialize wasm functions to their instance by embedding:
-// - raw interior pointers into the backing store of the array buffer
-// used as memory of a particular WebAssembly.Instance object.
+// - a raw pointer to the wasm_context, which contains the size of the
+//   memory and the pointer to the backing store of the array buffer
+//   used as memory of a particular WebAssembly.Instance object. This
+//   information is then used at runtime to access memory and verify
+//   bounds-check limits.
// - bounds check limits, computed at compile time, relative to the
// size of the memory.
// - the objects representing the function tables and signature tables
@@ -307,35 +352,16 @@ class WasmCompiledModule : public FixedArray {
#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
public: \
- Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); } \
- \
- MaybeHandle<TYPE> maybe_##NAME() const { \
- if (has_##NAME()) return NAME(); \
- return MaybeHandle<TYPE>(); \
- } \
- \
- TYPE* maybe_ptr_to_##NAME() const { \
- Object* obj = get(ID); \
- if (!(TYPE_CHECK)) return nullptr; \
- return TYPE::cast(obj); \
- } \
- \
- TYPE* ptr_to_##NAME() const { \
- Object* obj = get(ID); \
- DCHECK(TYPE_CHECK); \
- return TYPE::cast(obj); \
- } \
- \
- bool has_##NAME() const { \
- Object* obj = get(ID); \
- return TYPE_CHECK; \
- } \
- \
- void reset_##NAME() { set_undefined(ID); } \
+ inline Handle<TYPE> NAME() const; \
+ inline MaybeHandle<TYPE> maybe_##NAME() const; \
+ inline TYPE* maybe_ptr_to_##NAME() const; \
+ inline TYPE* ptr_to_##NAME() const; \
+ inline bool has_##NAME() const; \
+ inline void reset_##NAME(); \
\
SETTER_MODIFIER: \
- void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
- void set_ptr_to_##NAME(TYPE* value) { set(ID, value); }
+ inline void set_##NAME(Handle<TYPE> value); \
+ inline void set_ptr_to_##NAME(TYPE* value);
#define WCM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
@@ -346,43 +372,27 @@ class WasmCompiledModule : public FixedArray {
#define WCM_WASM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj), private)
-#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
- public: \
- TYPE NAME() const { return static_cast<TYPE>(Smi::ToInt(get(kID_##NAME))); } \
- \
- private: \
- void set_##NAME(TYPE value) { set(kID_##NAME, Smi::FromInt(value)); }
+#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
+ public: \
+ inline TYPE NAME() const; \
+ \
+ private: \
+ inline void set_##NAME(TYPE value);
#define WCM_WEAK_LINK(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
public) \
\
public: \
- Handle<TYPE> NAME() const { \
- return handle(TYPE::cast(weak_##NAME()->value())); \
- }
+ inline Handle<TYPE> NAME() const;
#define WCM_LARGE_NUMBER(TYPE, NAME) \
public: \
- TYPE NAME() const { \
- Object* value = get(kID_##NAME); \
- DCHECK(value->IsMutableHeapNumber()); \
- return static_cast<TYPE>(HeapNumber::cast(value)->value()); \
- } \
- \
- void set_##NAME(TYPE value) { \
- Object* number = get(kID_##NAME); \
- DCHECK(number->IsMutableHeapNumber()); \
- HeapNumber::cast(number)->set_value(static_cast<double>(value)); \
- } \
- \
- static void recreate_##NAME(Handle<WasmCompiledModule> obj, \
- Factory* factory, TYPE init_val) { \
- Handle<HeapNumber> number = factory->NewHeapNumber( \
- static_cast<double>(init_val), MutableMode::MUTABLE, TENURED); \
- obj->set(kID_##NAME, *number); \
- } \
- bool has_##NAME() const { return get(kID_##NAME)->IsMutableHeapNumber(); }
+ inline TYPE NAME() const; \
+ inline void set_##NAME(TYPE value); \
+ inline static void recreate_##NAME(Handle<WasmCompiledModule> obj, \
+ Factory* factory, TYPE init_val); \
+ inline bool has_##NAME() const;
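// For reference, an expansion of one of these macros after this change
// (using the real initial_pages property from the table below):
//
//   WCM_SMALL_CONST_NUMBER(uint32_t, initial_pages)
//
// now only declares
//
//   public:
//    inline uint32_t initial_pages() const;
//   private:
//    inline void set_initial_pages(uint32_t value);
//
// with the bodies presumably provided out of line in wasm-objects-inl.h.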
// Add values here if they are required for creating new instances or
// for deserialization, and if they are serializable.
@@ -399,9 +409,7 @@ class WasmCompiledModule : public FixedArray {
MACRO(OBJECT, FixedArray, signature_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_signature_tables) \
- MACRO(LARGE_NUMBER, size_t, embedded_mem_start) \
MACRO(LARGE_NUMBER, size_t, globals_start) \
- MACRO(LARGE_NUMBER, uint32_t, embedded_mem_size) \
MACRO(SMALL_CONST_NUMBER, uint32_t, initial_pages) \
MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
@@ -439,27 +447,10 @@ class WasmCompiledModule : public FixedArray {
Handle<WasmCompiledModule> module);
static void Reset(Isolate* isolate, WasmCompiledModule* module);
- Address GetEmbeddedMemStartOrNull() const {
- return has_embedded_mem_start()
- ? reinterpret_cast<Address>(embedded_mem_start())
- : nullptr;
- }
-
- Address GetGlobalsStartOrNull() const {
- return has_globals_start() ? reinterpret_cast<Address>(globals_start())
- : nullptr;
- }
-
- uint32_t GetEmbeddedMemSizeOrZero() const {
- return has_embedded_mem_size() ? embedded_mem_size() : 0;
- }
+ inline Address GetGlobalsStartOrNull() const;
uint32_t default_mem_size() const;
- void ResetSpecializationMemInfoIfNeeded();
- static void SetSpecializationMemInfoFrom(
- Factory* factory, Handle<WasmCompiledModule> compiled_module,
- Handle<JSArrayBuffer> buffer);
static void SetGlobalsStartAddressFrom(
Factory* factory, Handle<WasmCompiledModule> compiled_module,
Handle<JSArrayBuffer> buffer);
@@ -470,8 +461,7 @@ class WasmCompiledModule : public FixedArray {
public:
// Allow to call method on WasmSharedModuleData also on this object.
-#define FORWARD_SHARED(type, name) \
- type name() { return shared()->name(); }
+#define FORWARD_SHARED(type, name) inline type name();
FORWARD_SHARED(SeqOneByteString*, module_bytes)
FORWARD_SHARED(wasm::WasmModule*, module)
FORWARD_SHARED(Script*, script)
@@ -524,11 +514,11 @@ class WasmCompiledModule : public FixedArray {
// Returns true if the position is valid inside this module, false otherwise.
bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
- // Get the asm.js source position from a byte offset.
- // Must only be called if the associated wasm object was created from asm.js.
- static int GetAsmJsSourcePosition(Handle<WasmCompiledModule> compiled_module,
- uint32_t func_index, uint32_t byte_offset,
- bool is_at_number_conversion);
+ // Get the source position from a given function index and byte offset,
+ // for either asm.js or pure WASM modules.
+ static int GetSourcePosition(Handle<WasmCompiledModule> compiled_module,
+ uint32_t func_index, uint32_t byte_offset,
+ bool is_at_number_conversion);
// Compute the disassembly of a wasm function.
// Returns the disassembly string and a list of <byte_offset, line, column>
@@ -576,9 +566,7 @@ class WasmCompiledModule : public FixedArray {
Handle<Code> caller, int offset,
int func_index, bool patch_caller);
- void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table) {
- set_code_table(testing_table);
- }
+ inline void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table);
static void SetTableValue(Isolate* isolate, Handle<FixedArray> table,
int index, Address value);
@@ -689,76 +677,15 @@ class WasmDebugInfo : public FixedArray {
wasm::FunctionSig*);
};
-// TODO(titzer): these should be moved to wasm-objects-inl.h
-CAST_ACCESSOR(WasmInstanceObject)
-CAST_ACCESSOR(WasmMemoryObject)
-CAST_ACCESSOR(WasmModuleObject)
-CAST_ACCESSOR(WasmTableObject)
-
-#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
- bool holder::has_##name() { \
- return !READ_FIELD(this, offset)->IsUndefined(GetIsolate()); \
- } \
- ACCESSORS(holder, name, type, offset)
-
-// WasmModuleObject
-ACCESSORS(WasmModuleObject, compiled_module, WasmCompiledModule,
- kCompiledModuleOffset)
-
-// WasmTableObject
-ACCESSORS(WasmTableObject, functions, FixedArray, kFunctionsOffset)
-ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
-ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
-
-// WasmMemoryObject
-ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
-SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
-OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakFixedArray,
- kInstancesOffset)
-
-// WasmInstanceObject
-ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
- kCompiledModuleOffset)
-ACCESSORS(WasmInstanceObject, exports_object, JSObject,
- kExportsObjectOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, WasmMemoryObject,
- kMemoryObjectOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, JSArrayBuffer,
- kMemoryBufferOffset)
-ACCESSORS(WasmInstanceObject, globals_buffer, JSArrayBuffer,
- kGlobalsBufferOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
- kDebugInfoOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
- kFunctionTablesOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, signature_tables, FixedArray,
- kSignatureTablesOffset)
-ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
- kDirectlyCalledInstancesOffset)
-
-// WasmSharedModuleData
-ACCESSORS(WasmSharedModuleData, module_bytes, SeqOneByteString,
- kModuleBytesOffset)
-ACCESSORS(WasmSharedModuleData, script, Script, kScriptOffset)
-OPTIONAL_ACCESSORS(WasmSharedModuleData, asm_js_offset_table, ByteArray,
- kAsmJsOffsetTableOffset)
-OPTIONAL_ACCESSORS(WasmSharedModuleData, breakpoint_infos, FixedArray,
- kBreakPointInfosOffset)
-
-OPTIONAL_ACCESSORS(WasmSharedModuleData, lazy_compilation_orchestrator, Foreign,
- kLazyCompilationOrchestratorOffset)
-
-OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
-OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
- kCWasmEntriesOffset)
-OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
- kCWasmEntryMapOffset)
-
-#undef OPTIONAL_ACCESSORS
#undef DECL_OOL_QUERY
#undef DECL_OOL_CAST
#undef DECL_GETTER
#undef DECL_OPTIONAL_ACCESSORS
+#undef WCM_CONST_OBJECT
+#undef WCM_LARGE_NUMBER
+#undef WCM_OBJECT_OR_WEAK
+#undef WCM_SMALL_CONST_NUMBER
+#undef WCM_WEAK_LINK
#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 10bc69dfb2..5f2507996d 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -235,6 +235,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
+ CASE_U32_OP(AtomicLoad, "atomic_load")
+ CASE_U32_OP(AtomicStore, "atomic_store")
CASE_U32_OP(AtomicAdd, "atomic_add")
CASE_U32_OP(AtomicSub, "atomic_sub")
CASE_U32_OP(AtomicAnd, "atomic_and")
@@ -248,6 +250,34 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
}
}
+#undef CASE_OP
+#undef CASE_I32_OP
+#undef CASE_I64_OP
+#undef CASE_F32_OP
+#undef CASE_F64_OP
+#undef CASE_F32x4_OP
+#undef CASE_I32x4_OP
+#undef CASE_I16x8_OP
+#undef CASE_I8x16_OP
+#undef CASE_S128_OP
+#undef CASE_S32x4_OP
+#undef CASE_S16x8_OP
+#undef CASE_S8x16_OP
+#undef CASE_S1x4_OP
+#undef CASE_S1x8_OP
+#undef CASE_S1x16_OP
+#undef CASE_INT_OP
+#undef CASE_FLOAT_OP
+#undef CASE_ALL_OP
+#undef CASE_SIMD_OP
+#undef CASE_SIMDI_OP
+#undef CASE_SIGN_OP
+#undef CASE_UNSIGNED_OP
+#undef CASE_ALL_SIGN_OP
+#undef CASE_CONVERT_OP
+#undef CASE_L32_OP
+#undef CASE_U32_OP
+
bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
switch (opcode) {
#define CHECK_PREFIX(name, opcode) case k##name##Prefix:
@@ -305,23 +335,23 @@ bool IsJSCompatibleSignature(const FunctionSig* sig) {
namespace {
#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
-
enum WasmOpcodeSig : byte {
kSigEnum_None,
FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
};
+#undef DECLARE_SIG_ENUM
#define DECLARE_SIG(name, ...) \
constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
constexpr FunctionSig kSig_##name( \
1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
-
FOREACH_SIGNATURE(DECLARE_SIG)
+#undef DECLARE_SIG
#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
-
constexpr const FunctionSig* kSimpleExprSigs[] = {
nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
+#undef DECLARE_SIG_ENTRY
// The following constexpr functions are used to initialize the constant arrays
// defined below. They must have exactly one return statement, and no switch.
@@ -375,6 +405,8 @@ CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
base::make_array<256>(GetAtomicOpcodeSigIndex);
+#undef CONSTEXPR_IF_NOT_GCC_4
+
} // namespace
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index c936958600..2401e0446c 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -14,6 +14,10 @@ namespace v8 {
namespace internal {
namespace wasm {
+// Binary encoding of the module header.
+const uint32_t kWasmMagic = 0x6d736100;
+const uint32_t kWasmVersion = 0x01;
+
// Binary encoding of local types.
enum ValueTypeCode {
kLocalVoid = 0x40,
@@ -415,6 +419,12 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(S128StoreMem, 0xfd81, s_is)
#define FOREACH_ATOMIC_OPCODE(V) \
+ V(I32AtomicLoad, 0xfe10, i_i) \
+ V(I32AtomicLoad8U, 0xfe12, i_i) \
+ V(I32AtomicLoad16U, 0xfe13, i_i) \
+ V(I32AtomicStore, 0xfe17, i_ii) \
+ V(I32AtomicStore8U, 0xfe19, i_ii) \
+ V(I32AtomicStore16U, 0xfe1a, i_ii) \
V(I32AtomicAdd, 0xfe1e, i_ii) \
V(I32AtomicAdd8U, 0xfe20, i_ii) \
V(I32AtomicAdd16U, 0xfe21, i_ii) \
@@ -646,6 +656,36 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
}
}
};
+
+// Representation of an initializer expression.
+struct WasmInitExpr {
+ enum WasmInitKind {
+ kNone,
+ kGlobalIndex,
+ kI32Const,
+ kI64Const,
+ kF32Const,
+ kF64Const
+ } kind;
+
+ union {
+ int32_t i32_const;
+ int64_t i64_const;
+ float f32_const;
+ double f64_const;
+ uint32_t global_index;
+ } val;
+
+ WasmInitExpr() : kind(kNone) {}
+ explicit WasmInitExpr(int32_t v) : kind(kI32Const) { val.i32_const = v; }
+ explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
+ explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
+ explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
+ WasmInitExpr(WasmInitKind kind, uint32_t global_index) : kind(kGlobalIndex) {
+ val.global_index = global_index;
+ }
+};
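// A short illustrative usage of the struct above (hypothetical helper, not
// actual V8 code), showing how initializer expressions for globals or data
// segment offsets are built:
inline WasmInitExpr SketchGlobalInit(bool use_global, uint32_t global_index) {
  if (use_global) {
    // References the value of another (e.g. imported) global.
    return WasmInitExpr(WasmInitExpr::kGlobalIndex, global_index);
  }
  // Plain constant: the single-argument overloads pick the kind from the
  // argument type.
  return WasmInitExpr(int32_t{0});  // kI32Const, value 0.
}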
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 9ae8c33f2f..7744b42923 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -11,8 +11,8 @@
#include "src/base/compiler-specific.h"
#include "src/utils.h"
-#include "src/handles.h"
#include "src/globals.h"
+#include "src/handles.h"
namespace v8 {
namespace internal {
@@ -24,14 +24,15 @@ namespace wasm {
// Base class for Result<T>.
class V8_EXPORT_PRIVATE ResultBase {
protected:
- ResultBase(ResultBase&& other)
- : error_offset_(other.error_offset_),
- error_msg_(std::move(other.error_msg_)) {}
ResultBase() = default;
ResultBase& operator=(ResultBase&& other) = default;
public:
+ ResultBase(ResultBase&& other)
+ : error_offset_(other.error_offset_),
+ error_msg_(std::move(other.error_msg_)) {}
+
void error(uint32_t offset, std::string error_msg);
void PRINTF_FORMAT(2, 3) error(const char* format, ...) {
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index e596d6770a..e1fea08d31 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -14,20 +14,9 @@
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone.h"
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wheader-hygiene"
-#endif
-
-using namespace v8;
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-#if __clang__
-// TODO(mostynb@opera.com): remove the using statements and these pragmas.
-#pragma clang diagnostic pop
-#endif
+namespace v8 {
+namespace internal {
+namespace wasm {
namespace {
bool IsValidFunctionName(const Vector<const char> &name) {
@@ -43,10 +32,9 @@ bool IsValidFunctionName(const Vector<const char> &name) {
} // namespace
-void wasm::PrintWasmText(const WasmModule *module,
- const ModuleWireBytes &wire_bytes, uint32_t func_index,
- std::ostream &os,
- debug::WasmDisassembly::OffsetTable *offset_table) {
+void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ uint32_t func_index, std::ostream& os,
+ debug::WasmDisassembly::OffsetTable* offset_table) {
DCHECK_NOT_NULL(module);
DCHECK_GT(module->functions.size(), func_index);
const WasmFunction *fun = &module->functions[func_index];
@@ -181,6 +169,7 @@ void wasm::PrintWasmText(const WasmModule *module,
CASE_CONST(I64, i64, int64_t)
CASE_CONST(F32, f32, float)
CASE_CONST(F64, f64, double)
+#undef CASE_CONST
#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
@@ -201,6 +190,21 @@ void wasm::PrintWasmText(const WasmModule *module,
case kExprSelect:
os << WasmOpcodes::OpcodeName(opcode);
break;
+ case kAtomicPrefix: {
+ WasmOpcode atomic_opcode = i.prefixed_opcode();
+ switch (atomic_opcode) {
+ FOREACH_ATOMIC_OPCODE(CASE_OPCODE) {
+ MemoryAccessOperand<false> operand(&i, i.pc(), kMaxUInt32);
+ os << WasmOpcodes::OpcodeName(atomic_opcode)
+ << " offset=" << operand.offset
+ << " align=" << (1ULL << operand.alignment);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
// This group is just printed by their internal opcode name, as they
// should never be shown to end-users.
@@ -211,9 +215,9 @@ void wasm::PrintWasmText(const WasmModule *module,
FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
FOREACH_SIMD_MASK_OPERAND_OPCODE(CASE_OPCODE)
FOREACH_SIMD_MEM_OPCODE(CASE_OPCODE)
- FOREACH_ATOMIC_OPCODE(CASE_OPCODE)
os << WasmOpcodes::OpcodeName(opcode);
break;
+#undef CASE_OPCODE
default:
UNREACHABLE();
@@ -225,3 +229,7 @@ void wasm::PrintWasmText(const WasmModule *module,
DCHECK_EQ(0, control_depth);
DCHECK(i.ok());
}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8