Diffstat (limited to 'deps/v8/src/wasm/wasm-code-manager.h')
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h  136
1 file changed, 97 insertions(+), 39 deletions(-)
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 65156b7457..4247350ceb 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -16,7 +16,7 @@
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
-#include "src/wasm/module-compiler.h"
+#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
@@ -30,7 +30,9 @@ namespace wasm {
class NativeModule;
class WasmCodeManager;
+class WasmEngine;
class WasmMemoryTracker;
+class WasmImportWrapperCache;
struct WasmModule;
// Sorted, disjoint and non-overlapping memory regions. A region is of the
@@ -43,8 +45,9 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
explicit DisjointAllocationPool(base::AddressRegion region)
: regions_({region}) {}
- DisjointAllocationPool(DisjointAllocationPool&& other) = default;
- DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
+ DisjointAllocationPool(DisjointAllocationPool&& other) V8_NOEXCEPT = default;
+ DisjointAllocationPool& operator=(DisjointAllocationPool&& other)
+ V8_NOEXCEPT = default;
// Merge the parameter region into this object while preserving ordering of
// the regions. The assumption is that the passed parameter is not
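
The hunk above marks the pool's move operations V8_NOEXCEPT, and the surrounding comment describes merging a region back into a sorted, disjoint set. A minimal standalone sketch of that merge idea follows; the Region and DisjointPool types are hypothetical stand-ins (V8's real implementation works on base::AddressRegion):

// Sketch of merging a region into a sorted, disjoint pool, in the
// spirit of DisjointAllocationPool. Assumes (as the class comment
// states) that the incoming region does not overlap any pooled region.
#include <cstdint>
#include <iterator>
#include <list>

struct Region {
  uintptr_t begin;
  uintptr_t size;
  uintptr_t end() const { return begin + size; }
};

class DisjointPool {
 public:
  // Insert {r}, coalescing with adjacent regions so the pool stays
  // sorted and free of overlaps.
  void Merge(Region r) {
    auto it = regions_.begin();
    while (it != regions_.end() && it->end() < r.begin) ++it;
    if (it != regions_.end() && it->end() == r.begin) {
      // {r} extends a left neighbor; maybe it also bridges to the next.
      it->size += r.size;
      auto next = std::next(it);
      if (next != regions_.end() && next->begin == it->end()) {
        it->size += next->size;
        regions_.erase(next);
      }
      return;
    }
    if (it != regions_.end() && r.end() == it->begin) {
      // {r} extends a right neighbor downward.
      it->begin = r.begin;
      it->size += r.size;
    } else {
      regions_.insert(it, r);  // standalone region
    }
  }

 private:
  std::list<Region> regions_;  // sorted, disjoint
};
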
@@ -62,7 +65,7 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
private:
std::list<base::AddressRegion> regions_;
- DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
+ DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
class V8_EXPORT_PRIVATE WasmCode final {
@@ -111,9 +114,12 @@ class V8_EXPORT_PRIVATE WasmCode final {
NativeModule* native_module() const { return native_module_; }
Tier tier() const { return tier_; }
Address constant_pool() const;
+ Address code_comments() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
size_t handler_table_offset() const { return handler_table_offset_; }
+ size_t code_comments_offset() const { return code_comments_offset_; }
+ size_t unpadded_binary_size() const { return unpadded_binary_size_; }
uint32_t stack_slots() const { return stack_slots_; }
bool is_liftoff() const { return tier_ == kLiftoff; }
bool contains(Address pc) const {
@@ -130,6 +136,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
void Validate() const;
void Print(const char* name = nullptr) const;
+ void MaybePrint(const char* name = nullptr) const;
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress) const;
@@ -140,13 +147,17 @@ class V8_EXPORT_PRIVATE WasmCode final {
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
+ static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
+ STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
+
private:
friend class NativeModule;
WasmCode(NativeModule* native_module, uint32_t index,
Vector<byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
@@ -161,11 +172,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
handler_table_offset_(handler_table_offset),
+ code_comments_offset_(code_comments_offset),
+ unpadded_binary_size_(unpadded_binary_size),
protected_instructions_(std::move(protected_instructions)),
tier_(tier) {
- DCHECK_LE(safepoint_table_offset, instructions.size());
- DCHECK_LE(constant_pool_offset, instructions.size());
- DCHECK_LE(handler_table_offset, instructions.size());
+ DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
+ DCHECK_LE(handler_table_offset, unpadded_binary_size);
+ DCHECK_LE(code_comments_offset, unpadded_binary_size);
+ DCHECK_LE(constant_pool_offset, unpadded_binary_size);
}
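
The constructor now validates every metadata offset against the new unpadded_binary_size instead of instructions.size(), since the instruction buffer may carry padding past the last meaningful byte. A sketch of the invariant being encoded; the layout struct and the final padding assertion are assumptions for illustration:

// Sketch of the layout invariant behind the new DCHECKs: all metadata
// offsets lie within the unpadded code blob, which itself is at most
// as large as the (possibly padded) instruction buffer.
#include <cassert>
#include <cstddef>

struct CodeLayout {
  size_t instructions_size;      // padded buffer size
  size_t safepoint_table_offset;
  size_t handler_table_offset;
  size_t code_comments_offset;
  size_t constant_pool_offset;
  size_t unpadded_binary_size;   // end of meaningful bytes

  void Validate() const {
    // Mirrors the DCHECKs in the WasmCode constructor.
    assert(safepoint_table_offset <= unpadded_binary_size);
    assert(handler_table_offset <= unpadded_binary_size);
    assert(code_comments_offset <= unpadded_binary_size);
    assert(constant_pool_offset <= unpadded_binary_size);
    // Assumed: padding may only extend the buffer past the unpadded end.
    assert(unpadded_binary_size <= instructions_size);
  }
};
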
// Code objects that have been registered with the global trap handler within
@@ -178,9 +192,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
// trap_handler_index.
void RegisterTrapHandlerData();
- static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
- STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
-
Vector<byte> instructions_;
OwnedVector<const byte> reloc_info_;
OwnedVector<const byte> source_position_table_;
@@ -194,6 +205,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
// conversions.
size_t safepoint_table_offset_ = 0;
size_t handler_table_offset_ = 0;
+ size_t code_comments_offset_ = 0;
+ size_t unpadded_binary_size_ = 0;
intptr_t trap_handler_index_ = -1;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
Tier tier_;
@@ -219,27 +232,18 @@ class V8_EXPORT_PRIVATE NativeModule final {
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> source_position_table,
- WasmCode::Tier tier);
+ WasmCode::Kind kind, WasmCode::Tier tier);
WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier);
- // Add an import wrapper for wasm-to-JS transitions. This method copies over
- // JS-allocated code, because we compile wrappers using a different pipeline.
- WasmCode* AddImportWrapper(Handle<Code> code, uint32_t index);
-
- // Add an interpreter entry. For the same reason as AddImportWrapper, we
- // currently compile these using a different pipeline and we can't get a
- // CodeDesc here. When adding interpreter wrappers, we do not insert them in
- // the code_table, however, we let them self-identify as the {index} function.
- WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);
-
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
@@ -259,6 +263,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
// threads executing the old code.
void PublishCode(WasmCode* code);
+ // Switch a function to an interpreter entry wrapper. When adding interpreter
+ // wrappers, we do not insert them in the code_table, however, we let them
+ // self-identify as the {index} function.
+ void PublishInterpreterEntry(WasmCode* code, uint32_t index);
+
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
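
PublishCode installs new code while other threads may still be executing the old version, and the new PublishInterpreterEntry variant does the same for interpreter wrappers without entering them in the code table. A hedged sketch of the publish-by-pointer-swap idea, reduced to a single hypothetical table slot (V8 additionally guards its table with allocation_mutex_):

#include <atomic>

struct Code { int tier = 0; };

// One slot of a hypothetical code table.
std::atomic<Code*> slot{nullptr};

void Publish(Code* code) {
  // Release ordering: a thread that loads the new pointer also observes
  // the fully initialized code object; threads still running the old
  // code are unaffected by the swap.
  slot.store(code, std::memory_order_release);
}

Code* Lookup() { return slot.load(std::memory_order_acquire); }
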
@@ -317,24 +326,31 @@ class V8_EXPORT_PRIVATE NativeModule final {
CompilationState* compilation_state() { return compilation_state_.get(); }
+ // Create a {CompilationEnv} object for compilation. Only valid as long as
+ // this {NativeModule} is alive.
+ CompilationEnv CreateCompilationEnv() const;
+
uint32_t num_functions() const {
return module_->num_declared_functions + module_->num_imported_functions;
}
uint32_t num_imported_functions() const {
return module_->num_imported_functions;
}
- bool use_trap_handler() const { return use_trap_handler_; }
+ UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
- Vector<const byte> wire_bytes() const { return wire_bytes_.as_vector(); }
- void set_wire_bytes(OwnedVector<const byte> wire_bytes) {
- wire_bytes_ = std::move(wire_bytes);
- }
+ Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
- WasmCodeManager* code_manager() const { return wasm_code_manager_; }
+ size_t committed_code_space() const { return committed_code_space_.load(); }
+
+ void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
WasmCode* Lookup(Address) const;
+ WasmImportWrapperCache* import_wrapper_cache() const {
+ return import_wrapper_cache_.get();
+ }
+
~NativeModule();
const WasmFeatures& enabled_features() const { return enabled_features_; }
@@ -347,7 +363,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
bool can_request_more, VirtualMemory code_space,
WasmCodeManager* code_manager,
- std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
+ std::shared_ptr<const WasmModule> module);
WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
const char* name = nullptr);
@@ -356,12 +372,14 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Primitive for adding code to the native module. All code added to a native
// module is owned by that module. Various callers get to decide on how the
- // code is obtained (CodeDesc vs, as a point in time, Code*), the kind,
+ // code is obtained (CodeDesc vs, as a point in time, Code), the kind,
// whether it has an index or is anonymous, etc.
WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
size_t constant_pool_offset,
+ size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table,
@@ -376,6 +394,30 @@ class V8_EXPORT_PRIVATE NativeModule final {
return {code_table_.get(), module_->num_declared_functions};
}
+ // Hold the {mutex_} when calling this method.
+ bool has_interpreter_redirection(uint32_t func_index) {
+ DCHECK_LT(func_index, num_functions());
+ DCHECK_LE(module_->num_imported_functions, func_index);
+ if (!interpreter_redirections_) return false;
+ uint32_t bitset_idx = func_index - module_->num_imported_functions;
+ uint8_t byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
+ return byte & (1 << (bitset_idx % kBitsPerByte));
+ }
+
+ // Hold the {mutex_} when calling this method.
+ void SetInterpreterRedirection(uint32_t func_index) {
+ DCHECK_LT(func_index, num_functions());
+ DCHECK_LE(module_->num_imported_functions, func_index);
+ if (!interpreter_redirections_) {
+ interpreter_redirections_.reset(
+ new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
+ kBitsPerByte]);
+ }
+ uint32_t bitset_idx = func_index - module_->num_imported_functions;
+ uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
+ byte |= 1 << (bitset_idx % kBitsPerByte);
+ }
+
// Features enabled for this module. We keep a copy of the features that
// were enabled at the time of the creation of this native module,
// to be consistent across asynchronous compilations later.
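
The two bitset helpers in the hunk above track which declared functions have been redirected to the interpreter: one bit per non-imported function, allocated lazily on the first redirection. A standalone sketch of the same indexing, in a hypothetical wrapper class (V8's versions also DCHECK the index range and require mutex_ to be held):

#include <cstdint>
#include <memory>

constexpr uint32_t kBitsPerByte = 8;  // mirrors the header's constant

class RedirectionBits {
 public:
  RedirectionBits(uint32_t num_imported, uint32_t num_declared)
      : num_imported_(num_imported), num_declared_(num_declared) {}

  bool IsRedirected(uint32_t func_index) const {
    if (!bits_) return false;  // no redirections yet
    uint32_t i = func_index - num_imported_;
    return bits_[i / kBitsPerByte] & (1 << (i % kBitsPerByte));
  }

  void SetRedirected(uint32_t func_index) {
    if (!bits_) {
      // Round up to whole bytes; make_unique value-initializes to zero.
      uint32_t bytes = (num_declared_ + kBitsPerByte - 1) / kBitsPerByte;
      bits_ = std::make_unique<uint8_t[]>(bytes);
    }
    uint32_t i = func_index - num_imported_;
    bits_[i / kBitsPerByte] |= 1 << (i % kBitsPerByte);
  }

 private:
  uint32_t num_imported_;
  uint32_t num_declared_;
  std::unique_ptr<uint8_t[]> bits_;  // null until first redirection
};
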
@@ -385,7 +427,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// AsyncCompileJob).
std::shared_ptr<const WasmModule> module_;
- OwnedVector<const byte> wire_bytes_;
+ // Wire bytes, held in a shared_ptr so they can be kept alive by the
+ // {WireBytesStorage}, held by background compile tasks.
+ std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
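
wire_bytes_ becomes a std::shared_ptr so that, as the new comment says, a WireBytesStorage held by background compile tasks can keep the bytes alive independently of the NativeModule. A small sketch of that ownership pattern, with a plain std::vector standing in for OwnedVector:

#include <cstdint>
#include <memory>
#include <thread>
#include <vector>

int main() {
  // "\0asm" wasm magic bytes as placeholder wire bytes.
  auto bytes = std::make_shared<std::vector<uint8_t>>(
      std::initializer_list<uint8_t>{0x00, 0x61, 0x73, 0x6D});

  std::thread task([keep_alive = bytes] {
    // The task's copy of the shared_ptr keeps the buffer alive even if
    // the module drops its own reference concurrently.
    (void)keep_alive->size();
  });

  bytes.reset();  // the "module" lets go; the task still owns the bytes
  task.join();
}
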
@@ -395,7 +439,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// The compilation state keeps track of compilation tasks for this module.
// Note that its destructor blocks until all tasks are finished/aborted and
// hence needs to be destructed first when this native module dies.
- std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;
+ std::unique_ptr<CompilationState> compilation_state_;
+
+ // A cache of the import wrappers, keyed on the kind and signature.
+ std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;
// This mutex protects concurrent calls to {AddCode} and friends.
mutable base::Mutex allocation_mutex_;
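
The new import_wrapper_cache_ is documented as "a cache of the import wrappers, keyed on the kind and signature". A hedged sketch of such a cache with stand-in key types; V8's WasmImportWrapperCache keys on its own kind enum and a FunctionSig, not strings:

#include <map>
#include <memory>
#include <string>
#include <utility>

enum class WrapperKind { kJSFunction, kOther };  // hypothetical kinds

struct Wrapper {};  // stand-in for a compiled WasmCode object

class ImportWrapperCache {
 public:
  // Return the cached wrapper for (kind, signature), creating it on miss.
  Wrapper* GetOrCreate(WrapperKind kind, const std::string& sig) {
    auto key = std::make_pair(kind, sig);
    auto it = cache_.find(key);
    if (it == cache_.end()) {
      // Placeholder for compiling the wrapper through the wasm pipeline.
      it = cache_.emplace(key, std::make_unique<Wrapper>()).first;
    }
    return it->second.get();
  }

 private:
  std::map<std::pair<WrapperKind, std::string>, std::unique_ptr<Wrapper>>
      cache_;
};
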
@@ -409,6 +456,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<WasmCode* []> code_table_;
+ // Null if no redirections exist, otherwise a bitset over all functions in
+ // this module marking those functions that have been redirected.
+ std::unique_ptr<uint8_t[]> interpreter_redirections_;
+
DisjointAllocationPool free_code_space_;
DisjointAllocationPool allocated_code_space_;
std::list<VirtualMemory> owned_code_space_;
@@ -416,11 +467,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
- WasmCodeManager* wasm_code_manager_;
+ WasmCodeManager* const code_manager_;
std::atomic<size_t> committed_code_space_{0};
int modification_scope_depth_ = 0;
bool can_request_more_memory_;
- bool use_trap_handler_ = false;
+ UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
@@ -439,8 +490,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// TODO(titzer): isolate is only required here for CompilationState.
std::unique_ptr<NativeModule> NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled_features,
- size_t memory_estimate, bool can_request_more,
- std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
+ size_t code_size_estimate, bool can_request_more,
+ std::shared_ptr<const WasmModule> module);
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
@@ -449,12 +500,15 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Add a sample of all module sizes.
void SampleModuleSizes(Isolate* isolate) const;
+ void SetMaxCommittedMemoryForTesting(size_t limit);
+
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
// bias samples towards apps with high memory pressure. We should switch to
// using sampling based on regular intervals independent of the GC.
static void InstallSamplingGCCallback(Isolate* isolate);
- static size_t EstimateNativeModuleSize(const WasmModule* module);
+ static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
+ static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
private:
friend class NativeModule;
@@ -469,10 +523,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void FreeNativeModule(NativeModule*);
void AssignRanges(Address start, Address end, NativeModule*);
void AssignRangesAndAddModule(Address start, Address end, NativeModule*);
- bool ShouldForceCriticalMemoryPressureNotification();
WasmMemoryTracker* const memory_tracker_;
std::atomic<size_t> remaining_uncommitted_code_space_;
+ // If the remaining uncommitted code space falls below
+ // {critical_uncommitted_code_space_}, then we trigger a GC before creating
+ // the next module. This value is initialized to 50% of the available code
+ // space on creation and after each GC.
+ std::atomic<size_t> critical_uncommitted_code_space_;
mutable base::Mutex native_modules_mutex_;
//////////////////////////////////////////////////////////////////////////////
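
The comment on critical_uncommitted_code_space_ spells out the policy: when remaining uncommitted code space falls below the critical mark, a GC runs before the next module is created, and the mark is re-armed at 50% of what remains. A sketch of that policy in isolation; the harness is hypothetical and the GC call is a placeholder:

#include <atomic>
#include <cstddef>
#include <cstdio>

class CodeSpacePolicy {
 public:
  explicit CodeSpacePolicy(size_t total)
      : remaining_(total), critical_(total / 2) {}

  // Called before creating a new module.
  void MaybeTriggerGC() {
    if (remaining_.load() < critical_.load()) {
      std::printf("triggering GC to reclaim code space\n");
      // After the GC, re-arm the threshold at 50% of the remainder.
      critical_.store(remaining_.load() / 2);
    }
  }

  void Commit(size_t bytes) { remaining_.fetch_sub(bytes); }

 private:
  std::atomic<size_t> remaining_;
  std::atomic<size_t> critical_;
};
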