commit 9b4bf7de6c9a7c25f116c7a502384c20b5cfaea3
tree   2b0c843168dafb939d8df8a15b2aa72b76dee51d
parent b8fbe69db1292307adb2c2b2e0d5ef48c4ab2faf
author    Michaël Zasso <targos@protonmail.com> 2018-12-04 08:20:37 +0100
committer Michaël Zasso <targos@protonmail.com> 2018-12-06 15:23:33 +0100

deps: update V8 to 7.1.302.28

PR-URL: https://github.com/nodejs/node/pull/23423
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm/wasm-code-manager.h')
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 126
1 file changed, 67 insertions(+), 59 deletions(-)
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index ffcc05fbcd..65156b7457 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -18,6 +18,7 @@
#include "src/vector.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
@@ -32,47 +33,34 @@ class WasmCodeManager;
class WasmMemoryTracker;
struct WasmModule;
-struct AddressRange {
- Address start;
- Address end;
-
- AddressRange(Address s, Address e) : start(s), end(e) {
- DCHECK_LE(start, end);
- DCHECK_IMPLIES(start == kNullAddress, end == kNullAddress);
- }
- AddressRange() : AddressRange(kNullAddress, kNullAddress) {}
-
- size_t size() const { return static_cast<size_t>(end - start); }
- bool is_empty() const { return start == end; }
- operator bool() const { return start == kNullAddress; }
-};
-
-// Sorted, disjoint and non-overlapping memory ranges. A range is of the
+// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
public:
DisjointAllocationPool() = default;
- explicit DisjointAllocationPool(AddressRange range) : ranges_({range}) {}
+ explicit DisjointAllocationPool(base::AddressRegion region)
+ : regions_({region}) {}
DisjointAllocationPool(DisjointAllocationPool&& other) = default;
DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
- // Merge the parameter range into this object while preserving ordering of the
- // ranges. The assumption is that the passed parameter is not intersecting
- // this object - for example, it was obtained from a previous Allocate.
- void Merge(AddressRange);
+ // Merge the parameter region into this object while preserving ordering of
+ // the regions. The assumption is that the passed parameter is not
+ // intersecting this object - for example, it was obtained from a previous
+ // Allocate.
+ void Merge(base::AddressRegion);
- // Allocate a contiguous range of size {size}. Return an empty pool on
+ // Allocate a contiguous region of size {size}. Return an empty pool on
// failure.
- AddressRange Allocate(size_t size);
+ base::AddressRegion Allocate(size_t size);
- bool IsEmpty() const { return ranges_.empty(); }
- const std::list<AddressRange>& ranges() const { return ranges_; }
+ bool IsEmpty() const { return regions_.empty(); }
+ const std::list<base::AddressRegion>& regions() const { return regions_; }
private:
- std::list<AddressRange> ranges_;
+ std::list<base::AddressRegion> regions_;
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
};
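
Note: the pool above hands out and takes back contiguous regions of code space. Below is a minimal standalone sketch of the Merge/Allocate contract described in the comments; the simplified Region type stands in for base::AddressRegion, and first-fit allocation is an assumption (the real implementation lives in wasm-code-manager.cc), so treat it as an illustration rather than V8's code.

    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <list>

    using Address = uintptr_t;

    struct Region {  // simplified stand-in for base::AddressRegion
      Address begin = 0;
      size_t size = 0;
      Address end() const { return begin + size; }
      bool is_empty() const { return size == 0; }
    };

    class DisjointPoolSketch {
     public:
      // Merge a region assumed not to intersect any held region, keeping the
      // list sorted by start address and coalescing adjacent neighbors so
      // that [start, end) + [end, other_end) collapses to [start, other_end).
      void Merge(Region r) {
        auto it = regions_.begin();
        while (it != regions_.end() && it->begin < r.begin) ++it;
        if (it != regions_.begin()) {
          auto prev = std::prev(it);
          if (prev->end() == r.begin) {
            prev->size += r.size;
            if (it != regions_.end() && prev->end() == it->begin) {
              prev->size += it->size;  // {r} bridged a gap: fuse all three
              regions_.erase(it);
            }
            return;
          }
        }
        if (it != regions_.end() && r.end() == it->begin) {
          it->begin = r.begin;  // grow the successor downwards
          it->size += r.size;
        } else {
          regions_.insert(it, r);
        }
      }

      // Carve {size} bytes out of the first region that is large enough;
      // an empty region signals failure, as in the declaration above.
      Region Allocate(size_t size) {
        for (auto it = regions_.begin(); it != regions_.end(); ++it) {
          if (it->size < size) continue;
          Region result{it->begin, size};
          it->begin += size;
          it->size -= size;
          if (it->size == 0) regions_.erase(it);
          return result;
        }
        return {};
      }

     private:
      std::list<Region> regions_;  // sorted, pairwise disjoint
    };
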
@@ -113,9 +101,12 @@ class V8_EXPORT_PRIVATE WasmCode final {
return source_position_table_.as_vector();
}
- uint32_t index() const { return index_.ToChecked(); }
+ uint32_t index() const {
+ DCHECK(!IsAnonymous());
+ return index_;
+ }
// Anonymous functions are functions that don't carry an index.
- bool IsAnonymous() const { return index_.IsNothing(); }
+ bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
Kind kind() const { return kind_; }
NativeModule* native_module() const { return native_module_; }
Tier tier() const { return tier_; }
@@ -135,6 +126,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
return protected_instructions_.as_vector();
}
+ const char* GetRuntimeStubName() const;
+
void Validate() const;
void Print(const char* name = nullptr) const;
void Disassemble(const char* name, std::ostream& os,
@@ -150,7 +143,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
private:
friend class NativeModule;
- WasmCode(NativeModule* native_module, Maybe<uint32_t> index,
+ WasmCode(NativeModule* native_module, uint32_t index,
Vector<byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
size_t constant_pool_offset,
@@ -185,11 +178,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
// trap_handler_index.
void RegisterTrapHandlerData();
+ static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
+ STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
+
Vector<byte> instructions_;
OwnedVector<const byte> reloc_info_;
OwnedVector<const byte> source_position_table_;
NativeModule* native_module_ = nullptr;
- Maybe<uint32_t> index_;
+ uint32_t index_;
Kind kind_;
size_t constant_pool_offset_ = 0;
uint32_t stack_slots_ = 0;
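
Note: the Maybe<uint32_t> index becomes a plain uint32_t plus a sentinel, and the STATIC_ASSERT ties the sentinel to the module limit so it can never collide with a real function index. A self-contained sketch of the pattern (kV8MaxWasmFunctions is the constant from wasm-limits.h, newly included at the top of this diff; assert stands in for DCHECK):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kV8MaxWasmFunctions = 1000000;  // from wasm-limits.h

    class WasmCodeSketch {
     public:
      static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
      // Compile-time proof that the sentinel lies outside the valid range.
      static_assert(kAnonymousFuncIndex > kV8MaxWasmFunctions,
                    "sentinel must not be a valid function index");

      explicit WasmCodeSketch(uint32_t index = kAnonymousFuncIndex)
          : index_(index) {}

      bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }

      uint32_t index() const {
        assert(!IsAnonymous());  // plays the role of the DCHECK above
        return index_;
      }

     private:
      uint32_t index_;  // one word, vs. Maybe<uint32_t> carrying a flag too
    };
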
@@ -216,8 +212,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
static constexpr bool kCanAllocateMoreMemory = true;
#endif
- // {AddCode} is thread safe w.r.t. other calls to {AddCode} or {AddCodeCopy},
- // i.e. it can be called concurrently from background threads.
+ // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
+ // code below, i.e. it can be called concurrently from background threads.
WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
OwnedVector<trap_handler::ProtectedInstructionData>
@@ -234,16 +230,19 @@ class V8_EXPORT_PRIVATE NativeModule final {
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier);
- // A way to copy over JS-allocated code. This is because we compile
- // certain wrappers using a different pipeline.
- WasmCode* AddCodeCopy(Handle<Code> code, WasmCode::Kind kind, uint32_t index);
+ // Add an import wrapper for wasm-to-JS transitions. This method copies over
+ // JS-allocated code, because we compile wrappers using a different pipeline.
+ WasmCode* AddImportWrapper(Handle<Code> code, uint32_t index);
- // Add an interpreter entry. For the same reason as AddCodeCopy, we
+ // Add an interpreter entry. For the same reason as AddImportWrapper, we
// currently compile these using a different pipeline and we can't get a
// CodeDesc here. When adding interpreter wrappers, we do not insert them in
// the code_table, however, we let them self-identify as the {index} function.
WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);
+ // Adds anonymous code for testing purposes.
+ WasmCode* AddCodeForTesting(Handle<Code> code);
+
// When starting lazy compilation, provide the WasmLazyCompile builtin by
// calling SetLazyBuiltin. It will be copied into this NativeModule and the
// jump table will be populated with that copy.
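
Note: import wrappers and interpreter entries start life as GC-managed Code objects, so "copying over JS-allocated code" means moving the instructions into memory the module owns. A hedged sketch of that step (the function name is hypothetical, and the real version also rewrites relocations rather than leaving them as a comment):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    std::vector<uint8_t> CopyIntoModuleOwnedBuffer(const uint8_t* instructions,
                                                   size_t size) {
      std::vector<uint8_t> owned(size);               // module-owned storage
      std::memcpy(owned.data(), instructions, size);  // detach from JS heap
      // Relocation entries would be re-applied here so absolute addresses
      // and pc-relative calls stay valid at the new location.
      return owned;
    }
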
@@ -346,18 +345,20 @@ class V8_EXPORT_PRIVATE NativeModule final {
friend class NativeModuleModificationScope;
NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
- bool can_request_more, VirtualMemory* code_space,
+ bool can_request_more, VirtualMemory code_space,
WasmCodeManager* code_manager,
std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
- WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
- Address AllocateForCode(size_t size);
+ WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
+ const char* name = nullptr);
+ // Allocate code space. Returns a valid buffer or fails with OOM (crash).
+ Vector<byte> AllocateForCode(size_t size);
// Primitive for adding code to the native module. All code added to a native
// module is owned by that module. Various callers get to decide on how the
// code is obtained (CodeDesc vs, as a point in time, Code*), the kind,
// whether it has an index or is anonymous, etc.
- WasmCode* AddOwnedCode(Maybe<uint32_t> index, Vector<const byte> instructions,
+ WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
size_t constant_pool_offset,
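
Note: AllocateForCode now returns the buffer itself rather than a bare start address, and its failure mode is a crash rather than a null result. A minimal sketch of that "valid buffer or die" contract, with malloc standing in for carving from the reserved code space:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    struct ByteBuffer {
      uint8_t* data;
      size_t size;
    };

    ByteBuffer AllocateForCodeOrCrash(size_t size) {
      void* mem = std::malloc(size);  // stand-in for the code-space allocator
      if (mem == nullptr) {
        std::fprintf(stderr, "code allocation of %zu bytes failed\n", size);
        std::abort();  // OOM is unrecoverable here, matching the comment
      }
      return {static_cast<uint8_t*>(mem), size};
    }
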
@@ -368,18 +369,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
- void PatchJumpTable(uint32_t func_index, Address target,
- WasmCode::FlushICache);
+ // Hold the {allocation_mutex_} when calling this method.
+ void InstallCode(WasmCode* code);
Vector<WasmCode*> code_table() const {
return {code_table_.get(), module_->num_declared_functions};
}
- void set_code(uint32_t index, WasmCode* code) {
- DCHECK_LT(index, num_functions());
- DCHECK_LE(module_->num_imported_functions, index);
- DCHECK_EQ(code->index(), index);
- code_table_[index - module_->num_imported_functions] = code;
- }
// Features enabled for this module. We keep a copy of the features that
// were enabled at the time of the creation of this native module,
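
Note: InstallCode folds the removed set_code and PatchJumpTable steps into one method that runs with {allocation_mutex_} held: publishing the code in the table and redirecting the function's jump-table slot happen in a single critical section. A sketch with hypothetical types (real jump-table slots hold machine-level jump instructions, not plain addresses, and the real index is offset by the number of imports):

    #include <cstdint>
    #include <mutex>
    #include <vector>

    class NativeModuleSketch {
     public:
      // Precondition, as the comment above demands: allocation_mutex_ held.
      void InstallCode(uint32_t declared_index, uintptr_t instruction_start) {
        code_table_[declared_index] = instruction_start;
        // Redirecting the slot makes all future calls through the jump
        // table land in the newly installed code.
        jump_table_[declared_index] = instruction_start;
      }

     private:
      std::mutex allocation_mutex_;
      std::vector<uintptr_t> code_table_;
      std::vector<uintptr_t> jump_table_;
    };
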
@@ -390,12 +385,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// AsyncCompileJob).
std::shared_ptr<const WasmModule> module_;
- // Holds all allocated code objects, is maintained to be in ascending order
- // according to the codes instruction start address to allow lookups.
- std::vector<std::unique_ptr<WasmCode>> owned_code_;
-
- std::unique_ptr<WasmCode* []> code_table_;
-
OwnedVector<const byte> wire_bytes_;
WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
@@ -408,13 +397,25 @@ class V8_EXPORT_PRIVATE NativeModule final {
// hence needs to be destructed first when this native module dies.
std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;
- // This mutex protects concurrent calls to {AddCode} and {AddCodeCopy}.
+ // This mutex protects concurrent calls to {AddCode} and friends.
mutable base::Mutex allocation_mutex_;
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {allocation_mutex_}:
+
+ // Holds all allocated code objects, is maintained to be in ascending order
+ // according to the codes instruction start address to allow lookups.
+ std::vector<std::unique_ptr<WasmCode>> owned_code_;
+
+ std::unique_ptr<WasmCode* []> code_table_;
+
DisjointAllocationPool free_code_space_;
DisjointAllocationPool allocated_code_space_;
std::list<VirtualMemory> owned_code_space_;
+ // End of fields protected by {allocation_mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
+
WasmCodeManager* wasm_code_manager_;
std::atomic<size_t> committed_code_space_{0};
int modification_scope_depth_ = 0;
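
Note: the banner comments delimit which fields {allocation_mutex_} protects. The same convention can be machine-checked with Clang's thread-safety annotations; V8 does not do this in this file, so the following is an illustrative alternative only (it needs -Wthread-safety and an annotated mutex type, e.g. libc++ built with -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS):

    #include <mutex>
    #include <vector>

    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define REQUIRES(x) __attribute__((exclusive_locks_required(x)))

    class GuardedModuleSketch {
     public:
      // Mirrors "Hold the {allocation_mutex_} when calling this method".
      void InstallCode(int code) REQUIRES(allocation_mutex_) {
        code_table_.push_back(code);  // analysis checks the lock is held
      }

      void AddCode(int code) {
        std::lock_guard<std::mutex> lock(allocation_mutex_);
        InstallCode(code);
      }

     private:
      mutable std::mutex allocation_mutex_;
      // The fields between the banner comments map to guarded members here.
      std::vector<int> code_table_ GUARDED_BY(allocation_mutex_);
    };
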
@@ -443,7 +444,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
- WasmCode* GetCodeFromStartAddress(Address pc) const;
size_t remaining_uncommitted_code_space() const;
// Add a sample of all module sizes.
@@ -459,22 +459,30 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
private:
friend class NativeModule;
- void TryAllocate(size_t size, VirtualMemory*, void* hint = nullptr);
+ V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
+ void* hint = nullptr);
bool Commit(Address, size_t);
// Currently, we uncommit a whole module, so all we need is account
// for the freed memory size. We do that in FreeNativeModule.
// There's no separate Uncommit.
void FreeNativeModule(NativeModule*);
- void Free(VirtualMemory* mem);
void AssignRanges(Address start, Address end, NativeModule*);
+ void AssignRangesAndAddModule(Address start, Address end, NativeModule*);
bool ShouldForceCriticalMemoryPressureNotification();
WasmMemoryTracker* const memory_tracker_;
+ std::atomic<size_t> remaining_uncommitted_code_space_;
mutable base::Mutex native_modules_mutex_;
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {native_modules_mutex_}:
+
std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
std::unordered_set<NativeModule*> native_modules_;
- std::atomic<size_t> remaining_uncommitted_code_space_;
+
+ // End of fields protected by {native_modules_mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
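
Note: TryAllocate now returns the VirtualMemory by value instead of filling an out-parameter, and V8_WARN_UNUSED_RESULT makes it an error to drop the reservation on the floor (the NativeModule constructor likewise now takes the VirtualMemory by value, transferring ownership by move). A sketch of the same shape using standard [[nodiscard]] and a hypothetical move-only stand-in type:

    #include <cstddef>
    #include <utility>

    class VirtualMemorySketch {
     public:
      VirtualMemorySketch() = default;  // "not reserved": returned on failure
      VirtualMemorySketch(void* base, size_t size)
          : base_(base), size_(size) {}
      VirtualMemorySketch(const VirtualMemorySketch&) = delete;
      // Move-only: exactly one owner of the reservation at a time.
      VirtualMemorySketch(VirtualMemorySketch&& other) noexcept
          : base_(std::exchange(other.base_, nullptr)),
            size_(std::exchange(other.size_, 0)) {}
      bool IsReserved() const { return base_ != nullptr; }

     private:
      void* base_ = nullptr;
      size_t size_ = 0;
    };

    // [[nodiscard]] plays the role of V8_WARN_UNUSED_RESULT: a discarded
    // return value would silently leak the address-space reservation.
    [[nodiscard]] VirtualMemorySketch TryAllocate(size_t size,
                                                  void* hint = nullptr) {
      (void)size;
      (void)hint;
      // ... reserve address space here; on failure return the unreserved
      // state, which the caller must detect via IsReserved().
      return VirtualMemorySketch();
    }
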