// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_

#include <atomic>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "src/base/macros.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-features.h"

namespace v8 {
namespace internal {

struct CodeDesc;
class Code;

namespace wasm {

class NativeModule;
class WasmCodeManager;
class WasmMemoryTracker;
struct WasmModule;

struct AddressRange {
  Address start;
  Address end;

  AddressRange(Address s, Address e) : start(s), end(e) {
    DCHECK_LE(start, end);
    DCHECK_IMPLIES(start == kNullAddress, end == kNullAddress);
  }
  AddressRange() : AddressRange(kNullAddress, kNullAddress) {}

  size_t size() const { return static_cast<size_t>(end - start); }
  bool is_empty() const { return start == end; }
  operator bool() const { return start != kNullAddress; }
};
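
// For example, AddressRange{0x1000, 0x3000} describes the half-open interval
// [0x1000, 0x3000): size() is 0x2000, and 0x3000 itself is not part of the
// range. The default-constructed range is the null range, which converts to
// false.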

// Sorted, disjoint memory ranges, each of the form [start, end). Adjacent
// ranges are always coalesced: there is never a pair [start, end),
// [end, other_end), because it would have been reduced to [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
 public:
  DisjointAllocationPool() = default;

  explicit DisjointAllocationPool(AddressRange range) : ranges_({range}) {}

  DisjointAllocationPool(DisjointAllocationPool&& other) = default;
  DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;

  // Merge the parameter range into this object while preserving the ordering
  // of the ranges. The range must not intersect any range already in this
  // object - for example, it was obtained from a previous Allocate.
  void Merge(AddressRange);

  // Allocate a contiguous range of size {size}. Return an empty range on
  // failure.
  AddressRange Allocate(size_t size);

  bool IsEmpty() const { return ranges_.empty(); }
  const std::list<AddressRange>& ranges() const { return ranges_; }

 private:
  std::list<AddressRange> ranges_;

  DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
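
// Usage sketch (illustrative only; {base} stands for some page-aligned code
// reservation and is not defined in this header):
//
//   DisjointAllocationPool pool(AddressRange{base, base + 0x3000});
//   AddressRange a = pool.Allocate(0x1000);  // [base, base+0x1000)
//   AddressRange b = pool.Allocate(0x1000);  // [base+0x1000, base+0x2000)
//   pool.Merge(a);  // Not adjacent to the remaining free space: the pool now
//                   // holds [base, base+0x1000) and
//                   // [base+0x2000, base+0x3000).
//   pool.Merge(b);  // Adjacent to both: coalesces everything back into the
//                   // single range [base, base+0x3000).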

class V8_EXPORT_PRIVATE WasmCode final {
 public:
  enum Kind {
    kFunction,
    kWasmToJsWrapper,
    kLazyStub,
    kRuntimeStub,
    kInterpreterEntry,
    kJumpTable
  };

  // Each runtime stub is identified by an id. This id is used to reference the
  // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
  enum RuntimeStubId {
#define DEF_ENUM(Name) k##Name,
#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
    WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
#undef DEF_ENUM_TRAP
#undef DEF_ENUM
        kRuntimeStubCount
  };
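
  // As an illustration of the macros above: a plain list entry {Foo} defines
  // the enumerator {kFoo} via DEF_ENUM, and a trap reason {Bar} defines
  // {kThrowWasmBar} via DEF_ENUM_TRAP.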

  // {kOther} is used if we have WasmCode that is neither Liftoff- nor
  // TurboFan-compiled, i.e. if the {Kind} is not {kFunction}.
  enum Tier : int8_t { kLiftoff, kTurbofan, kOther };

  Vector<byte> instructions() const { return instructions_; }
  Address instruction_start() const {
    return reinterpret_cast<Address>(instructions_.start());
  }
  Vector<const byte> reloc_info() const { return reloc_info_.as_vector(); }
  Vector<const byte> source_positions() const {
    return source_position_table_.as_vector();
  }

  uint32_t index() const { return index_.ToChecked(); }
  // Anonymous functions are functions that don't carry an index.
  bool IsAnonymous() const { return index_.IsNothing(); }
  Kind kind() const { return kind_; }
  NativeModule* native_module() const { return native_module_; }
  Tier tier() const { return tier_; }
  Address constant_pool() const;
  size_t constant_pool_offset() const { return constant_pool_offset_; }
  size_t safepoint_table_offset() const { return safepoint_table_offset_; }
  size_t handler_table_offset() const { return handler_table_offset_; }
  uint32_t stack_slots() const { return stack_slots_; }
  bool is_liftoff() const { return tier_ == kLiftoff; }
  bool contains(Address pc) const {
    return reinterpret_cast<Address>(instructions_.start()) <= pc &&
           pc < reinterpret_cast<Address>(instructions_.end());
  }

  Vector<trap_handler::ProtectedInstructionData> protected_instructions()
      const {
    return protected_instructions_.as_vector();
  }

  void Validate() const;
  void Print(const char* name = nullptr) const;
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress) const;

  static bool ShouldBeLogged(Isolate* isolate);
  void LogCode(Isolate* isolate) const;

  ~WasmCode();

  enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };

 private:
  friend class NativeModule;

  WasmCode(NativeModule* native_module, Maybe<uint32_t> index,
           Vector<byte> instructions, uint32_t stack_slots,
           size_t safepoint_table_offset, size_t handler_table_offset,
           size_t constant_pool_offset,
           OwnedVector<trap_handler::ProtectedInstructionData>
               protected_instructions,
           OwnedVector<const byte> reloc_info,
           OwnedVector<const byte> source_position_table, Kind kind, Tier tier)
      : instructions_(instructions),
        reloc_info_(std::move(reloc_info)),
        source_position_table_(std::move(source_position_table)),
        native_module_(native_module),
        index_(index),
        kind_(kind),
        constant_pool_offset_(constant_pool_offset),
        stack_slots_(stack_slots),
        safepoint_table_offset_(safepoint_table_offset),
        handler_table_offset_(handler_table_offset),
        protected_instructions_(std::move(protected_instructions)),
        tier_(tier) {
    DCHECK_LE(safepoint_table_offset, instructions.size());
    DCHECK_LE(constant_pool_offset, instructions.size());
    DCHECK_LE(handler_table_offset, instructions.size());
  }

  // Code objects that have been registered with the global trap handler
  // within this process will have a {trap_handler_index} associated with
  // them.
  size_t trap_handler_index() const;
  void set_trap_handler_index(size_t);
  bool HasTrapHandlerIndex() const;

  // Register protected instruction information with the trap handler. Sets
  // trap_handler_index.
  void RegisterTrapHandlerData();

  Vector<byte> instructions_;
  OwnedVector<const byte> reloc_info_;
  OwnedVector<const byte> source_position_table_;
  NativeModule* native_module_ = nullptr;
  Maybe<uint32_t> index_;
  Kind kind_;
  size_t constant_pool_offset_ = 0;
  uint32_t stack_slots_ = 0;
  // We care about safepoint data for wasm-to-js functions, since there may be
  // stack/register tagged values for large number conversions.
  size_t safepoint_table_offset_ = 0;
  size_t handler_table_offset_ = 0;
  intptr_t trap_handler_index_ = -1;
  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
  Tier tier_;

  DISALLOW_COPY_AND_ASSIGN(WasmCode);
};

// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);

class V8_EXPORT_PRIVATE NativeModule final {
 public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
  static constexpr bool kCanAllocateMoreMemory = false;
#else
  static constexpr bool kCanAllocateMoreMemory = true;
#endif

  // {AddCode} is thread-safe w.r.t. other calls to {AddCode} or
  // {AddCodeCopy}, i.e. it can be called concurrently from background threads.
  WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
                    size_t safepoint_table_offset, size_t handler_table_offset,
                    OwnedVector<trap_handler::ProtectedInstructionData>
                        protected_instructions,
                    OwnedVector<const byte> source_position_table,
                    WasmCode::Tier tier);

  WasmCode* AddDeserializedCode(
      uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
      size_t safepoint_table_offset, size_t handler_table_offset,
      size_t constant_pool_offset,
      OwnedVector<trap_handler::ProtectedInstructionData>
          protected_instructions,
      OwnedVector<const byte> reloc_info,
      OwnedVector<const byte> source_position_table, WasmCode::Tier tier);

  // A way to copy over JS-allocated code. This is needed because we compile
  // certain wrappers using a different pipeline.
  WasmCode* AddCodeCopy(Handle<Code> code, WasmCode::Kind kind, uint32_t index);

  // Add an interpreter entry. For the same reason as {AddCodeCopy}, we
  // currently compile these using a different pipeline and cannot get a
  // {CodeDesc} here. When adding interpreter wrappers, we do not insert them
  // into the code_table; we do, however, let them self-identify as the
  // {index} function.
  WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);

  // When starting lazy compilation, provide the WasmLazyCompile builtin by
  // calling SetLazyBuiltin. It will be copied into this NativeModule and the
  // jump table will be populated with that copy.
  void SetLazyBuiltin(Handle<Code> code);

  // Initializes all runtime stubs by copying them over from the JS-allocated
  // heap into this native module. It must be called exactly once per native
  // module before adding other WasmCode so that runtime stub ids can be
  // resolved during relocation.
  void SetRuntimeStubs(Isolate* isolate);

  // Makes the code available to the system (by entering it into the code table
  // and patching the jump table). Callers have to take care not to race with
  // threads executing the old code.
  void PublishCode(WasmCode* code);
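
  // A typical flow for newly compiled wasm functions (a sketch; {desc} and
  // the other arguments come from the compilation pipeline and are not
  // defined here):
  //
  //   WasmCode* code = native_module->AddCode(index, desc, stack_slots, ...);
  //   native_module->PublishCode(code);  // Now reachable via the jump table.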

  // Creates a snapshot of the current state of the code table. This is useful
  // to get a consistent view of the table (e.g. used by the serializer).
  std::vector<WasmCode*> SnapshotCodeTable() const;

  WasmCode* code(uint32_t index) const {
    DCHECK_LT(index, num_functions());
    DCHECK_LE(module_->num_imported_functions, index);
    return code_table_[index - module_->num_imported_functions];
  }

  bool has_code(uint32_t index) const { return code(index) != nullptr; }

  WasmCode* runtime_stub(WasmCode::RuntimeStubId index) const {
    DCHECK_LT(index, WasmCode::kRuntimeStubCount);
    WasmCode* code = runtime_stub_table_[index];
    DCHECK_NOT_NULL(code);
    return code;
  }

  Address jump_table_start() const {
    return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
  }

  ptrdiff_t jump_table_offset(uint32_t func_index) const {
    DCHECK_GE(func_index, num_imported_functions());
    return GetCallTargetForFunction(func_index) - jump_table_start();
  }

  bool is_jump_table_slot(Address address) const {
    return jump_table_->contains(address);
  }

  // Transition this module from code relying on trap handlers (i.e. without
  // explicit memory bounds checks) to code that does not require trap handlers
  // (i.e. code with explicit bounds checks).
  // This method must only be called if {use_trap_handler()} is true (it will be
  // false afterwards). All code in this {NativeModule} needs to be re-added
  // after calling this method.
  void DisableTrapHandler();

  // Returns the target to call for the given function (returns a jump table
  // slot within {jump_table_}).
  Address GetCallTargetForFunction(uint32_t func_index) const;

  // Reverse lookup from a given call target (i.e. a jump table slot as the
  // above {GetCallTargetForFunction} returns) to a function index.
  uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
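
  // The two functions above are inverses of each other for any declared
  // (i.e. non-imported) function index {idx}, as the following sketch shows:
  //
  //   Address target = native_module->GetCallTargetForFunction(idx);
  //   DCHECK(native_module->is_jump_table_slot(target));
  //   DCHECK_EQ(idx,
  //             native_module->GetFunctionIndexFromJumpTableSlot(target));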

  bool SetExecutable(bool executable);

  // For cctests, where we build both the WasmModule and the runtime objects
  // on the fly and bypass the instance builder pipeline.
  void ReserveCodeTableForTesting(uint32_t max_functions);

  void LogWasmCodes(Isolate* isolate);

  CompilationState* compilation_state() { return compilation_state_.get(); }

  uint32_t num_functions() const {
    return module_->num_declared_functions + module_->num_imported_functions;
  }
  uint32_t num_imported_functions() const {
    return module_->num_imported_functions;
  }
  bool use_trap_handler() const { return use_trap_handler_; }
  void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
  bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
  Vector<const byte> wire_bytes() const { return wire_bytes_.as_vector(); }
  void set_wire_bytes(OwnedVector<const byte> wire_bytes) {
    wire_bytes_ = std::move(wire_bytes);
  }
  const WasmModule* module() const { return module_.get(); }
  WasmCodeManager* code_manager() const { return wasm_code_manager_; }

  WasmCode* Lookup(Address) const;

  ~NativeModule();

  const WasmFeatures& enabled_features() const { return enabled_features_; }

 private:
  friend class WasmCode;
  friend class WasmCodeManager;
  friend class NativeModuleModificationScope;

  NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
               bool can_request_more, VirtualMemory* code_space,
               WasmCodeManager* code_manager,
               std::shared_ptr<const WasmModule> module, const ModuleEnv& env);

  WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
  Address AllocateForCode(size_t size);

  // Primitive for adding code to the native module. All code added to a
  // native module is owned by that module. Various callers get to decide how
  // the code is obtained (a {CodeDesc}, or, for the time being, a {Code}
  // object), the kind, whether it has an index or is anonymous, etc.
  WasmCode* AddOwnedCode(Maybe<uint32_t> index, Vector<const byte> instructions,
                         uint32_t stack_slots, size_t safepoint_table_offset,
                         size_t handler_table_offset,
                         size_t constant_pool_offset,
                         OwnedVector<trap_handler::ProtectedInstructionData>,
                         OwnedVector<const byte> reloc_info,
                         OwnedVector<const byte> source_position_table,
                         WasmCode::Kind, WasmCode::Tier);

  WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);

  void PatchJumpTable(uint32_t func_index, Address target,
                      WasmCode::FlushICache);

  Vector<WasmCode*> code_table() const {
    return {code_table_.get(), module_->num_declared_functions};
  }
  void set_code(uint32_t index, WasmCode* code) {
    DCHECK_LT(index, num_functions());
    DCHECK_LE(module_->num_imported_functions, index);
    DCHECK_EQ(code->index(), index);
    code_table_[index - module_->num_imported_functions] = code;
  }

  // Features enabled for this module. We keep a copy of the features that
  // were enabled at the time this native module was created, to be consistent
  // across later asynchronous compilations.
  const WasmFeatures enabled_features_;

  // TODO(clemensh): Make this a unique_ptr (requires refactoring
  // AsyncCompileJob).
  std::shared_ptr<const WasmModule> module_;

  // Holds all allocated code objects. This vector is kept sorted in ascending
  // order of the code's instruction start address to allow lookups.
  std::vector<std::unique_ptr<WasmCode>> owned_code_;

  std::unique_ptr<WasmCode* []> code_table_;

  OwnedVector<const byte> wire_bytes_;

  WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};

  // Jump table used to easily redirect wasm function calls.
  WasmCode* jump_table_ = nullptr;

  // The compilation state keeps track of compilation tasks for this module.
  // Note that its destructor blocks until all tasks are finished/aborted and
  // hence needs to be destructed first when this native module dies.
  std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;

  // This mutex protects concurrent calls to {AddCode} and {AddCodeCopy}.
  mutable base::Mutex allocation_mutex_;

  DisjointAllocationPool free_code_space_;
  DisjointAllocationPool allocated_code_space_;
  std::list<VirtualMemory> owned_code_space_;

  WasmCodeManager* wasm_code_manager_;
  std::atomic<size_t> committed_code_space_{0};
  int modification_scope_depth_ = 0;
  bool can_request_more_memory_;
  bool use_trap_handler_ = false;
  bool is_executable_ = false;
  bool lazy_compile_frozen_ = false;

  DISALLOW_COPY_AND_ASSIGN(NativeModule);
};

class V8_EXPORT_PRIVATE WasmCodeManager final {
 public:
  explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
                           size_t max_committed);
  // Create a new NativeModule. The caller is responsible for its lifetime.
  // The native module will be given some memory for code, which will be page
  // aligned. The size of the initial memory is determined by a heuristic
  // based on the total size of wasm code. The native module may later request
  // more memory.
  // TODO(titzer): isolate is only required here for CompilationState.
  std::unique_ptr<NativeModule> NewNativeModule(
      Isolate* isolate, const WasmFeatures& enabled_features,
      size_t memory_estimate, bool can_request_more,
      std::shared_ptr<const WasmModule> module, const ModuleEnv& env);

  NativeModule* LookupNativeModule(Address pc) const;
  WasmCode* LookupCode(Address pc) const;
  WasmCode* GetCodeFromStartAddress(Address pc) const;
  size_t remaining_uncommitted_code_space() const;
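
  // Lookup sketch: given a program counter, e.g. from a stack walk, the
  // manager can map it back to the owning code object:
  //
  //   if (WasmCode* code = code_manager->LookupCode(pc)) {
  //     // {pc} lies within wasm code owned by code->native_module().
  //   }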

  // Add a sample of all module sizes.
  void SampleModuleSizes(Isolate* isolate) const;

  // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
  // bias samples towards apps with high memory pressure. We should switch to
  // using sampling based on regular intervals independent of the GC.
  static void InstallSamplingGCCallback(Isolate* isolate);

  static size_t EstimateNativeModuleSize(const WasmModule* module);

 private:
  friend class NativeModule;

  void TryAllocate(size_t size, VirtualMemory*, void* hint = nullptr);
  bool Commit(Address, size_t);
  // Currently, we uncommit a whole module at once, so all we need to do is
  // account for the freed memory size; we do that in FreeNativeModule.
  // There is no separate Uncommit.

  void FreeNativeModule(NativeModule*);
  void Free(VirtualMemory* mem);
  void AssignRanges(Address start, Address end, NativeModule*);
  bool ShouldForceCriticalMemoryPressureNotification();

  WasmMemoryTracker* const memory_tracker_;
  mutable base::Mutex native_modules_mutex_;
  std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
  std::unordered_set<NativeModule*> native_modules_;
  std::atomic<size_t> remaining_uncommitted_code_space_;

  DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};

// Within the scope, the native_module is writable and not executable.
// At the scope's destruction, the native_module is executable and not
// writable. These states hold regardless of the native_module's state when
// entering the scope.
// We currently mark the entire module's memory W^X:
//  - for AOT, that is as efficient as it can be;
//  - for lazy compilation, we have no heuristic for which functions may need
//    patching, and even if we did, the resulting set of pages may be
//    fragmented. For now, we try to keep the number of syscalls low;
//  - a similar argument applies at debug time.
class NativeModuleModificationScope final {
 public:
  explicit NativeModuleModificationScope(NativeModule* native_module);
  ~NativeModuleModificationScope();

 private:
  NativeModule* native_module_;
};
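
// Usage sketch:
//
//   {
//     NativeModuleModificationScope scope(native_module);
//     // The code memory of {native_module} is writable here.
//   }
//   // And executable (but no longer writable) again after the scope ends.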

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_WASM_CODE_MANAGER_H_