// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CONCURRENT_MARKING_H_
#define V8_HEAP_CONCURRENT_MARKING_H_

#include "include/v8-platform.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
#include "src/init/v8.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

class Heap;
class Isolate;
class MajorNonAtomicMarkingState;
struct WeakObjects;

struct MemoryChunkData {
  intptr_t live_bytes;
  std::unique_ptr<TypedSlots> typed_slots;
};

using MemoryChunkDataMap =
    std::unordered_map<MemoryChunk*, MemoryChunkData, MemoryChunk::Hasher>;
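
// Illustrative sketch (not part of the interface): marking tasks accumulate
// per-chunk live byte counts and typed slots into this map, roughly as in
//
//   MemoryChunkDataMap data;
//   data[chunk].live_bytes += object_size;  // |chunk| and |object_size| are
//                                           // hypothetical locals.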

class V8_EXPORT_PRIVATE ConcurrentMarking {
 public:
  // When the scope is entered, the concurrent marking tasks
  // are preempted and stop looking at heap objects; concurrent marking
  // is resumed when the scope is exited.
  class PauseScope {
   public:
    explicit PauseScope(ConcurrentMarking* concurrent_marking);
    ~PauseScope();

   private:
    ConcurrentMarking* const concurrent_marking_;
    const bool resume_on_exit_;
  };
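
  // A minimal usage sketch (hypothetical call site; the
  // heap->concurrent_marking() accessor is assumed):
  //
  //   {
  //     ConcurrentMarking::PauseScope pause(heap->concurrent_marking());
  //     // Heap objects may be moved safely here; marking tasks are parked.
  //   }  // Marking resumes on scope exit if it was running before.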

  enum class StopRequest {
    // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
    PREEMPT_TASKS,
    // Wait for ongoing tasks to complete (and cancel unstarted tasks).
    COMPLETE_ONGOING_TASKS,
    // Wait for all scheduled tasks to complete (only use this in tests that
    // control the full stack -- otherwise tasks cancelled by the platform can
    // make this call hang).
    COMPLETE_TASKS_FOR_TESTING,
  };

  // TODO(gab): The only thing that prevents this from being above 7 is
  // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
  // task 0, which is reserved for the main thread).
  static constexpr int kMaxTasks = 7;
  using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
  using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;

  ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                    MarkingWorklist* on_hold, WeakObjects* weak_objects,
                    EmbedderTracingWorklist* embedder_objects);

  // Schedules asynchronous tasks to perform concurrent marking. Objects in
  // the heap should not be moved while these tasks are active (they can be
  // stopped safely via Stop() or PauseScope).
  void ScheduleTasks();

  // Stops concurrent marking per |stop_request|'s semantics. Returns true
  // if concurrent marking was in progress, false otherwise.
  bool Stop(StopRequest stop_request);
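
  // Sketch of a typical schedule/stop cycle (assumed caller code, not part
  // of this interface):
  //
  //   concurrent_marking->ScheduleTasks();
  //   ...
  //   if (concurrent_marking->Stop(
  //           ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
  //     // Marking was in progress; tasks are now preempted or cancelled.
  //   }
  //   concurrent_marking->RescheduleTasksIfNeeded();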

  void RescheduleTasksIfNeeded();
  // Flushes memory chunk data using the given marking state.
  void FlushMemoryChunkData(MajorNonAtomicMarkingState* marking_state);
  // This function is called for a new space page that was cleared after a
  // scavenge and is going to be reused.
  void ClearMemoryChunkData(MemoryChunk* chunk);
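
  // Sketch of how the main thread consumes the per-task chunk data at a
  // pause (hypothetical call site; the non_atomic_marking_state() accessor
  // is assumed):
  //
  //   concurrent_marking->FlushMemoryChunkData(
  //       heap->mark_compact_collector()->non_atomic_marking_state());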

  int TaskCount() { return task_count_; }

  // Checks if all threads are stopped.
  bool IsStopped();

  size_t TotalMarkedBytes();

  void set_ephemeron_marked(bool ephemeron_marked) {
    ephemeron_marked_.store(ephemeron_marked);
  }
  bool ephemeron_marked() { return ephemeron_marked_.load(); }
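
  // Ephemeron fixpoint sketch (assumed main-thread driver loop): concurrent
  // markers set the flag when they mark a value through an ephemeron, and
  // the caller reads it to decide whether another iteration is needed:
  //
  //   concurrent_marking->set_ephemeron_marked(false);
  //   // ... run a marking step that may process ephemerons ...
  //   bool needs_another_round = concurrent_marking->ephemeron_marked();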

 private:
  struct TaskState {
    // The main thread sets this flag to true when it wants the concurrent
    // marker to give up the worker thread.
    std::atomic<bool> preemption_request;
    MemoryChunkDataMap memory_chunk_data;
    size_t marked_bytes = 0;
    unsigned mark_compact_epoch;
    bool is_forced_gc;
    // Padding to keep neighboring TaskState entries on separate cache lines
    // and avoid false sharing between marking tasks.
    char cache_line_padding[64];
  };
  class Task;
  void Run(int task_id, TaskState* task_state);
  Heap* const heap_;
  MarkingWorklist* const shared_;
  MarkingWorklist* const on_hold_;
  WeakObjects* const weak_objects_;
  EmbedderTracingWorklist* const embedder_objects_;
  TaskState task_state_[kMaxTasks + 1];
  std::atomic<size_t> total_marked_bytes_{0};
  std::atomic<bool> ephemeron_marked_{false};
  base::Mutex pending_lock_;
  base::ConditionVariable pending_condition_;
  int pending_task_count_ = 0;
  bool is_pending_[kMaxTasks + 1] = {};
  CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
  int task_count_ = 0;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_CONCURRENT_MARKING_H_