path: root/deps/v8/src/heap/marking.h
Diffstat (limited to 'deps/v8/src/heap/marking.h')
-rw-r--r--  deps/v8/src/heap/marking.h | 179
1 file changed, 163 insertions(+), 16 deletions(-)
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index df73f1c5c1..ec5b06cde1 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -135,31 +135,36 @@ class V8_EXPORT_PRIVATE Bitmap {
MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
return MarkBit(cell, mask);
}
+};
+template <AccessMode mode>
+class ConcurrentBitmap : public Bitmap {
+ public:
void Clear();
void MarkAllBits();
// Clears bits in the given cell. The mask specifies bits to clear: if a
// bit is set in the mask then the corresponding bit is cleared in the cell.
- template <AccessMode mode = AccessMode::NON_ATOMIC>
void ClearBitsInCell(uint32_t cell_index, uint32_t mask);
// Sets bits in the given cell. The mask specifies bits to set: if a
// bit is set in the mask then the corresponding bit is set in the cell.
- template <AccessMode mode = AccessMode::NON_ATOMIC>
void SetBitsInCell(uint32_t cell_index, uint32_t mask);
- // Sets all bits in the range [start_index, end_index). The cells at the
- // boundary of the range are updated with atomic compare and swap operation.
- // The inner cells are updated with relaxed write.
+ // Sets all bits in the range [start_index, end_index). If the access is
+ // atomic, the cells at the boundary of the range are updated with an atomic
+ // compare-and-swap operation; the inner cells are updated with relaxed writes.
void SetRange(uint32_t start_index, uint32_t end_index);
- // Clears all bits in the range [start_index, end_index). The cells at the
- // boundary of the range are updated with atomic compare and swap operation.
- // The inner cells are updated with relaxed write.
+ // Clears all bits in the range [start_index, end_index). If the access is
+ // atomic, the cells at the boundary of the range are updated with an atomic
+ // compare-and-swap operation; the inner cells are updated with relaxed writes.
void ClearRange(uint32_t start_index, uint32_t end_index);
+ // The following methods are *not* safe to use in a concurrent context, so
+ // they are not implemented for `AccessMode::ATOMIC`.
+
// Returns true if all bits in the range [start_index, end_index) are set.
bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index);
@@ -169,32 +174,174 @@ class V8_EXPORT_PRIVATE Bitmap {
void Print();
bool IsClean();
+
+ private:
+ // Clear all bits in the cell range [start_cell_index, end_cell_index). If the
+ // access is atomic then *still* use a relaxed memory ordering.
+ void ClearCellRangeRelaxed(uint32_t start_cell_index,
+ uint32_t end_cell_index);
+
+ // Set all bits in the cell range [start_cell_index, end_cell_index). If the
+ // access is atomic then *still* use a relaxed memory ordering.
+ void SetCellRangeRelaxed(uint32_t start_cell_index, uint32_t end_cell_index);
};
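For orientation, a hedged usage sketch (the MarkObjectsOnPage helper and its parameters are hypothetical, not part of this patch): callers pick the access mode as a template argument, so stop-the-world code keeps the cheap non-atomic path while concurrent markers instantiate the atomic one.

    template <AccessMode mode>
    void MarkObjectsOnPage(ConcurrentBitmap<mode>* bitmap, uint32_t start_index,
                           uint32_t end_index) {
      // With the world stopped, AccessMode::NON_ATOMIC is sufficient; concurrent
      // markers must use AccessMode::ATOMIC so that boundary cells are updated
      // with compare-and-swap.
      bitmap->SetRange(start_index, end_index);
    }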
template <>
-inline void Bitmap::SetBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
- uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearCellRangeRelaxed(
+ uint32_t start_cell_index, uint32_t end_cell_index) {
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ base::Relaxed_Store(cell_base + i, 0);
+ }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearCellRangeRelaxed(
+ uint32_t start_cell_index, uint32_t end_cell_index) {
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ cells()[i] = 0;
+ }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetCellRangeRelaxed(
+ uint32_t start_cell_index, uint32_t end_cell_index) {
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ base::Relaxed_Store(cell_base + i, 0xffffffff);
+ }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetCellRangeRelaxed(
+ uint32_t start_cell_index, uint32_t end_cell_index) {
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ cells()[i] = 0xffffffff;
+ }
+}
+
+template <AccessMode mode>
+inline void ConcurrentBitmap<mode>::Clear() {
+ ClearCellRangeRelaxed(0, CellsCount());
+ if (mode == AccessMode::ATOMIC) {
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // setting stores.
+ base::SeqCst_MemoryFence();
+ }
+}
+
+template <AccessMode mode>
+inline void ConcurrentBitmap<mode>::MarkAllBits() {
+ SetCellRangeRelaxed(0, CellsCount());
+ if (mode == AccessMode::ATOMIC) {
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // setting stores.
+ base::SeqCst_MemoryFence();
+ }
+}
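Roughly what the fence buys, as a standalone sketch using std::atomic stand-ins for base::Relaxed_Store and base::SeqCst_MemoryFence (the cells array and page_published flag are illustrative names, not V8 state):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> cells[4];
    std::atomic<bool> page_published{false};

    void ClearBitmapThenPublish() {
      for (auto& cell : cells) {
        cell.store(0, std::memory_order_relaxed);  // relaxed bulk stores
      }
      // Keeps the relaxed bitmap stores from being reordered after later
      // publishing stores; a reader that pairs this with an acquire load (or
      // its own fence) cannot observe the published flag without also seeing
      // the cleared bitmap.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      page_published.store(true, std::memory_order_relaxed);
    }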
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetBitsInCell(
+ uint32_t cell_index, uint32_t mask) {
cells()[cell_index] |= mask;
}
template <>
-inline void Bitmap::SetBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
- uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetBitsInCell(
+ uint32_t cell_index, uint32_t mask) {
base::AsAtomic32::SetBits(cells() + cell_index, mask, mask);
}
template <>
-inline void Bitmap::ClearBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
- uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearBitsInCell(
+ uint32_t cell_index, uint32_t mask) {
cells()[cell_index] &= ~mask;
}
template <>
-inline void Bitmap::ClearBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
- uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearBitsInCell(
+ uint32_t cell_index, uint32_t mask) {
base::AsAtomic32::SetBits(cells() + cell_index, 0u, mask);
}
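The boundary cells are updated through base::AsAtomic32::SetBits, which, as used here, replaces exactly the bits selected by the mask and leaves the rest of the cell intact under concurrent updates. A rough std::atomic sketch of such a helper, assuming a compare-and-swap loop (illustration only, not the actual base:: implementation):

    #include <atomic>
    #include <cstdint>

    // Replace the bits selected by `mask` with `bits`; other bits of the cell
    // survive even if other threads update them concurrently.
    inline void SetBitsSketch(std::atomic<uint32_t>* cell, uint32_t bits,
                              uint32_t mask) {
      uint32_t old_value = cell->load(std::memory_order_relaxed);
      uint32_t new_value;
      do {
        new_value = (old_value & ~mask) | (bits & mask);
      } while (!cell->compare_exchange_weak(old_value, new_value,
                                            std::memory_order_relaxed));
    }

    // Setting a range boundary passes (mask, mask); clearing passes (0u, mask).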
+template <AccessMode mode>
+void ConcurrentBitmap<mode>::SetRange(uint32_t start_index,
+ uint32_t end_index) {
+ if (start_index >= end_index) return;
+ end_index--;
+
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 1s.
+ SetBitsInCell(start_cell_index, ~(start_index_mask - 1));
+ // Then fill all in between cells with 1s.
+ SetCellRangeRelaxed(start_cell_index + 1, end_cell_index);
+ // Finally, fill all bits until the end address in the last cell with 1s.
+ SetBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
+ } else {
+ SetBitsInCell(start_cell_index,
+ end_index_mask | (end_index_mask - start_index_mask));
+ }
+ if (mode == AccessMode::ATOMIC) {
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // setting stores.
+ base::SeqCst_MemoryFence();
+ }
+}
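A worked example of the boundary-mask arithmetic, assuming the 32-bit cells used here (kBitsPerCellLog2 == 5); the constants below merely restate the computation for SetRange(3, 70) and are not part of the patch:

    #include <cstdint>

    constexpr uint32_t kStartIndex = 3;
    constexpr uint32_t kEndIndex = 70 - 1;  // half-open range, last set index is 69
    constexpr uint32_t kStartMask = 1u << (kStartIndex & 31);  // bit 3, cell 0
    constexpr uint32_t kEndMask = 1u << (kEndIndex & 31);      // bit 5, cell 2
    // First cell: SetBitsInCell(0, ~(kStartMask - 1)) sets bits 3..31.
    static_assert(~(kStartMask - 1) == 0xfffffff8u, "bits 3..31 of the first cell");
    // Cell 1 is an inner cell and is filled whole (0xffffffff) by a relaxed store.
    // Last cell: SetBitsInCell(2, kEndMask | (kEndMask - 1)) sets bits 0..5.
    static_assert((kEndMask | (kEndMask - 1)) == 0x3fu, "bits 0..5 of the last cell");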
+
+template <AccessMode mode>
+void ConcurrentBitmap<mode>::ClearRange(uint32_t start_index,
+ uint32_t end_index) {
+ if (start_index >= end_index) return;
+ end_index--;
+
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 0s.
+ ClearBitsInCell(start_cell_index, ~(start_index_mask - 1));
+ // Then fill all in between cells with 0s.
+ ClearCellRangeRelaxed(start_cell_index + 1, end_cell_index);
+ // Finally, fill all bits until the end address in the last cell with 0s.
+ ClearBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
+ } else {
+ ClearBitsInCell(start_cell_index,
+ end_index_mask | (end_index_mask - start_index_mask));
+ }
+ if (mode == AccessMode::ATOMIC) {
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // clearing stores.
+ base::SeqCst_MemoryFence();
+ }
+}
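When the whole range falls into one cell, both SetRange and ClearRange build a single mask, end_index_mask | (end_index_mask - start_index_mask). A small illustration (not V8 code) for ClearRange(2, 6):

    #include <cstdint>

    constexpr uint32_t kSingleStartMask = 1u << 2;  // start_index = 2
    constexpr uint32_t kSingleEndMask = 1u << 5;    // end_index = 6, decremented to 5
    // Subtracting the start mask from the end mask yields bits 2..4; OR-ing the
    // end bit back in covers bits 2..5, i.e. 0b111100.
    static_assert((kSingleEndMask | (kSingleEndMask - kSingleStartMask)) == 0x3cu,
                  "bits 2..5 are cleared in the cell");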
+
+template <>
+V8_EXPORT_PRIVATE bool
+ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
+ uint32_t start_index, uint32_t end_index);
+
+template <>
+V8_EXPORT_PRIVATE bool
+ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsClearInRange(
+ uint32_t start_index, uint32_t end_index);
+
+template <>
+void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print();
+
+template <>
+V8_EXPORT_PRIVATE bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::IsClean();
+
class Marking : public AllStatic {
public:
// TODO(hpayer): The current mark bit operations use as default NON_ATOMIC