author	Michaël Zasso <targos@protonmail.com>	2019-08-16 11:32:46 +0200
committer	Michaël Zasso <targos@protonmail.com>	2019-08-19 09:25:23 +0200
commit	e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4 (patch)
tree	6c6bed9804be9df6162b2483f0a56f371f66464d /deps/v8/src/snapshot
parent	ec16fdae540adaf710b1a86c620170b2880088f0 (diff)
deps: update V8 to 7.7.299.4
PR-URL: https://github.com/nodejs/node/pull/28918
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Diffstat (limited to 'deps/v8/src/snapshot')
22 files changed, 294 insertions(+), 230 deletions(-)
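The bulk of this diff is one mechanical refactoring: the snapshot code stops passing raw AllocationSpace integers around and instead uses the scoped SnapshotSpace enum introduced in references.h below. As a minimal sketch of why the scoped enum is safer (the harness around it is hypothetical, for illustration only, and is not code from the patch):

    #include <cstdint>

    // Same shape as the SnapshotSpace enum added in references.h.
    enum class SnapshotSpace : int { kNew = 1, kOld = 2, kCode = 3 };

    uint32_t high_water[8];  // hypothetical per-space allocation cursor

    void AllocateRaw(SnapshotSpace space, uint32_t size) {
      // A scoped enum does not convert to int implicitly, so every place that
      // indexes a per-space array must spell the conversion out. This is the
      // mechanical churn visible throughout the diff below:
      const int space_number = static_cast<int>(space);
      high_water[space_number] += size;
      // high_water[space] += size;  // would no longer compile
    }

The payoff is that an arbitrary integer (or an AllocationSpace constant such as NEW_LO_SPACE that the serializer does not support) can no longer be handed to these functions by accident.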
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index f55d5b57e5..2dec1c33db 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
 delphick@chromium.org
 jgruber@chromium.org
 petermarshall@chromium.org
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index b4e75a6c20..d7e208eac5 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -96,22 +96,22 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
 }
 
 bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
-  PagedSpace* read_only_space = isolate()->heap()->read_only_space();
-  if (!read_only_space->Contains(obj)) return false;
+  if (!ReadOnlyHeap::Contains(obj)) return false;
 
-  // For objects in RO_SPACE, never serialize the object, but instead create a
-  // back reference that encodes the page number as the chunk_index and the
-  // offset within the page as the chunk_offset.
+  // For objects on the read-only heap, never serialize the object, but instead
+  // create a back reference that encodes the page number as the chunk_index and
+  // the offset within the page as the chunk_offset.
   Address address = obj.address();
   Page* page = Page::FromAddress(address);
   uint32_t chunk_index = 0;
+  ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
   for (Page* p : *read_only_space) {
     if (p == page) break;
     ++chunk_index;
   }
   uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
-  SerializerReference back_reference =
-      SerializerReference::BackReference(RO_SPACE, chunk_index, chunk_offset);
+  SerializerReference back_reference = SerializerReference::BackReference(
+      SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
   reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
   CHECK(SerializeBackReference(obj));
   return true;
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
index 4fb600d1dd..0b96a5a050 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/deserializer-allocator.cc
@@ -20,8 +20,9 @@ namespace internal {
 // space allocation, we have to do an actual allocation when deserializing
 // each large object. Instead of tracking offset for back references, we
 // reference large objects by index.
-Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
-  if (space == LO_SPACE) {
+Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
+  const int space_number = static_cast<int>(space);
+  if (space == SnapshotSpace::kLargeObject) {
     AlwaysAllocateScope scope(heap_);
     // Note that we currently do not support deserialization of large code
     // objects.
@@ -30,21 +31,21 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
     HeapObject obj = result.ToObjectChecked();
     deserialized_large_objects_.push_back(obj);
     return obj.address();
-  } else if (space == MAP_SPACE) {
+  } else if (space == SnapshotSpace::kMap) {
     DCHECK_EQ(Map::kSize, size);
     return allocated_maps_[next_map_index_++];
   } else {
-    DCHECK_LT(space, kNumberOfPreallocatedSpaces);
-    Address address = high_water_[space];
+    DCHECK(IsPreAllocatedSpace(space));
+    Address address = high_water_[space_number];
     DCHECK_NE(address, kNullAddress);
-    high_water_[space] += size;
+    high_water_[space_number] += size;
 #ifdef DEBUG
     // Assert that the current reserved chunk is still big enough.
-    const Heap::Reservation& reservation = reservations_[space];
-    int chunk_index = current_chunk_[space];
-    DCHECK_LE(high_water_[space], reservation[chunk_index].end);
+    const Heap::Reservation& reservation = reservations_[space_number];
+    int chunk_index = current_chunk_[space_number];
+    DCHECK_LE(high_water_[space_number], reservation[chunk_index].end);
 #endif
-    if (space == CODE_SPACE)
+    if (space == SnapshotSpace::kCode)
       MemoryChunk::FromAddress(address)
           ->GetCodeObjectRegistry()
           ->RegisterNewlyAllocatedCodeObject(address);
@@ -52,7 +53,7 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
   }
 }
 
-Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
+Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
   Address address;
   HeapObject obj;
 
@@ -75,16 +76,17 @@ Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
   }
 }
 
-void DeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
-  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
-  uint32_t chunk_index = current_chunk_[space];
-  const Heap::Reservation& reservation = reservations_[space];
+void DeserializerAllocator::MoveToNextChunk(SnapshotSpace space) {
+  DCHECK(IsPreAllocatedSpace(space));
+  const int space_number = static_cast<int>(space);
+  uint32_t chunk_index = current_chunk_[space_number];
+  const Heap::Reservation& reservation = reservations_[space_number];
   // Make sure the current chunk is indeed exhausted.
-  CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+  CHECK_EQ(reservation[chunk_index].end, high_water_[space_number]);
   // Move to next reserved chunk.
-  chunk_index = ++current_chunk_[space];
+  chunk_index = ++current_chunk_[space_number];
   CHECK_LT(chunk_index, reservation.size());
-  high_water_[space] = reservation[chunk_index].start;
+  high_water_[space_number] = reservation[chunk_index].start;
 }
 
 HeapObject DeserializerAllocator::GetMap(uint32_t index) {
@@ -97,12 +99,14 @@ HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
   return deserialized_large_objects_[index];
 }
 
-HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
+HeapObject DeserializerAllocator::GetObject(SnapshotSpace space,
                                             uint32_t chunk_index,
                                             uint32_t chunk_offset) {
-  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
-  DCHECK_LE(chunk_index, current_chunk_[space]);
-  Address address = reservations_[space][chunk_index].start + chunk_offset;
+  DCHECK(IsPreAllocatedSpace(space));
+  const int space_number = static_cast<int>(space);
+  DCHECK_LE(chunk_index, current_chunk_[space_number]);
+  Address address =
+      reservations_[space_number][chunk_index].start + chunk_offset;
   if (next_alignment_ != kWordAligned) {
     int padding = Heap::GetFillToAlign(address, next_alignment_);
     next_alignment_ = kWordAligned;
@@ -114,8 +118,8 @@ HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
 
 void DeserializerAllocator::DecodeReservation(
     const std::vector<SerializedData::Reservation>& res) {
-  DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
-  int current_space = FIRST_SPACE;
+  DCHECK_EQ(0, reservations_[0].size());
+  int current_space = 0;
   for (auto& r : res) {
     reservations_[current_space].push_back(
         {r.chunk_size(), kNullAddress, kNullAddress});
@@ -127,11 +131,13 @@ void DeserializerAllocator::DecodeReservation(
 
 bool DeserializerAllocator::ReserveSpace() {
 #ifdef DEBUG
-  for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
+  for (int i = 0; i < kNumberOfSpaces; ++i) {
     DCHECK_GT(reservations_[i].size(), 0);
   }
 #endif  // DEBUG
   DCHECK(allocated_maps_.empty());
+  // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
+  // implemented.
   if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
     return false;
   }
diff --git a/deps/v8/src/snapshot/deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
index 27cacc79d5..18f9363cdf 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.h
+++ b/deps/v8/src/snapshot/deserializer-allocator.h
@@ -25,9 +25,9 @@ class DeserializerAllocator final {
   // ------- Allocation Methods -------
   // Methods related to memory allocation during deserialization.
 
-  Address Allocate(AllocationSpace space, int size);
+  Address Allocate(SnapshotSpace space, int size);
 
-  void MoveToNextChunk(AllocationSpace space);
+  void MoveToNextChunk(SnapshotSpace space);
 
   void SetAlignment(AllocationAlignment alignment) {
     DCHECK_EQ(kWordAligned, next_alignment_);
     DCHECK_LE(kWordAligned, alignment);
@@ -51,7 +51,7 @@ class DeserializerAllocator final {
   HeapObject GetMap(uint32_t index);
   HeapObject GetLargeObject(uint32_t index);
-  HeapObject GetObject(AllocationSpace space, uint32_t chunk_index,
+  HeapObject GetObject(SnapshotSpace space, uint32_t chunk_index,
                        uint32_t chunk_offset);
 
   // ------- Reservation Methods -------
@@ -69,13 +69,13 @@ class DeserializerAllocator final {
 
  private:
   // Raw allocation without considering alignment.
-  Address AllocateRaw(AllocationSpace space, int size);
+  Address AllocateRaw(SnapshotSpace space, int size);
 
  private:
   static constexpr int kNumberOfPreallocatedSpaces =
-      SerializerDeserializer::kNumberOfPreallocatedSpaces;
+      static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
   static constexpr int kNumberOfSpaces =
-      SerializerDeserializer::kNumberOfSpaces;
+      static_cast<int>(SnapshotSpace::kNumberOfSpaces);
 
   // The address of the next object that will be allocated in each space.
   // Each space has a number of chunks reserved by the GC, with each chunk
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 1fd590db26..25e32e2cc0 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -90,10 +90,10 @@ Deserializer::~Deserializer() {
 // process. It is also called on the body of each function.
 void Deserializer::VisitRootPointers(Root root, const char* description,
                                      FullObjectSlot start, FullObjectSlot end) {
-  // We are reading to a location outside of JS heap, so pass NEW_SPACE to
-  // avoid triggering write barriers.
-  ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end), NEW_SPACE,
-           kNullAddress);
+  // We are reading to a location outside of JS heap, so pass kNew to avoid
+  // triggering write barriers.
+  ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end),
+           SnapshotSpace::kNew, kNullAddress);
 }
 
 void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
@@ -112,9 +112,10 @@ void Deserializer::DeserializeDeferredObjects() {
         break;
       }
       default: {
-        int space = code & kSpaceMask;
-        DCHECK_LE(space, kNumberOfSpaces);
-        DCHECK_EQ(code - space, kNewObject);
+        const int space_number = code & kSpaceMask;
+        DCHECK_LE(space_number, kNumberOfSpaces);
+        DCHECK_EQ(code - space_number, kNewObject);
+        SnapshotSpace space = static_cast<SnapshotSpace>(space_number);
         HeapObject object = GetBackReferencedObject(space);
         int size = source_.GetInt() << kTaggedSizeLog2;
         Address obj_address = object.address();
@@ -201,7 +202,8 @@ String ForwardStringIfExists(Isolate* isolate, StringTableInsertionKey* key) {
 
 }  // namespace
 
-HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
+HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
+                                              SnapshotSpace space) {
   if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
     if (obj.IsString()) {
       // Uninitialize hash field as we need to recompute the hash.
@@ -209,7 +211,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
       string.set_hash_field(String::kEmptyHashField);
       // Rehash strings before read-only space is sealed. Strings outside
       // read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
-      if (space == RO_SPACE) {
+      if (space == SnapshotSpace::kReadOnlyHeap) {
         to_rehash_.push_back(obj);
       }
     } else if (obj.NeedsRehashing()) {
@@ -249,7 +251,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
     // We flush all code pages after deserializing the startup snapshot.
     // Hence we only remember each individual code object when deserializing
     // user code.
-    if (deserializing_user_code() || space == LO_SPACE) {
+    if (deserializing_user_code() || space == SnapshotSpace::kLargeObject) {
       new_code_objects_.push_back(Code::cast(obj));
     }
   } else if (FLAG_trace_maps && obj.IsMap()) {
@@ -326,16 +328,16 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
   return obj;
 }
 
-HeapObject Deserializer::GetBackReferencedObject(int space) {
+HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
   HeapObject obj;
   switch (space) {
-    case LO_SPACE:
+    case SnapshotSpace::kLargeObject:
       obj = allocator()->GetLargeObject(source_.GetInt());
       break;
-    case MAP_SPACE:
+    case SnapshotSpace::kMap:
       obj = allocator()->GetMap(source_.GetInt());
       break;
-    case RO_SPACE: {
+    case SnapshotSpace::kReadOnlyHeap: {
       uint32_t chunk_index = source_.GetInt();
       uint32_t chunk_offset = source_.GetInt();
       if (isolate()->heap()->deserialization_complete()) {
@@ -347,16 +349,14 @@ HeapObject Deserializer::GetBackReferencedObject(int space) {
         Address address = page->OffsetToAddress(chunk_offset);
         obj = HeapObject::FromAddress(address);
       } else {
-        obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
-                                     chunk_index, chunk_offset);
+        obj = allocator()->GetObject(space, chunk_index, chunk_offset);
       }
       break;
     }
     default: {
       uint32_t chunk_index = source_.GetInt();
       uint32_t chunk_offset = source_.GetInt();
-      obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
-                                   chunk_index, chunk_offset);
+      obj = allocator()->GetObject(space, chunk_index, chunk_offset);
       break;
     }
   }
@@ -372,49 +372,48 @@ HeapObject Deserializer::GetBackReferencedObject(int space) {
 
 HeapObject Deserializer::ReadObject() {
   MaybeObject object;
-  // We are reading to a location outside of JS heap, so pass NEW_SPACE to
-  // avoid triggering write barriers.
+  // We are reading to a location outside of JS heap, so pass kNew to avoid
+  // triggering write barriers.
   bool filled =
       ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
-               NEW_SPACE, kNullAddress);
+               SnapshotSpace::kNew, kNullAddress);
   CHECK(filled);
   return object.GetHeapObjectAssumeStrong();
 }
 
-HeapObject Deserializer::ReadObject(int space_number) {
+HeapObject Deserializer::ReadObject(SnapshotSpace space) {
   const int size = source_.GetInt() << kObjectAlignmentBits;
-  Address address =
-      allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
+  Address address = allocator()->Allocate(space, size);
   HeapObject obj = HeapObject::FromAddress(address);
   isolate_->heap()->OnAllocationEvent(obj, size);
   MaybeObjectSlot current(address);
   MaybeObjectSlot limit(address + size);
 
-  if (ReadData(current, limit, space_number, address)) {
+  if (ReadData(current, limit, space, address)) {
     // Only post process if object content has not been deferred.
-    obj = PostProcessNewObject(obj, space_number);
+    obj = PostProcessNewObject(obj, space);
   }
 
#ifdef DEBUG
   if (obj.IsCode()) {
-    DCHECK(space_number == CODE_SPACE || space_number == CODE_LO_SPACE);
+    DCHECK_EQ(space, SnapshotSpace::kCode);
   } else {
-    DCHECK(space_number != CODE_SPACE && space_number != CODE_LO_SPACE);
+    DCHECK_NE(space, SnapshotSpace::kCode);
   }
#endif  // DEBUG
   return obj;
 }
 
-void Deserializer::ReadCodeObjectBody(int space_number,
+void Deserializer::ReadCodeObjectBody(SnapshotSpace space,
                                       Address code_object_address) {
   // At this point the code object is already allocated, its map field is
   // initialized and its raw data fields and code stream are also read.
   // Now we read the rest of code header's fields.
   MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
   MaybeObjectSlot limit(code_object_address + Code::kDataStart);
-  bool filled = ReadData(current, limit, space_number, code_object_address);
+  bool filled = ReadData(current, limit, space, code_object_address);
   CHECK(filled);
 
   // Now iterate RelocInfos the same way it was done by the serialzier and
@@ -517,21 +516,22 @@ static void NoExternalReferencesCallback() {
 }
 
 template <typename TSlot>
-bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
+bool Deserializer::ReadData(TSlot current, TSlot limit,
+                            SnapshotSpace source_space,
                             Address current_object_address) {
   Isolate* const isolate = isolate_;
   // Write barrier support costs around 1% in startup time.  In fact there
   // are no new space objects in current boot snapshots, so it's not needed,
   // but that may change.
-  bool write_barrier_needed =
-      (current_object_address != kNullAddress && source_space != NEW_SPACE &&
-       source_space != CODE_SPACE);
+  bool write_barrier_needed = (current_object_address != kNullAddress &&
+                               source_space != SnapshotSpace::kNew &&
+                               source_space != SnapshotSpace::kCode);
   while (current < limit) {
     byte data = source_.Get();
     switch (data) {
-#define CASE_STATEMENT(bytecode, space_number) \
-  case bytecode + space_number:                \
-    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
+#define CASE_STATEMENT(bytecode, snapshot_space)    \
+  case bytecode + static_cast<int>(snapshot_space): \
+    STATIC_ASSERT((static_cast<int>(snapshot_space) & ~kSpaceMask) == 0);
 
#define CASE_BODY(bytecode, space_number_if_any)              \
    current = ReadDataCase<TSlot, bytecode, space_number_if_any>( \
@@ -541,18 +541,18 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
 // This generates a case and a body for the new space (which has to do extra
 // write barrier handling) and handles the other spaces with fall-through cases
 // and one body.
-#define ALL_SPACES(bytecode)           \
-  CASE_STATEMENT(bytecode, NEW_SPACE)  \
-  CASE_BODY(bytecode, NEW_SPACE)       \
-  CASE_STATEMENT(bytecode, OLD_SPACE)  \
-  V8_FALLTHROUGH;                      \
-  CASE_STATEMENT(bytecode, CODE_SPACE) \
-  V8_FALLTHROUGH;                      \
-  CASE_STATEMENT(bytecode, MAP_SPACE)  \
-  V8_FALLTHROUGH;                      \
-  CASE_STATEMENT(bytecode, LO_SPACE)   \
-  V8_FALLTHROUGH;                      \
-  CASE_STATEMENT(bytecode, RO_SPACE)   \
+#define ALL_SPACES(bytecode)                             \
+  CASE_STATEMENT(bytecode, SnapshotSpace::kNew)          \
+  CASE_BODY(bytecode, SnapshotSpace::kNew)               \
+  CASE_STATEMENT(bytecode, SnapshotSpace::kOld)          \
+  V8_FALLTHROUGH;                                        \
+  CASE_STATEMENT(bytecode, SnapshotSpace::kCode)         \
+  V8_FALLTHROUGH;                                        \
+  CASE_STATEMENT(bytecode, SnapshotSpace::kMap)          \
+  V8_FALLTHROUGH;                                        \
+  CASE_STATEMENT(bytecode, SnapshotSpace::kLargeObject)  \
+  V8_FALLTHROUGH;                                        \
+  CASE_STATEMENT(bytecode, SnapshotSpace::kReadOnlyHeap) \
   CASE_BODY(bytecode, kAnyOldSpace)
 
#define FOUR_CASES(byte_code) \
@@ -579,16 +579,16 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
       ALL_SPACES(kBackref)
       // Find an object in the roots array and write a pointer to it to the
       // current object.
-      SINGLE_CASE(kRootArray, RO_SPACE)
+      SINGLE_CASE(kRootArray, SnapshotSpace::kReadOnlyHeap)
       // Find an object in the partial snapshots cache and write a pointer to it
       // to the current object.
-      SINGLE_CASE(kPartialSnapshotCache, RO_SPACE)
+      SINGLE_CASE(kPartialSnapshotCache, SnapshotSpace::kReadOnlyHeap)
       // Find an object in the partial snapshots cache and write a pointer to it
       // to the current object.
-      SINGLE_CASE(kReadOnlyObjectCache, RO_SPACE)
+      SINGLE_CASE(kReadOnlyObjectCache, SnapshotSpace::kReadOnlyHeap)
       // Find an object in the attached references and write a pointer to it to
       // the current object.
-      SINGLE_CASE(kAttachedReference, RO_SPACE)
+      SINGLE_CASE(kAttachedReference, SnapshotSpace::kReadOnlyHeap)
 
#undef CASE_STATEMENT
#undef CASE_BODY
@@ -614,7 +614,7 @@
       case kNextChunk: {
         int space = source_.Get();
-        allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
+        allocator()->MoveToNextChunk(static_cast<SnapshotSpace>(space));
         break;
       }
 
@@ -791,13 +791,15 @@ Address Deserializer::ReadExternalReferenceCase() {
 }
 
 template <typename TSlot, SerializerDeserializer::Bytecode bytecode,
-          int space_number_if_any>
+          SnapshotSpace space_number_if_any>
 TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
                                  Address current_object_address, byte data,
                                  bool write_barrier_needed) {
   bool emit_write_barrier = false;
-  int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
-                                                         : space_number_if_any;
+  SnapshotSpace space = static_cast<SnapshotSpace>(
+      space_number_if_any == kAnyOldSpace
+          ? static_cast<SnapshotSpace>(data & kSpaceMask)
+          : space_number_if_any);
   HeapObject heap_object;
   HeapObjectReferenceType reference_type =
       allocator()->GetAndClearNextReferenceIsWeak()
@@ -805,11 +807,11 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
           : HeapObjectReferenceType::STRONG;
 
   if (bytecode == kNewObject) {
-    heap_object = ReadObject(space_number);
-    emit_write_barrier = (space_number == NEW_SPACE);
+    heap_object = ReadObject(space);
+    emit_write_barrier = (space == SnapshotSpace::kNew);
   } else if (bytecode == kBackref) {
-    heap_object = GetBackReferencedObject(space_number);
-    emit_write_barrier = (space_number == NEW_SPACE);
+    heap_object = GetBackReferencedObject(space);
+    emit_write_barrier = (space == SnapshotSpace::kNew);
   } else if (bytecode == kRootArray) {
     int id = source_.GetInt();
     RootIndex root_index = static_cast<RootIndex>(id);
@@ -819,8 +821,7 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
   } else if (bytecode == kReadOnlyObjectCache) {
     int cache_index = source_.GetInt();
     heap_object = HeapObject::cast(
-        isolate->heap()->read_only_heap()->cached_read_only_object(
-            cache_index));
+        isolate->read_only_heap()->cached_read_only_object(cache_index));
     DCHECK(!Heap::InYoungGeneration(heap_object));
     emit_write_barrier = false;
   } else if (bytecode == kPartialSnapshotCache) {
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 6e3f497d38..8dce1b3f3f 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -5,6 +5,7 @@
#ifndef V8_SNAPSHOT_DESERIALIZER_H_
#define V8_SNAPSHOT_DESERIALIZER_H_
 
+#include <utility>
 #include <vector>
 
 #include "src/objects/allocation-site.h"
@@ -39,6 +40,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   ~Deserializer() override;
 
   void SetRehashability(bool v) { can_rehash_ = v; }
+  std::pair<uint32_t, uint32_t> GetChecksum() const {
+    return source_.GetChecksum();
+  }
 
 protected:
  // Create a deserializer from a snapshot byte source.
@@ -65,7 +69,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
 
   // This returns the address of an object that has been described in the
   // snapshot by chunk index and offset.
-  HeapObject GetBackReferencedObject(int space);
+  HeapObject GetBackReferencedObject(SnapshotSpace space);
 
   // Add an object to back an attached reference. The order to add objects must
   // mirror the order they are added in the serializer.
@@ -122,11 +126,13 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   // object, i.e. if we are writing a series of tagged values that are not on
   // the heap. Return false if the object content has been deferred.
   template <typename TSlot>
-  bool ReadData(TSlot start, TSlot end, int space, Address object_address);
+  bool ReadData(TSlot start, TSlot end, SnapshotSpace space,
+                Address object_address);
 
   // A helper function for ReadData, templatized on the bytecode for efficiency.
   // Returns the new value of {current}.
-  template <typename TSlot, Bytecode bytecode, int space_number_if_any>
+  template <typename TSlot, Bytecode bytecode,
+            SnapshotSpace space_number_if_any>
   inline TSlot ReadDataCase(Isolate* isolate, TSlot current,
                             Address current_object_address, byte data,
                             bool write_barrier_needed);
@@ -135,8 +141,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   inline Address ReadExternalReferenceCase();
 
   HeapObject ReadObject();
-  HeapObject ReadObject(int space_number);
-  void ReadCodeObjectBody(int space_number, Address code_object_address);
+  HeapObject ReadObject(SnapshotSpace space_number);
+  void ReadCodeObjectBody(SnapshotSpace space_number,
+                          Address code_object_address);
 
 public:
  void VisitCodeTarget(Code host, RelocInfo* rinfo);
@@ -151,7 +158,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   TSlot ReadRepeatedObject(TSlot current, int repeat_count);
 
   // Special handling for serialized code like hooking up internalized strings.
-  HeapObject PostProcessNewObject(HeapObject obj, int space);
+  HeapObject PostProcessNewObject(HeapObject obj, SnapshotSpace space);
 
   // Objects from the attached object descriptions in the serialized user code.
   std::vector<Handle<HeapObject>> attached_objects_;
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index 4cee1ac131..f4183b4b87 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -127,7 +127,15 @@ void PlatformEmbeddedFileWriterGeneric::DeclareExternalFilename(
   fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
 }
 
-void PlatformEmbeddedFileWriterGeneric::FileEpilogue() {}
+void PlatformEmbeddedFileWriterGeneric::FileEpilogue() {
+  // Omitting this section can imply an executable stack, which is usually
+  // a linker warning/error. C++ compilers add these automatically, but
+  // compiling assembly requires the .note.GNU-stack section to be inserted
+  // manually.
+  // Additional documentation:
+  // https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
+  fprintf(fp_, ".section .note.GNU-stack,\"\",%%progbits\n");
+}
 
 int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
     DataDirective directive) {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index d0758cb42c..69457e11a5 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -11,7 +11,6 @@
#if defined(V8_OS_WIN_X64)
#include "src/builtins/builtins.h"
#include "src/diagnostics/unwinding-info-win64.h"
-#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#endif
 
@@ -570,11 +569,7 @@ void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
   // Replace any Windows style paths (backslashes) with forward
   // slashes.
   std::string fixed_filename(filename);
-  for (auto& c : fixed_filename) {
-    if (c == '\\') {
-      c = '/';
-    }
-  }
+  std::replace(fixed_filename.begin(), fixed_filename.end(), '\\', '/');
   fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
 }
 
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 6bf198230f..819f7009c2 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -259,12 +259,14 @@ int main(int argc, char** argv) {
     // Set code range such that relative jumps for builtins to
     // builtin calls in the snapshot are possible.
     i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-    size_t code_range_size =
+    size_t code_range_size_mb =
         i::kMaximalCodeRangeSize == 0
             ? i::kMaxPCRelativeCodeRangeInMB
            : std::min(i::kMaximalCodeRangeSize / i::MB,
                       i::kMaxPCRelativeCodeRangeInMB);
-    i_isolate->heap()->ConfigureHeap(0, 0, code_range_size);
+    v8::ResourceConstraints constraints;
+    constraints.set_code_range_size_in_bytes(code_range_size_mb * i::MB);
+    i_isolate->heap()->ConfigureHeap(constraints);
     // The isolate contains data from builtin compilation that needs
     // to be written out if builtins are embedded.
     i_isolate->RegisterEmbeddedFileWriter(&embedded_writer);
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index f294d33b5c..ea2136007b 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -16,7 +16,6 @@ namespace internal {
 
 enum NativeType {
   EXTRAS,
-  TEST
 };
 
 // Extra handling for V8_EXPORT_PRIVATE in combination with USING_V8_SHARED
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 9b56f129df..22854bf14a 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -73,7 +73,8 @@ void PartialDeserializer::DeserializeEmbedderFields(
     int space = code & kSpaceMask;
     DCHECK_LE(space, kNumberOfSpaces);
     DCHECK_EQ(code - space, kNewObject);
-    Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
+    Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(
+                             static_cast<SnapshotSpace>(space))),
                          isolate());
     int index = source()->GetInt();
     int size = source()->GetInt();
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 036f0a0414..7b4ffbb2bf 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -214,7 +214,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
     if (DataIsEmpty(data)) continue;
     // Restore original values from cleared fields.
     EmbedderDataSlot(js_obj, i).store_raw(original_embedder_values[i], no_gc);
-    embedder_fields_sink_.Put(kNewObject + reference.space(),
+    embedder_fields_sink_.Put(kNewObject + static_cast<int>(reference.space()),
                               "embedder field holder");
     embedder_fields_sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
     embedder_fields_sink_.PutInt(reference.chunk_offset(),
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index 576e644846..5ac5a6444a 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -21,7 +21,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
     V8::FatalProcessOutOfMemory(isolate, "ReadOnlyDeserializer");
   }
 
-  ReadOnlyHeap* ro_heap = isolate->heap()->read_only_heap();
+  ReadOnlyHeap* ro_heap = isolate->read_only_heap();
 
   // No active threads.
   DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index f4b45a15cc..4ddaf37773 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -67,7 +67,7 @@ void ReadOnlySerializer::FinalizeSerialization() {
#ifdef DEBUG
   // Check that every object on read-only heap is reachable (and was
   // serialized).
-  ReadOnlyHeapIterator iterator(isolate()->heap()->read_only_heap());
+  ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
   for (HeapObject object = iterator.Next(); !object.is_null();
        object = iterator.Next()) {
     CHECK(serialized_objects_.count(object));
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index e7c44236ac..c81e9a1e21 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -12,6 +12,30 @@
 namespace v8 {
 namespace internal {
 
+// TODO(goszczycki): Move this somewhere every file in src/snapshot can use it.
+// The spaces suported by the serializer. Spaces after LO_SPACE (NEW_LO_SPACE
+// and CODE_LO_SPACE) are not supported.
+enum class SnapshotSpace {
+  kReadOnlyHeap = RO_SPACE,
+  kNew = NEW_SPACE,
+  kOld = OLD_SPACE,
+  kCode = CODE_SPACE,
+  kMap = MAP_SPACE,
+  kLargeObject = LO_SPACE,
+  kNumberOfPreallocatedSpaces = kCode + 1,
+  kNumberOfSpaces = kLargeObject + 1,
+  kSpecialValueSpace = kNumberOfSpaces,
+  // Number of spaces which should be allocated by the heap. Eventually
+  // kReadOnlyHeap will move to the end of this enum and this will be equal to
+  // it.
+  kNumberOfHeapSpaces = kNumberOfSpaces,
+};
+
+constexpr bool IsPreAllocatedSpace(SnapshotSpace space) {
+  return static_cast<int>(space) <
+         static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
+}
+
 class SerializerReference {
  private:
   enum SpecialValueType {
@@ -21,33 +45,32 @@ class SerializerReference {
     kBuiltinReference,
   };
 
-  static const int kSpecialValueSpace = LAST_SPACE + 1;
-  STATIC_ASSERT(kSpecialValueSpace < (1 << kSpaceTagSize));
+  STATIC_ASSERT(static_cast<int>(SnapshotSpace::kSpecialValueSpace) <
+                (1 << kSpaceTagSize));
 
   SerializerReference(SpecialValueType type, uint32_t value)
-      : bitfield_(SpaceBits::encode(kSpecialValueSpace) |
+      : bitfield_(SpaceBits::encode(SnapshotSpace::kSpecialValueSpace) |
                   SpecialValueTypeBits::encode(type)),
         value_(value) {}
 
 public:
  SerializerReference() : SerializerReference(kInvalidValue, 0) {}
 
-  SerializerReference(uint32_t space, uint32_t chunk_index,
+  SerializerReference(SnapshotSpace space, uint32_t chunk_index,
                       uint32_t chunk_offset)
       : bitfield_(SpaceBits::encode(space) |
                   ChunkIndexBits::encode(chunk_index)),
         value_(chunk_offset) {}
 
-  static SerializerReference BackReference(AllocationSpace space,
+  static SerializerReference BackReference(SnapshotSpace space,
                                            uint32_t chunk_index,
                                            uint32_t chunk_offset) {
     DCHECK(IsAligned(chunk_offset, kObjectAlignment));
-    DCHECK_LT(space, LO_SPACE);
     return SerializerReference(space, chunk_index, chunk_offset);
   }
 
   static SerializerReference MapReference(uint32_t index) {
-    return SerializerReference(MAP_SPACE, 0, index);
+    return SerializerReference(SnapshotSpace::kMap, 0, index);
   }
 
   static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
@@ -55,7 +78,7 @@ class SerializerReference {
   }
 
   static SerializerReference LargeObjectReference(uint32_t index) {
-    return SerializerReference(LO_SPACE, 0, index);
+    return SerializerReference(SnapshotSpace::kLargeObject, 0, index);
   }
 
   static SerializerReference AttachedReference(uint32_t index) {
@@ -67,17 +90,17 @@ class SerializerReference {
   }
 
   bool is_valid() const {
-    return SpaceBits::decode(bitfield_) != kSpecialValueSpace ||
+    return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace ||
            SpecialValueTypeBits::decode(bitfield_) != kInvalidValue;
   }
 
   bool is_back_reference() const {
-    return SpaceBits::decode(bitfield_) <= LAST_SPACE;
+    return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace;
   }
 
-  AllocationSpace space() const {
+  SnapshotSpace space() const {
     DCHECK(is_back_reference());
-    return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
+    return SpaceBits::decode(bitfield_);
   }
 
   uint32_t chunk_offset() const {
@@ -86,17 +109,17 @@ class SerializerReference {
   }
 
   uint32_t chunk_index() const {
-    DCHECK(space() != MAP_SPACE && space() != LO_SPACE);
+    DCHECK(IsPreAllocatedSpace(space()));
     return ChunkIndexBits::decode(bitfield_);
   }
 
   uint32_t map_index() const {
-    DCHECK_EQ(MAP_SPACE, SpaceBits::decode(bitfield_));
+    DCHECK_EQ(SnapshotSpace::kMap, SpaceBits::decode(bitfield_));
     return value_;
   }
 
   bool is_off_heap_backing_store_reference() const {
-    return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+    return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
            SpecialValueTypeBits::decode(bitfield_) == kOffHeapBackingStore;
   }
 
@@ -106,12 +129,12 @@ class SerializerReference {
   }
 
   uint32_t large_object_index() const {
-    DCHECK_EQ(LO_SPACE, SpaceBits::decode(bitfield_));
+    DCHECK_EQ(SnapshotSpace::kLargeObject, SpaceBits::decode(bitfield_));
     return value_;
   }
 
   bool is_attached_reference() const {
-    return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+    return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
            SpecialValueTypeBits::decode(bitfield_) == kAttachedReference;
   }
 
@@ -121,7 +144,7 @@ class SerializerReference {
   }
 
   bool is_builtin_reference() const {
-    return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+    return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
            SpecialValueTypeBits::decode(bitfield_) == kBuiltinReference;
  }
 
@@ -131,7 +154,7 @@ class SerializerReference {
   }
 
 private:
-  class SpaceBits : public BitField<int, 0, kSpaceTagSize> {};
+  class SpaceBits : public BitField<SnapshotSpace, 0, kSpaceTagSize> {};
   class ChunkIndexBits
       : public BitField<uint32_t, SpaceBits::kNext, 32 - kSpaceTagSize> {};
   class SpecialValueTypeBits
diff --git a/deps/v8/src/snapshot/serializer-allocator.cc b/deps/v8/src/snapshot/serializer-allocator.cc
index 763244137f..a709715bdd 100644
--- a/deps/v8/src/snapshot/serializer-allocator.cc
+++ b/deps/v8/src/snapshot/serializer-allocator.cc
@@ -23,42 +23,42 @@ void SerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
   custom_chunk_size_ = chunk_size;
 }
 
-static uint32_t PageSizeOfSpace(int space) {
+static uint32_t PageSizeOfSpace(SnapshotSpace space) {
   return static_cast<uint32_t>(
       MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
           static_cast<AllocationSpace>(space)));
 }
 
-uint32_t SerializerAllocator::TargetChunkSize(int space) {
+uint32_t SerializerAllocator::TargetChunkSize(SnapshotSpace space) {
   if (custom_chunk_size_ == 0) return PageSizeOfSpace(space);
   DCHECK_LE(custom_chunk_size_, PageSizeOfSpace(space));
   return custom_chunk_size_;
 }
 
-SerializerReference SerializerAllocator::Allocate(AllocationSpace space,
+SerializerReference SerializerAllocator::Allocate(SnapshotSpace space,
                                                   uint32_t size) {
-  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
+  const int space_number = static_cast<int>(space);
+  DCHECK(IsPreAllocatedSpace(space));
   DCHECK(size > 0 && size <= PageSizeOfSpace(space));
 
   // Maps are allocated through AllocateMap.
-  DCHECK_NE(MAP_SPACE, space);
-  // We tenure large object allocations.
-  DCHECK_NE(NEW_LO_SPACE, space);
+  DCHECK_NE(SnapshotSpace::kMap, space);
 
-  uint32_t old_chunk_size = pending_chunk_[space];
+  uint32_t old_chunk_size = pending_chunk_[space_number];
   uint32_t new_chunk_size = old_chunk_size + size;
   // Start a new chunk if the new size exceeds the target chunk size.
   // We may exceed the target chunk size if the single object size does.
   if (new_chunk_size > TargetChunkSize(space) && old_chunk_size != 0) {
     serializer_->PutNextChunk(space);
-    completed_chunks_[space].push_back(pending_chunk_[space]);
-    pending_chunk_[space] = 0;
+    completed_chunks_[space_number].push_back(pending_chunk_[space_number]);
+    pending_chunk_[space_number] = 0;
     new_chunk_size = size;
   }
-  uint32_t offset = pending_chunk_[space];
-  pending_chunk_[space] = new_chunk_size;
+  uint32_t offset = pending_chunk_[space_number];
+  pending_chunk_[space_number] = new_chunk_size;
   return SerializerReference::BackReference(
-      space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
+      space, static_cast<uint32_t>(completed_chunks_[space_number].size()),
+      offset);
 }
 
 SerializerReference SerializerAllocator::AllocateMap() {
@@ -83,23 +83,25 @@ SerializerReference SerializerAllocator::AllocateOffHeapBackingStore() {
 
 bool SerializerAllocator::BackReferenceIsAlreadyAllocated(
     SerializerReference reference) const {
   DCHECK(reference.is_back_reference());
-  AllocationSpace space = reference.space();
-  if (space == LO_SPACE) {
+  SnapshotSpace space = reference.space();
+  if (space == SnapshotSpace::kLargeObject) {
     return reference.large_object_index() < seen_large_objects_index_;
-  } else if (space == MAP_SPACE) {
+  } else if (space == SnapshotSpace::kMap) {
     return reference.map_index() < num_maps_;
-  } else if (space == RO_SPACE &&
+  } else if (space == SnapshotSpace::kReadOnlyHeap &&
              serializer_->isolate()->heap()->deserialization_complete()) {
     // If not deserializing the isolate itself, then we create BackReferences
-    // for all RO_SPACE objects without ever allocating.
+    // for all read-only heap objects without ever allocating.
     return true;
   } else {
+    const int space_number = static_cast<int>(space);
     size_t chunk_index = reference.chunk_index();
-    if (chunk_index == completed_chunks_[space].size()) {
-      return reference.chunk_offset() < pending_chunk_[space];
+    if (chunk_index == completed_chunks_[space_number].size()) {
+      return reference.chunk_offset() < pending_chunk_[space_number];
     } else {
-      return chunk_index < completed_chunks_[space].size() &&
-             reference.chunk_offset() < completed_chunks_[space][chunk_index];
+      return chunk_index < completed_chunks_[space_number].size() &&
+             reference.chunk_offset() <
+                 completed_chunks_[space_number][chunk_index];
     }
   }
 }
@@ -109,7 +111,7 @@ std::vector<SerializedData::Reservation>
 SerializerAllocator::EncodeReservations() const {
   std::vector<SerializedData::Reservation> out;
 
-  for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
     for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
       out.emplace_back(completed_chunks_[i][j]);
     }
@@ -120,11 +122,14 @@ SerializerAllocator::EncodeReservations() const {
     out.back().mark_as_last();
   }
 
-  STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+  STATIC_ASSERT(SnapshotSpace::kMap ==
+                SnapshotSpace::kNumberOfPreallocatedSpaces);
   out.emplace_back(num_maps_ * Map::kSize);
   out.back().mark_as_last();
 
-  STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+  STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
+                static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
+                    1);
   out.emplace_back(large_objects_total_size_);
   out.back().mark_as_last();
 
@@ -136,21 +141,24 @@ void SerializerAllocator::OutputStatistics() {
   PrintF("  Spaces (bytes):\n");
 
-  for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
+  for (int space = 0; space < kNumberOfSpaces; space++) {
     PrintF("%16s",
           Heap::GetSpaceName(static_cast<AllocationSpace>(space)));
   }
   PrintF("\n");
 
-  for (int space = FIRST_SPACE; space < kNumberOfPreallocatedSpaces; space++) {
+  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
     size_t s = pending_chunk_[space];
     for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
     PrintF("%16zu", s);
   }
 
-  STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+  STATIC_ASSERT(SnapshotSpace::kMap ==
+                SnapshotSpace::kNumberOfPreallocatedSpaces);
   PrintF("%16d", num_maps_ * Map::kSize);
 
-  STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+  STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
+                static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
+                    1);
   PrintF("%16d\n", large_objects_total_size_);
 }
diff --git a/deps/v8/src/snapshot/serializer-allocator.h b/deps/v8/src/snapshot/serializer-allocator.h
index 0ca968f0fe..0d15c5a91b 100644
--- a/deps/v8/src/snapshot/serializer-allocator.h
+++ b/deps/v8/src/snapshot/serializer-allocator.h
@@ -16,7 +16,7 @@ class SerializerAllocator final {
 public:
  explicit SerializerAllocator(Serializer* serializer);
 
-  SerializerReference Allocate(AllocationSpace space, uint32_t size);
+  SerializerReference Allocate(SnapshotSpace space, uint32_t size);
   SerializerReference AllocateMap();
   SerializerReference AllocateLargeObject(uint32_t size);
   SerializerReference AllocateOffHeapBackingStore();
@@ -35,12 +35,12 @@ class SerializerAllocator final {
 private:
  // We try to not exceed this size for every chunk. We will not succeed for
  // larger objects though.
-  uint32_t TargetChunkSize(int space);
+  uint32_t TargetChunkSize(SnapshotSpace space);
 
   static constexpr int kNumberOfPreallocatedSpaces =
-      SerializerDeserializer::kNumberOfPreallocatedSpaces;
+      static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
   static constexpr int kNumberOfSpaces =
-      SerializerDeserializer::kNumberOfSpaces;
+      static_cast<int>(SnapshotSpace::kNumberOfSpaces);
 
   // Objects from the same space are put into chunks for bulk-allocation
   // when deserializing. We have to make sure that each chunk fits into a
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 30da8db662..c845a089a3 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -6,9 +6,9 @@
#define V8_SNAPSHOT_SERIALIZER_COMMON_H_
 
#include "src/base/bits.h"
+#include "src/base/memory.h"
#include "src/codegen/external-reference-table.h"
#include "src/common/globals.h"
-#include "src/common/v8memory.h"
#include "src/objects/visitors.h"
#include "src/sanitizer/msan.h"
#include "src/snapshot/references.h"
@@ -102,19 +102,6 @@ class SerializerDeserializer : public RootVisitor {
 public:
  static void Iterate(Isolate* isolate, RootVisitor* visitor);
 
-  // No reservation for large object space necessary.
-  // We also handle map space differenly.
-  STATIC_ASSERT(MAP_SPACE == CODE_SPACE + 1);
-
-  // We do not support young generation large objects and large code objects.
-  STATIC_ASSERT(LAST_SPACE == NEW_LO_SPACE);
-  STATIC_ASSERT(LAST_SPACE - 2 == LO_SPACE);
-  static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;
-
-  // The number of spaces supported by the serializer. Spaces after LO_SPACE
-  // (NEW_LO_SPACE and CODE_LO_SPACE) are not supported.
-  static const int kNumberOfSpaces = LO_SPACE + 1;
-
 protected:
  static bool CanBeDeferred(HeapObject o);
 
@@ -123,6 +110,12 @@ class SerializerDeserializer : public RootVisitor {
  void RestoreExternalReferenceRedirectors(
      const std::vector<CallHandlerInfo>& call_handler_infos);
 
+  static const int kNumberOfPreallocatedSpaces =
+      static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
+
+  static const int kNumberOfSpaces =
+      static_cast<int>(SnapshotSpace::kNumberOfSpaces);
+
  // clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
  V(0x06) V(0x07) V(0x0e) V(0x0f)      \
@@ -259,7 +252,7 @@ class SerializerDeserializer : public RootVisitor {
  //
  // Some other constants.
  //
-  static const int kAnyOldSpace = -1;
+  static const SnapshotSpace kAnyOldSpace = SnapshotSpace::kNumberOfSpaces;
 
  // Sentinel after a new object to indicate that double alignment is needed.
  static const int kDoubleAlignmentSentinel = 0;
@@ -344,12 +337,13 @@ class SerializedData {
 
 protected:
  void SetHeaderValue(uint32_t offset, uint32_t value) {
-    WriteLittleEndianValue(reinterpret_cast<Address>(data_) + offset, value);
+    base::WriteLittleEndianValue(reinterpret_cast<Address>(data_) + offset,
+                                 value);
  }
 
  uint32_t GetHeaderValue(uint32_t offset) const {
-    return ReadLittleEndianValue<uint32_t>(reinterpret_cast<Address>(data_) +
-                                           offset);
+    return base::ReadLittleEndianValue<uint32_t>(
+        reinterpret_cast<Address>(data_) + offset);
  }
 
  void AllocateData(uint32_t size);
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index b2dd6a33e7..5b68aaa87b 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -27,7 +27,7 @@ Serializer::Serializer(Isolate* isolate)
       allocator_(this) {
#ifdef OBJECT_PRINT
   if (FLAG_serialization_statistics) {
-    for (int space = 0; space < LAST_SPACE; ++space) {
+    for (int space = 0; space < kNumberOfSpaces; ++space) {
       instance_type_count_[space] = NewArray<int>(kInstanceTypes);
       instance_type_size_[space] = NewArray<size_t>(kInstanceTypes);
       for (int i = 0; i < kInstanceTypes; i++) {
@@ -36,7 +36,7 @@ Serializer::Serializer(Isolate* isolate)
       }
     }
   } else {
-    for (int space = 0; space < LAST_SPACE; ++space) {
+    for (int space = 0; space < kNumberOfSpaces; ++space) {
       instance_type_count_[space] = nullptr;
       instance_type_size_[space] = nullptr;
     }
@@ -47,7 +47,7 @@ Serializer::Serializer(Isolate* isolate)
 Serializer::~Serializer() {
   if (code_address_map_ != nullptr) delete code_address_map_;
#ifdef OBJECT_PRINT
-  for (int space = 0; space < LAST_SPACE; ++space) {
+  for (int space = 0; space < kNumberOfSpaces; ++space) {
     if (instance_type_count_[space] != nullptr) {
       DeleteArray(instance_type_count_[space]);
       DeleteArray(instance_type_size_[space]);
@@ -57,10 +57,11 @@ Serializer::~Serializer() {
 }
 
#ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map map, int size, AllocationSpace space) {
+void Serializer::CountInstanceType(Map map, int size, SnapshotSpace space) {
+  const int space_number = static_cast<int>(space);
   int instance_type = map.instance_type();
-  instance_type_count_[space][instance_type]++;
-  instance_type_size_[space][instance_type] += size;
+  instance_type_count_[space_number][instance_type]++;
+  instance_type_size_[space_number][instance_type] += size;
 }
#endif  // OBJECT_PRINT
 
@@ -73,7 +74,7 @@ void Serializer::OutputStatistics(const char* name) {
#ifdef OBJECT_PRINT
  PrintF("  Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name)                                          \
-  for (int space = 0; space < LAST_SPACE; ++space) {                      \
+  for (int space = 0; space < kNumberOfSpaces; ++space) {                 \
     if (instance_type_count_[space][Name]) {                              \
       PrintF("%10d %10zu  %-10s %s\n", instance_type_count_[space][Name], \
              instance_type_size_[space][Name],                            \
@@ -173,8 +174,8 @@ bool Serializer::SerializeBackReference(HeapObject obj) {
     }
     PutAlignmentPrefix(obj);
-    AllocationSpace space = reference.space();
-    sink_.Put(kBackref + space, "BackRef");
+    SnapshotSpace space = reference.space();
+    sink_.Put(kBackref + static_cast<int>(space), "BackRef");
     PutBackReference(obj, reference);
   }
   return true;
@@ -221,11 +222,11 @@ void Serializer::PutBackReference(HeapObject object,
                                   SerializerReference reference) {
   DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
   switch (reference.space()) {
-    case MAP_SPACE:
+    case SnapshotSpace::kMap:
       sink_.PutInt(reference.map_index(), "BackRefMapIndex");
       break;
 
-    case LO_SPACE:
+    case SnapshotSpace::kLargeObject:
       sink_.PutInt(reference.large_object_index(), "BackRefLargeObjectIndex");
       break;
 
@@ -255,9 +256,9 @@ int Serializer::PutAlignmentPrefix(HeapObject object) {
   return 0;
 }
 
-void Serializer::PutNextChunk(int space) {
+void Serializer::PutNextChunk(SnapshotSpace space) {
   sink_.Put(kNextChunk, "NextChunk");
-  sink_.Put(space, "NextChunkSpace");
+  sink_.Put(static_cast<int>(space), "NextChunkSpace");
 }
 
 void Serializer::PutRepeat(int repeat_count) {
@@ -298,7 +299,7 @@ Code Serializer::CopyCode(Code code) {
       reinterpret_cast<Address>(&code_buffer_.front())));
 }
 
-void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
+void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
                                                      int size, Map map) {
   if (serializer_->code_address_map_) {
     const char* code_name =
@@ -307,22 +308,23 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
         CodeNameEvent(object_.address(), sink_->Position(), code_name));
   }
 
+  const int space_number = static_cast<int>(space);
   SerializerReference back_reference;
-  if (space == LO_SPACE) {
-    sink_->Put(kNewObject + space, "NewLargeObject");
+  if (space == SnapshotSpace::kLargeObject) {
+    sink_->Put(kNewObject + space_number, "NewLargeObject");
     sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
     CHECK(!object_.IsCode());
     back_reference = serializer_->allocator()->AllocateLargeObject(size);
-  } else if (space == MAP_SPACE) {
+  } else if (space == SnapshotSpace::kMap) {
     DCHECK_EQ(Map::kSize, size);
     back_reference = serializer_->allocator()->AllocateMap();
-    sink_->Put(kNewObject + space, "NewMap");
+    sink_->Put(kNewObject + space_number, "NewMap");
     // This is redundant, but we include it anyways.
     sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
   } else {
     int fill = serializer_->PutAlignmentPrefix(object_);
     back_reference = serializer_->allocator()->Allocate(space, size + fill);
-    sink_->Put(kNewObject + space, "NewObject");
+    sink_->Put(kNewObject + space_number, "NewObject");
     sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
   }
 
@@ -468,8 +470,9 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
         ExternalTwoByteString::cast(string).resource()->data());
   }
 
-  AllocationSpace space =
-      (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
+  SnapshotSpace space = (allocation_size > kMaxRegularHeapObjectSize)
+                            ? SnapshotSpace::kLargeObject
+                            : SnapshotSpace::kOld;
   SerializePrologue(space, allocation_size, map);
 
   // Output the rest of the imaginary string.
@@ -534,8 +537,8 @@ void Serializer::ObjectSerializer::Serialize() {
     SerializeExternalString();
     return;
   } else if (!ReadOnlyHeap::Contains(object_)) {
-    // Only clear padding for strings outside RO_SPACE. RO_SPACE should have
-    // been cleared elsewhere.
+    // Only clear padding for strings outside the read-only heap. Read-only heap
+    // should have been cleared elsewhere.
     if (object_.IsSeqOneByteString()) {
       // Clear padding bytes at the end. Done here to avoid having to do this
       // at allocation sites in generated code.
@@ -568,11 +571,21 @@ void Serializer::ObjectSerializer::Serialize() {
 void Serializer::ObjectSerializer::SerializeObject() {
   int size = object_.Size();
   Map map = object_.map();
-  AllocationSpace space =
-      MemoryChunk::FromHeapObject(object_)->owner()->identity();
-  // Young generation large objects are tenured.
-  if (space == NEW_LO_SPACE) {
-    space = LO_SPACE;
+  SnapshotSpace space;
+  if (ReadOnlyHeap::Contains(object_)) {
+    space = SnapshotSpace::kReadOnlyHeap;
+  } else {
+    AllocationSpace heap_space =
+        MemoryChunk::FromHeapObject(object_)->owner_identity();
+    // Large code objects are not supported and cannot be expressed by
+    // SnapshotSpace.
+    DCHECK_NE(heap_space, CODE_LO_SPACE);
+    // Young generation large objects are tenured.
+    if (heap_space == NEW_LO_SPACE) {
+      space = SnapshotSpace::kLargeObject;
+    } else {
+      space = static_cast<SnapshotSpace>(heap_space);
+    }
   }
   SerializePrologue(space, size, map);
 
@@ -612,7 +625,8 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
   bytes_processed_so_far_ = kTaggedSize;
 
   serializer_->PutAlignmentPrefix(object_);
-  sink_->Put(kNewObject + back_reference.space(), "deferred object");
+  sink_->Put(kNewObject + static_cast<int>(back_reference.space()),
+             "deferred object");
   serializer_->PutBackReference(object_, back_reference);
   sink_->PutInt(size >> kTaggedSizeLog2, "deferred object size");
 
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index b70c7fd45a..fad2ec8a88 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -205,7 +205,7 @@ class Serializer : public SerializerDeserializer {
   void PutAttachedReference(SerializerReference reference);
   // Emit alignment prefix if necessary, return required padding space in bytes.
   int PutAlignmentPrefix(HeapObject object);
-  void PutNextChunk(int space);
+  void PutNextChunk(SnapshotSpace space);
   void PutRepeat(int repeat_count);
 
   // Returns true if the object was successfully serialized as a root.
@@ -243,7 +243,7 @@ class Serializer : public SerializerDeserializer {
   void OutputStatistics(const char* name);
 
#ifdef OBJECT_PRINT
-  void CountInstanceType(Map map, int size, AllocationSpace space);
+  void CountInstanceType(Map map, int size, SnapshotSpace space);
#endif  // OBJECT_PRINT
 
#ifdef DEBUG
@@ -272,8 +272,8 @@ class Serializer : public SerializerDeserializer {
 
#ifdef OBJECT_PRINT
   static const int kInstanceTypes = LAST_TYPE + 1;
-  int* instance_type_count_[LAST_SPACE];
-  size_t* instance_type_size_[LAST_SPACE];
+  int* instance_type_count_[kNumberOfSpaces];
+  size_t* instance_type_size_[kNumberOfSpaces];
#endif  // OBJECT_PRINT
 
#ifdef DEBUG
@@ -321,7 +321,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
   void VisitOffHeapTarget(Code host, RelocInfo* target) override;
 
 private:
-  void SerializePrologue(AllocationSpace space, int size, Map map);
+  void SerializePrologue(SnapshotSpace space, int size, Map map);
 
   // This function outputs or skips the raw data between the last pointer and
   // up to the current position.
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 61396aaa71..f20f2ad33f 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -5,7 +5,10 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
#define V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
 
+#include <utility>
+
#include "src/base/logging.h"
+#include "src/snapshot/serializer-common.h"
#include "src/utils/utils.h"
 
 namespace v8 {
@@ -66,6 +69,11 @@ class SnapshotByteSource final {
   int position() { return position_; }
   void set_position(int position) { position_ = position; }
 
+  std::pair<uint32_t, uint32_t> GetChecksum() const {
+    Checksum checksum(Vector<const byte>(data_, length_));
+    return {checksum.a(), checksum.b()};
+  }
+
 private:
  const byte* data_;
  int length_;
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index ef933ef83a..4a4da9f755 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -8,7 +8,6 @@
 
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/objects/objects-inl.h"
#include "src/utils/utils.h"
 
 namespace v8 {
@@ -99,11 +98,12 @@ class Snapshot : public AllStatic {
                                  uint32_t index);
 
   static uint32_t GetHeaderValue(const v8::StartupData* data, uint32_t offset) {
-    return ReadLittleEndianValue<uint32_t>(
+    return base::ReadLittleEndianValue<uint32_t>(
         reinterpret_cast<Address>(data->data) + offset);
   }
   static void SetHeaderValue(char* data, uint32_t offset, uint32_t value) {
-    WriteLittleEndianValue(reinterpret_cast<Address>(data) + offset, value);
+    base::WriteLittleEndianValue(reinterpret_cast<Address>(data) + offset,
+                                 value);
   }
 
   static void CheckVersion(const v8::StartupData* data);
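Two of the smaller changes above deserve a note. The FileEpilogue() change in platform-embedded-file-writer-generic.cc appends an empty .note.GNU-stack section to the generated assembly; without it, GNU linkers may assume the object wants an executable stack and warn or fail. A minimal standalone sketch of the same pattern (not the patch's writer class):

    #include <cstdio>

    // Emit the epilogue of a hand-written assembly file. The doubled %% in
    // the format string produces a single % in the output, so the emitted
    // line is:
    //   .section .note.GNU-stack,"",%progbits
    void WriteFileEpilogue(std::FILE* fp) {
      std::fprintf(fp, ".section .note.GNU-stack,\"\",%%progbits\n");
    }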
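The new GetChecksum() accessors expose the snapshot byte source's checksum as a pair of 32-bit values, via the a()/b() accessors of V8's Checksum helper. That helper is not part of this diff, so the sketch below only illustrates what such a two-component (Fletcher/Adler-style) pair conventionally holds; its arithmetic is an assumption, not V8's implementation:

    #include <cstddef>
    #include <cstdint>
    #include <utility>

    // Adler-32-style running pair: 'a' sums the bytes, 'b' sums the sums.
    std::pair<uint32_t, uint32_t> ComputePairChecksum(const uint8_t* data,
                                                      size_t length) {
      const uint32_t kMod = 65521;  // largest prime below 2^16
      uint32_t a = 1, b = 0;
      for (size_t i = 0; i < length; ++i) {
        a = (a + data[i]) % kMod;
        b = (b + a) % kMod;
      }
      return {a, b};
    }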