author     Michaël Zasso <targos@protonmail.com>    2018-01-24 20:16:06 +0100
committer  Myles Borins <mylesborins@google.com>    2018-01-24 15:02:20 -0800
commit     4c4af643e5042d615a60c6bbc05aee9d81b903e5 (patch)
tree       3fb0a97988fe4439ae3ae06f26915d1dcf8cab92 /deps/v8/src/snapshot
parent     fa9f31a4fda5a3782c652e56e394465805ebb50f (diff)
deps: update V8 to 6.4.388.40
PR-URL: https://github.com/nodejs/node/pull/17489
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/snapshot')
-rw-r--r--  deps/v8/src/snapshot/OWNERS                               1
-rw-r--r--  deps/v8/src/snapshot/builtin-deserializer-allocator.cc    289
-rw-r--r--  deps/v8/src/snapshot/builtin-deserializer-allocator.h     132
-rw-r--r--  deps/v8/src/snapshot/builtin-deserializer.cc              254
-rw-r--r--  deps/v8/src/snapshot/builtin-deserializer.h               82
-rw-r--r--  deps/v8/src/snapshot/builtin-serializer-allocator.cc      67
-rw-r--r--  deps/v8/src/snapshot/builtin-serializer-allocator.h       52
-rw-r--r--  deps/v8/src/snapshot/builtin-serializer.cc                66
-rw-r--r--  deps/v8/src/snapshot/builtin-serializer.h                 33
-rw-r--r--  deps/v8/src/snapshot/builtin-snapshot-utils.cc            67
-rw-r--r--  deps/v8/src/snapshot/builtin-snapshot-utils.h             56
-rw-r--r--  deps/v8/src/snapshot/code-serializer.cc                   12
-rw-r--r--  deps/v8/src/snapshot/default-deserializer-allocator.cc    246
-rw-r--r--  deps/v8/src/snapshot/default-deserializer-allocator.h     102
-rw-r--r--  deps/v8/src/snapshot/default-serializer-allocator.cc      7
-rw-r--r--  deps/v8/src/snapshot/default-serializer-allocator.h       1
-rw-r--r--  deps/v8/src/snapshot/deserializer.cc                      430
-rw-r--r--  deps/v8/src/snapshot/deserializer.h                       94
-rw-r--r--  deps/v8/src/snapshot/mksnapshot.cc                        20
-rw-r--r--  deps/v8/src/snapshot/natives-common.cc                    2
-rw-r--r--  deps/v8/src/snapshot/natives-external.cc                  14
-rw-r--r--  deps/v8/src/snapshot/object-deserializer.cc               10
-rw-r--r--  deps/v8/src/snapshot/object-deserializer.h                2
-rw-r--r--  deps/v8/src/snapshot/partial-deserializer.cc              20
-rw-r--r--  deps/v8/src/snapshot/partial-deserializer.h               5
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.cc                20
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.h                 3
-rw-r--r--  deps/v8/src/snapshot/serializer-common.cc                 10
-rw-r--r--  deps/v8/src/snapshot/serializer-common.h                  11
-rw-r--r--  deps/v8/src/snapshot/serializer.cc                        119
-rw-r--r--  deps/v8/src/snapshot/serializer.h                         19
-rw-r--r--  deps/v8/src/snapshot/snapshot-common.cc                   48
-rw-r--r--  deps/v8/src/snapshot/snapshot-empty.cc                    3
-rw-r--r--  deps/v8/src/snapshot/snapshot-external.cc                 4
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.cc              2
-rw-r--r--  deps/v8/src/snapshot/snapshot.h                           6
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.cc              18
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.h               4
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.cc                70
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.h                 11
40 files changed, 1662 insertions, 750 deletions
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index 5729fbfba2..e158e4d92b 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -1,6 +1,7 @@
set noparent
jgruber@chromium.org
+petermarshall@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.cc b/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
new file mode 100644
index 0000000000..59cab6d40a
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
@@ -0,0 +1,289 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/builtin-deserializer-allocator.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/interpreter/interpreter.h"
+#include "src/snapshot/builtin-deserializer.h"
+#include "src/snapshot/deserializer.h"
+
+namespace v8 {
+namespace internal {
+
+using interpreter::Bytecodes;
+using interpreter::Interpreter;
+
+BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
+ Deserializer<BuiltinDeserializerAllocator>* deserializer)
+ : deserializer_(deserializer) {}
+
+BuiltinDeserializerAllocator::~BuiltinDeserializerAllocator() {
+ delete handler_allocations_;
+}
+
+namespace {
+int HandlerAllocationIndex(int code_object_id) {
+ return code_object_id - BuiltinSnapshotUtils::kFirstHandlerIndex;
+}
+} // namespace
+
+Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
+ int size) {
+ const int code_object_id = deserializer()->CurrentCodeObjectId();
+ DCHECK_NE(BuiltinDeserializer::kNoCodeObjectId, code_object_id);
+ DCHECK_EQ(CODE_SPACE, space);
+ DCHECK_EQ(deserializer()->ExtractCodeObjectSize(code_object_id), size);
+#ifdef DEBUG
+ RegisterCodeObjectAllocation(code_object_id);
+#endif
+
+ if (BSU::IsBuiltinIndex(code_object_id)) {
+ Object* obj = isolate()->builtins()->builtin(code_object_id);
+ DCHECK(Internals::HasHeapObjectTag(obj));
+ return HeapObject::cast(obj)->address();
+ } else if (BSU::IsHandlerIndex(code_object_id)) {
+ if (handler_allocation_ != nullptr) {
+ // Lazy deserialization.
+ DCHECK_NULL(handler_allocations_);
+ return handler_allocation_;
+ } else {
+ // Eager deserialization.
+ DCHECK_NULL(handler_allocation_);
+ DCHECK_NOT_NULL(handler_allocations_);
+ int index = HandlerAllocationIndex(code_object_id);
+ DCHECK_NOT_NULL(handler_allocations_->at(index));
+ return handler_allocations_->at(index);
+ }
+ }
+
+ UNREACHABLE();
+}
+
+Heap::Reservation
+BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
+ Heap::Reservation result;
+
+ // Reservations for builtins.
+
+ // DeserializeLazy is always the first builtin reservation (to simplify logic
+ // in InitializeBuiltinsTable).
+ {
+ DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
+ uint32_t builtin_size =
+ deserializer()->ExtractCodeObjectSize(Builtins::kDeserializeLazy);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+ result.push_back({builtin_size, nullptr, nullptr});
+ }
+
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ if (i == Builtins::kDeserializeLazy) continue;
+
+ // Skip lazy builtins. These will be replaced by the DeserializeLazy code
+ // object in InitializeFromReservations and thus require no reserved space.
+ if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
+ continue;
+ }
+
+ uint32_t builtin_size = deserializer()->ExtractCodeObjectSize(i);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+ result.push_back({builtin_size, nullptr, nullptr});
+ }
+
+ // Reservations for bytecode handlers.
+
+ BSU::ForEachBytecode(
+ [=, &result](Bytecode bytecode, OperandScale operand_scale) {
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+ // Bytecodes without a handler don't require a reservation.
+ return;
+ } else if (FLAG_lazy_handler_deserialization &&
+ deserializer()->IsLazyDeserializationEnabled() &&
+ Bytecodes::IsLazy(bytecode)) {
+ // Skip lazy handlers. These will be replaced by the DeserializeLazy
+ // code object in InitializeFromReservations and thus require no
+ // reserved space.
+ return;
+ }
+
+ const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
+ uint32_t handler_size = deserializer()->ExtractCodeObjectSize(index);
+ DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+ result.push_back({handler_size, nullptr, nullptr});
+ });
+
+ return result;
+}
+
+void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
+ const Heap::Chunk& chunk, int builtin_id) {
+ DCHECK_EQ(deserializer()->ExtractCodeObjectSize(builtin_id), chunk.size);
+ DCHECK_EQ(chunk.size, chunk.end - chunk.start);
+
+ SkipList::Update(chunk.start, chunk.size);
+ isolate()->builtins()->set_builtin(builtin_id,
+ HeapObject::FromAddress(chunk.start));
+
+#ifdef DEBUG
+ RegisterCodeObjectReservation(builtin_id);
+#endif
+}
+
+void BuiltinDeserializerAllocator::InitializeHandlerFromReservation(
+ const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale) {
+ DCHECK_EQ(deserializer()->ExtractCodeObjectSize(
+ BSU::BytecodeToIndex(bytecode, operand_scale)),
+ chunk.size);
+ DCHECK_EQ(chunk.size, chunk.end - chunk.start);
+
+ SkipList::Update(chunk.start, chunk.size);
+
+ DCHECK_NOT_NULL(handler_allocations_);
+ const int index =
+ HandlerAllocationIndex(BSU::BytecodeToIndex(bytecode, operand_scale));
+ handler_allocations_->at(index) = chunk.start;
+
+#ifdef DEBUG
+ RegisterCodeObjectReservation(BSU::BytecodeToIndex(bytecode, operand_scale));
+#endif
+}
+
+void BuiltinDeserializerAllocator::InitializeFromReservations(
+ const Heap::Reservation& reservation) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+
+ // Initialize the builtins table.
+
+ Builtins* builtins = isolate()->builtins();
+ int reservation_index = 0;
+
+ // Other builtins can be replaced by DeserializeLazy so it may not be lazy.
+ // It always occupies the first reservation slot.
+ {
+ DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
+ InitializeBuiltinFromReservation(reservation[reservation_index],
+ Builtins::kDeserializeLazy);
+ reservation_index++;
+ }
+
+ Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
+
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ if (i == Builtins::kDeserializeLazy) continue;
+
+ if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
+ builtins->set_builtin(i, deserialize_lazy);
+ } else {
+ InitializeBuiltinFromReservation(reservation[reservation_index], i);
+ reservation_index++;
+ }
+ }
+
+ // Initialize interpreter bytecode handler reservations.
+
+ DCHECK_NULL(handler_allocations_);
+ handler_allocations_ = new std::vector<Address>(BSU::kNumberOfHandlers);
+
+ BSU::ForEachBytecode(
+ [=, &reservation_index](Bytecode bytecode, OperandScale operand_scale) {
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+ // Bytecodes without a handler don't have a reservation.
+ return;
+ } else if (FLAG_lazy_handler_deserialization &&
+ deserializer()->IsLazyDeserializationEnabled() &&
+ Bytecodes::IsLazy(bytecode)) {
+ // Likewise, bytecodes with lazy handlers don't either.
+ return;
+ }
+
+ InitializeHandlerFromReservation(reservation[reservation_index],
+ bytecode, operand_scale);
+ reservation_index++;
+ });
+
+ DCHECK_EQ(reservation.size(), reservation_index);
+}
+
+void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
+ int builtin_id) {
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ DCHECK(isolate()->builtins()->is_initialized());
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
+ DCHECK_EQ(Builtins::kDeserializeLazy,
+ isolate()->builtins()->builtin(builtin_id)->builtin_index());
+
+ const uint32_t builtin_size =
+ deserializer()->ExtractCodeObjectSize(builtin_id);
+ DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+
+ Handle<HeapObject> o =
+ isolate()->factory()->NewCodeForDeserialization(builtin_size);
+
+ // Note: After this point and until deserialization finishes, heap allocation
+ // is disallowed. We currently can't safely assert this since we'd need to
+ // pass the DisallowHeapAllocation scope out of this function.
+
+ // Write the allocated filler object into the builtins table. It will be
+ // returned by our custom Allocate method below once needed.
+
+ isolate()->builtins()->set_builtin(builtin_id, *o);
+
+#ifdef DEBUG
+ RegisterCodeObjectReservation(builtin_id);
+#endif
+}
+
+void BuiltinDeserializerAllocator::ReserveForHandler(
+ Bytecode bytecode, OperandScale operand_scale) {
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
+
+ const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
+ const uint32_t handler_size =
+ deserializer()->ExtractCodeObjectSize(code_object_id);
+ DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+
+ handler_allocation_ =
+ isolate()->factory()->NewCodeForDeserialization(handler_size)->address();
+
+// Note: After this point and until deserialization finishes, heap allocation
+// is disallowed. We currently can't safely assert this since we'd need to
+// pass the DisallowHeapAllocation scope out of this function.
+
+#ifdef DEBUG
+ RegisterCodeObjectReservation(code_object_id);
+#endif
+}
+
+#ifdef DEBUG
+void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
+ int code_object_id) {
+ const auto result = unused_reservations_.emplace(code_object_id);
+ CHECK(result.second); // False, iff builtin_id was already present in set.
+}
+
+void BuiltinDeserializerAllocator::RegisterCodeObjectAllocation(
+ int code_object_id) {
+ const size_t removed_elems = unused_reservations_.erase(code_object_id);
+ CHECK_EQ(removed_elems, 1);
+}
+
+bool BuiltinDeserializerAllocator::ReservationsAreFullyUsed() const {
+ // Not 100% precise but should be good enough.
+ return unused_reservations_.empty();
+}
+#endif // DEBUG
+
+Isolate* BuiltinDeserializerAllocator::isolate() const {
+ return deserializer()->isolate();
+}
+
+BuiltinDeserializer* BuiltinDeserializerAllocator::deserializer() const {
+ return static_cast<BuiltinDeserializer*>(deserializer_);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.h b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
new file mode 100644
index 0000000000..6fc7bfaf6b
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
@@ -0,0 +1,132 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
+
+#include <unordered_set>
+
+#include "src/globals.h"
+#include "src/heap/heap.h"
+#include "src/interpreter/interpreter.h"
+#include "src/snapshot/serializer-common.h"
+
+namespace v8 {
+namespace internal {
+
+template <class AllocatorT>
+class Deserializer;
+
+class BuiltinDeserializer;
+class BuiltinSnapshotUtils;
+
+class BuiltinDeserializerAllocator final {
+ using BSU = BuiltinSnapshotUtils;
+ using Bytecode = interpreter::Bytecode;
+ using OperandScale = interpreter::OperandScale;
+
+ public:
+ BuiltinDeserializerAllocator(
+ Deserializer<BuiltinDeserializerAllocator>* deserializer);
+
+ ~BuiltinDeserializerAllocator();
+
+ // ------- Allocation Methods -------
+ // Methods related to memory allocation during deserialization.
+
+ // Allocation works differently here than in other deserializers. Instead of
+ // a statically-known memory area determined at serialization-time, our
+ // memory requirements here are determined at runtime. Another major
+ // difference is that we create builtin Code objects up-front (before
+ // deserialization) in order to avoid having to patch builtin references
+ // later on. See also the kBuiltin case in deserializer.cc.
+ //
+ // There are three ways that we use to reserve / allocate space. In all
+ // cases, required objects are requested from the GC prior to
+ // deserialization. 1. pre-allocated builtin code objects are written into
+ // the builtins table (this is to make deserialization of builtin references
+ // easier). Pre-allocated handler code objects are 2. stored in the
+ // {handler_allocations_} vector (at eager-deserialization time) and 3.
+ // stored in {handler_allocation_} (at lazy-deserialization time).
+ //
+ // Allocate simply returns the pre-allocated object prepared by
+ // InitializeFromReservations.
+ Address Allocate(AllocationSpace space, int size);
+
+ void MoveToNextChunk(AllocationSpace space) { UNREACHABLE(); }
+ void SetAlignment(AllocationAlignment alignment) { UNREACHABLE(); }
+
+ HeapObject* GetMap(uint32_t index) { UNREACHABLE(); }
+ HeapObject* GetLargeObject(uint32_t index) { UNREACHABLE(); }
+ HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ UNREACHABLE();
+ }
+
+ // ------- Reservation Methods -------
+ // Methods related to memory reservations (prior to deserialization).
+
+ // Builtin deserialization does not bake reservations into the snapshot, hence
+ // this is a nop.
+ void DecodeReservation(Vector<const SerializedData::Reservation> res) {}
+
+ // These methods are used to pre-allocate builtin objects prior to
+ // deserialization.
+ // TODO(jgruber): Refactor reservation/allocation logic in deserializers to
+ // make this less messy.
+ Heap::Reservation CreateReservationsForEagerBuiltinsAndHandlers();
+ void InitializeFromReservations(const Heap::Reservation& reservation);
+
+ // Creates reservations and initializes the builtins table in preparation for
+ // lazily deserializing a single builtin.
+ void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
+
+ // Pre-allocates a code object in preparation for lazily deserializing a
+ // single handler.
+ void ReserveForHandler(Bytecode bytecode, OperandScale operand_scale);
+
+#ifdef DEBUG
+ bool ReservationsAreFullyUsed() const;
+#endif
+
+ private:
+ Isolate* isolate() const;
+ BuiltinDeserializer* deserializer() const;
+
+ // Used after memory allocation prior to isolate initialization, to register
+ // the newly created object in code space and add it to the builtins table.
+ void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
+ int builtin_id);
+
+ // As above, but for interpreter bytecode handlers.
+ void InitializeHandlerFromReservation(
+ const Heap::Chunk& chunk, interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale);
+
+#ifdef DEBUG
+ void RegisterCodeObjectReservation(int code_object_id);
+ void RegisterCodeObjectAllocation(int code_object_id);
+ std::unordered_set<int> unused_reservations_;
+#endif
+
+ private:
+ // The current deserializer. Note that this always points to a
+ // BuiltinDeserializer instance, but we can't perform the cast during
+ // construction since that makes vtable-based checks fail.
+ Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
+
+ // Stores allocated space for bytecode handlers during eager deserialization.
+ std::vector<Address>* handler_allocations_ = nullptr;
+
+ // Stores the allocated space for a single handler during lazy
+ // deserialization.
+ Address handler_allocation_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
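
The comment block in this header describes an allocation scheme that never carves out fresh memory during deserialization: every code object is requested from the GC beforehand, and Allocate() merely hands back the address that was prepared for the object currently being deserialized. A minimal standalone sketch of that idea, in plain C++ with made-up types rather than V8's internals:

// Sketch only: models an allocator whose Allocate() returns addresses that
// were reserved up front, keyed by a code-object id.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

class PreReservedAllocator {
 public:
  // Reserve `size` bytes for the object with the given id before
  // deserialization starts.
  void Reserve(int code_object_id, size_t size) {
    reservations_[code_object_id] = std::vector<uint8_t>(size);
  }

  // During deserialization, no real allocation happens; the buffer reserved
  // for the current code object is simply returned.
  void* Allocate(int code_object_id, size_t size) {
    auto it = reservations_.find(code_object_id);
    assert(it != reservations_.end() && it->second.size() == size);
    return it->second.data();
  }

 private:
  std::map<int, std::vector<uint8_t>> reservations_;
};

int main() {
  PreReservedAllocator allocator;
  allocator.Reserve(/*code_object_id=*/7, /*size=*/128);
  assert(allocator.Allocate(7, 128) != nullptr);
}
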
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
index fb41a9fec9..53a0f30612 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.cc
+++ b/deps/v8/src/snapshot/builtin-deserializer.cc
@@ -5,80 +5,128 @@
#include "src/snapshot/builtin-deserializer.h"
#include "src/assembler-inl.h"
+#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-// Tracks the builtin currently being deserialized (required for allocation).
-class DeserializingBuiltinScope {
+using interpreter::Bytecodes;
+using interpreter::Interpreter;
+
+// Tracks the code object currently being deserialized (required for
+// allocation).
+class DeserializingCodeObjectScope {
public:
- DeserializingBuiltinScope(BuiltinDeserializer* builtin_deserializer,
- int builtin_id)
+ DeserializingCodeObjectScope(BuiltinDeserializer* builtin_deserializer,
+ int code_object_id)
: builtin_deserializer_(builtin_deserializer) {
- DCHECK_EQ(BuiltinDeserializer::kNoBuiltinId,
- builtin_deserializer->current_builtin_id_);
- builtin_deserializer->current_builtin_id_ = builtin_id;
+ DCHECK_EQ(BuiltinDeserializer::kNoCodeObjectId,
+ builtin_deserializer->current_code_object_id_);
+ builtin_deserializer->current_code_object_id_ = code_object_id;
}
- ~DeserializingBuiltinScope() {
- builtin_deserializer_->current_builtin_id_ =
- BuiltinDeserializer::kNoBuiltinId;
+ ~DeserializingCodeObjectScope() {
+ builtin_deserializer_->current_code_object_id_ =
+ BuiltinDeserializer::kNoCodeObjectId;
}
private:
BuiltinDeserializer* builtin_deserializer_;
- DISALLOW_COPY_AND_ASSIGN(DeserializingBuiltinScope)
+ DISALLOW_COPY_AND_ASSIGN(DeserializingCodeObjectScope)
};
BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
const BuiltinSnapshotData* data)
: Deserializer(data, false) {
- // We may have to relax this at some point to pack reloc infos and handler
- // tables into the builtin blob (instead of the partial snapshot cache).
- DCHECK(ReservesOnlyCodeSpace());
-
- builtin_offsets_ = data->BuiltinOffsets();
- DCHECK_EQ(Builtins::builtin_count, builtin_offsets_.length());
- DCHECK(std::is_sorted(builtin_offsets_.begin(), builtin_offsets_.end()));
+ code_offsets_ = data->BuiltinOffsets();
+ DCHECK_EQ(BSU::kNumberOfCodeObjects, code_offsets_.length());
+ DCHECK(std::is_sorted(code_offsets_.begin(), code_offsets_.end()));
Initialize(isolate);
}
-void BuiltinDeserializer::DeserializeEagerBuiltins() {
+void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK_EQ(0, source()->position());
+ // Deserialize builtins.
+
Builtins* builtins = isolate()->builtins();
- for (int i = 0; i < Builtins::builtin_count; i++) {
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
// Do nothing. These builtins have been replaced by DeserializeLazy in
- // InitializeBuiltinsTable.
+ // InitializeFromReservations.
DCHECK_EQ(builtins->builtin(Builtins::kDeserializeLazy),
builtins->builtin(i));
} else {
- builtins->set_builtin(i, DeserializeBuiltin(i));
+ builtins->set_builtin(i, DeserializeBuiltinRaw(i));
}
}
#ifdef DEBUG
- for (int i = 0; i < Builtins::builtin_count; i++) {
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
Object* o = builtins->builtin(i);
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
}
#endif
+
+ // Deserialize bytecode handlers.
+
+ Interpreter* interpreter = isolate()->interpreter();
+ DCHECK(!isolate()->interpreter()->IsDispatchTableInitialized());
+
+ BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
+ // Bytecodes without a dedicated handler are patched up in a second pass.
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+
+ // If lazy-deserialization is enabled and the current bytecode is lazy,
+ // we write the generic LazyDeserialization handler into the dispatch table
+ // and deserialize later upon first use.
+ Code* code = (FLAG_lazy_handler_deserialization &&
+ IsLazyDeserializationEnabled() && Bytecodes::IsLazy(bytecode))
+ ? GetDeserializeLazyHandler(operand_scale)
+ : DeserializeHandlerRaw(bytecode, operand_scale);
+
+ interpreter->SetBytecodeHandler(bytecode, operand_scale, code);
+ });
+
+ // Patch up holes in the dispatch table.
+
+ Code* illegal_handler = interpreter->GetBytecodeHandler(
+ Bytecode::kIllegal, OperandScale::kSingle);
+
+ BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
+ if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+ interpreter->SetBytecodeHandler(bytecode, operand_scale, illegal_handler);
+ });
+
+ DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
}
Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
+ allocator()->ReserveAndInitializeBuiltinsTableForBuiltin(builtin_id);
+ DisallowHeapAllocation no_gc;
+ return DeserializeBuiltinRaw(builtin_id);
+}
+
+Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
+ OperandScale operand_scale) {
+ allocator()->ReserveForHandler(bytecode, operand_scale);
+ DisallowHeapAllocation no_gc;
+ return DeserializeHandlerRaw(bytecode, operand_scale);
+}
+
+Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
- DeserializingBuiltinScope scope(this, builtin_id);
+ DeserializingCodeObjectScope scope(this, builtin_id);
const int initial_position = source()->position();
- SetPositionToBuiltin(builtin_id);
+ source()->set_position(code_offsets_[builtin_id]);
Object* o = ReadDataSingle();
DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
@@ -94,35 +142,38 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
return code;
}
-void BuiltinDeserializer::SetPositionToBuiltin(int builtin_id) {
- DCHECK(Builtins::IsBuiltinId(builtin_id));
+Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
+ OperandScale operand_scale) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
- const uint32_t offset = builtin_offsets_[builtin_id];
- source()->set_position(offset);
+ const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
+ DeserializingCodeObjectScope scope(this, code_object_id);
- // Grab the size of the code object.
- byte data = source()->Get();
+ const int initial_position = source()->position();
+ source()->set_position(code_offsets_[code_object_id]);
- // The first bytecode can either be kNewObject, or kNextChunk if the current
- // chunk has been exhausted. Since we do allocations differently here, we
- // don't care about kNextChunk and can simply skip over it.
- // TODO(jgruber): When refactoring (de)serializer allocations, ensure we don't
- // generate kNextChunk bytecodes anymore for the builtins snapshot. In fact,
- // the entire reservations mechanism is unused for the builtins snapshot.
- if (data == kNextChunk) {
- source()->Get(); // Skip over kNextChunk's {space} parameter.
- } else {
- source()->set_position(offset); // Rewind.
- }
+ Object* o = ReadDataSingle();
+ DCHECK(o->IsCode() && Code::cast(o)->kind() == Code::BYTECODE_HANDLER);
+
+ // Rewind.
+ source()->set_position(initial_position);
+
+ // Flush the instruction cache.
+ Code* code = Code::cast(o);
+ Assembler::FlushICache(isolate(), code->instruction_start(),
+ code->instruction_size());
+
+ return code;
}
-uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
- DCHECK(Builtins::IsBuiltinId(builtin_id));
+uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
+ DCHECK_LT(code_object_id, BSU::kNumberOfCodeObjects);
const int initial_position = source()->position();
// Grab the size of the code object.
- SetPositionToBuiltin(builtin_id);
+ source()->set_position(code_offsets_[code_object_id]);
byte data = source()->Get();
USE(data);
@@ -135,108 +186,19 @@ uint32_t BuiltinDeserializer::ExtractBuiltinSize(int builtin_id) {
return result;
}
-Heap::Reservation BuiltinDeserializer::CreateReservationsForEagerBuiltins() {
- DCHECK(ReservesOnlyCodeSpace());
-
- Heap::Reservation result;
-
- // DeserializeLazy is always the first reservation (to simplify logic in
- // InitializeBuiltinsTable).
- {
- DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
- uint32_t builtin_size = ExtractBuiltinSize(Builtins::kDeserializeLazy);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
- result.push_back({builtin_size, nullptr, nullptr});
- }
-
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (i == Builtins::kDeserializeLazy) continue;
-
- // Skip lazy builtins. These will be replaced by the DeserializeLazy code
- // object in InitializeBuiltinsTable and thus require no reserved space.
- if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) continue;
-
- uint32_t builtin_size = ExtractBuiltinSize(i);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
- result.push_back({builtin_size, nullptr, nullptr});
- }
-
- return result;
-}
-
-void BuiltinDeserializer::InitializeBuiltinFromReservation(
- const Heap::Chunk& chunk, int builtin_id) {
- DCHECK_EQ(ExtractBuiltinSize(builtin_id), chunk.size);
- DCHECK_EQ(chunk.size, chunk.end - chunk.start);
-
- SkipList::Update(chunk.start, chunk.size);
- isolate()->builtins()->set_builtin(builtin_id,
- HeapObject::FromAddress(chunk.start));
-}
-
-void BuiltinDeserializer::InitializeBuiltinsTable(
- const Heap::Reservation& reservation) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
-
- Builtins* builtins = isolate()->builtins();
- int reservation_index = 0;
-
- // Other builtins can be replaced by DeserializeLazy so it may not be lazy.
- // It always occupies the first reservation slot.
- {
- DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
- InitializeBuiltinFromReservation(reservation[reservation_index],
- Builtins::kDeserializeLazy);
- reservation_index++;
- }
-
- Code* deserialize_lazy = builtins->builtin(Builtins::kDeserializeLazy);
-
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (i == Builtins::kDeserializeLazy) continue;
-
- if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
- builtins->set_builtin(i, deserialize_lazy);
- } else {
- InitializeBuiltinFromReservation(reservation[reservation_index], i);
- reservation_index++;
- }
+Code* BuiltinDeserializer::GetDeserializeLazyHandler(
+ interpreter::OperandScale operand_scale) const {
+ STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
+ switch (operand_scale) {
+ case OperandScale::kSingle:
+ return Code::cast(isolate()->heap()->deserialize_lazy_handler());
+ case OperandScale::kDouble:
+ return Code::cast(isolate()->heap()->deserialize_lazy_handler_wide());
+ case OperandScale::kQuadruple:
+ return Code::cast(
+ isolate()->heap()->deserialize_lazy_handler_extra_wide());
}
-
- DCHECK_EQ(reservation.size(), reservation_index);
-}
-
-void BuiltinDeserializer::ReserveAndInitializeBuiltinsTableForBuiltin(
- int builtin_id) {
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK(isolate()->builtins()->is_initialized());
- DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
- DCHECK_EQ(Builtins::kDeserializeLazy,
- isolate()->builtins()->builtin(builtin_id)->builtin_index());
-
- const uint32_t builtin_size = ExtractBuiltinSize(builtin_id);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
-
- Handle<HeapObject> o =
- isolate()->factory()->NewCodeForDeserialization(builtin_size);
-
- // Note: After this point and until deserialization finishes, heap allocation
- // is disallowed. We currently can't safely assert this since we'd need to
- // pass the DisallowHeapAllocation scope out of this function.
-
- // Write the allocated filler object into the builtins table. It will be
- // returned by our custom Allocate method below once needed.
-
- isolate()->builtins()->set_builtin(builtin_id, *o);
-}
-
-Address BuiltinDeserializer::Allocate(int space_index, int size) {
- DCHECK_EQ(CODE_SPACE, space_index);
- DCHECK_EQ(ExtractBuiltinSize(current_builtin_id_), size);
- Object* obj = isolate()->builtins()->builtin(current_builtin_id_);
- DCHECK(Internals::HasHeapObjectTag(obj));
- return HeapObject::cast(obj)->address();
+ UNREACHABLE();
}
} // namespace internal
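
In the hunks above, lazy builtins and lazy bytecode handlers are not deserialized eagerly; their table slots are pointed at a generic DeserializeLazy code object, and the real code is only materialized on first use. A rough standalone analogy of that dispatch-and-patch pattern (plain C++, not V8's actual mechanism):

// Sketch only: a dispatch table whose entries start out empty (standing in
// for the shared DeserializeLazy stub) and are patched to the real handler
// on first use.
#include <cstddef>
#include <cstdio>
#include <functional>
#include <vector>

struct LazyTable {
  // An empty entry means "not yet deserialized".
  std::vector<std::function<void()>> handlers;

  explicit LazyTable(size_t n) : handlers(n) {}

  void Call(size_t index) {
    if (!handlers[index]) {
      // First use: "deserialize" the handler and patch the table entry, much
      // like DeserializeLazy replaces itself in the builtins/dispatch tables.
      std::printf("deserializing entry %zu\n", index);
      handlers[index] = [index] { std::printf("running entry %zu\n", index); };
    }
    handlers[index]();
  }
};

int main() {
  LazyTable table(4);
  table.Call(2);  // Triggers the lazy "deserialization".
  table.Call(2);  // Already materialized; runs directly.
}
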
diff --git a/deps/v8/src/snapshot/builtin-deserializer.h b/deps/v8/src/snapshot/builtin-deserializer.h
index a73c68ed34..38ba2fecea 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.h
+++ b/deps/v8/src/snapshot/builtin-deserializer.h
@@ -5,7 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
-#include "src/heap/heap.h"
+#include "src/interpreter/interpreter.h"
+#include "src/snapshot/builtin-deserializer-allocator.h"
+#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/deserializer.h"
namespace v8 {
@@ -14,7 +16,12 @@ namespace internal {
class BuiltinSnapshotData;
// Deserializes the builtins blob.
-class BuiltinDeserializer final : public Deserializer {
+class BuiltinDeserializer final
+ : public Deserializer<BuiltinDeserializerAllocator> {
+ using BSU = BuiltinSnapshotUtils;
+ using Bytecode = interpreter::Bytecode;
+ using OperandScale = interpreter::OperandScale;
+
public:
BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
@@ -25,45 +32,27 @@ class BuiltinDeserializer final : public Deserializer {
//
// After this, the instruction cache must be flushed by the caller (we don't
// do it ourselves since the startup serializer batch-flushes all code pages).
- void DeserializeEagerBuiltins();
+ void DeserializeEagerBuiltinsAndHandlers();
- // Deserializes the single given builtin. Assumes that reservations have
- // already been allocated.
+ // Deserializes the single given builtin. This is used whenever a builtin is
+ // lazily deserialized at runtime.
Code* DeserializeBuiltin(int builtin_id);
- // These methods are used to pre-allocate builtin objects prior to
- // deserialization.
- // TODO(jgruber): Refactor reservation/allocation logic in deserializers to
- // make this less messy.
- Heap::Reservation CreateReservationsForEagerBuiltins();
- void InitializeBuiltinsTable(const Heap::Reservation& reservation);
-
- // Creates reservations and initializes the builtins table in preparation for
- // lazily deserializing a single builtin.
- void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
+ // Deserializes the single given handler. This is used whenever a handler is
+ // lazily deserialized at runtime.
+ Code* DeserializeHandler(Bytecode bytecode, OperandScale operand_scale);
private:
- // TODO(jgruber): Remove once allocations have been refactored.
- void SetPositionToBuiltin(int builtin_id);
+ // Deserializes the single given builtin. Assumes that reservations have
+ // already been allocated.
+ Code* DeserializeBuiltinRaw(int builtin_id);
+
+ // Deserializes the single given bytecode handler. Assumes that reservations
+ // have already been allocated.
+ Code* DeserializeHandlerRaw(Bytecode bytecode, OperandScale operand_scale);
// Extracts the size of builtin Code objects (baked into the snapshot).
- uint32_t ExtractBuiltinSize(int builtin_id);
-
- // Used after memory allocation prior to isolate initialization, to register
- // the newly created object in code space and add it to the builtins table.
- void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
- int builtin_id);
-
- // Allocation works differently here than in other deserializers. Instead of
- // a statically-known memory area determined at serialization-time, our
- // memory requirements here are determined at runtime. Another major
- // difference is that we create builtin Code objects up-front (before
- // deserialization) in order to avoid having to patch builtin references
- // later on. See also the kBuiltin case in deserializer.cc.
- //
- // Allocate simply returns the pre-allocated object prepared by
- // InitializeBuiltinsTable.
- Address Allocate(int space_index, int size) override;
+ uint32_t ExtractCodeObjectSize(int builtin_id);
// BuiltinDeserializer implements its own builtin iteration logic. Make sure
// the RootVisitor API is not used accidentally.
@@ -71,16 +60,29 @@ class BuiltinDeserializer final : public Deserializer {
UNREACHABLE();
}
- // Stores the builtin currently being deserialized. We need this to determine
- // where to 'allocate' from during deserialization.
- static const int kNoBuiltinId = -1;
- int current_builtin_id_ = kNoBuiltinId;
+ int CurrentCodeObjectId() const { return current_code_object_id_; }
+
+ // Convenience function to grab the handler off the heap's strong root list.
+ Code* GetDeserializeLazyHandler(OperandScale operand_scale) const;
+
+ private:
+ // Stores the code object currently being deserialized. The
+ // {current_code_object_id} stores the index of the currently-deserialized
+ // code object within the snapshot (and within {code_offsets_}). We need this
+ // to determine where to 'allocate' from during deserialization.
+ static const int kNoCodeObjectId = -1;
+ int current_code_object_id_ = kNoCodeObjectId;
// The offsets of each builtin within the serialized data. Equivalent to
// BuiltinSerializer::builtin_offsets_ but on the deserialization side.
- Vector<const uint32_t> builtin_offsets_;
+ Vector<const uint32_t> code_offsets_;
+
+ // For current_code_object_id_.
+ friend class DeserializingCodeObjectScope;
- friend class DeserializingBuiltinScope;
+ // For isolate(), IsLazyDeserializationEnabled(), CurrentCodeObjectId() and
+ // ExtractBuiltinSize().
+ friend class BuiltinDeserializerAllocator;
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.cc b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
new file mode 100644
index 0000000000..dbb5789721
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/builtin-serializer-allocator.h"
+
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+SerializerReference BuiltinSerializerAllocator::Allocate(AllocationSpace space,
+ uint32_t size) {
+ DCHECK_EQ(space, CODE_SPACE);
+ DCHECK_GT(size, 0);
+
+ // Builtin serialization & deserialization does not use the reservation
+ // system. Instead of worrying about chunk indices and offsets, we simply
+ // need to generate unique offsets here.
+
+ const uint32_t virtual_chunk_index = 0;
+ const auto ref = SerializerReference::BackReference(
+ CODE_SPACE, virtual_chunk_index, virtual_chunk_offset_);
+
+ virtual_chunk_size_ += size;
+ virtual_chunk_offset_ += kObjectAlignment; // Needs to be aligned.
+
+ return ref;
+}
+
+#ifdef DEBUG
+bool BuiltinSerializerAllocator::BackReferenceIsAlreadyAllocated(
+ SerializerReference reference) const {
+ DCHECK(reference.is_back_reference());
+ AllocationSpace space = reference.space();
+ DCHECK_EQ(space, CODE_SPACE);
+ DCHECK_EQ(reference.chunk_index(), 0);
+ return reference.chunk_offset() < virtual_chunk_offset_;
+}
+#endif // DEBUG
+
+std::vector<SerializedData::Reservation>
+BuiltinSerializerAllocator::EncodeReservations() const {
+ return std::vector<SerializedData::Reservation>();
+}
+
+void BuiltinSerializerAllocator::OutputStatistics() {
+ DCHECK(FLAG_serialization_statistics);
+
+ PrintF(" Spaces (bytes):\n");
+
+ STATIC_ASSERT(NEW_SPACE == 0);
+ for (int space = 0; space < kNumberOfSpaces; space++) {
+ PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
+ }
+ PrintF("\n");
+
+ STATIC_ASSERT(NEW_SPACE == 0);
+ for (int space = 0; space < kNumberOfSpaces; space++) {
+ uint32_t space_size = (space == CODE_SPACE) ? virtual_chunk_size_ : 0;
+ PrintF("%16d", space_size);
+ }
+ PrintF("\n");
+}
+
+} // namespace internal
+} // namespace v8
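
BuiltinSerializerAllocator::Allocate above skips the reservation system entirely: it only has to hand out back-references that are unique and aligned, and it accumulates the size purely for statistics output. A simplified standalone model of that bookkeeping (the alignment constant is an assumed placeholder, not necessarily V8's value):

// Sketch only: generate unique, aligned "virtual" offsets to serve as
// back-references when no real reservation system exists.
#include <cassert>
#include <cstdint>

constexpr uint32_t kObjectAlignment = 8;  // Assumed placeholder value.

class VirtualOffsetAllocator {
 public:
  // Each call returns a fresh offset; the size is only accumulated so it can
  // be reported in statistics.
  uint32_t Allocate(uint32_t size) {
    const uint32_t ref = virtual_chunk_offset_;
    virtual_chunk_size_ += size;
    virtual_chunk_offset_ += kObjectAlignment;  // Keep offsets aligned.
    return ref;
  }

  bool IsAlreadyAllocated(uint32_t ref) const {
    return ref < virtual_chunk_offset_;
  }

  uint32_t TotalSize() const { return virtual_chunk_size_; }

 private:
  uint32_t virtual_chunk_size_ = 0;
  uint32_t virtual_chunk_offset_ = 0;
};

int main() {
  VirtualOffsetAllocator allocator;
  const uint32_t a = allocator.Allocate(100);
  const uint32_t b = allocator.Allocate(40);
  assert(a != b);
  assert(allocator.IsAlreadyAllocated(a) && allocator.IsAlreadyAllocated(b));
  assert(allocator.TotalSize() == 140);
}
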
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.h b/deps/v8/src/snapshot/builtin-serializer-allocator.h
new file mode 100644
index 0000000000..a2c9a036e4
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-serializer-allocator.h
@@ -0,0 +1,52 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
+
+#include "src/snapshot/serializer-common.h"
+
+namespace v8 {
+namespace internal {
+
+template <class AllocatorT>
+class Serializer;
+
+class BuiltinSerializerAllocator final {
+ public:
+ BuiltinSerializerAllocator(
+ Serializer<BuiltinSerializerAllocator>* serializer) {}
+
+ SerializerReference Allocate(AllocationSpace space, uint32_t size);
+ SerializerReference AllocateMap() { UNREACHABLE(); }
+ SerializerReference AllocateLargeObject(uint32_t size) { UNREACHABLE(); }
+ SerializerReference AllocateOffHeapBackingStore() { UNREACHABLE(); }
+
+#ifdef DEBUG
+ bool BackReferenceIsAlreadyAllocated(
+ SerializerReference back_reference) const;
+#endif
+
+ std::vector<SerializedData::Reservation> EncodeReservations() const;
+
+ void OutputStatistics();
+
+ private:
+ static constexpr int kNumberOfPreallocatedSpaces =
+ SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static constexpr int kNumberOfSpaces =
+ SerializerDeserializer::kNumberOfSpaces;
+
+ // We need to track a faked offset to create back-references. The size is
+ // kept simply to display statistics.
+ uint32_t virtual_chunk_size_ = 0;
+ uint32_t virtual_chunk_offset_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(BuiltinSerializerAllocator)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/builtin-serializer.cc b/deps/v8/src/snapshot/builtin-serializer.cc
index 6e90ea18be..893c79c05e 100644
--- a/deps/v8/src/snapshot/builtin-serializer.cc
+++ b/deps/v8/src/snapshot/builtin-serializer.cc
@@ -4,12 +4,17 @@
#include "src/snapshot/builtin-serializer.h"
+#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "src/snapshot/startup-serializer.h"
namespace v8 {
namespace internal {
+using interpreter::Bytecode;
+using interpreter::Bytecodes;
+using interpreter::OperandScale;
+
BuiltinSerializer::BuiltinSerializer(Isolate* isolate,
StartupSerializer* startup_serializer)
: Serializer(isolate), startup_serializer_(startup_serializer) {}
@@ -18,17 +23,45 @@ BuiltinSerializer::~BuiltinSerializer() {
OutputStatistics("BuiltinSerializer");
}
-void BuiltinSerializer::SerializeBuiltins() {
- for (int i = 0; i < Builtins::builtin_count; i++) {
- builtin_offsets_[i] = sink_.Position();
+void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
+ // Serialize builtins.
+
+ STATIC_ASSERT(0 == BSU::kFirstBuiltinIndex);
+
+ for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
+ SetBuiltinOffset(i, sink_.Position());
SerializeBuiltin(isolate()->builtins()->builtin(i));
}
- Pad(); // Pad with kNop since GetInt() might read too far.
+
+ // Serialize bytecode handlers.
+
+ STATIC_ASSERT(BSU::kNumberOfBuiltins == BSU::kFirstHandlerIndex);
+
+ BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
+ SetHandlerOffset(bytecode, operand_scale, sink_.Position());
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+
+ SerializeHandler(
+ isolate()->interpreter()->GetBytecodeHandler(bytecode, operand_scale));
+ });
+
+ STATIC_ASSERT(BSU::kFirstHandlerIndex + BSU::kNumberOfHandlers ==
+ BSU::kNumberOfCodeObjects);
+
+ // The DeserializeLazy handlers are serialized by the StartupSerializer
+ // during strong root iteration.
+
+ DCHECK(isolate()->heap()->deserialize_lazy_handler()->IsCode());
+ DCHECK(isolate()->heap()->deserialize_lazy_handler_wide()->IsCode());
+ DCHECK(isolate()->heap()->deserialize_lazy_handler_extra_wide()->IsCode());
+
+ // Pad with kNop since GetInt() might read too far.
+ Pad();
// Append the offset table. During deserialization, the offset table is
// extracted by BuiltinSnapshotData.
- const byte* data = reinterpret_cast<const byte*>(&builtin_offsets_[0]);
- int data_length = static_cast<int>(sizeof(builtin_offsets_));
+ const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
+ int data_length = static_cast<int>(sizeof(code_offsets_));
sink_.PutRaw(data, data_length, "BuiltinOffsets");
}
@@ -50,6 +83,13 @@ void BuiltinSerializer::SerializeBuiltin(Code* code) {
object_serializer.Serialize();
}
+void BuiltinSerializer::SerializeHandler(Code* code) {
+ DCHECK(ObjectIsBytecodeHandler(code));
+ ObjectSerializer object_serializer(this, code, &sink_, kPlain,
+ kStartOfObject);
+ object_serializer.Serialize();
+}
+
void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!o->IsSmi());
@@ -86,5 +126,19 @@ void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
sink_.PutInt(cache_index, "partial_snapshot_cache_index");
}
+void BuiltinSerializer::SetBuiltinOffset(int builtin_id, uint32_t offset) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK(BSU::IsBuiltinIndex(builtin_id));
+ code_offsets_[builtin_id] = offset;
+}
+
+void BuiltinSerializer::SetHandlerOffset(Bytecode bytecode,
+ OperandScale operand_scale,
+ uint32_t offset) {
+ const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
+ DCHECK(BSU::IsHandlerIndex(index));
+ code_offsets_[index] = offset;
+}
+
} // namespace internal
} // namespace v8
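
SerializeBuiltinsAndHandlers above records the sink position of every code object and appends that offset table to the serialized data, so the deserializer can later seek straight to a single builtin or handler. A minimal standalone sketch of the same layout, using a plain byte string instead of V8's SnapshotByteSink:

// Sketch only: serialize several blobs into one stream, record where each
// starts, and append the offset table so a reader can jump to one entry.
#include <cstdint>
#include <string>
#include <vector>

std::string Pack(const std::vector<std::string>& blobs,
                 std::vector<uint32_t>* offsets) {
  std::string sink;
  for (const std::string& blob : blobs) {
    offsets->push_back(static_cast<uint32_t>(sink.size()));  // Start offset.
    sink += blob;
  }
  // Append the offset table; a reader extracts it by its known size.
  sink.append(reinterpret_cast<const char*>(offsets->data()),
              offsets->size() * sizeof(uint32_t));
  return sink;
}

std::string ReadEntry(const std::string& packed,
                      const std::vector<uint32_t>& offsets, size_t i,
                      size_t payload_end) {
  const size_t start = offsets[i];
  const size_t end = (i + 1 < offsets.size()) ? offsets[i + 1] : payload_end;
  return packed.substr(start, end - start);
}

int main() {
  std::vector<uint32_t> offsets;
  const std::vector<std::string> blobs = {"builtin-0", "builtin-1", "handler-0"};
  const std::string packed = Pack(blobs, &offsets);
  const size_t payload_end = packed.size() - offsets.size() * sizeof(uint32_t);
  return ReadEntry(packed, offsets, 1, payload_end) == "builtin-1" ? 0 : 1;
}
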
diff --git a/deps/v8/src/snapshot/builtin-serializer.h b/deps/v8/src/snapshot/builtin-serializer.h
index 85c59f84c0..bb8bbdebfa 100644
--- a/deps/v8/src/snapshot/builtin-serializer.h
+++ b/deps/v8/src/snapshot/builtin-serializer.h
@@ -5,6 +5,9 @@
#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
+#include "src/interpreter/interpreter.h"
+#include "src/snapshot/builtin-serializer-allocator.h"
+#include "src/snapshot/builtin-snapshot-utils.h"
#include "src/snapshot/serializer.h"
namespace v8 {
@@ -12,31 +15,45 @@ namespace internal {
class StartupSerializer;
-// Responsible for serializing all builtin objects during startup snapshot
-// creation. Builtins are serialized into a dedicated area of the snapshot.
+// Responsible for serializing builtin and bytecode handler objects during
+// startup snapshot creation into a dedicated area of the snapshot.
// See snapshot.h for documentation of the snapshot layout.
-class BuiltinSerializer : public Serializer<> {
+class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
+ using BSU = BuiltinSnapshotUtils;
+
public:
BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~BuiltinSerializer() override;
- void SerializeBuiltins();
+ void SerializeBuiltinsAndHandlers();
private:
void VisitRootPointers(Root root, Object** start, Object** end) override;
void SerializeBuiltin(Code* code);
+ void SerializeHandler(Code* code);
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
+ void SetBuiltinOffset(int builtin_id, uint32_t offset);
+ void SetHandlerOffset(interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale,
+ uint32_t offset);
+
// The startup serializer is needed for access to the partial snapshot cache,
// which is used to serialize things like embedded constants.
StartupSerializer* startup_serializer_;
- // Stores the starting offset, within the serialized data, of each builtin.
- // This is later packed into the builtin snapshot, and used by the builtin
- // deserializer to deserialize individual builtins.
- uint32_t builtin_offsets_[Builtins::builtin_count];
+ // Stores the starting offset, within the serialized data, of each code
+ // object. This is later packed into the builtin snapshot, and used by the
+ // builtin deserializer to deserialize individual builtins and bytecode
+ // handlers.
+ //
+ // Indices [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins[:
+ // Builtin offsets.
+ // Indices [kFirstHandlerIndex, kFirstHandlerIndex + kNumberOfHandlers[:
+ // Bytecode handler offsets.
+ uint32_t code_offsets_[BuiltinSnapshotUtils::kNumberOfCodeObjects];
DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
};
diff --git a/deps/v8/src/snapshot/builtin-snapshot-utils.cc b/deps/v8/src/snapshot/builtin-snapshot-utils.cc
new file mode 100644
index 0000000000..e32a857c0b
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-snapshot-utils.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/builtin-snapshot-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+bool BuiltinSnapshotUtils::IsBuiltinIndex(int maybe_index) {
+ return (kFirstBuiltinIndex <= maybe_index &&
+ maybe_index < kFirstBuiltinIndex + kNumberOfBuiltins);
+}
+
+// static
+bool BuiltinSnapshotUtils::IsHandlerIndex(int maybe_index) {
+ return (kFirstHandlerIndex <= maybe_index &&
+ maybe_index < kFirstHandlerIndex + kNumberOfHandlers);
+}
+
+// static
+int BuiltinSnapshotUtils::BytecodeToIndex(Bytecode bytecode,
+ OperandScale operand_scale) {
+ int index =
+ BuiltinSnapshotUtils::kNumberOfBuiltins + static_cast<int>(bytecode);
+ switch (operand_scale) { // clang-format off
+ case OperandScale::kSingle: return index;
+ case OperandScale::kDouble: return index + Bytecodes::kBytecodeCount;
+ case OperandScale::kQuadruple: return index + 2 * Bytecodes::kBytecodeCount;
+ } // clang-format on
+ UNREACHABLE();
+}
+
+// static
+std::pair<interpreter::Bytecode, interpreter::OperandScale>
+BuiltinSnapshotUtils::BytecodeFromIndex(int index) {
+ DCHECK(IsHandlerIndex(index));
+
+ const int x = index - BuiltinSnapshotUtils::kNumberOfBuiltins;
+ Bytecode bytecode = Bytecodes::FromByte(x % Bytecodes::kBytecodeCount);
+ switch (x / Bytecodes::kBytecodeCount) { // clang-format off
+ case 0: return {bytecode, OperandScale::kSingle};
+ case 1: return {bytecode, OperandScale::kDouble};
+ case 2: return {bytecode, OperandScale::kQuadruple};
+ default: UNREACHABLE();
+ } // clang-format on
+}
+
+// static
+void BuiltinSnapshotUtils::ForEachBytecode(
+ std::function<void(Bytecode, OperandScale)> f) {
+ static const OperandScale kOperandScales[] = {
+#define VALUE(Name, _) OperandScale::k##Name,
+ OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+ };
+
+ for (OperandScale operand_scale : kOperandScales) {
+ for (int i = 0; i < Bytecodes::kBytecodeCount; i++) {
+ f(Bytecodes::FromByte(i), operand_scale);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-snapshot-utils.h b/deps/v8/src/snapshot/builtin-snapshot-utils.h
new file mode 100644
index 0000000000..587b4a35b0
--- /dev/null
+++ b/deps/v8/src/snapshot/builtin-snapshot-utils.h
@@ -0,0 +1,56 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
+#define V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
+
+#include <functional>
+
+#include "src/interpreter/interpreter.h"
+
+namespace v8 {
+namespace internal {
+
+// Constants and utility methods used by builtin and bytecode handler
+// (de)serialization.
+class BuiltinSnapshotUtils : public AllStatic {
+ using Bytecode = interpreter::Bytecode;
+ using BytecodeOperands = interpreter::BytecodeOperands;
+ using Bytecodes = interpreter::Bytecodes;
+ using Interpreter = interpreter::Interpreter;
+ using OperandScale = interpreter::OperandScale;
+
+ public:
+ static const int kFirstBuiltinIndex = 0;
+ static const int kNumberOfBuiltins = Builtins::builtin_count;
+
+ static const int kFirstHandlerIndex = kFirstBuiltinIndex + kNumberOfBuiltins;
+ static const int kNumberOfHandlers =
+ Bytecodes::kBytecodeCount * BytecodeOperands::kOperandScaleCount;
+
+ // The number of code objects in the builtin snapshot.
+ // TODO(jgruber): This could be reduced by a bit since not every
+ // {bytecode, operand_scale} combination has an associated handler
+ // (see Bytecodes::BytecodeHasHandler).
+ static const int kNumberOfCodeObjects = kNumberOfBuiltins + kNumberOfHandlers;
+
+ // Indexes into the offsets vector contained in snapshot.
+ // See e.g. BuiltinSerializer::code_offsets_.
+ static bool IsBuiltinIndex(int maybe_index);
+ static bool IsHandlerIndex(int maybe_index);
+ static int BytecodeToIndex(Bytecode bytecode, OperandScale operand_scale);
+
+ // Converts an index back into the {bytecode,operand_scale} tuple. This is the
+ // inverse operation of BytecodeToIndex().
+ static std::pair<Bytecode, OperandScale> BytecodeFromIndex(int index);
+
+ // Iteration over all {bytecode,operand_scale} pairs. Implemented here since
+ // (de)serialization depends on the iteration order.
+ static void ForEachBytecode(std::function<void(Bytecode, OperandScale)> f);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_BUILTIN_SNAPSHOT_UTILS_H_
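
The utilities above lay out all code objects in one flat index space: builtins occupy [0, kNumberOfBuiltins) and each {bytecode, operand_scale} pair gets one handler slot after that, grouped by operand scale. The mapping and its inverse are plain arithmetic, which a standalone sketch can round-trip (the counts below are placeholders, not V8's actual constants):

// Sketch only: the flat index layout for builtins followed by bytecode
// handlers, with made-up counts for illustration.
#include <cassert>
#include <utility>

constexpr int kNumberOfBuiltins = 1500;  // Placeholder, not V8's real count.
constexpr int kBytecodeCount = 180;      // Placeholder, not V8's real count.
constexpr int kOperandScaleCount = 3;    // Single, Double, Quadruple.

int BytecodeToIndex(int bytecode, int scale_index) {
  return kNumberOfBuiltins + scale_index * kBytecodeCount + bytecode;
}

// Inverse of BytecodeToIndex(): recovers {bytecode, scale_index}.
std::pair<int, int> BytecodeFromIndex(int index) {
  const int x = index - kNumberOfBuiltins;
  return {x % kBytecodeCount, x / kBytecodeCount};
}

int main() {
  for (int scale = 0; scale < kOperandScaleCount; ++scale) {
    for (int bytecode = 0; bytecode < kBytecodeCount; ++bytecode) {
      const int index = BytecodeToIndex(bytecode, scale);
      assert(BytecodeFromIndex(index) == std::make_pair(bytecode, scale));
    }
  }
}
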
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 29e1e783e4..3350ef3c0f 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -13,6 +13,7 @@
#include "src/objects-inl.h"
#include "src/snapshot/object-deserializer.h"
#include "src/snapshot/snapshot.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/version.h"
#include "src/visitors.h"
#include "src/wasm/wasm-module.h"
@@ -123,8 +124,8 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
- // There should be no hash table embedded. They would require rehashing.
- CHECK(!obj->IsHashTable());
+ // Embedded FixedArrays that need rehashing must support rehashing.
+ CHECK_IMPLIES(obj->NeedsRehashing(), obj->CanBeRehashed());
// We expect no instantiated function objects or contexts.
CHECK(!obj->IsJSFunction() && !obj->IsContext());
@@ -242,6 +243,8 @@ MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
return nothing;
}
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
MaybeHandle<WasmCompiledModule> maybe_result =
ObjectDeserializer::DeserializeWasmCompiledModule(isolate, &scd,
wire_bytes);
@@ -260,6 +263,8 @@ void WasmCompiledModuleSerializer::SerializeCodeObject(
switch (kind) {
case Code::WASM_FUNCTION:
case Code::JS_TO_WASM_FUNCTION: {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate()->heap());
// Because the trap handler index is not meaningful across copies and
// serializations, we need to serialize it as kInvalidIndex. We do this by
// saving the old value, setting the index to kInvalidIndex and then
@@ -276,6 +281,7 @@ void WasmCompiledModuleSerializer::SerializeCodeObject(
}
case Code::WASM_INTERPRETER_ENTRY:
case Code::WASM_TO_JS_FUNCTION:
+ case Code::WASM_TO_WASM_FUNCTION:
// Serialize the illegal builtin instead. On instantiation of a
// deserialized module, these will be replaced again.
SerializeBuiltinReference(*BUILTIN_CODE(isolate(), Illegal), how_to_code,
@@ -422,7 +428,7 @@ ScriptData* SerializedCodeData::GetScriptData() {
ScriptData* result = new ScriptData(data_, size_);
result->AcquireDataOwnership();
owns_data_ = false;
- data_ = NULL;
+ data_ = nullptr;
return result;
}
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.cc b/deps/v8/src/snapshot/default-deserializer-allocator.cc
new file mode 100644
index 0000000000..b352409f7e
--- /dev/null
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.cc
@@ -0,0 +1,246 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/default-deserializer-allocator.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/snapshot/builtin-deserializer.h"
+#include "src/snapshot/deserializer.h"
+#include "src/snapshot/startup-deserializer.h"
+
+namespace v8 {
+namespace internal {
+
+DefaultDeserializerAllocator::DefaultDeserializerAllocator(
+ Deserializer<DefaultDeserializerAllocator>* deserializer)
+ : deserializer_(deserializer) {}
+
+// We know the space requirements before deserialization and can
+// pre-allocate that reserved space. During deserialization, all we need
+// to do is to bump up the pointer for each space in the reserved
+// space. This is also used for fixing back references.
+// We may have to split up the pre-allocation into several chunks
+// because it would not fit onto a single page. We do not have to keep
+// track of when to move to the next chunk. An opcode will signal this.
+// Since multiple large objects cannot be folded into one large object
+// space allocation, we have to do an actual allocation when deserializing
+// each large object. Instead of tracking offset for back references, we
+// reference large objects by index.
+Address DefaultDeserializerAllocator::AllocateRaw(AllocationSpace space,
+ int size) {
+ if (space == LO_SPACE) {
+ AlwaysAllocateScope scope(isolate());
+ LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
+ // TODO(jgruber): May be cleaner to pass in executability as an argument.
+ Executability exec =
+ static_cast<Executability>(deserializer_->source()->Get());
+ AllocationResult result = lo_space->AllocateRaw(size, exec);
+ HeapObject* obj = result.ToObjectChecked();
+ deserialized_large_objects_.push_back(obj);
+ return obj->address();
+ } else if (space == MAP_SPACE) {
+ DCHECK_EQ(Map::kSize, size);
+ return allocated_maps_[next_map_index_++];
+ } else {
+ DCHECK_LT(space, kNumberOfPreallocatedSpaces);
+ Address address = high_water_[space];
+ DCHECK_NOT_NULL(address);
+ high_water_[space] += size;
+#ifdef DEBUG
+ // Assert that the current reserved chunk is still big enough.
+ const Heap::Reservation& reservation = reservations_[space];
+ int chunk_index = current_chunk_[space];
+ DCHECK_LE(high_water_[space], reservation[chunk_index].end);
+#endif
+ if (space == CODE_SPACE) SkipList::Update(address, size);
+ return address;
+ }
+}
+
+Address DefaultDeserializerAllocator::Allocate(AllocationSpace space,
+ int size) {
+ Address address;
+ HeapObject* obj;
+
+ if (next_alignment_ != kWordAligned) {
+ const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
+ address = AllocateRaw(space, reserved);
+ obj = HeapObject::FromAddress(address);
+ // If one of the following assertions fails, then we are deserializing an
+ // aligned object when the filler maps have not been deserialized yet.
+ // We require filler maps as padding to align the object.
+ Heap* heap = isolate()->heap();
+ DCHECK(heap->free_space_map()->IsMap());
+ DCHECK(heap->one_pointer_filler_map()->IsMap());
+ DCHECK(heap->two_pointer_filler_map()->IsMap());
+ obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
+ address = obj->address();
+ next_alignment_ = kWordAligned;
+ return address;
+ } else {
+ return AllocateRaw(space, size);
+ }
+}
+
+void DefaultDeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
+ DCHECK_LT(space, kNumberOfPreallocatedSpaces);
+ uint32_t chunk_index = current_chunk_[space];
+ const Heap::Reservation& reservation = reservations_[space];
+ // Make sure the current chunk is indeed exhausted.
+ CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+ // Move to next reserved chunk.
+ chunk_index = ++current_chunk_[space];
+ CHECK_LT(chunk_index, reservation.size());
+ high_water_[space] = reservation[chunk_index].start;
+}
+
+HeapObject* DefaultDeserializerAllocator::GetMap(uint32_t index) {
+ DCHECK_LT(index, next_map_index_);
+ return HeapObject::FromAddress(allocated_maps_[index]);
+}
+
+HeapObject* DefaultDeserializerAllocator::GetLargeObject(uint32_t index) {
+ DCHECK_LT(index, deserialized_large_objects_.size());
+ return deserialized_large_objects_[index];
+}
+
+HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
+ uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ DCHECK_LT(space, kNumberOfPreallocatedSpaces);
+ DCHECK_LE(chunk_index, current_chunk_[space]);
+ Address address = reservations_[space][chunk_index].start + chunk_offset;
+ if (next_alignment_ != kWordAligned) {
+ int padding = Heap::GetFillToAlign(address, next_alignment_);
+ next_alignment_ = kWordAligned;
+ DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
+ address += padding;
+ }
+ return HeapObject::FromAddress(address);
+}
+
+void DefaultDeserializerAllocator::DecodeReservation(
+ Vector<const SerializedData::Reservation> res) {
+ DCHECK_EQ(0, reservations_[NEW_SPACE].size());
+ STATIC_ASSERT(NEW_SPACE == 0);
+ int current_space = NEW_SPACE;
+ for (auto& r : res) {
+ reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
+ if (r.is_last()) current_space++;
+ }
+ DCHECK_EQ(kNumberOfSpaces, current_space);
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
+}
+
+bool DefaultDeserializerAllocator::ReserveSpace() {
+#ifdef DEBUG
+ for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
+ DCHECK_GT(reservations_[i].size(), 0);
+ }
+#endif // DEBUG
+ DCHECK(allocated_maps_.empty());
+ if (!isolate()->heap()->ReserveSpace(reservations_, &allocated_maps_)) {
+ return false;
+ }
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ high_water_[i] = reservations_[i][0].start;
+ }
+ return true;
+}
+
+// static
+bool DefaultDeserializerAllocator::ReserveSpace(
+ StartupDeserializer* startup_deserializer,
+ BuiltinDeserializer* builtin_deserializer) {
+ const int first_space = NEW_SPACE;
+ const int last_space = SerializerDeserializer::kNumberOfSpaces;
+ Isolate* isolate = startup_deserializer->isolate();
+
+ // Create a set of merged reservations to reserve space in one go.
+ // The BuiltinDeserializer's reservations are ignored, since our actual
+ // requirements vary based on whether lazy deserialization is enabled.
+ // Instead, we manually determine the required code-space.
+
+ Heap::Reservation merged_reservations[kNumberOfSpaces];
+ for (int i = first_space; i < last_space; i++) {
+ merged_reservations[i] =
+ startup_deserializer->allocator()->reservations_[i];
+ }
+
+ Heap::Reservation builtin_reservations =
+ builtin_deserializer->allocator()
+ ->CreateReservationsForEagerBuiltinsAndHandlers();
+ DCHECK(!builtin_reservations.empty());
+
+ for (const auto& c : builtin_reservations) {
+ merged_reservations[CODE_SPACE].push_back(c);
+ }
+
+ if (!isolate->heap()->ReserveSpace(
+ merged_reservations,
+ &startup_deserializer->allocator()->allocated_maps_)) {
+ return false;
+ }
+
+ DisallowHeapAllocation no_allocation;
+
+ // Distribute the successful allocations between both deserializers.
+ // There's nothing to be done here except for code space.
+
+ {
+ const int num_builtin_reservations =
+ static_cast<int>(builtin_reservations.size());
+ for (int i = num_builtin_reservations - 1; i >= 0; i--) {
+ const auto& c = merged_reservations[CODE_SPACE].back();
+ DCHECK_EQ(c.size, builtin_reservations[i].size);
+ DCHECK_EQ(c.size, c.end - c.start);
+ builtin_reservations[i].start = c.start;
+ builtin_reservations[i].end = c.end;
+ merged_reservations[CODE_SPACE].pop_back();
+ }
+
+ builtin_deserializer->allocator()->InitializeFromReservations(
+ builtin_reservations);
+ }
+
+ // Write back startup reservations.
+
+ for (int i = first_space; i < last_space; i++) {
+ startup_deserializer->allocator()->reservations_[i].swap(
+ merged_reservations[i]);
+ }
+
+ for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
+ startup_deserializer->allocator()->high_water_[i] =
+ startup_deserializer->allocator()->reservations_[i][0].start;
+ }
+
+ return true;
+}
+
+bool DefaultDeserializerAllocator::ReservationsAreFullyUsed() const {
+ for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
+ const uint32_t chunk_index = current_chunk_[space];
+ if (reservations_[space].size() != chunk_index + 1) {
+ return false;
+ }
+ if (reservations_[space][chunk_index].end != high_water_[space]) {
+ return false;
+ }
+ }
+ return (allocated_maps_.size() == next_map_index_);
+}
+
+void DefaultDeserializerAllocator::
+ RegisterDeserializedObjectsForBlackAllocation() {
+ isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
+ reservations_, deserialized_large_objects_, allocated_maps_);
+}
+
+Isolate* DefaultDeserializerAllocator::isolate() const {
+ return deserializer_->isolate();
+}
+
+} // namespace internal
+} // namespace v8
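
The comment at the top of AllocateRaw describes bump-pointer allocation out of pre-reserved, page-sized chunks, with an explicit opcode signalling when to move to the next chunk. The following standalone sketch illustrates that scheme with plain byte buffers and hypothetical names; it does not use V8's actual types or API.

// Minimal sketch of bump-pointer allocation over pre-reserved chunks.
// Types and names are illustrative only; they do not match V8's API.
#include <cassert>
#include <cstdint>
#include <vector>

struct Chunk {
  uint32_t size;
  char* start;
  char* end;
};

class ChunkAllocator {
 public:
  explicit ChunkAllocator(std::vector<Chunk> reservation)
      : reservation_(std::move(reservation)),
        current_chunk_(0),
        high_water_(reservation_.empty() ? nullptr : reservation_[0].start) {}

  // Bump the high-water mark; the caller guarantees (via a kNextChunk-style
  // opcode) that the current chunk never overflows.
  char* Allocate(uint32_t size) {
    char* address = high_water_;
    high_water_ += size;
    assert(high_water_ <= reservation_[current_chunk_].end);
    return address;
  }

  // Equivalent of MoveToNextChunk: the current chunk must be exhausted.
  void MoveToNextChunk() {
    assert(high_water_ == reservation_[current_chunk_].end);
    ++current_chunk_;
    assert(current_chunk_ < reservation_.size());
    high_water_ = reservation_[current_chunk_].start;
  }

 private:
  std::vector<Chunk> reservation_;
  size_t current_chunk_;
  char* high_water_;
};

int main() {
  // Two pre-reserved 64-byte chunks backed by plain arrays.
  static char backing_a[64], backing_b[64];
  ChunkAllocator allocator({{64, backing_a, backing_a + 64},
                            {64, backing_b, backing_b + 64}});
  allocator.Allocate(64);       // exhausts the first chunk
  allocator.MoveToNextChunk();  // switch to the second reserved chunk
  char* obj = allocator.Allocate(16);
  return obj == backing_b ? 0 : 1;
}
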
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.h b/deps/v8/src/snapshot/default-deserializer-allocator.h
new file mode 100644
index 0000000000..08d9f48cec
--- /dev/null
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.h
@@ -0,0 +1,102 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
+
+#include "src/globals.h"
+#include "src/heap/heap.h"
+#include "src/snapshot/serializer-common.h"
+
+namespace v8 {
+namespace internal {
+
+template <class AllocatorT>
+class Deserializer;
+
+class BuiltinDeserializer;
+class StartupDeserializer;
+
+class DefaultDeserializerAllocator final {
+ public:
+ DefaultDeserializerAllocator(
+ Deserializer<DefaultDeserializerAllocator>* deserializer);
+
+ // ------- Allocation Methods -------
+ // Methods related to memory allocation during deserialization.
+
+ Address Allocate(AllocationSpace space, int size);
+
+ void MoveToNextChunk(AllocationSpace space);
+ void SetAlignment(AllocationAlignment alignment) {
+ DCHECK_EQ(kWordAligned, next_alignment_);
+ DCHECK_LE(kWordAligned, alignment);
+ DCHECK_LE(alignment, kDoubleUnaligned);
+ next_alignment_ = static_cast<AllocationAlignment>(alignment);
+ }
+
+ HeapObject* GetMap(uint32_t index);
+ HeapObject* GetLargeObject(uint32_t index);
+ HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
+ uint32_t chunk_offset);
+
+ // ------- Reservation Methods -------
+ // Methods related to memory reservations (prior to deserialization).
+
+ void DecodeReservation(Vector<const SerializedData::Reservation> res);
+ bool ReserveSpace();
+
+ // Atomically reserves space for the two given deserializers. Guarantees
+ // reservation for both without garbage collection in-between.
+ static bool ReserveSpace(StartupDeserializer* startup_deserializer,
+ BuiltinDeserializer* builtin_deserializer);
+
+ bool ReservationsAreFullyUsed() const;
+
+ // ------- Misc Utility Methods -------
+
+ void RegisterDeserializedObjectsForBlackAllocation();
+
+ private:
+ Isolate* isolate() const;
+
+ // Raw allocation without considering alignment.
+ Address AllocateRaw(AllocationSpace space, int size);
+
+ private:
+ static constexpr int kNumberOfPreallocatedSpaces =
+ SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static constexpr int kNumberOfSpaces =
+ SerializerDeserializer::kNumberOfSpaces;
+
+ // The address of the next object that will be allocated in each space.
+ // Each space has a number of chunks reserved by the GC, with each chunk
+ // fitting into a page. Deserialized objects are allocated into the
+ // current chunk of the target space by bumping up high water mark.
+ Heap::Reservation reservations_[kNumberOfSpaces];
+ uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
+ Address high_water_[kNumberOfPreallocatedSpaces];
+
+ // The alignment of the next allocation.
+ AllocationAlignment next_alignment_ = kWordAligned;
+
+ // All required maps are pre-allocated during reservation. {next_map_index_}
+ // stores the index of the next map to return from allocation.
+ uint32_t next_map_index_ = 0;
+ std::vector<Address> allocated_maps_;
+
+ // Allocated large objects are kept in this map and may be fetched later as
+ // back-references.
+ std::vector<HeapObject*> deserialized_large_objects_;
+
+ // The current deserializer.
+ Deserializer<DefaultDeserializerAllocator>* const deserializer_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultDeserializerAllocator)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
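
The static ReserveSpace in the new allocator merges the builtin deserializer's code-space chunks into the startup deserializer's reservations, reserves everything in a single heap call, and then hands the trailing chunks back to the builtin side. A simplified, self-contained sketch of that merge-and-distribute step follows; the names are hypothetical and plain heap buffers stand in for reserved heap chunks.

// Simplified illustration of the merged-reservation scheme; names and types
// only mimic the shape of the real code.
#include <cassert>
#include <cstdint>
#include <vector>

struct Chunk {
  uint32_t size;
  char* start;
  char* end;
};

// Stand-in for the heap's reservation call: back every chunk with freshly
// allocated memory (failure handling not modelled here).
static bool ReserveAll(std::vector<Chunk>* chunks) {
  for (Chunk& c : *chunks) {
    c.start = new char[c.size];
    c.end = c.start + c.size;
  }
  return true;
}

int main() {
  std::vector<Chunk> startup_code_space = {{128, nullptr, nullptr},
                                           {256, nullptr, nullptr}};
  std::vector<Chunk> builtin_code_space = {{512, nullptr, nullptr}};

  // Merge: append builtin chunks behind the startup chunks so a single
  // reservation covers both deserializers (no GC can run in between).
  std::vector<Chunk> merged = startup_code_space;
  for (const Chunk& c : builtin_code_space) merged.push_back(c);
  if (!ReserveAll(&merged)) return 1;

  // Distribute: the builtin chunks are the trailing entries, so pop them
  // off the back and write their addresses into the builtin reservation.
  for (int i = static_cast<int>(builtin_code_space.size()) - 1; i >= 0; i--) {
    const Chunk& c = merged.back();
    assert(c.size == builtin_code_space[i].size);
    builtin_code_space[i] = c;
    merged.pop_back();
  }
  startup_code_space.swap(merged);  // write back the startup reservations

  assert(startup_code_space.size() == 2);
  assert(builtin_code_space[0].size == 512);
  for (Chunk& c : startup_code_space) delete[] c.start;
  for (Chunk& c : builtin_code_space) delete[] c.start;
  return 0;
}
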
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.cc b/deps/v8/src/snapshot/default-serializer-allocator.cc
index 1dfa21ad2b..b8cc55ff2b 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-serializer-allocator.cc
@@ -80,13 +80,6 @@ bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
}
}
}
-
-bool DefaultSerializerAllocator::HasNotExceededFirstPageOfEachSpace() const {
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- if (!completed_chunks_[i].empty()) return false;
- }
- return true;
-}
#endif
std::vector<SerializedData::Reservation>
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.h b/deps/v8/src/snapshot/default-serializer-allocator.h
index 7bd247aaf1..b01532752a 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.h
+++ b/deps/v8/src/snapshot/default-serializer-allocator.h
@@ -26,7 +26,6 @@ class DefaultSerializerAllocator final {
#ifdef DEBUG
bool BackReferenceIsAlreadyAllocated(
SerializerReference back_reference) const;
- bool HasNotExceededFirstPageOfEachSpace() const;
#endif
std::vector<SerializedData::Reservation> EncodeReservations() const;
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 1eb15d6c38..5d7d551c98 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -4,134 +4,18 @@
#include "src/snapshot/deserializer.h"
-#include "src/api.h"
#include "src/assembler-inl.h"
-#include "src/bootstrapper.h"
-#include "src/deoptimizer.h"
-#include "src/external-reference-table.h"
-#include "src/heap/heap-inl.h"
#include "src/isolate.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/snapshot/builtin-deserializer.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/string.h"
+#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/natives.h"
-#include "src/snapshot/startup-deserializer.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
-void Deserializer::DecodeReservation(
- Vector<const SerializedData::Reservation> res) {
- DCHECK_EQ(0, reservations_[NEW_SPACE].size());
- STATIC_ASSERT(NEW_SPACE == 0);
- int current_space = NEW_SPACE;
- for (auto& r : res) {
- reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
- if (r.is_last()) current_space++;
- }
- DCHECK_EQ(kNumberOfSpaces, current_space);
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
-}
-
-void Deserializer::RegisterDeserializedObjectsForBlackAllocation() {
- isolate_->heap()->RegisterDeserializedObjectsForBlackAllocation(
- reservations_, deserialized_large_objects_, allocated_maps_);
-}
-
-bool Deserializer::ReserveSpace() {
-#ifdef DEBUG
- for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
- DCHECK(reservations_[i].size() > 0);
- }
-#endif // DEBUG
- DCHECK(allocated_maps_.empty());
- if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_))
- return false;
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- high_water_[i] = reservations_[i][0].start;
- }
- return true;
-}
-
-// static
-bool Deserializer::ReserveSpace(StartupDeserializer* startup_deserializer,
- BuiltinDeserializer* builtin_deserializer) {
- const int first_space = NEW_SPACE;
- const int last_space = SerializerDeserializer::kNumberOfSpaces;
- Isolate* isolate = startup_deserializer->isolate();
-
- // Create a set of merged reservations to reserve space in one go.
- // The BuiltinDeserializer's reservations are ignored, since our actual
- // requirements vary based on whether lazy deserialization is enabled.
- // Instead, we manually determine the required code-space.
-
- DCHECK(builtin_deserializer->ReservesOnlyCodeSpace());
- Heap::Reservation merged_reservations[kNumberOfSpaces];
- for (int i = first_space; i < last_space; i++) {
- merged_reservations[i] = startup_deserializer->reservations_[i];
- }
-
- Heap::Reservation builtin_reservations =
- builtin_deserializer->CreateReservationsForEagerBuiltins();
- DCHECK(!builtin_reservations.empty());
-
- for (const auto& c : builtin_reservations) {
- merged_reservations[CODE_SPACE].push_back(c);
- }
-
- if (!isolate->heap()->ReserveSpace(merged_reservations,
- &startup_deserializer->allocated_maps_)) {
- return false;
- }
-
- DisallowHeapAllocation no_allocation;
-
- // Distribute the successful allocations between both deserializers.
- // There's nothing to be done here except for code space.
-
- {
- const int num_builtin_reservations =
- static_cast<int>(builtin_reservations.size());
- for (int i = num_builtin_reservations - 1; i >= 0; i--) {
- const auto& c = merged_reservations[CODE_SPACE].back();
- DCHECK_EQ(c.size, builtin_reservations[i].size);
- DCHECK_EQ(c.size, c.end - c.start);
- builtin_reservations[i].start = c.start;
- builtin_reservations[i].end = c.end;
- merged_reservations[CODE_SPACE].pop_back();
- }
-
- builtin_deserializer->InitializeBuiltinsTable(builtin_reservations);
- }
-
- // Write back startup reservations.
-
- for (int i = first_space; i < last_space; i++) {
- startup_deserializer->reservations_[i].swap(merged_reservations[i]);
- }
-
- for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
- startup_deserializer->high_water_[i] =
- startup_deserializer->reservations_[i][0].start;
- builtin_deserializer->high_water_[i] = nullptr;
- }
-
- return true;
-}
-
-bool Deserializer::ReservesOnlyCodeSpace() const {
- for (int space = NEW_SPACE; space < kNumberOfSpaces; space++) {
- if (space == CODE_SPACE) continue;
- const auto& r = reservations_[space];
- for (const Heap::Chunk& c : r)
- if (c.size != 0) return false;
- }
- return true;
-}
-
-void Deserializer::Initialize(Isolate* isolate) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
@@ -150,60 +34,65 @@ void Deserializer::Initialize(Isolate* isolate) {
SerializedData::ComputeMagicNumber(external_reference_table_));
}
-void Deserializer::SortMapDescriptors() {
- for (const auto& address : allocated_maps_) {
- Map* map = Map::cast(HeapObject::FromAddress(address));
- if (map->instance_descriptors()->number_of_descriptors() > 1) {
- map->instance_descriptors()->Sort();
- }
- }
+template <class AllocatorT>
+bool Deserializer<AllocatorT>::IsLazyDeserializationEnabled() const {
+ return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
}
-bool Deserializer::IsLazyDeserializationEnabled() const {
- return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Rehash() {
+ DCHECK(can_rehash() || deserializing_user_code());
+ for (const auto& item : to_rehash_) item->RehashBasedOnMap();
}
-Deserializer::~Deserializer() {
+template <class AllocatorT>
+Deserializer<AllocatorT>::~Deserializer() {
#ifdef DEBUG
// Do not perform checks if we aborted deserialization.
if (source_.position() == 0) return;
// Check that we only have padding bytes remaining.
while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- int chunk_index = current_chunk_[space];
- DCHECK_EQ(reservations_[space].size(), chunk_index + 1);
- DCHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
- }
- DCHECK_EQ(allocated_maps_.size(), next_map_index_);
+ // Check that we've fully used all reserved space.
+ DCHECK(allocator()->ReservationsAreFullyUsed());
#endif // DEBUG
}
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
-void Deserializer::VisitRootPointers(Root root, Object** start, Object** end) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ // Builtins and bytecode handlers are deserialized in a separate pass by the
+ // BuiltinDeserializer.
+ if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
+
// The space must be new space. Any other space would cause ReadChunk to try
- // to update the remembered using NULL as the address.
- ReadData(start, end, NEW_SPACE, NULL);
+ // to update the remembered using nullptr as the address.
+ ReadData(start, end, NEW_SPACE, nullptr);
}
-void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::Synchronize(
+ VisitorSynchronization::SyncTag tag) {
static const byte expected = kSynchronize;
CHECK_EQ(expected, source_.Get());
- deserializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
}
-void Deserializer::DeserializeDeferredObjects() {
+template <class AllocatorT>
+void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
switch (code) {
case kAlignmentPrefix:
case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2:
- SetAlignment(code);
+ case kAlignmentPrefix + 2: {
+ int alignment = code - (SerializerDeserializer::kAlignmentPrefix - 1);
+ allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
break;
+ }
default: {
int space = code & kSpaceMask;
- DCHECK(space <= kNumberOfSpaces);
- DCHECK(code - space == kNewObject);
+ DCHECK_LE(space, kNumberOfSpaces);
+ DCHECK_EQ(code - space, kNewObject);
HeapObject* object = GetBackReferencedObject(space);
int size = source_.GetInt() << kPointerSizeLog2;
Address obj_address = object->address();
@@ -241,24 +130,33 @@ uint32_t StringTableInsertionKey::ComputeHashField(String* string) {
return string->hash_field();
}
-HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
- if (deserializing_user_code()) {
+template <class AllocatorT>
+HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
+ int space) {
+ if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
if (obj->IsString()) {
+ // Uninitialize hash field as we need to recompute the hash.
String* string = String::cast(obj);
- // Uninitialize hash field as the hash seed may have changed.
string->set_hash_field(String::kEmptyHashField);
+ } else if (obj->NeedsRehashing()) {
+ to_rehash_.push_back(obj);
+ }
+ }
+
+ if (deserializing_user_code()) {
+ if (obj->IsString()) {
+ String* string = String::cast(obj);
if (string->IsInternalizedString()) {
// Canonicalize the internalized string. If it already exists in the
// string table, set it to forward to the existing one.
StringTableInsertionKey key(string);
- String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
- if (canonical == NULL) {
- new_internalized_strings_.push_back(handle(string));
- return string;
- } else {
- string->SetForwardedInternalizedString(canonical);
- return canonical;
- }
+ String* canonical =
+ StringTable::ForwardStringIfExists(isolate_, &key, string);
+
+ if (canonical != nullptr) return canonical;
+
+ new_internalized_strings_.push_back(handle(string));
+ return string;
}
} else if (obj->IsScript()) {
new_scripts_.push_back(handle(Script::cast(obj)));
@@ -290,6 +188,10 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
if (isolate_->external_reference_redirector()) {
accessor_infos_.push_back(AccessorInfo::cast(obj));
}
+ } else if (obj->IsCallHandlerInfo()) {
+ if (isolate_->external_reference_redirector()) {
+ call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
+ }
} else if (obj->IsExternalOneByteString()) {
DCHECK(obj->map() == isolate_->heap()->native_source_string_map());
ExternalOneByteString* string = ExternalOneByteString::cast(obj);
@@ -298,6 +200,21 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
NativesExternalStringResource::DecodeForDeserialization(
string->resource()));
isolate_->heap()->RegisterExternalString(string);
+ } else if (obj->IsJSTypedArray()) {
+ JSTypedArray* typed_array = JSTypedArray::cast(obj);
+ CHECK(typed_array->byte_offset()->IsSmi());
+ int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
+ if (byte_offset > 0) {
+ FixedTypedArrayBase* elements =
+ FixedTypedArrayBase::cast(typed_array->elements());
+ // Must be off-heap layout.
+ DCHECK_NULL(elements->base_pointer());
+
+ void* pointer_with_offset = reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(elements->external_pointer()) +
+ byte_offset);
+ elements->set_external_pointer(pointer_with_offset);
+ }
} else if (obj->IsJSArrayBuffer()) {
JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
// Only fixup for the off-heap case.
@@ -315,61 +232,46 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
if (fta->base_pointer() == nullptr) {
Smi* store_index = reinterpret_cast<Smi*>(fta->external_pointer());
void* backing_store = off_heap_backing_stores_[store_index->value()];
-
fta->set_external_pointer(backing_store);
}
}
- if (FLAG_rehash_snapshot && can_rehash_ && !deserializing_user_code()) {
- if (obj->IsString()) {
- // Uninitialize hash field as we are going to reinitialize the hash seed.
- String* string = String::cast(obj);
- string->set_hash_field(String::kEmptyHashField);
- } else if (obj->IsTransitionArray() &&
- TransitionArray::cast(obj)->number_of_entries() > 1) {
- transition_arrays_.push_back(TransitionArray::cast(obj));
- }
- }
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
return obj;
}
-int Deserializer::MaybeReplaceWithDeserializeLazy(int builtin_id) {
+template <class AllocatorT>
+int Deserializer<AllocatorT>::MaybeReplaceWithDeserializeLazy(int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
- return (IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id) &&
- !deserializing_builtins_)
+ return IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id)
? Builtins::kDeserializeLazy
: builtin_id;
}
-HeapObject* Deserializer::GetBackReferencedObject(int space) {
+template <class AllocatorT>
+HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
HeapObject* obj;
SerializerReference back_reference =
SerializerReference::FromBitfield(source_.GetInt());
- if (space == LO_SPACE) {
- uint32_t index = back_reference.large_object_index();
- obj = deserialized_large_objects_[index];
- } else if (space == MAP_SPACE) {
- int index = back_reference.map_index();
- DCHECK(index < next_map_index_);
- obj = HeapObject::FromAddress(allocated_maps_[index]);
- } else {
- DCHECK(space < kNumberOfPreallocatedSpaces);
- uint32_t chunk_index = back_reference.chunk_index();
- DCHECK_LE(chunk_index, current_chunk_[space]);
- uint32_t chunk_offset = back_reference.chunk_offset();
- Address address = reservations_[space][chunk_index].start + chunk_offset;
- if (next_alignment_ != kWordAligned) {
- int padding = Heap::GetFillToAlign(address, next_alignment_);
- next_alignment_ = kWordAligned;
- DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
- address += padding;
- }
- obj = HeapObject::FromAddress(address);
+
+ switch (space) {
+ case LO_SPACE:
+ obj = allocator()->GetLargeObject(back_reference.large_object_index());
+ break;
+ case MAP_SPACE:
+ obj = allocator()->GetMap(back_reference.map_index());
+ break;
+ default:
+ obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
+ back_reference.chunk_index(),
+ back_reference.chunk_offset());
+ break;
}
- if (deserializing_user_code() && obj->IsInternalizedString()) {
- obj = String::cast(obj)->GetForwardedInternalizedString();
+
+ if (deserializing_user_code() && obj->IsThinString()) {
+ obj = ThinString::cast(obj)->actual();
}
+
hot_objects_.Add(obj);
return obj;
}
@@ -379,29 +281,14 @@ HeapObject* Deserializer::GetBackReferencedObject(int space) {
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
-void Deserializer::ReadObject(int space_number, Object** write_back) {
- Address address;
- HeapObject* obj;
- int size = source_.GetInt() << kObjectAlignmentBits;
-
- if (next_alignment_ != kWordAligned) {
- int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
- address = Allocate(space_number, reserved);
- obj = HeapObject::FromAddress(address);
- // If one of the following assertions fails, then we are deserializing an
- // aligned object when the filler maps have not been deserialized yet.
- // We require filler maps as padding to align the object.
- Heap* heap = isolate_->heap();
- DCHECK(heap->free_space_map()->IsMap());
- DCHECK(heap->one_pointer_filler_map()->IsMap());
- DCHECK(heap->two_pointer_filler_map()->IsMap());
- obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
- address = obj->address();
- next_alignment_ = kWordAligned;
- } else {
- address = Allocate(space_number, size);
- obj = HeapObject::FromAddress(address);
- }
+template <class AllocatorT>
+void Deserializer<AllocatorT>::ReadObject(int space_number,
+ Object** write_back) {
+ const int size = source_.GetInt() << kObjectAlignmentBits;
+
+ Address address =
+ allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
+ HeapObject* obj = HeapObject::FromAddress(address);
isolate_->heap()->OnAllocationEvent(obj, size);
Object** current = reinterpret_cast<Object**>(address);
@@ -423,46 +310,8 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
#endif // DEBUG
}
-// We know the space requirements before deserialization and can
-// pre-allocate that reserved space. During deserialization, all we need
-// to do is to bump up the pointer for each space in the reserved
-// space. This is also used for fixing back references.
-// We may have to split up the pre-allocation into several chunks
-// because it would not fit onto a single page. We do not have to keep
-// track of when to move to the next chunk. An opcode will signal this.
-// Since multiple large objects cannot be folded into one large object
-// space allocation, we have to do an actual allocation when deserializing
-// each large object. Instead of tracking offset for back references, we
-// reference large objects by index.
-Address Deserializer::Allocate(int space_index, int size) {
- if (space_index == LO_SPACE) {
- AlwaysAllocateScope scope(isolate_);
- LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
- Executability exec = static_cast<Executability>(source_.Get());
- AllocationResult result = lo_space->AllocateRaw(size, exec);
- HeapObject* obj = result.ToObjectChecked();
- deserialized_large_objects_.push_back(obj);
- return obj->address();
- } else if (space_index == MAP_SPACE) {
- DCHECK_EQ(Map::kSize, size);
- return allocated_maps_[next_map_index_++];
- } else {
- DCHECK(space_index < kNumberOfPreallocatedSpaces);
- Address address = high_water_[space_index];
- DCHECK_NOT_NULL(address);
- high_water_[space_index] += size;
-#ifdef DEBUG
- // Assert that the current reserved chunk is still big enough.
- const Heap::Reservation& reservation = reservations_[space_index];
- int chunk_index = current_chunk_[space_index];
- DCHECK_LE(high_water_[space_index], reservation[chunk_index].end);
-#endif
- if (space_index == CODE_SPACE) SkipList::Update(address, size);
- return address;
- }
-}
-
-Object* Deserializer::ReadDataSingle() {
+template <class AllocatorT>
+Object* Deserializer<AllocatorT>::ReadDataSingle() {
Object* o;
Object** start = &o;
Object** end = start + 1;
@@ -474,14 +323,24 @@ Object* Deserializer::ReadDataSingle() {
return o;
}
-bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
- Address current_object_address) {
+static void NoExternalReferencesCallback() {
+ // The following check will trigger if a function or object template
+ // with references to native functions have been deserialized from
+ // snapshot, but no actual external references were provided when the
+ // isolate was created.
+ CHECK_WITH_MSG(false, "No external references provided via API");
+}
+
+template <class AllocatorT>
+bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
+ int source_space,
+ Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
bool write_barrier_needed =
- (current_object_address != NULL && source_space != NEW_SPACE &&
+ (current_object_address != nullptr && source_space != NEW_SPACE &&
source_space != CODE_SPACE);
while (current < limit) {
byte data = source_.Get();
@@ -618,15 +477,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kNextChunk: {
int space = source_.Get();
- DCHECK(space < kNumberOfPreallocatedSpaces);
- int chunk_index = current_chunk_[space];
- const Heap::Reservation& reservation = reservations_[space];
- // Make sure the current chunk is indeed exhausted.
- CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
- // Move to next reserved chunk.
- chunk_index = ++current_chunk_[space];
- CHECK_LT(chunk_index, reservation.size());
- high_water_[space] = reservation[chunk_index].start;
+ allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
break;
}
@@ -691,10 +542,16 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
current = reinterpret_cast<Object**>(
reinterpret_cast<Address>(current) + skip);
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
- DCHECK_WITH_MSG(reference_id < num_api_references_,
- "too few external references provided through the API");
- Address address = reinterpret_cast<Address>(
- isolate->api_external_references()[reference_id]);
+ Address address;
+ if (isolate->api_external_references()) {
+ DCHECK_WITH_MSG(
+ reference_id < num_api_references_,
+ "too few external references provided through the API");
+ address = reinterpret_cast<Address>(
+ isolate->api_external_references()[reference_id]);
+ } else {
+ address = reinterpret_cast<Address>(NoExternalReferencesCallback);
+ }
memcpy(current, &address, kPointerSize);
current++;
break;
@@ -702,9 +559,11 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kAlignmentPrefix:
case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2:
- SetAlignment(data);
+ case kAlignmentPrefix + 2: {
+ int alignment = data - (SerializerDeserializer::kAlignmentPrefix - 1);
+ allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
break;
+ }
STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
@@ -783,10 +642,13 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
return true;
}
+template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
-Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
- Address current_object_address, byte data,
- bool write_barrier_needed) {
+Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
+ Object** current,
+ Address current_object_address,
+ byte data,
+ bool write_barrier_needed) {
bool emit_write_barrier = false;
bool current_was_incremented = false;
int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
@@ -795,7 +657,7 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
ReadObject(space_number, current);
emit_write_barrier = (space_number == NEW_SPACE);
} else {
- Object* new_object = NULL; /* May not be a real Object pointer. */
+ Object* new_object = nullptr; /* May not be a real Object pointer. */
if (where == kNewObject) {
ReadObject(space_number, &new_object);
} else if (where == kBackref) {
@@ -829,13 +691,13 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
new_object = *attached_objects_[index];
emit_write_barrier = isolate->heap()->InNewSpace(new_object);
} else {
- DCHECK(where == kBuiltin);
+ DCHECK_EQ(where, kBuiltin);
int builtin_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
new_object = isolate->builtins()->builtin(builtin_id);
emit_write_barrier = false;
}
if (within == kInnerPointer) {
- DCHECK(how == kFromCode);
+ DCHECK_EQ(how, kFromCode);
if (where == kBuiltin) {
// At this point, new_object may still be uninitialized, thus the
// unchecked Code cast.
@@ -877,5 +739,9 @@ Object** Deserializer::ReadDataCase(Isolate* isolate, Object** current,
return current;
}
+// Explicit instantiation.
+template class Deserializer<BuiltinDeserializerAllocator>;
+template class Deserializer<DefaultDeserializerAllocator>;
+
} // namespace internal
} // namespace v8
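
Deserializer is now a class template parameterized by its allocator, with explicit instantiations at the end of the translation unit so the member definitions can stay in deserializer.cc. A minimal sketch of that policy pattern, using illustrative names only, looks like this:

// Sketch of the allocator-policy pattern: the deserializer is a template
// over its allocator and is instantiated explicitly in the .cc file.
// All names here are illustrative, not V8's.
#include <cstdio>

class DefaultAllocatorPolicy {
 public:
  const char* Name() const { return "default"; }
};

class BuiltinAllocatorPolicy {
 public:
  const char* Name() const { return "builtin"; }
};

template <class AllocatorT = DefaultAllocatorPolicy>
class DeserializerSketch {
 public:
  void Describe() { std::printf("allocator: %s\n", allocator_.Name()); }

 private:
  AllocatorT allocator_;
};

// Explicit instantiation keeps the member definitions in one translation
// unit, mirroring the "template class Deserializer<...>" lines above.
template class DeserializerSketch<DefaultAllocatorPolicy>;
template class DeserializerSketch<BuiltinAllocatorPolicy>;

int main() {
  DeserializerSketch<> default_deserializer;  // uses the default policy
  DeserializerSketch<BuiltinAllocatorPolicy> builtin_deserializer;
  default_deserializer.Describe();
  builtin_deserializer.Describe();
  return 0;
}
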
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 5aa2f8d656..5c9bda43ac 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -7,14 +7,17 @@
#include <vector>
-#include "src/heap/heap.h"
-#include "src/objects.h"
+#include "src/objects/js-array.h"
+#include "src/snapshot/default-deserializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"
namespace v8 {
namespace internal {
+class HeapObject;
+class Object;
+
// Used for platforms with embedded constant pools to trigger deserialization
// of objects found in code.
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
@@ -25,55 +28,33 @@ namespace internal {
#define V8_CODE_EMBEDS_OBJECT_POINTER 0
#endif
-class BuiltinDeserializer;
-class Heap;
-class StartupDeserializer;
-
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+template <class AllocatorT = DefaultDeserializerAllocator>
class Deserializer : public SerializerDeserializer {
public:
~Deserializer() override;
- // Add an object to back an attached reference. The order to add objects must
- // mirror the order they are added in the serializer.
- void AddAttachedObject(Handle<HeapObject> attached_object) {
- attached_objects_.push_back(attached_object);
- }
-
void SetRehashability(bool v) { can_rehash_ = v; }
protected:
// Create a deserializer from a snapshot byte source.
template <class Data>
Deserializer(Data* data, bool deserializing_user_code)
- : isolate_(NULL),
+ : isolate_(nullptr),
source_(data->Payload()),
magic_number_(data->GetMagicNumber()),
- next_map_index_(0),
- external_reference_table_(NULL),
- deserialized_large_objects_(0),
+ external_reference_table_(nullptr),
+ allocator_(this),
deserializing_user_code_(deserializing_user_code),
- next_alignment_(kWordAligned),
can_rehash_(false) {
- DecodeReservation(data->Reservations());
- // We start the indicies here at 1, so that we can distinguish between an
+ allocator()->DecodeReservation(data->Reservations());
+ // We start the indices here at 1, so that we can distinguish between an
// actual index and a nullptr in a deserialized object requiring fix-up.
off_heap_backing_stores_.push_back(nullptr);
}
- bool ReserveSpace();
-
- // Atomically reserves space for the two given deserializers. Guarantees
- // reservation for both without garbage collection in-between.
- static bool ReserveSpace(StartupDeserializer* startup_deserializer,
- BuiltinDeserializer* builtin_deserializer);
- bool ReservesOnlyCodeSpace() const;
-
void Initialize(Isolate* isolate);
void DeserializeDeferredObjects();
- void RegisterDeserializedObjectsForBlackAllocation();
-
- virtual Address Allocate(int space_index, int size);
// Deserializes into a single pointer and returns the resulting object.
Object* ReadDataSingle();
@@ -82,8 +63,11 @@ class Deserializer : public SerializerDeserializer {
// snapshot by chunk index and offset.
HeapObject* GetBackReferencedObject(int space);
- // Sort descriptors of deserialized maps using new string hashes.
- void SortMapDescriptors();
+ // Add an object to back an attached reference. The order to add objects must
+ // mirror the order they are added in the serializer.
+ void AddAttachedObject(Handle<HeapObject> attached_object) {
+ attached_objects_.push_back(attached_object);
+ }
Isolate* isolate() const { return isolate_; }
SnapshotByteSource* source() { return &source_; }
@@ -93,42 +77,36 @@ class Deserializer : public SerializerDeserializer {
const std::vector<AccessorInfo*>& accessor_infos() const {
return accessor_infos_;
}
+ const std::vector<CallHandlerInfo*>& call_handler_infos() const {
+ return call_handler_infos_;
+ }
const std::vector<Handle<String>>& new_internalized_strings() const {
return new_internalized_strings_;
}
const std::vector<Handle<Script>>& new_scripts() const {
return new_scripts_;
}
- const std::vector<TransitionArray*>& transition_arrays() const {
- return transition_arrays_;
- }
+
+ AllocatorT* allocator() { return &allocator_; }
bool deserializing_user_code() const { return deserializing_user_code_; }
bool can_rehash() const { return can_rehash_; }
bool IsLazyDeserializationEnabled() const;
+ void Rehash();
+
private:
void VisitRootPointers(Root root, Object** start, Object** end) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
- void DecodeReservation(Vector<const SerializedData::Reservation> res);
-
void UnalignedCopy(Object** dest, Object** src) {
memcpy(dest, src, sizeof(*src));
}
- void SetAlignment(byte data) {
- DCHECK_EQ(kWordAligned, next_alignment_);
- int alignment = data - (kAlignmentPrefix - 1);
- DCHECK_LE(kWordAligned, alignment);
- DCHECK_LE(alignment, kDoubleUnaligned);
- next_alignment_ = static_cast<AllocationAlignment>(alignment);
- }
-
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
- // of the object we are writing into, or NULL if we are not writing into an
+ // of the object we are writing into, or nullptr if we are not writing into an
// object, i.e. if we are writing a series of tagged values that are not on
// the heap. Return false if the object content has been deferred.
bool ReadData(Object** start, Object** end, int space,
@@ -159,41 +137,29 @@ class Deserializer : public SerializerDeserializer {
SnapshotByteSource source_;
uint32_t magic_number_;
- // The address of the next object that will be allocated in each space.
- // Each space has a number of chunks reserved by the GC, with each chunk
- // fitting into a page. Deserialized objects are allocated into the
- // current chunk of the target space by bumping up high water mark.
- Heap::Reservation reservations_[kNumberOfSpaces];
- uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
- Address high_water_[kNumberOfPreallocatedSpaces];
- int next_map_index_;
- std::vector<Address> allocated_maps_;
-
ExternalReferenceTable* external_reference_table_;
- std::vector<HeapObject*> deserialized_large_objects_;
std::vector<Code*> new_code_objects_;
std::vector<AccessorInfo*> accessor_infos_;
+ std::vector<CallHandlerInfo*> call_handler_infos_;
std::vector<Handle<String>> new_internalized_strings_;
std::vector<Handle<Script>> new_scripts_;
- std::vector<TransitionArray*> transition_arrays_;
std::vector<byte*> off_heap_backing_stores_;
+ AllocatorT allocator_;
const bool deserializing_user_code_;
- // TODO(jgruber): This workaround will no longer be necessary once builtin
- // reference patching has been removed (through advance allocation).
- bool deserializing_builtins_ = false;
-
- AllocationAlignment next_alignment_;
-
// TODO(6593): generalize rehashing, and remove this flag.
bool can_rehash_;
+ std::vector<HeapObject*> to_rehash_;
#ifdef DEBUG
uint32_t num_api_references_;
#endif // DEBUG
+ // For source(), isolate(), and allocator().
+ friend class DefaultDeserializerAllocator;
+
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
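
The header collects rehash candidates in to_rehash_ while objects are materialized, and Rehash() processes them in one pass once deserialization is complete. A compact sketch of that deferred-rehash idea, with stand-in types rather than V8's:

// Deferred rehashing sketch: objects that need rehashing are queued while
// they are materialized and processed in one pass at the end.
// Illustrative only; does not use V8 types.
#include <cstdio>
#include <vector>

struct HashedTable {
  int seed = 0;
  bool NeedsRehashing() const { return true; }
  void RehashWithSeed(int new_seed) { seed = new_seed; }
};

int main() {
  std::vector<HashedTable> heap(3);  // "deserialized" objects
  std::vector<HashedTable*> to_rehash;

  // During deserialization: only record the candidates.
  for (HashedTable& table : heap) {
    if (table.NeedsRehashing()) to_rehash.push_back(&table);
  }

  // After deserialization: rehash everything with the new hash seed.
  const int new_hash_seed = 42;
  for (HashedTable* table : to_rehash) table->RehashWithSeed(new_hash_seed);

  std::printf("rehashed %zu tables\n", to_rehash.size());
  return 0;
}
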
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index a6d9862c10..b1ecd61f2f 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -17,7 +17,8 @@
class SnapshotWriter {
public:
- SnapshotWriter() : snapshot_cpp_path_(NULL), snapshot_blob_path_(NULL) {}
+ SnapshotWriter()
+ : snapshot_cpp_path_(nullptr), snapshot_blob_path_(nullptr) {}
void SetSnapshotFile(const char* snapshot_cpp_file) {
snapshot_cpp_path_ = snapshot_cpp_file;
@@ -102,7 +103,7 @@ class SnapshotWriter {
static FILE* GetFileDescriptorOrDie(const char* filename) {
FILE* fp = v8::base::OS::FOpen(filename, "wb");
- if (fp == NULL) {
+ if (fp == nullptr) {
i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
exit(1);
}
@@ -114,10 +115,10 @@ class SnapshotWriter {
};
char* GetExtraCode(char* filename, const char* description) {
- if (filename == NULL || strlen(filename) == 0) return NULL;
+ if (filename == nullptr || strlen(filename) == 0) return nullptr;
::printf("Loading script for %s: %s\n", description, filename);
FILE* file = v8::base::OS::FOpen(filename, "rb");
- if (file == NULL) {
+ if (file == nullptr) {
fprintf(stderr, "Failed to open '%s': errno %d\n", filename, errno);
exit(1);
}
@@ -155,8 +156,8 @@ int main(int argc, char** argv) {
i::CpuFeatures::Probe(true);
v8::V8::InitializeICUDefaultLocation(argv[0]);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
+ v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
{
@@ -164,11 +165,13 @@ int main(int argc, char** argv) {
if (i::FLAG_startup_src) writer.SetSnapshotFile(i::FLAG_startup_src);
if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
- char* embed_script = GetExtraCode(argc >= 2 ? argv[1] : NULL, "embedding");
+ char* embed_script =
+ GetExtraCode(argc >= 2 ? argv[1] : nullptr, "embedding");
v8::StartupData blob = v8::V8::CreateSnapshotDataBlob(embed_script);
delete[] embed_script;
- char* warmup_script = GetExtraCode(argc >= 3 ? argv[2] : NULL, "warm up");
+ char* warmup_script =
+ GetExtraCode(argc >= 3 ? argv[2] : nullptr, "warm up");
if (warmup_script) {
v8::StartupData cold = blob;
blob = v8::V8::WarmUpSnapshotDataBlob(cold, warmup_script);
@@ -183,6 +186,5 @@ int main(int argc, char** argv) {
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
- delete platform;
return 0;
}
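
The mksnapshot change replaces the manually deleted platform from CreateDefaultPlatform with the std::unique_ptr returned by v8::platform::NewDefaultPlatform. For an embedder, the corresponding initialization pattern is roughly the following sketch (isolate setup elided; requires linking against V8 6.4 or newer):

// Hedged sketch of the embedder-side initialization pattern implied by the
// mksnapshot change: the platform is owned by a std::unique_ptr instead of
// being deleted manually.
#include <libplatform/libplatform.h>
#include <v8.h>

#include <memory>

int main(int argc, char** argv) {
  v8::V8::InitializeICUDefaultLocation(argv[0]);
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  // ... create isolates, run scripts ...

  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  // No explicit `delete platform;` anymore: the unique_ptr releases it.
  return 0;
}
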
diff --git a/deps/v8/src/snapshot/natives-common.cc b/deps/v8/src/snapshot/natives-common.cc
index 71f81ea971..e865498c7d 100644
--- a/deps/v8/src/snapshot/natives-common.cc
+++ b/deps/v8/src/snapshot/natives-common.cc
@@ -15,7 +15,7 @@ NativesExternalStringResource::NativesExternalStringResource(NativeType type,
int index)
: type_(type), index_(index) {
Vector<const char> source;
- DCHECK(0 <= index);
+ DCHECK_LE(0, index);
switch (type_) {
case CORE:
DCHECK(index < Natives::GetBuiltinsCount());
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index d8e74049f1..ea2a9e6f84 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -120,23 +120,21 @@ class NativesHolder {
CHECK(store);
holder_ = store;
}
- static bool empty() { return holder_ == NULL; }
+ static bool empty() { return holder_ == nullptr; }
static void Dispose() {
delete holder_;
- holder_ = NULL;
+ holder_ = nullptr;
}
private:
static NativesStore* holder_;
};
-template<NativeType type>
-NativesStore* NativesHolder<type>::holder_ = NULL;
-
+template <NativeType type>
+NativesStore* NativesHolder<type>::holder_ = nullptr;
// The natives blob. Memory is owned by caller.
-static StartupData* natives_blob_ = NULL;
-
+static StartupData* natives_blob_ = nullptr;
/**
* Read the Natives blob, as previously set by SetNativesFromFile.
@@ -161,7 +159,7 @@ void SetNativesFromFile(StartupData* natives_blob) {
DCHECK(!natives_blob_);
DCHECK(natives_blob);
DCHECK(natives_blob->data);
- DCHECK(natives_blob->raw_size > 0);
+ DCHECK_GT(natives_blob->raw_size, 0);
natives_blob_ = natives_blob;
ReadNatives();
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 253480535b..3f92e7757f 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -67,7 +67,8 @@ ObjectDeserializer::DeserializeWasmCompiledModule(
MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
Initialize(isolate);
- if (!ReserveSpace()) return MaybeHandle<HeapObject>();
+
+ if (!allocator()->ReserveSpace()) return MaybeHandle<HeapObject>();
DCHECK(deserializing_user_code());
HandleScope scope(isolate);
@@ -79,7 +80,8 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
DeserializeDeferredObjects();
FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
result = Handle<HeapObject>(HeapObject::cast(root));
- RegisterDeserializedObjectsForBlackAllocation();
+ Rehash();
+ allocator()->RegisterDeserializedObjectsForBlackAllocation();
}
CommitPostProcessedObjects();
return scope.CloseAndEscape(result);
@@ -97,12 +99,12 @@ void ObjectDeserializer::
}
void ObjectDeserializer::CommitPostProcessedObjects() {
- CHECK(new_internalized_strings().size() <= kMaxInt);
+ CHECK_LE(new_internalized_strings().size(), kMaxInt);
StringTable::EnsureCapacityForDeserialization(
isolate(), static_cast<int>(new_internalized_strings().size()));
for (Handle<String> string : new_internalized_strings()) {
StringTableInsertionKey key(*string);
- DCHECK_NULL(StringTable::LookupKeyIfExists(isolate(), &key));
+ DCHECK_NULL(StringTable::ForwardStringIfExists(isolate(), &key, *string));
StringTable::LookupKey(isolate(), &key);
}
diff --git a/deps/v8/src/snapshot/object-deserializer.h b/deps/v8/src/snapshot/object-deserializer.h
index 00e6a5b486..8f236f5f20 100644
--- a/deps/v8/src/snapshot/object-deserializer.h
+++ b/deps/v8/src/snapshot/object-deserializer.h
@@ -15,7 +15,7 @@ class SharedFunctionInfo;
class WasmCompiledModule;
// Deserializes the object graph rooted at a given object.
-class ObjectDeserializer final : public Deserializer {
+class ObjectDeserializer final : public Deserializer<> {
public:
static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source);
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index f4786006f8..41df5dbba7 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -30,7 +30,9 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
Initialize(isolate);
- if (!ReserveSpace()) V8::FatalProcessOutOfMemory("PartialDeserializer");
+ if (!allocator()->ReserveSpace()) {
+ V8::FatalProcessOutOfMemory("PartialDeserializer");
+ }
AddAttachedObject(global_proxy);
@@ -44,14 +46,14 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
- RegisterDeserializedObjectsForBlackAllocation();
+ allocator()->RegisterDeserializedObjectsForBlackAllocation();
// There's no code deserialized here. If this assert fires then that's
// changed and logging should be added to notify the profiler et al of the
// new code, which also has to be flushed from instruction cache.
CHECK_EQ(start_address, code_space->top());
- if (FLAG_rehash_snapshot && can_rehash()) RehashContext(Context::cast(root));
+ if (FLAG_rehash_snapshot && can_rehash()) Rehash();
return Handle<Object>(root, isolate);
}
@@ -67,8 +69,8 @@ void PartialDeserializer::DeserializeEmbedderFields(
code = source()->Get()) {
HandleScope scope(isolate());
int space = code & kSpaceMask;
- DCHECK(space <= kNumberOfSpaces);
- DCHECK(code - space == kNewObject);
+ DCHECK_LE(space, kNumberOfSpaces);
+ DCHECK_EQ(code - space, kNewObject);
Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
isolate());
int index = source()->GetInt();
@@ -82,13 +84,5 @@ void PartialDeserializer::DeserializeEmbedderFields(
delete[] data;
}
}
-
-void PartialDeserializer::RehashContext(Context* context) {
- DCHECK(can_rehash());
- for (const auto& array : transition_arrays()) array->Sort();
- context->global_object()->global_dictionary()->Rehash();
- SortMapDescriptors();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/partial-deserializer.h b/deps/v8/src/snapshot/partial-deserializer.h
index fec28ce0af..bbc55b7b51 100644
--- a/deps/v8/src/snapshot/partial-deserializer.h
+++ b/deps/v8/src/snapshot/partial-deserializer.h
@@ -15,7 +15,7 @@ class Context;
// Deserializes the context-dependent object graph rooted at a given object.
// The PartialDeserializer is not expected to deserialize any code objects.
-class PartialDeserializer final : public Deserializer {
+class PartialDeserializer final : public Deserializer<> {
public:
static MaybeHandle<Context> DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
@@ -33,9 +33,6 @@ class PartialDeserializer final : public Deserializer {
void DeserializeEmbedderFields(
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
-
- // Rehash after deserializing a context.
- void RehashContext(Context* context);
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index cae28234c1..11b21a17b3 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -17,7 +17,6 @@ PartialSerializer::PartialSerializer(
: Serializer(isolate),
startup_serializer_(startup_serializer),
serialize_embedder_fields_(callback),
- rehashable_global_dictionary_(nullptr),
can_be_rehashed_(true) {
InitializeCodeAddressMap();
}
@@ -42,8 +41,6 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
// Reset math random cache to get fresh random numbers.
context->set_math_random_index(Smi::kZero);
context->set_math_random_cache(isolate()->heap()->undefined_value());
- DCHECK_NULL(rehashable_global_dictionary_);
- rehashable_global_dictionary_ = context->global_object()->global_dictionary();
VisitRootPointer(Root::kPartialSnapshotCache, o);
SerializeDeferredObjects();
@@ -53,6 +50,8 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
+ DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
+
BuiltinReferenceSerializationMode mode =
startup_serializer_->clear_function_code() ? kCanonicalizeCompileLazy
: kDefault;
@@ -102,7 +101,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
}
- if (obj->IsHashTable()) CheckRehashability(obj);
+ CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
@@ -153,17 +152,10 @@ void PartialSerializer::SerializeEmbedderFields() {
sink_.Put(kSynchronize, "Finished with embedder fields data");
}
-void PartialSerializer::CheckRehashability(HeapObject* table) {
- DCHECK(table->IsHashTable());
+void PartialSerializer::CheckRehashability(HeapObject* obj) {
if (!can_be_rehashed_) return;
- if (table->IsUnseededNumberDictionary()) return;
- if (table->IsOrderedHashMap() &&
- OrderedHashMap::cast(table)->NumberOfElements() == 0) {
- return;
- }
- // We can only correctly rehash if the global dictionary is the only hash
- // table that we deserialize.
- if (table == rehashable_global_dictionary_) return;
+ if (!obj->NeedsRehashing()) return;
+ if (obj->CanBeRehashed()) return;
can_be_rehashed_ = false;
}
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index 6eb8b91436..b436c40cbe 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -33,12 +33,11 @@ class PartialSerializer : public Serializer<> {
void SerializeEmbedderFields();
- void CheckRehashability(HeapObject* table);
+ void CheckRehashability(HeapObject* obj);
StartupSerializer* startup_serializer_;
std::vector<JSObject*> embedder_field_holders_;
v8::SerializeEmbedderFieldsCallback serialize_embedder_fields_;
- GlobalDictionary* rehashable_global_dictionary_;
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index ec7b7b25c7..f201342105 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -106,7 +106,7 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
}
bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
- return !o->IsString() && !o->IsScript();
+ return !o->IsString() && !o->IsScript() && !o->IsJSTypedArray();
}
void SerializerDeserializer::RestoreExternalReferenceRedirectors(
@@ -118,5 +118,13 @@ void SerializerDeserializer::RestoreExternalReferenceRedirectors(
}
}
+void SerializerDeserializer::RestoreExternalReferenceRedirectors(
+ const std::vector<CallHandlerInfo*>& call_handler_infos) {
+ for (CallHandlerInfo* info : call_handler_infos) {
+ Foreign::cast(info->js_callback())
+ ->set_foreign_address(info->redirected_callback());
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index f753402d15..6482c350f7 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -57,7 +57,7 @@ class ExternalReferenceEncoder {
class HotObjectsList {
public:
HotObjectsList() : index_(0) {
- for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
+ for (int i = 0; i < kSize; i++) circular_queue_[i] = nullptr;
}
void Add(HeapObject* object) {
@@ -111,6 +111,8 @@ class SerializerDeserializer : public RootVisitor {
void RestoreExternalReferenceRedirectors(
const std::vector<AccessorInfo*>& accessor_infos);
+ void RestoreExternalReferenceRedirectors(
+ const std::vector<CallHandlerInfo*>& call_handler_infos);
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
@@ -192,9 +194,8 @@ class SerializerDeserializer : public RootVisitor {
// Used for embedder-allocated backing stores for TypedArrays.
static const int kOffHeapBackingStore = 0x1c;
- // Used to encode deoptimizer entry code.
- static const int kDeoptimizerEntryPlain = 0x1d;
- static const int kDeoptimizerEntryFromCode = 0x1e;
+ // 0x1d, 0x1e unused.
+
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
@@ -268,7 +269,7 @@ class SerializedData {
SerializedData(byte* data, int size)
: data_(data), size_(size), owns_data_(false) {}
- SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
+ SerializedData() : data_(nullptr), size_(0), owns_data_(false) {}
SerializedData(SerializedData&& other)
: data_(other.data_), size_(other.size_), owns_data_(other.owns_data_) {
// Ensure |other| will not attempt to destroy our data in destructor.
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 9db7d798a5..fd96850890 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -5,7 +5,10 @@
#include "src/snapshot/serializer.h"
#include "src/assembler-inl.h"
+#include "src/interpreter/interpreter.h"
+#include "src/objects/code.h"
#include "src/objects/map.h"
+#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/natives.h"
namespace v8 {
@@ -26,17 +29,17 @@ Serializer<AllocatorT>::Serializer(Isolate* isolate)
instance_type_size_[i] = 0;
}
} else {
- instance_type_count_ = NULL;
- instance_type_size_ = NULL;
+ instance_type_count_ = nullptr;
+ instance_type_size_ = nullptr;
}
#endif // OBJECT_PRINT
}
template <class AllocatorT>
Serializer<AllocatorT>::~Serializer() {
- if (code_address_map_ != NULL) delete code_address_map_;
+ if (code_address_map_ != nullptr) delete code_address_map_;
#ifdef OBJECT_PRINT
- if (instance_type_count_ != NULL) {
+ if (instance_type_count_ != nullptr) {
DeleteArray(instance_type_count_);
DeleteArray(instance_type_size_);
}
@@ -91,6 +94,10 @@ bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
template <class AllocatorT>
void Serializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
Object** end) {
+ // Builtins and bytecode handlers are serialized in a separate pass by the
+ // BuiltinSerializer.
+ if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
+
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
PutSmi(Smi::cast(*current));
@@ -207,6 +214,14 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
}
template <class AllocatorT>
+bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
+ if (!obj->IsCode()) return false;
+ Code* code = Code::cast(obj);
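+  // Bytecode handlers are only referenced via the interpreter dispatch table;
+  // the DeserializeLazy handler is deliberately excluded here.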
+ if (isolate()->heap()->IsDeserializeLazyHandler(code)) return false;
+ return (code->kind() == Code::BYTECODE_HANDLER);
+}
+
+template <class AllocatorT>
void Serializer<AllocatorT>::PutRoot(
int root_index, HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
@@ -379,19 +394,44 @@ int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
return static_cast<int32_t>(reference.off_heap_backing_store_index());
}
-// When a JSArrayBuffer is neutered, the FixedTypedArray that points to the
-// same backing store does not know anything about it. This fixup step finds
-// neutered TypedArrays and clears the values in the FixedTypedArray so that
-// we don't try to serialize the now invalid backing store.
template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::FixupIfNeutered() {
- JSTypedArray* array = JSTypedArray::cast(object_);
- if (!array->WasNeutered()) return;
+void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
+ JSTypedArray* typed_array = JSTypedArray::cast(object_);
+ FixedTypedArrayBase* elements =
+ FixedTypedArrayBase::cast(typed_array->elements());
+
+ if (!typed_array->WasNeutered()) {
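+    // A FixedTypedArray with a null base_pointer keeps its data in an
+    // embedder-allocated (off-heap) backing store.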
+ bool off_heap = elements->base_pointer() == nullptr;
+
+ if (off_heap) {
+ // Explicitly serialize the backing store now.
+ JSArrayBuffer* buffer = JSArrayBuffer::cast(typed_array->buffer());
+ CHECK(buffer->byte_length()->IsSmi());
+ CHECK(typed_array->byte_offset()->IsSmi());
+ int32_t byte_length = NumberToInt32(buffer->byte_length());
+ int32_t byte_offset = NumberToInt32(typed_array->byte_offset());
- FixedTypedArrayBase* fta = FixedTypedArrayBase::cast(array->elements());
- DCHECK(fta->base_pointer() == nullptr);
- fta->set_external_pointer(Smi::kZero);
- fta->set_length(0);
+ // We need to calculate the backing store from the external pointer
+ // because the ArrayBuffer may already have been serialized.
+ void* backing_store = reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(elements->external_pointer()) -
+ byte_offset);
+ int32_t ref = SerializeBackingStore(backing_store, byte_length);
+
+ // The external_pointer is the backing_store + typed_array->byte_offset.
+ // To properly share the buffer, we set the backing store ref here. On
+ // deserialization we re-add the byte_offset to external_pointer.
+ elements->set_external_pointer(Smi::FromInt(ref));
+ }
+ } else {
+    // When a JSArrayBuffer is neutered, the FixedTypedArray that points to
+    // the same backing store does not know anything about it. Clear the
+    // values in the FixedTypedArray so that we don't try to serialize the
+    // now-invalid backing store.
+ elements->set_external_pointer(Smi::kZero);
+ elements->set_length(0);
+ }
+ SerializeObject();
}
template <class AllocatorT>
@@ -412,26 +452,6 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
}
template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializeFixedTypedArray() {
- FixedTypedArrayBase* fta = FixedTypedArrayBase::cast(object_);
- void* backing_store = fta->DataPtr();
- // We cannot store byte_length larger than Smi range in the snapshot.
- CHECK(fta->ByteLength() < Smi::kMaxValue);
- int32_t byte_length = static_cast<int32_t>(fta->ByteLength());
-
- // The heap contains empty FixedTypedArrays for each type, with a byte_length
- // of 0 (e.g. empty_fixed_uint8_array). These look like they are are 'on-heap'
- // but have no data to copy, so we skip the backing store here.
-
- // The embedder-allocated backing store only exists for the off-heap case.
- if (byte_length > 0 && fta->base_pointer() == nullptr) {
- int32_t ref = SerializeBackingStore(backing_store, byte_length);
- fta->set_external_pointer(Smi::FromInt(ref));
- }
- SerializeObject();
-}
-
-template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
Heap* heap = serializer_->isolate()->heap();
if (object_->map() != heap->native_source_string_map()) {
@@ -559,16 +579,13 @@ void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
SeqTwoByteString::cast(object_)->clear_padding();
}
if (object_->IsJSTypedArray()) {
- FixupIfNeutered();
+ SerializeJSTypedArray();
+ return;
}
if (object_->IsJSArrayBuffer()) {
SerializeJSArrayBuffer();
return;
}
- if (object_->IsFixedTypedArrayBase()) {
- SerializeFixedTypedArray();
- return;
- }
// We don't expect fillers.
DCHECK(!object_->IsFiller());
@@ -795,7 +812,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
int to_skip = up_to_offset - bytes_processed_so_far_;
int bytes_to_output = to_skip;
bytes_processed_so_far_ += to_skip;
- DCHECK(to_skip >= 0);
+ DCHECK_GE(to_skip, 0);
if (bytes_to_output != 0) {
DCHECK(to_skip == bytes_to_output);
if (IsAligned(bytes_to_output, kPointerAlignment) &&
@@ -810,7 +827,22 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
// Check that we do not serialize uninitialized memory.
__msan_check_mem_is_initialized(object_start + base, bytes_to_output);
#endif // MEMORY_SANITIZER
- sink_->PutRaw(object_start + base, bytes_to_output, "Bytes");
+ if (object_->IsBytecodeArray()) {
+ // The code age byte can be changed concurrently by GC.
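+      // To keep the snapshot deterministic, emit the canonical
+      // kNoAgeBytecodeAge value instead of the current age byte.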
+ const int bytes_to_age_byte = BytecodeArray::kBytecodeAgeOffset - base;
+ if (0 <= bytes_to_age_byte && bytes_to_age_byte < bytes_to_output) {
+ sink_->PutRaw(object_start + base, bytes_to_age_byte, "Bytes");
+ byte bytecode_age = BytecodeArray::kNoAgeBytecodeAge;
+ sink_->PutRaw(&bytecode_age, 1, "Bytes");
+ const int bytes_written = bytes_to_age_byte + 1;
+ sink_->PutRaw(object_start + base + bytes_written,
+ bytes_to_output - bytes_written, "Bytes");
+ } else {
+ sink_->PutRaw(object_start + base, bytes_to_output, "Bytes");
+ }
+ } else {
+ sink_->PutRaw(object_start + base, bytes_to_output, "Bytes");
+ }
}
}
@@ -822,7 +854,7 @@ int Serializer<AllocatorT>::ObjectSerializer::SkipTo(Address to) {
bytes_processed_so_far_ += to_skip;
// This assert will fail if the reloc info gives us the target_address_address
// locations in a non-ascending order. Luckily that doesn't happen.
- DCHECK(to_skip >= 0);
+ DCHECK_GE(to_skip, 0);
return to_skip;
}
@@ -863,6 +895,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
}
// Explicit instantiation.
+template class Serializer<BuiltinSerializerAllocator>;
template class Serializer<DefaultSerializerAllocator>;
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 1fe607b530..eda25fbd35 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -44,7 +44,7 @@ class CodeAddressMap : public CodeEventLogger {
NameMap() : impl_() {}
~NameMap() {
- for (base::HashMap::Entry* p = impl_.Start(); p != NULL;
+ for (base::HashMap::Entry* p = impl_.Start(); p != nullptr;
p = impl_.Next(p)) {
DeleteArray(static_cast<const char*>(p->value));
}
@@ -52,19 +52,20 @@ class CodeAddressMap : public CodeEventLogger {
void Insert(Address code_address, const char* name, int name_size) {
base::HashMap::Entry* entry = FindOrCreateEntry(code_address);
- if (entry->value == NULL) {
+ if (entry->value == nullptr) {
entry->value = CopyName(name, name_size);
}
}
const char* Lookup(Address code_address) {
base::HashMap::Entry* entry = FindEntry(code_address);
- return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+ return (entry != nullptr) ? static_cast<const char*>(entry->value)
+ : nullptr;
}
void Remove(Address code_address) {
base::HashMap::Entry* entry = FindEntry(code_address);
- if (entry != NULL) {
+ if (entry != nullptr) {
DeleteArray(static_cast<char*>(entry->value));
RemoveEntry(entry);
}
@@ -73,11 +74,11 @@ class CodeAddressMap : public CodeEventLogger {
void Move(Address from, Address to) {
if (from == to) return;
base::HashMap::Entry* from_entry = FindEntry(from);
- DCHECK(from_entry != NULL);
+ DCHECK_NOT_NULL(from_entry);
void* value = from_entry->value;
RemoveEntry(from_entry);
base::HashMap::Entry* to_entry = FindOrCreateEntry(to);
- DCHECK(to_entry->value == NULL);
+ DCHECK_NULL(to_entry->value);
to_entry->value = value;
}
@@ -193,6 +194,9 @@ class Serializer : public SerializerDeserializer {
HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
int skip, BuiltinReferenceSerializationMode mode = kDefault);
+ // Returns true if the given heap object is a bytecode handler code object.
+ bool ObjectIsBytecodeHandler(HeapObject* obj) const;
+
inline void FlushSkip(int skip) {
if (skip != 0) {
sink_.Put(kSkip, "SkipFromSerializeObject");
@@ -303,9 +307,8 @@ class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
void OutputCode(int size);
int SkipTo(Address to);
int32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
- void FixupIfNeutered();
+ void SerializeJSTypedArray();
void SerializeJSArrayBuffer();
- void SerializeFixedTypedArray();
void SerializeExternalString();
void SerializeExternalStringAsSequentialString();
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index da528a50ba..e7efd87bd8 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -96,11 +96,8 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
BuiltinSnapshotData builtin_snapshot_data(builtin_data);
+ CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
- builtin_deserializer.ReserveAndInitializeBuiltinsTableForBuiltin(builtin_id);
-
- DisallowHeapAllocation no_gc;
-
Code* code = builtin_deserializer.DeserializeBuiltin(builtin_id);
DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
@@ -111,6 +108,40 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
Builtins::name(builtin_id), bytes, ms);
}
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+ isolate->logger()->LogCodeObject(code);
+ }
+
+ return code;
+}
+
+// static
+Code* Snapshot::DeserializeHandler(Isolate* isolate,
+ interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+
+ const v8::StartupData* blob = isolate->snapshot_blob();
+ Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
+ BuiltinSnapshotData builtin_snapshot_data(builtin_data);
+
+ CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
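+  // The modification scope allows writing to the code space, which may
+  // otherwise be write-protected, while the handler is deserialized.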
+ BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
+ Code* code = builtin_deserializer.DeserializeHandler(bytecode, operand_scale);
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int bytes = code->Size();
+ PrintF("[Deserializing handler %s (%d bytes) took %0.3f ms]\n",
+ interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str(),
+ bytes, ms);
+ }
+
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+ isolate->logger()->LogCodeObject(code);
+ }
+
return code;
}
@@ -349,7 +380,8 @@ Vector<const byte> BuiltinSnapshotData::Payload() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
- int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
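+  // The offsets table now covers both builtins and bytecode handlers.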
+ const int builtin_offsets_size =
+ BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + payload_length);
DCHECK_GT(payload_length, builtin_offsets_size);
@@ -360,13 +392,15 @@ Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
uint32_t reservations_size =
GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
- int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
+ const int builtin_offsets_size =
+ BuiltinSnapshotUtils::kNumberOfCodeObjects * kUInt32Size;
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + payload_length);
DCHECK_GT(payload_length, builtin_offsets_size);
const uint32_t* data = reinterpret_cast<const uint32_t*>(
payload + payload_length - builtin_offsets_size);
- return Vector<const uint32_t>(data, Builtins::builtin_count);
+ return Vector<const uint32_t>(data,
+ BuiltinSnapshotUtils::kNumberOfCodeObjects);
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/snapshot-empty.cc b/deps/v8/src/snapshot/snapshot-empty.cc
index 35cb6c38f5..a13f2e8870 100644
--- a/deps/v8/src/snapshot/snapshot-empty.cc
+++ b/deps/v8/src/snapshot/snapshot-empty.cc
@@ -21,7 +21,6 @@ void ReadNatives() {}
void DisposeNatives() {}
#endif // V8_USE_EXTERNAL_STARTUP_DATA
-
-const v8::StartupData* Snapshot::DefaultSnapshotBlob() { return NULL; }
+const v8::StartupData* Snapshot::DefaultSnapshotBlob() { return nullptr; }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-external.cc b/deps/v8/src/snapshot/snapshot-external.cc
index 67dcb60f0b..fc68a85c5f 100644
--- a/deps/v8/src/snapshot/snapshot-external.cc
+++ b/deps/v8/src/snapshot/snapshot-external.cc
@@ -20,14 +20,14 @@ namespace v8 {
namespace internal {
static base::LazyMutex external_startup_data_mutex = LAZY_MUTEX_INITIALIZER;
-static v8::StartupData external_startup_blob = {NULL, 0};
+static v8::StartupData external_startup_blob = {nullptr, 0};
void SetSnapshotFromFile(StartupData* snapshot_blob) {
base::LockGuard<base::Mutex> lock_guard(
external_startup_data_mutex.Pointer());
DCHECK(snapshot_blob);
DCHECK(snapshot_blob->data);
- DCHECK(snapshot_blob->raw_size > 0);
+ DCHECK_GT(snapshot_blob->raw_size, 0);
DCHECK(!external_startup_blob.data);
DCHECK(Snapshot::SnapshotIsValid(snapshot_blob));
external_startup_blob = *snapshot_blob;
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index 66210be709..77b19d51a1 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
- DCHECK(integer < 1 << 30);
+ DCHECK_LT(integer, 1 << 30);
integer <<= 2;
int bytes = 1;
if (integer > 0xff) bytes = 2;
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 0c639d4c53..2ffe5b6086 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -98,6 +98,12 @@ class Snapshot : public AllStatic {
// initialized.
static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
+ // Deserializes a single given handler code object. Intended to be called at
+ // runtime after the isolate has been fully initialized.
+ static Code* DeserializeHandler(Isolate* isolate,
+ interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale);
+
// ---------------- Helper methods ----------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index a6e9d6a203..91432e185a 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -18,7 +18,8 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
BuiltinDeserializer builtin_deserializer(isolate, builtin_data_);
- if (!Deserializer::ReserveSpace(this, &builtin_deserializer)) {
+ if (!DefaultDeserializerAllocator::ReserveSpace(this,
+ &builtin_deserializer)) {
V8::FatalProcessOutOfMemory("StartupDeserializer");
}
@@ -33,18 +34,17 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
-
- isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate->heap()->RepairFreeListsAfterDeserialization();
isolate->heap()->IterateWeakRoots(this, VISIT_ALL);
DeserializeDeferredObjects();
RestoreExternalReferenceRedirectors(accessor_infos());
+ RestoreExternalReferenceRedirectors(call_handler_infos());
// Deserialize eager builtins from the builtin snapshot. Note that deferred
// objects must have been deserialized prior to this.
- builtin_deserializer.DeserializeEagerBuiltins();
+ builtin_deserializer.DeserializeEagerBuiltinsAndHandlers();
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
@@ -71,7 +71,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// to display the builtin names.
PrintDisassembledCodeObjects();
- if (FLAG_rehash_snapshot && can_rehash()) Rehash();
+ if (FLAG_rehash_snapshot && can_rehash()) RehashHeap();
}
void StartupDeserializer::FlushICacheForNewIsolate() {
@@ -93,7 +93,7 @@ void StartupDeserializer::PrintDisassembledCodeObjects() {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
- for (HeapObject* obj = iterator.next(); obj != NULL;
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
if (obj->IsCode()) {
Code::cast(obj)->Disassemble(nullptr, os);
@@ -103,12 +103,10 @@ void StartupDeserializer::PrintDisassembledCodeObjects() {
#endif
}
-void StartupDeserializer::Rehash() {
+void StartupDeserializer::RehashHeap() {
DCHECK(FLAG_rehash_snapshot && can_rehash());
isolate()->heap()->InitializeHashSeed();
- isolate()->heap()->string_table()->Rehash();
- isolate()->heap()->weak_object_to_code_table()->Rehash();
- SortMapDescriptors();
+ Rehash();
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/startup-deserializer.h b/deps/v8/src/snapshot/startup-deserializer.h
index 269ac8b555..6e1b5db332 100644
--- a/deps/v8/src/snapshot/startup-deserializer.h
+++ b/deps/v8/src/snapshot/startup-deserializer.h
@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
-class StartupDeserializer final : public Deserializer {
+class StartupDeserializer final : public Deserializer<> {
public:
StartupDeserializer(const SnapshotData* startup_data,
const BuiltinSnapshotData* builtin_data)
@@ -26,7 +26,7 @@ class StartupDeserializer final : public Deserializer {
void PrintDisassembledCodeObjects();
// Rehash after deserializing an isolate.
- void Rehash();
+ void RehashHeap();
const BuiltinSnapshotData* builtin_data_;
};
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 8fec389ee9..8b4a79b8b1 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -17,18 +17,19 @@ StartupSerializer::StartupSerializer(
: Serializer(isolate),
clear_function_code_(function_code_handling ==
v8::SnapshotCreator::FunctionCodeHandling::kClear),
- serializing_builtins_(false),
can_be_rehashed_(true) {
InitializeCodeAddressMap();
}
StartupSerializer::~StartupSerializer() {
RestoreExternalReferenceRedirectors(accessor_infos_);
+ RestoreExternalReferenceRedirectors(call_handler_infos_);
OutputStatistics("StartupSerializer");
}
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
+ DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
DCHECK(!obj->IsJSFunction());
if (clear_function_code() && obj->IsBytecodeArray()) {
@@ -36,9 +37,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
BuiltinReferenceSerializationMode mode =
- (clear_function_code() && !serializing_builtins_)
- ? kCanonicalizeCompileLazy
- : kDefault;
+ clear_function_code() ? kCanonicalizeCompileLazy : kDefault;
if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip, mode)) {
return;
}
@@ -64,6 +63,13 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
Address original_address = Foreign::cast(info->getter())->foreign_address();
Foreign::cast(info->js_getter())->set_foreign_address(original_address);
accessor_infos_.push_back(info);
+ } else if (isolate()->external_reference_redirector() &&
+ obj->IsCallHandlerInfo()) {
+ CallHandlerInfo* info = CallHandlerInfo::cast(obj);
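+    // As for AccessorInfo above, serialize the original callback address and
+    // restore the redirection in the destructor.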
+ Address original_address =
+ Foreign::cast(info->callback())->foreign_address();
+ Foreign::cast(info->js_callback())->set_foreign_address(original_address);
+ call_handler_infos_.push_back(info);
} else if (obj->IsScript() && Script::cast(obj)->IsUserJavaScript()) {
Script::cast(obj)->set_context_data(
isolate()->heap()->uninitialized_symbol());
@@ -75,7 +81,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
}
- if (obj->IsHashTable()) CheckRehashability(obj);
+ CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
@@ -107,9 +113,6 @@ int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
}
void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
- // We expect the builtins tag after builtins have been serialized.
- DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
- serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
sink_.Put(kSynchronize, "Synchronize");
}
@@ -121,19 +124,13 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK(isolate->handle_scope_implementer()->blocks()->empty());
CHECK_EQ(0, isolate->global_handles()->global_handles_count());
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
- // First visit immortal immovables to make sure they end up in the first page.
- serializing_immortal_immovables_roots_ = true;
- isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
- // Check that immortal immovable roots are allocated on the first page.
- DCHECK(allocator()->HasNotExceededFirstPageOfEachSpace());
- serializing_immortal_immovables_roots_ = false;
- // Visit the rest of the strong roots.
+ // Visit smi roots.
// Clear the stack limits to make the snapshot reproducible.
// Reset it again afterwards.
isolate->heap()->ClearStackLimits();
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->SetStackLimits();
-
+  // First visit immortal immovables to make sure they end up in the first
+  // page.
isolate->heap()->IterateStrongRoots(this,
VISIT_ONLY_STRONG_FOR_SERIALIZATION);
}
@@ -149,20 +146,15 @@ void StartupSerializer::VisitRootPointers(Root root, Object** start,
int skip = 0;
for (Object** current = start; current < end; current++) {
int root_index = static_cast<int>(current - start);
- if (RootShouldBeSkipped(root_index)) {
- skip += kPointerSize;
- continue;
+ if ((*current)->IsSmi()) {
+ FlushSkip(skip);
+ PutSmi(Smi::cast(*current));
} else {
- if ((*current)->IsSmi()) {
- FlushSkip(skip);
- PutSmi(Smi::cast(*current));
- } else {
- SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject,
- skip);
- }
- root_has_been_serialized_.set(root_index);
- skip = 0;
+ SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject,
+ skip);
}
+ root_has_been_serialized_.set(root_index);
+ skip = 0;
}
FlushSkip(skip);
} else {
@@ -170,26 +162,10 @@ void StartupSerializer::VisitRootPointers(Root root, Object** start,
}
}
-bool StartupSerializer::RootShouldBeSkipped(int root_index) {
- if (root_index == Heap::kStackLimitRootIndex ||
- root_index == Heap::kRealStackLimitRootIndex) {
- return true;
- }
- return Heap::RootIsImmortalImmovable(root_index) !=
- serializing_immortal_immovables_roots_;
-}
-
-void StartupSerializer::CheckRehashability(HeapObject* table) {
- DCHECK(table->IsHashTable());
+void StartupSerializer::CheckRehashability(HeapObject* obj) {
if (!can_be_rehashed_) return;
- // We can only correctly rehash if the four hash tables below are the only
- // ones that we deserialize.
- if (table->IsUnseededNumberDictionary()) return;
- if (table == isolate()->heap()->empty_ordered_hash_table()) return;
- if (table == isolate()->heap()->empty_slow_element_dictionary()) return;
- if (table == isolate()->heap()->empty_property_dictionary()) return;
- if (table == isolate()->heap()->weak_object_to_code_table()) return;
- if (table == isolate()->heap()->string_table()) return;
+ if (!obj->NeedsRehashing()) return;
+ if (obj->CanBeRehashed()) return;
can_be_rehashed_ = false;
}
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 9c575adbe1..69985388e9 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -69,20 +69,13 @@ class StartupSerializer : public Serializer<> {
void Synchronize(VisitorSynchronization::SyncTag tag) override;
bool MustBeDeferred(HeapObject* object) override;
- // Some roots should not be serialized, because their actual value depends on
- // absolute addresses and they are reset after deserialization, anyway.
- // In the first pass over the root list, we only serialize immortal immovable
- // roots. In the second pass, we serialize the rest.
- bool RootShouldBeSkipped(int root_index);
-
- void CheckRehashability(HeapObject* hashtable);
+ void CheckRehashability(HeapObject* obj);
const bool clear_function_code_;
- bool serializing_builtins_;
- bool serializing_immortal_immovables_roots_;
std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
PartialCacheIndexMap partial_cache_index_map_;
std::vector<AccessorInfo*> accessor_infos_;
+ std::vector<CallHandlerInfo*> call_handler_infos_;
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;