Diffstat (limited to 'deps/v8/src/mark-compact.cc')
-rw-r--r--  deps/v8/src/mark-compact.cc | 762
1 file changed, 210 insertions(+), 552 deletions(-)
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index dcce8daddf..8ca14db506 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -62,24 +62,24 @@ MarkCompactCollector::MarkCompactCollector() :  // NOLINT
       sweep_precisely_(false),
       reduce_memory_footprint_(false),
       abort_incremental_marking_(false),
+      marking_parity_(ODD_MARKING_PARITY),
       compacting_(false),
       was_marked_incrementally_(false),
       tracer_(NULL),
       migration_slots_buffer_(NULL),
       heap_(NULL),
       code_flusher_(NULL),
-      encountered_weak_maps_(NULL),
-      marker_(this, this) { }
+      encountered_weak_maps_(NULL) { }


-#ifdef DEBUG
+#ifdef VERIFY_HEAP
 class VerifyMarkingVisitor: public ObjectVisitor {
  public:
   void VisitPointers(Object** start, Object** end) {
     for (Object** current = start; current < end; current++) {
       if ((*current)->IsHeapObject()) {
         HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
+        CHECK(HEAP->mark_compact_collector()->IsMarked(object));
       }
     }
   }
@@ -96,7 +96,7 @@ static void VerifyMarking(Address bottom, Address top) {
        current += kPointerSize) {
     object = HeapObject::FromAddress(current);
     if (MarkCompactCollector::IsMarked(object)) {
-      ASSERT(current >= next_object_must_be_here_or_later);
+      CHECK(current >= next_object_must_be_here_or_later);
       object->Iterate(&visitor);
       next_object_must_be_here_or_later = current + object->Size();
     }
@@ -109,12 +109,12 @@ static void VerifyMarking(NewSpace* space) {
   NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
-  ASSERT_EQ(space->bottom(),
+  CHECK_EQ(space->bottom(),
             NewSpacePage::FromAddress(space->bottom())->area_start());
   while (it.has_next()) {
     NewSpacePage* page = it.next();
     Address limit = it.has_next() ? page->area_end() : end;
-    ASSERT(limit == end || !page->Contains(end));
+    CHECK(limit == end || !page->Contains(end));
     VerifyMarking(page->area_start(), limit);
   }
 }
@@ -174,7 +174,7 @@ static void VerifyEvacuation(Address bottom, Address top) {
        current += kPointerSize) {
     object = HeapObject::FromAddress(current);
     if (MarkCompactCollector::IsMarked(object)) {
-      ASSERT(current >= next_object_must_be_here_or_later);
+      CHECK(current >= next_object_must_be_here_or_later);
       object->Iterate(&visitor);
       next_object_must_be_here_or_later = current + object->Size();
     }
@@ -190,7 +190,7 @@ static void VerifyEvacuation(NewSpace* space) {
     NewSpacePage* page = it.next();
     Address current = page->area_start();
     Address limit = it.has_next() ? page->area_end() : space->top();
-    ASSERT(limit == space->top() || !page->Contains(space->top()));
+    CHECK(limit == space->top() || !page->Contains(space->top()));
     while (current < limit) {
       HeapObject* object = HeapObject::FromAddress(current);
       object->Iterate(&visitor);
@@ -222,8 +222,10 @@ static void VerifyEvacuation(Heap* heap) {
   VerifyEvacuationVisitor visitor;
   heap->IterateStrongRoots(&visitor, VISIT_ALL);
 }
+#endif  // VERIFY_HEAP


+#ifdef DEBUG
 class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
  public:
   VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
@@ -348,7 +350,9 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
     CollectEvacuationCandidates(heap()->old_pointer_space());
     CollectEvacuationCandidates(heap()->old_data_space());

-    if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
+    if (FLAG_compact_code_space &&
+        (mode == NON_INCREMENTAL_COMPACTION ||
+         FLAG_incremental_code_compaction)) {
       CollectEvacuationCandidates(heap()->code_space());
     } else if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->code_space());
@@ -383,7 +387,7 @@ void MarkCompactCollector::CollectGarbage() {

   ClearWeakMaps();

-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyMarking(heap_);
   }
@@ -401,11 +405,18 @@ void MarkCompactCollector::CollectGarbage() {

   Finish();

+  if (marking_parity_ == EVEN_MARKING_PARITY) {
+    marking_parity_ = ODD_MARKING_PARITY;
+  } else {
+    ASSERT(marking_parity_ == ODD_MARKING_PARITY);
+    marking_parity_ = EVEN_MARKING_PARITY;
+  }
+
   tracer_ = NULL;
 }


-#ifdef DEBUG
+#ifdef VERIFY_HEAP
 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
   PageIterator it(space);
@@ -416,6 +427,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
   }
 }

+
 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
   NewSpacePageIterator it(space->bottom(), space->top());

@@ -426,6 +438,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
   }
 }

+
 void MarkCompactCollector::VerifyMarkbitsAreClean() {
   VerifyMarkbitsAreClean(heap_->old_pointer_space());
   VerifyMarkbitsAreClean(heap_->old_data_space());
@@ -437,11 +450,11 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
   LargeObjectIterator it(heap_->lo_space());
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     MarkBit mark_bit = Marking::MarkBitFrom(obj);
-    ASSERT(Marking::IsWhite(mark_bit));
-    ASSERT_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
+    CHECK(Marking::IsWhite(mark_bit));
+    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
   }
 }
-#endif
+#endif  // VERIFY_HEAP


 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
@@ -475,6 +488,7 @@ void MarkCompactCollector::ClearMarkbits() {
     MarkBit mark_bit = Marking::MarkBitFrom(obj);
     mark_bit.Clear();
     mark_bit.Next().Clear();
+    Page::FromAddress(obj->address())->ResetProgressBar();
     Page::FromAddress(obj->address())->ResetLiveBytes();
   }
 }
@@ -803,7 +817,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
     space->PrepareForMarkCompact();
   }

-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   if (!was_marked_incrementally_ && FLAG_verify_heap) {
     VerifyMarkbitsAreClean();
   }
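The marking_parity_ field added in the CollectGarbage hunk above alternates on every full collection. Two states are enough to tell work stamped in the current cycle from work left over from the previous one, with no wrap-around to worry about. A minimal standalone sketch of the toggle (toy declarations, not V8's):

    enum MarkingParity { ODD_MARKING_PARITY, EVEN_MARKING_PARITY };

    // Flip at the end of every full mark-compact cycle, mirroring the added
    // branch in CollectGarbage: anything stamped with the current parity was
    // processed this cycle, anything with the opposite parity is older.
    inline MarkingParity FlipParity(MarkingParity parity) {
      return parity == ODD_MARKING_PARITY ? EVEN_MARKING_PARITY
                                          : ODD_MARKING_PARITY;
    }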
@@ -854,133 +868,144 @@ void MarkCompactCollector::Finish() {
 // and continue with marking.  This process repeats until all reachable
 // objects have been marked.

-class CodeFlusher {
- public:
-  explicit CodeFlusher(Isolate* isolate)
-      : isolate_(isolate),
-        jsfunction_candidates_head_(NULL),
-        shared_function_info_candidates_head_(NULL) {}
+void CodeFlusher::ProcessJSFunctionCandidates() {
+  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+  Object* undefined = isolate_->heap()->undefined_value();

-  void AddCandidate(SharedFunctionInfo* shared_info) {
-    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
-    shared_function_info_candidates_head_ = shared_info;
-  }
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    ClearNextCandidate(candidate, undefined);

-  void AddCandidate(JSFunction* function) {
-    ASSERT(function->code() == function->shared()->code());
+    SharedFunctionInfo* shared = candidate->shared();

-    SetNextCandidate(function, jsfunction_candidates_head_);
-    jsfunction_candidates_head_ = function;
-  }
+    Code* code = shared->code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    if (!code_mark.Get()) {
+      shared->set_code(lazy_compile);
+      candidate->set_code(lazy_compile);
+    } else if (code == lazy_compile) {
+      candidate->set_code(lazy_compile);
+    }

-  void ProcessCandidates() {
-    ProcessSharedFunctionInfoCandidates();
-    ProcessJSFunctionCandidates();
-  }
+    // We are in the middle of a GC cycle so the write barrier in the code
+    // setter did not record the slot update and we have to do that manually.
+    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
+    isolate_->heap()->mark_compact_collector()->
+        RecordCodeEntrySlot(slot, target);

- private:
-  void ProcessJSFunctionCandidates() {
-    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+    Object** shared_code_slot =
+        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
+    isolate_->heap()->mark_compact_collector()->
+        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);

-    JSFunction* candidate = jsfunction_candidates_head_;
-    JSFunction* next_candidate;
-    while (candidate != NULL) {
-      next_candidate = GetNextCandidate(candidate);
+    candidate = next_candidate;
+  }

-      SharedFunctionInfo* shared = candidate->shared();
+  jsfunction_candidates_head_ = NULL;
+}

-      Code* code = shared->code();
-      MarkBit code_mark = Marking::MarkBitFrom(code);
-      if (!code_mark.Get()) {
-        shared->set_code(lazy_compile);
-        candidate->set_code(lazy_compile);
-      } else {
-        candidate->set_code(shared->code());
-      }

-      // We are in the middle of a GC cycle so the write barrier in the code
-      // setter did not record the slot update and we have to do that manually.
-      Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
-      Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
-      isolate_->heap()->mark_compact_collector()->
-          RecordCodeEntrySlot(slot, target);
+void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
+  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

-      RecordSharedFunctionInfoCodeSlot(shared);
+  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+  SharedFunctionInfo* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    ClearNextCandidate(candidate);

-      candidate = next_candidate;
+    Code* code = candidate->code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    if (!code_mark.Get()) {
+      candidate->set_code(lazy_compile);
     }

-    jsfunction_candidates_head_ = NULL;
+    Object** code_slot =
+        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
+    isolate_->heap()->mark_compact_collector()->
+        RecordSlot(code_slot, code_slot, *code_slot);
+
+    candidate = next_candidate;
   }

+  shared_function_info_candidates_head_ = NULL;
+}

-  void ProcessSharedFunctionInfoCandidates() {
-    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

-    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
-    SharedFunctionInfo* next_candidate;
+void CodeFlusher::EvictCandidate(JSFunction* function) {
+  ASSERT(!function->next_function_link()->IsUndefined());
+  Object* undefined = isolate_->heap()->undefined_value();
+
+  // The function is no longer a candidate, make sure it gets visited
+  // again so that previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(function);
+
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  if (candidate == function) {
+    next_candidate = GetNextCandidate(function);
+    jsfunction_candidates_head_ = next_candidate;
+    ClearNextCandidate(function, undefined);
+  } else {
     while (candidate != NULL) {
       next_candidate = GetNextCandidate(candidate);
-      SetNextCandidate(candidate, NULL);

-      Code* code = candidate->code();
-      MarkBit code_mark = Marking::MarkBitFrom(code);
-      if (!code_mark.Get()) {
-        candidate->set_code(lazy_compile);
+      if (next_candidate == function) {
+        next_candidate = GetNextCandidate(function);
+        SetNextCandidate(candidate, next_candidate);
+        ClearNextCandidate(function, undefined);
       }

-      RecordSharedFunctionInfoCodeSlot(candidate);
-
       candidate = next_candidate;
     }
-
-    shared_function_info_candidates_head_ = NULL;
   }
+}

-  void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
-    Object** slot = HeapObject::RawField(shared,
-                                         SharedFunctionInfo::kCodeOffset);
-    isolate_->heap()->mark_compact_collector()->
-        RecordSlot(slot, slot, HeapObject::cast(*slot));
-  }

-  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
-    return reinterpret_cast<JSFunction**>(
-        candidate->address() + JSFunction::kCodeEntryOffset);
-  }
+void CodeFlusher::EvictJSFunctionCandidates() {
+  Object* undefined = isolate_->heap()->undefined_value();

-  static JSFunction* GetNextCandidate(JSFunction* candidate) {
-    return *GetNextCandidateField(candidate);
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    ClearNextCandidate(candidate, undefined);
+    candidate = next_candidate;
   }

-  static void SetNextCandidate(JSFunction* candidate,
-                               JSFunction* next_candidate) {
-    *GetNextCandidateField(candidate) = next_candidate;
-  }
+  jsfunction_candidates_head_ = NULL;
+}

-  static SharedFunctionInfo** GetNextCandidateField(
-      SharedFunctionInfo* candidate) {
-    Code* code = candidate->code();
-    return reinterpret_cast<SharedFunctionInfo**>(
-        code->address() + Code::kGCMetadataOffset);
-  }

-  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
-    return reinterpret_cast<SharedFunctionInfo*>(
-        candidate->code()->gc_metadata());
+void CodeFlusher::EvictSharedFunctionInfoCandidates() {
+  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+  SharedFunctionInfo* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    ClearNextCandidate(candidate);
+    candidate = next_candidate;
   }

-  static void SetNextCandidate(SharedFunctionInfo* candidate,
-                               SharedFunctionInfo* next_candidate) {
-    candidate->code()->set_gc_metadata(next_candidate);
-  }
+  shared_function_info_candidates_head_ = NULL;
+}

-  Isolate* isolate_;
-  JSFunction* jsfunction_candidates_head_;
-  SharedFunctionInfo* shared_function_info_candidates_head_;

-  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
-};
+void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
+  Heap* heap = isolate_->heap();
+
+  JSFunction** slot = &jsfunction_candidates_head_;
+  JSFunction* candidate = jsfunction_candidates_head_;
+  while (candidate != NULL) {
+    if (heap->InFromSpace(candidate)) {
+      v->VisitPointer(reinterpret_cast<Object**>(slot));
+    }
+    candidate = GetNextCandidate(*slot);
+    slot = GetNextCandidateSlot(*slot);
+  }
+}


 MarkCompactCollector::~MarkCompactCollector() {
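Throughout the CodeFlusher hunk above, the candidate sets are intrusive singly-linked lists: the "next" pointer is stored in a slot the waiting object does not otherwise need during GC (the removed accessors kept it in the JSFunction's code-entry field and in the code object's gc_metadata slot), so enqueuing allocates nothing and eviction is a plain list unlink with a head special case, as in EvictCandidate. A toy rendition of the pattern, with hypothetical names:

    // Toy stand-in for a heap object with a spare field that can carry the
    // intrusive "next candidate" link while the object sits on the list.
    struct Candidate {
      Candidate* spare_slot = nullptr;  // plays the role of the code-entry field
    };

    struct FlushList {
      Candidate* head = nullptr;

      // O(1), allocation-free push: thread the list through the spare field.
      void Add(Candidate* c) {
        c->spare_slot = head;
        head = c;
      }

      // Unlink one candidate (cf. CodeFlusher::EvictCandidate): the head is a
      // special case; otherwise find the predecessor and splice around it.
      void Evict(Candidate* c) {
        if (head == c) {
          head = c->spare_slot;
          c->spare_slot = nullptr;
          return;
        }
        for (Candidate* prev = head; prev != nullptr; prev = prev->spare_slot) {
          if (prev->spare_slot == c) {
            prev->spare_slot = c->spare_slot;
            c->spare_slot = nullptr;
            return;
          }
        }
      }
    };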
@@ -1063,11 +1088,23 @@ class MarkCompactMarkingVisitor
     }
   }

+  // Marks the object black and pushes it on the marking stack.
   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
     MarkBit mark = Marking::MarkBitFrom(object);
     heap->mark_compact_collector()->MarkObject(object, mark);
   }

+  // Marks the object black without pushing it on the marking stack.
+  // Returns true if object needed marking and false otherwise.
+  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (!mark_bit.Get()) {
+      heap->mark_compact_collector()->SetMark(object, mark_bit);
+      return true;
+    }
+    return false;
+  }
+
   // Mark object pointed to by p.
   INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                          Object** anchor_slot,
@@ -1120,6 +1157,11 @@ class MarkCompactMarkingVisitor
     return true;
   }

+  INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+    shared->BeforeVisitingPointers();
+  }
+
   static void VisitJSWeakMap(Map* map, HeapObject* object) {
     MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
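The two comments added above pin down the marking primitives: MarkObject blackens an object and pushes it, so its slots are scanned when the marking deque is drained, while MarkObjectWithoutPush only blackens, for callers that visit (or deliberately skip) the slots themselves, and reports whether any work was done. A self-contained sketch of both, with a toy node type in place of HeapObject:

    #include <vector>

    struct Node {
      bool marked = false;
      std::vector<Node*> children;
    };

    std::vector<Node*> marking_deque;

    // MarkObject flavour: blacken and push, so the object's children are
    // scanned when the deque is drained.
    void MarkAndPush(Node* n) {
      if (!n->marked) {
        n->marked = true;
        marking_deque.push_back(n);
      }
    }

    // MarkObjectWithoutPush flavour: blacken only. The caller promises to
    // visit the children itself (or wants them treated as weak); returns
    // whether any marking work was actually done.
    bool MarkWithoutPush(Node* n) {
      if (!n->marked) {
        n->marked = true;
        return true;
      }
      return false;
    }

    // Drain loop: pop, scan, push unmarked children; terminates because an
    // object is pushed at most once.
    void DrainDeque() {
      while (!marking_deque.empty()) {
        Node* n = marking_deque.back();
        marking_deque.pop_back();
        for (Node* child : n->children) MarkAndPush(child);
      }
    }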
@@ -1163,123 +1205,8 @@ class MarkCompactMarkingVisitor

   // Code flushing support.

-  // How many collections newly compiled code object will survive before being
-  // flushed.
-  static const int kCodeAgeThreshold = 5;
-
   static const int kRegExpCodeThreshold = 5;

-  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
-    Object* undefined = heap->undefined_value();
-    return (info->script() != undefined) &&
-        (reinterpret_cast<Script*>(info->script())->source() != undefined);
-  }
-
-
-  inline static bool IsCompiled(JSFunction* function) {
-    return function->code() !=
-        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
-  }
-
-  inline static bool IsCompiled(SharedFunctionInfo* function) {
-    return function->code() !=
-        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
-  }
-
-  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
-    SharedFunctionInfo* shared_info = function->unchecked_shared();
-
-    // Code is either on stack, in compilation cache or referenced
-    // by optimized version of function.
-    MarkBit code_mark = Marking::MarkBitFrom(function->code());
-    if (code_mark.Get()) {
-      if (!Marking::MarkBitFrom(shared_info).Get()) {
-        shared_info->set_code_age(0);
-      }
-      return false;
-    }
-
-    // We do not flush code for optimized functions.
-    if (function->code() != shared_info->code()) {
-      return false;
-    }
-
-    return IsFlushable(heap, shared_info);
-  }
-
-  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
-    // Code is either on stack, in compilation cache or referenced
-    // by optimized version of function.
-    MarkBit code_mark =
-        Marking::MarkBitFrom(shared_info->code());
-    if (code_mark.Get()) {
-      return false;
-    }
-
-    // The function must be compiled and have the source code available,
-    // to be able to recompile it in case we need the function again.
-    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
-      return false;
-    }
-
-    // We never flush code for Api functions.
-    Object* function_data = shared_info->function_data();
-    if (function_data->IsFunctionTemplateInfo()) {
-      return false;
-    }
-
-    // Only flush code for functions.
-    if (shared_info->code()->kind() != Code::FUNCTION) {
-      return false;
-    }
-
-    // Function must be lazy compilable.
-    if (!shared_info->allows_lazy_compilation()) {
-      return false;
-    }
-
-    // If this is a full script wrapped in a function we do no flush the code.
-    if (shared_info->is_toplevel()) {
-      return false;
-    }
-
-    // Age this shared function info.
-    if (shared_info->code_age() < kCodeAgeThreshold) {
-      shared_info->set_code_age(shared_info->code_age() + 1);
-      return false;
-    }
-
-    return true;
-  }
-
-
-  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
-    if (!IsFlushable(heap, function)) return false;
-
-    // This function's code looks flushable. But we have to postpone the
-    // decision until we see all functions that point to the same
-    // SharedFunctionInfo because some of them might be optimized.
-    // That would make the nonoptimized version of the code nonflushable,
-    // because it is required for bailing out from optimized code.
-    heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
-    return true;
-  }
-
-  static inline bool IsValidNotBuiltinContext(Object* ctx) {
-    return ctx->IsContext() &&
-        !Context::cast(ctx)->global_object()->IsJSBuiltinsObject();
-  }
-
-
-  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
-    SharedFunctionInfo::cast(object)->BeforeVisitingPointers();
-
-    FixedBodyVisitor<MarkCompactMarkingVisitor,
-                     SharedFunctionInfo::BodyDescriptor,
-                     void>::Visit(map, object);
-  }
-
   static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
                                           JSRegExp* re,
                                           bool is_ascii) {
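The removed IsFlushable chain spells out the flushing policy (these checks move behind the shared visitor rather than disappear; the Initialize hunk below stops registering the local flushing visitors): code must be cold (unmarked), lazily recompilable with source available, not API-bound, not a toplevel script, and must have stayed cold for kCodeAgeThreshold consecutive collections. The aging gate in isolation, as a hedged sketch (the threshold value mirrors the removed constant):

    // Ages a function's code by one GC cycle and reports whether it has been
    // cold long enough to flush: keep counting and answer false until the
    // threshold is reached. Any GC that finds the code in use resets the clock.
    struct CodeAge {
      static const int kCodeAgeThreshold = 5;
      int age = 0;

      bool AgeAndCheck() {
        if (age < kCodeAgeThreshold) {
          ++age;        // survived another collection, not yet flushable
          return false;
        }
        return true;    // cold for kCodeAgeThreshold cycles: candidate
      }

      void MarkUsed() { age = 0; }  // code was reached: restart the clock
    };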
@@ -1353,138 +1280,6 @@ class MarkCompactMarkingVisitor
     VisitJSRegExp(map, object);
   }

-
-  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
-                                                  HeapObject* object) {
-    Heap* heap = map->GetHeap();
-    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-    if (shared->ic_age() != heap->global_ic_age()) {
-      shared->ResetForNewContext(heap->global_ic_age());
-    }
-
-    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
-    if (!collector->is_code_flushing_enabled()) {
-      VisitSharedFunctionInfoGeneric(map, object);
-      return;
-    }
-    VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
-  }
-
-
-  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
-      Map* map, HeapObject* object, bool known_flush_code_candidate) {
-    Heap* heap = map->GetHeap();
-    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-
-    shared->BeforeVisitingPointers();
-
-    if (!known_flush_code_candidate) {
-      known_flush_code_candidate = IsFlushable(heap, shared);
-      if (known_flush_code_candidate) {
-        heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
-      }
-    }
-
-    VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
-  }
-
-
-  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->GetHeap();
-    MarkCompactCollector* collector = heap->mark_compact_collector();
-    if (!collector->is_code_flushing_enabled()) {
-      VisitJSFunction(map, object);
-      return;
-    }
-
-    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
-    // The function must have a valid context and not be a builtin.
-    bool flush_code_candidate = false;
-    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
-      flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
-    }
-
-    if (!flush_code_candidate) {
-      Code* code = jsfunction->shared()->code();
-      MarkBit code_mark = Marking::MarkBitFrom(code);
-      collector->MarkObject(code, code_mark);
-
-      if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
-        collector->MarkInlinedFunctionsCode(jsfunction->code());
-      }
-    }
-
-    VisitJSFunctionFields(map,
-                          reinterpret_cast<JSFunction*>(object),
-                          flush_code_candidate);
-  }
-
-
-  static void VisitJSFunction(Map* map, HeapObject* object) {
-    VisitJSFunctionFields(map,
-                          reinterpret_cast<JSFunction*>(object),
-                          false);
-  }
-
-
-  static inline void VisitJSFunctionFields(Map* map,
-                                           JSFunction* object,
-                                           bool flush_code_candidate) {
-    Heap* heap = map->GetHeap();
-
-    VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
-                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
-
-    if (!flush_code_candidate) {
-      VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
-    } else {
-      // Don't visit code object.
-
-      // Visit shared function info to avoid double checking of it's
-      // flushability.
-      SharedFunctionInfo* shared_info = object->unchecked_shared();
-      MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
-      if (!shared_info_mark.Get()) {
-        Map* shared_info_map = shared_info->map();
-        MarkBit shared_info_map_mark =
-            Marking::MarkBitFrom(shared_info_map);
-        heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
-        heap->mark_compact_collector()->MarkObject(shared_info_map,
-                                                   shared_info_map_mark);
-        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
-                                                   shared_info,
-                                                   true);
-      }
-    }
-
-    VisitPointers(
-        heap,
-        HeapObject::RawField(object,
-                             JSFunction::kCodeEntryOffset + kPointerSize),
-        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
-  }
-
-
-  static void VisitSharedFunctionInfoFields(Heap* heap,
-                                            HeapObject* object,
-                                            bool flush_code_candidate) {
-    VisitPointer(heap,
-                 HeapObject::RawField(object, SharedFunctionInfo::kNameOffset));
-
-    if (!flush_code_candidate) {
-      VisitPointer(heap,
-                   HeapObject::RawField(object,
-                                        SharedFunctionInfo::kCodeOffset));
-    }
-
-    VisitPointers(
-        heap,
-        HeapObject::RawField(object,
-                             SharedFunctionInfo::kOptimizedCodeMapOffset),
-        HeapObject::RawField(object, SharedFunctionInfo::kSize));
-  }
-
   static VisitorDispatchTable<Callback> non_count_table_;
 };
@@ -1544,7 +1339,8 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
     Map* map_obj = Map::cast(obj);
     ASSERT(map->instance_type() == MAP_TYPE);
     DescriptorArray* array = map_obj->instance_descriptors();
-    if (array != heap->empty_descriptor_array()) {
+    if (map_obj->owns_descriptors() &&
+        array != heap->empty_descriptor_array()) {
       int fixed_array_size = array->Size();
       heap->RecordObjectStats(FIXED_ARRAY_TYPE,
                               DESCRIPTOR_ARRAY_SUB_TYPE,
@@ -1620,12 +1416,6 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
 void MarkCompactMarkingVisitor::Initialize() {
   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();

-  table_.Register(kVisitSharedFunctionInfo,
-                  &VisitSharedFunctionInfoAndFlushCode);
-
-  table_.Register(kVisitJSFunction,
-                  &VisitJSFunctionAndFlushCode);
-
   table_.Register(kVisitJSRegExp,
                   &VisitRegExpAndFlushCode);
@@ -1700,26 +1490,6 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 };


-void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
-  // For optimized functions we should retain both non-optimized version
-  // of it's code and non-optimized version of all inlined functions.
-  // This is required to support bailing out from inlined code.
-  DeoptimizationInputData* data =
-      DeoptimizationInputData::cast(code->deoptimization_data());
-
-  FixedArray* literals = data->LiteralArray();
-
-  for (int i = 0, count = data->InlinedFunctionCount()->value();
-       i < count;
-       i++) {
-    JSFunction* inlined = JSFunction::cast(literals->get(i));
-    Code* inlined_code = inlined->shared()->code();
-    MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
-    MarkObject(inlined_code, inlined_code_mark);
-  }
-}
-
-
 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
                                                         ThreadLocalTop* top) {
   for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
@@ -1732,7 +1502,8 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
     MarkBit code_mark = Marking::MarkBitFrom(code);
     MarkObject(code, code_mark);
     if (frame->is_optimized()) {
-      MarkInlinedFunctionsCode(frame->LookupCode());
+      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
+                                                          frame->LookupCode());
     }
   }
 }
@@ -1741,21 +1512,13 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
   ASSERT(heap() == Isolate::Current()->heap());

-  // TODO(1609) Currently incremental marker does not support code flushing.
-  if (!FLAG_flush_code || was_marked_incrementally_) {
-    EnableCodeFlushing(false);
-    return;
+  // Enable code flushing for non-incremental cycles.
+  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+    EnableCodeFlushing(!was_marked_incrementally_);
   }

-#ifdef ENABLE_DEBUGGER_SUPPORT
-  if (heap()->isolate()->debug()->IsLoaded() ||
-      heap()->isolate()->debug()->has_break_points()) {
-    EnableCodeFlushing(false);
-    return;
-  }
-#endif
-
-  EnableCodeFlushing(true);
+  // If code flushing is disabled, there is no need to prepare for it.
+  if (!is_code_flushing_enabled()) return;

   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
   // relies on it being marked before any other descriptor array.
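PrepareThreadForCodeFlushing above walks every stack frame and marks its code, so a function that is currently executing is never flushed; for optimized frames it also pins the unoptimized code of every function inlined into them, which the removed MarkInlinedFunctionsCode comment notes is required for bailing out. A schematic version with stand-in types (not V8's frame or code classes):

    #include <vector>

    struct ToyCode { bool marked = false; };
    struct ToyFrame {
      ToyCode* code;
      bool is_optimized;
      std::vector<ToyCode*> inlined_unoptimized;  // bailout targets
    };

    // Pin all code reachable from a thread's stack before flushing decisions
    // are made: the running code itself, and for optimized frames also the
    // unoptimized code of inlined functions needed if the frame deoptimizes.
    void PrepareThreadForFlushing(const std::vector<ToyFrame>& stack) {
      for (const ToyFrame& frame : stack) {
        frame.code->marked = true;
        if (frame.is_optimized) {
          for (ToyCode* code : frame.inlined_unoptimized) code->marked = true;
        }
      }
    }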
@@ -1874,97 +1637,6 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 };


-void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
-  ASSERT(IsMarked(object));
-  ASSERT(HEAP->Contains(object));
-  if (object->IsMap()) {
-    Map* map = Map::cast(object);
-    heap_->ClearCacheOnMap(map);
-
-    // When map collection is enabled we have to mark through map's transitions
-    // in a special way to make transition links weak. Only maps for subclasses
-    // of JSReceiver can have transitions.
-    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-      marker_.MarkMapContents(map);
-    } else {
-      marking_deque_.PushBlack(map);
-    }
-  } else {
-    marking_deque_.PushBlack(object);
-  }
-}
-
-
-// Force instantiation of template instances.
-template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
-template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
-
-
-template <class T>
-void Marker<T>::MarkMapContents(Map* map) {
-  // Make sure that the back pointer stored either in the map itself or inside
-  // its transitions array is marked. Treat pointers in the transitions array as
-  // weak and also mark that array to prevent visiting it later.
-  base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
-
-  Object** transitions_slot =
-      HeapObject::RawField(map, Map::kTransitionsOrBackPointerOffset);
-  Object* transitions = *transitions_slot;
-  if (transitions->IsTransitionArray()) {
-    MarkTransitionArray(reinterpret_cast<TransitionArray*>(transitions));
-  } else {
-    // Already marked by marking map->GetBackPointer().
-    ASSERT(transitions->IsMap() || transitions->IsUndefined());
-  }
-
-  // Mark the Object* fields of the Map. Since the transitions array has been
-  // marked already, it is fine that one of these fields contains a pointer to
-  // it.
-  Object** start_slot =
-      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset);
-  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
-  for (Object** slot = start_slot; slot < end_slot; slot++) {
-    Object* obj = *slot;
-    if (!obj->NonFailureIsHeapObject()) continue;
-    mark_compact_collector()->RecordSlot(start_slot, slot, obj);
-    base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
-  }
-}
-
-
-template <class T>
-void Marker<T>::MarkTransitionArray(TransitionArray* transitions) {
-  if (!base_marker()->MarkObjectWithoutPush(transitions)) return;
-  Object** transitions_start = transitions->data_start();
-
-  DescriptorArray* descriptors = transitions->descriptors();
-  base_marker()->MarkObjectAndPush(descriptors);
-  mark_compact_collector()->RecordSlot(
-      transitions_start, transitions->GetDescriptorsSlot(), descriptors);
-
-  if (transitions->HasPrototypeTransitions()) {
-    // Mark prototype transitions array but don't push it into marking stack.
-    // This will make references from it weak. We will clean dead prototype
-    // transitions in ClearNonLiveTransitions.
-    Object** proto_trans_slot = transitions->GetPrototypeTransitionsSlot();
-    HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
-    base_marker()->MarkObjectWithoutPush(prototype_transitions);
-    mark_compact_collector()->RecordSlot(
-        transitions_start, proto_trans_slot, prototype_transitions);
-  }
-
-  for (int i = 0; i < transitions->number_of_transitions(); ++i) {
-    Object** key_slot = transitions->GetKeySlot(i);
-    Object* key = *key_slot;
-    if (key->IsHeapObject()) {
-      base_marker()->MarkObjectAndPush(HeapObject::cast(key));
-      mark_compact_collector()->RecordSlot(transitions_start, key_slot, key);
-    }
-  }
-}
-
-
 // Fill the marking stack with overflowed objects returned by the given
 // iterator.  Stop when the marking stack is filled or the end of the space
 // is reached, whichever comes first.
@@ -2077,6 +1749,16 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
 }


+bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
+                                                        Object** p) {
+  Object* o = *p;
+  ASSERT(o->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(o);
+  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  return !mark.Get();
+}
+
+
 void MarkCompactCollector::MarkSymbolTable() {
   SymbolTable* symbol_table = heap()->symbol_table();
   // Mark the symbol table itself.
@@ -2105,54 +1787,6 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
 }


-void MarkCompactCollector::MarkObjectGroups() {
-  List<ObjectGroup*>* object_groups =
-      heap()->isolate()->global_handles()->object_groups();
-
-  int last = 0;
-  for (int i = 0; i < object_groups->length(); i++) {
-    ObjectGroup* entry = object_groups->at(i);
-    ASSERT(entry != NULL);
-
-    Object*** objects = entry->objects_;
-    bool group_marked = false;
-    for (size_t j = 0; j < entry->length_; j++) {
-      Object* object = *objects[j];
-      if (object->IsHeapObject()) {
-        HeapObject* heap_object = HeapObject::cast(object);
-        MarkBit mark = Marking::MarkBitFrom(heap_object);
-        if (mark.Get()) {
-          group_marked = true;
-          break;
-        }
-      }
-    }
-
-    if (!group_marked) {
-      (*object_groups)[last++] = entry;
-      continue;
-    }
-
-    // An object in the group is marked, so mark as grey all white heap
-    // objects in the group.
-    for (size_t j = 0; j < entry->length_; ++j) {
-      Object* object = *objects[j];
-      if (object->IsHeapObject()) {
-        HeapObject* heap_object = HeapObject::cast(object);
-        MarkBit mark = Marking::MarkBitFrom(heap_object);
-        MarkObject(heap_object, mark);
-      }
-    }
-
-    // Once the entire group has been colored grey, set the object group
-    // to NULL so it won't be processed again.
-    entry->Dispose();
-    object_groups->at(i) = NULL;
-  }
-  object_groups->Rewind(last);
-}
-
-
 void MarkCompactCollector::MarkImplicitRefGroups() {
   List<ImplicitRefGroup*>* ref_groups =
       heap()->isolate()->global_handles()->implicit_ref_groups();
@@ -2271,11 +1905,12 @@ void MarkCompactCollector::ProcessMarkingDeque() {
 }


-void MarkCompactCollector::ProcessExternalMarking() {
+void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) {
   bool work_to_do = true;
   ASSERT(marking_deque_.IsEmpty());
   while (work_to_do) {
-    MarkObjectGroups();
+    heap()->isolate()->global_handles()->IterateObjectGroups(
+        visitor, &IsUnmarkedHeapObjectWithHeap);
     MarkImplicitRefGroups();
     work_to_do = !marking_deque_.IsEmpty();
     ProcessMarkingDeque();
@@ -2298,7 +1933,7 @@ void MarkCompactCollector::MarkLiveObjects() {
     // non-incremental marker can deal with them as if overflow
     // occured during normal marking.
     // But incremental marker uses a separate marking deque
-    // so we have to explicitly copy it's overflow state.
+    // so we have to explicitly copy its overflow state.
     incremental_marking->Finalize();
     incremental_marking_overflowed =
         incremental_marking->marking_deque()->overflowed();
@@ -2354,7 +1989,7 @@ void MarkCompactCollector::MarkLiveObjects() {
   // The objects reachable from the roots are marked, yet unreachable
   // objects are unmarked.  Mark objects reachable due to host
   // application specific logic.
-  ProcessExternalMarking();
+  ProcessExternalMarking(&root_visitor);

   // The objects reachable from the roots or object groups are marked,
   // yet unreachable objects are unmarked.  Mark objects reachable
@@ -2373,7 +2008,7 @@ void MarkCompactCollector::MarkLiveObjects() {

   // Repeat host application specific marking to mark unmarked objects
   // reachable from the weak roots.
-  ProcessExternalMarking();
+  ProcessExternalMarking(&root_visitor);

   AfterMarking();
 }
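The removed MarkObjectGroups (its work is now delegated to GlobalHandles::IterateObjectGroups) implements the rule that a group becomes live as soon as any member is marked, at which point every member is greyed and the group is retired; ProcessExternalMarking wraps this in a fixpoint with the deque drain because newly greyed members can wake further groups. A sketch of that fixpoint under toy types:

    #include <vector>

    struct GObj { bool marked = false; };
    using ObjectGroup = std::vector<GObj*>;

    // One pass over the groups: a group whose members are all unmarked is
    // retained for later; a group with any marked member gets every member
    // marked and is then dropped, exactly once.
    bool MarkGroupsOnce(std::vector<ObjectGroup>& groups) {
      bool progress = false;
      std::vector<ObjectGroup> retained;
      for (ObjectGroup& group : groups) {
        bool any_marked = false;
        for (GObj* o : group) any_marked |= o->marked;
        if (!any_marked) {
          retained.push_back(group);
          continue;
        }
        for (GObj* o : group) {
          progress |= !o->marked;
          o->marked = true;
        }
      }
      groups.swap(retained);
      return progress;
    }

    // Fixpoint: repeat until a full pass adds no new marks (the real collector
    // interleaves each pass with draining the marking deque).
    void ProcessGroups(std::vector<ObjectGroup>& groups) {
      while (MarkGroupsOnce(groups)) {
      }
    }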
@@ -2407,6 +2042,11 @@ void MarkCompactCollector::AfterMarking() {
   // Flush code from collected candidates.
   if (is_code_flushing_enabled()) {
     code_flusher_->ProcessCandidates();
+    // If incremental marker does not support code flushing, we need to
+    // disable it before incremental marking steps for next cycle.
+    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+      EnableCodeFlushing(false);
+    }
   }

   if (!FLAG_watch_ic_patching) {
@@ -2714,15 +2354,33 @@ class PointersUpdatingVisitor: public ObjectVisitor {
   void VisitEmbeddedPointer(RelocInfo* rinfo) {
     ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     Object* target = rinfo->target_object();
+    Object* old_target = target;
     VisitPointer(&target);
-    rinfo->set_target_object(target);
+    // Avoid unnecessary changes that might unnecessary flush the instruction
+    // cache.
+    if (target != old_target) {
+      rinfo->set_target_object(target);
+    }
   }

   void VisitCodeTarget(RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Object* old_target = target;
     VisitPointer(&target);
-    rinfo->set_target_address(Code::cast(target)->instruction_start());
+    if (target != old_target) {
+      rinfo->set_target_address(Code::cast(target)->instruction_start());
+    }
+  }
+
+  void VisitCodeAgeSequence(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+    Object* stub = rinfo->code_age_stub();
+    ASSERT(stub != NULL);
+    VisitPointer(&stub);
+    if (stub != rinfo->code_age_stub()) {
+      rinfo->set_code_age_stub(Code::cast(stub));
+    }
   }

   void VisitDebugTarget(RelocInfo* rinfo) {
@@ -3426,7 +3084,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {

   heap_->isolate()->inner_pointer_to_code_cache()->Flush();

-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyEvacuation(heap_);
   }
@@ -3834,7 +3492,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {

   intptr_t freed_bytes = 0;
   int pages_swept = 0;
-  intptr_t newspace_size = space->heap()->new_space()->Size();
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;

@@ -3897,15 +3554,8 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         }
         freed_bytes += SweepConservatively(space, p);
         pages_swept++;
-        if (freed_bytes > 2 * newspace_size) {
-          space->SetPagesToSweep(p->next_page());
-          lazy_sweeping_active = true;
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
-                   freed_bytes);
-          }
-        }
+        space->SetPagesToSweep(p->next_page());
+        lazy_sweeping_active = true;
         break;
       }
       case PRECISE: {
@@ -3973,11 +3623,19 @@ void MarkCompactCollector::SweepSpaces() {


 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  if (heap()->isolate()->debug()->IsLoaded() ||
+      heap()->isolate()->debug()->has_break_points()) {
+    enable = false;
+  }
+#endif
+
   if (enable) {
     if (code_flusher_ != NULL) return;
     code_flusher_ = new CodeFlusher(heap()->isolate());
   } else {
     if (code_flusher_ == NULL) return;
+    code_flusher_->EvictAllCandidates();
     delete code_flusher_;
     code_flusher_ = NULL;
   }
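The PointersUpdatingVisitor changes above compare each relocated value against the old one and only write back on a real move; as the added comment notes, an unconditional write would dirty the code object and force an instruction-cache flush for nothing. The guard in miniature (WriteAndFlushICache is a hypothetical stand-in for the platform patch-and-flush step):

    #include <cstdint>
    #include <cstring>

    // Hypothetical stand-in for patching a code object: copy the new value in
    // and flush the instruction cache for the touched range.
    void WriteAndFlushICache(uint8_t* site, const void* value, size_t size) {
      std::memcpy(site, value, size);
      // ...platform icache flush over [site, site + size) would go here...
    }

    // Update a relocated pointer only when it actually moved, so unchanged
    // code pages are neither dirtied nor icache-flushed.
    void UpdateTarget(uint8_t* site, void* new_target) {
      void* old_target;
      std::memcpy(&old_target, site, sizeof(old_target));
      if (new_target != old_target) {
        WriteAndFlushICache(site, &new_target, sizeof(new_target));
      }
    }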