Diffstat (limited to 'deps/v8/test/cctest/heap/test-heap.cc')
-rw-r--r--  deps/v8/test/cctest/heap/test-heap.cc  380
1 file changed, 240 insertions(+), 140 deletions(-)
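
Note on the dominant pattern in this diff: the many one-line changes from CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask) to a bare CcTest::CollectAllGarbage() presuppose a companion change in test/cctest/cctest.h that makes the finalize-marking mask the default. A minimal sketch of such a helper, assuming Heap::CollectAllGarbage keeps its (flags, reason) signature; an illustration, not a verbatim copy of the header:

    // Sketch of a cctest.h helper: the mask argument becomes a default,
    // so call sites can drop it without changing behavior.
    static void CollectAllGarbage(
        int flags = i::Heap::kFinalizeIncrementalMarkingMask) {
      heap()->CollectAllGarbage(flags, i::GarbageCollectionReason::kTesting);
    }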
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 6446adba6a..fc2b68e809 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -529,6 +529,129 @@ TEST(WeakGlobalHandlesScavenge) {
GlobalHandles::Destroy(h2.location());
}
+TEST(WeakGlobalUnmodifiedApiHandlesScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ LocalContext context;
+ Factory* factory = isolate->factory();
+ GlobalHandles* global_handles = isolate->global_handles();
+
+ WeakPointerCleared = false;
+
+ Handle<Object> h1;
+ Handle<Object> h2;
+
+ {
+ HandleScope scope(isolate);
+
+ // Create an API object that is unmodified.
+ auto function = FunctionTemplate::New(context->GetIsolate())
+ ->GetFunction(context.local())
+ .ToLocalChecked();
+ auto i = function->NewInstance(context.local()).ToLocalChecked();
+ Handle<Object> u = factory->NewNumber(1.12344);
+
+ h1 = global_handles->Create(*u);
+ h2 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ }
+
+ std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
+ GlobalHandles::MakeWeak(
+ h2.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
+
+ CcTest::CollectGarbage(NEW_SPACE);
+
+ CHECK((*h1)->IsHeapNumber());
+ CHECK(WeakPointerCleared);
+ CHECK(!global_handles->IsNearDeath(h1.location()));
+
+ GlobalHandles::Destroy(h1.location());
+}
+
+TEST(WeakGlobalApiHandleModifiedMapScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ LocalContext context;
+ GlobalHandles* global_handles = isolate->global_handles();
+
+ WeakPointerCleared = false;
+
+ Handle<Object> h1;
+
+ {
+ HandleScope scope(isolate);
+
+ // Create an API object which does not have the same map as its constructor.
+ auto function_template = FunctionTemplate::New(context->GetIsolate());
+ auto instance_t = function_template->InstanceTemplate();
+ instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "a",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(context->GetIsolate(), 10));
+ auto function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
+ auto i = function->NewInstance(context.local()).ToLocalChecked();
+
+ h1 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ }
+
+ std::pair<Handle<Object>*, int> handle_and_id(&h1, 1234);
+ GlobalHandles::MakeWeak(
+ h1.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
+
+ CcTest::CollectGarbage(NEW_SPACE);
+
+ CHECK(!WeakPointerCleared);
+ CHECK(!global_handles->IsNearDeath(h1.location()));
+
+ GlobalHandles::Destroy(h1.location());
+}
+
+TEST(WeakGlobalApiHandleWithElementsScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ LocalContext context;
+ GlobalHandles* global_handles = isolate->global_handles();
+
+ WeakPointerCleared = false;
+
+ Handle<Object> h1;
+
+ {
+ HandleScope scope(isolate);
+
+ // Create an API object which has elements.
+ auto function_template = FunctionTemplate::New(context->GetIsolate());
+ auto instance_t = function_template->InstanceTemplate();
+ instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "1",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(context->GetIsolate(), 10));
+ instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "2",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(context->GetIsolate(), 10));
+ auto function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
+ auto i = function->NewInstance(context.local()).ToLocalChecked();
+
+ h1 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ }
+
+ std::pair<Handle<Object>*, int> handle_and_id(&h1, 1234);
+ GlobalHandles::MakeWeak(
+ h1.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
+
+ CcTest::CollectGarbage(NEW_SPACE);
+
+ CHECK(!WeakPointerCleared);
+ CHECK(!global_handles->IsNearDeath(h1.location()));
+
+ GlobalHandles::Destroy(h1.location());
+}
TEST(WeakGlobalHandlesMark) {
CcTest::InitializeVM();
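
For context on the three tests added above: MakeWeak is given a (handle, id) pair and a file-local callback that is defined near the top of test-heap.cc and not shown in this diff. A sketch of that pattern, with the body assumed from the usual cctest idiom rather than quoted from the file:

    static bool WeakPointerCleared = false;

    // Records that the weak callback fired for the expected (handle, id)
    // pair passed to GlobalHandles::MakeWeak as the parameter.
    static void TestWeakGlobalHandleCallback(
        const v8::WeakCallbackInfo<void>& data) {
      std::pair<Handle<Object>*, int>* p =
          reinterpret_cast<std::pair<Handle<Object>*, int>*>(
              data.GetParameter());
      if (p->second == 1234) WeakPointerCleared = true;
    }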
@@ -565,7 +688,7 @@ TEST(WeakGlobalHandlesMark) {
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
// Incremental marking potentially marked handles before they turned weak.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK((*h1)->IsString());
@@ -653,7 +776,7 @@ TEST(BytecodeArray) {
// evacuation candidate.
Page* evac_page = Page::FromAddress(constant_pool->address());
heap::ForceEvacuationCandidate(evac_page);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// BytecodeArray should survive.
CHECK_EQ(array->length(), kRawBytesSize);
@@ -1184,12 +1307,12 @@ TEST(TestCodeFlushingPreAged) {
CHECK(function->shared()->is_compiled());
// The code has been run so will survive at least one GC.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// The code was only run once, so it should be pre-aged and collected on the
// next GC.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
function->IsInterpreted());
@@ -1200,14 +1323,14 @@ TEST(TestCodeFlushingPreAged) {
}
// The code will survive at least two GCs now that it is young again.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use full marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
// foo should no longer be in the compilation cache
@@ -1253,15 +1376,15 @@ TEST(TestCodeFlushingIncremental) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use incremental marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
function->IsInterpreted());
@@ -1279,7 +1402,7 @@ TEST(TestCodeFlushingIncremental) {
heap::SimulateIncrementalMarking(CcTest::heap());
if (!function->next_function_link()->IsUndefined(CcTest::i_isolate()))
break;
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
// Force optimization while incremental marking is active and while
@@ -1289,7 +1412,7 @@ TEST(TestCodeFlushingIncremental) {
}
// Simulate one final GC to make sure the candidate queue is sane.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled() || !function->IsOptimized());
CHECK(function->is_compiled() || !function->IsOptimized());
}
@@ -1319,7 +1442,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
Handle<String> bar_name = factory->InternalizeUtf8String("bar");
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1359,7 +1482,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
CcTest::CollectGarbage(NEW_SPACE);
// Simulate one final GC to make sure the candidate queue is sane.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
function->IsInterpreted());
CHECK(!function->is_compiled() || function->IsOptimized() ||
@@ -1399,8 +1522,8 @@ TEST(TestCodeFlushingIncrementalAbort) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// Bump the code age so that flushing is triggered.
@@ -1429,7 +1552,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
}
// Simulate one final GC to make sure the candidate queue is sane.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled() || !function->IsOptimized());
CHECK(function->is_compiled() || !function->IsOptimized());
}
@@ -1515,7 +1638,7 @@ TEST(CompilationCacheCachingBehavior) {
// (Unless --optimize-for-size, in which case it might get collected
// immediately.)
if (!FLAG_optimize_for_size) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
pair = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
native_context, language_mode);
@@ -1531,7 +1654,7 @@ TEST(CompilationCacheCachingBehavior) {
}
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Ensure code aging cleared the entry from the cache.
pair = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
@@ -1595,7 +1718,7 @@ TEST(TestInternalWeakLists) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::Local<v8::Context> ctx[kNumTestContexts];
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
CHECK_EQ(0, CountNativeContexts());
@@ -1606,7 +1729,7 @@ TEST(TestInternalWeakLists) {
// Collect garbage that might have been created by one of the
// installed extensions.
isolate->compilation_cache()->Clear();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(i + 1, CountNativeContexts());
@@ -1638,7 +1761,7 @@ TEST(TestInternalWeakLists) {
// Mark compact handles the weak references.
isolate->compilation_cache()->Clear();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
// Get rid of f3 and f5 in the same way.
@@ -1647,14 +1770,14 @@ TEST(TestInternalWeakLists) {
CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i]));
ctx[i]->Exit();
@@ -1662,7 +1785,7 @@ TEST(TestInternalWeakLists) {
// Force compilation cache cleanup.
CcTest::heap()->NotifyContextDisposed(true);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Dispose the native contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
@@ -1678,7 +1801,7 @@ TEST(TestInternalWeakLists) {
}
// Mark compact handles the weak references.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts());
}
@@ -1694,8 +1817,7 @@ static int CountNativeContextsWithGC(Isolate* isolate, int n) {
Handle<Object> object(heap->native_contexts_list(), isolate);
while (!object->IsUndefined(isolate)) {
count++;
- if (count == n)
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ if (count == n) CcTest::CollectAllGarbage();
object =
Handle<Object>(Context::cast(*object)->next_context_link(), isolate);
}
@@ -1738,7 +1860,7 @@ TEST(TestInternalWeakListsTraverseWithGC) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::Local<v8::Context> ctx[kNumTestContexts];
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
CHECK_EQ(0, CountNativeContexts());
@@ -1810,11 +1932,11 @@ TEST(TestSizeOfRegExpCode) {
int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
CompileRun("'foo'.match(reg_exp_source);");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int size_with_regexp = static_cast<int>(CcTest::heap()->SizeOfObjects());
CompileRun("'foo'.match(half_size_reg_exp);");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int size_with_optimized_regexp =
static_cast<int>(CcTest::heap()->SizeOfObjects());
@@ -1861,7 +1983,7 @@ HEAP_TEST(TestSizeOfObjects) {
// The heap size should go back to initial size after a full GC, even
// though sweeping didn't finish yet.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Normally sweeping would not be complete here, but no guarantees.
CHECK_EQ(initial_size, static_cast<int>(heap->SizeOfObjects()));
// Waiting for sweeper threads should not change heap size.
@@ -2107,8 +2229,7 @@ TEST(GrowAndShrinkNewSpace) {
}
// Make sure we're in a consistent state to start out.
- CcTest::CollectGarbage(NEW_SPACE);
- CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectAllGarbage();
// Explicitly growing should double the space capacity.
size_t old_capacity, new_capacity;
@@ -2387,7 +2508,7 @@ TEST(InstanceOfStubWriteBarrier) {
#endif
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_optimizer()) return;
if (i::FLAG_force_marking_deque_overflows) return;
v8::HandleScope outer_scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2456,7 +2577,7 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
#endif
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_optimizer()) return;
v8::HandleScope outer_scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2485,7 +2606,7 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
// The following calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
@@ -2501,7 +2622,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
#endif
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_optimizer()) return;
v8::HandleScope outer_scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2527,7 +2648,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
// The following two calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
@@ -2619,7 +2740,7 @@ TEST(IdleNotificationFinishMarking) {
TEST(OptimizedAllocationAlwaysInNewSpace) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2654,7 +2775,7 @@ TEST(OptimizedPretenuringAllocationFolding) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2705,7 +2826,7 @@ TEST(OptimizedPretenuringObjectArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2745,7 +2866,7 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2803,7 +2924,7 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2843,7 +2964,7 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2883,7 +3004,7 @@ TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2933,7 +3054,7 @@ TEST(OptimizedPretenuringNestedObjectLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2984,7 +3105,7 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -3035,7 +3156,7 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
TEST(OptimizedAllocationArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -3100,7 +3221,7 @@ TEST(Regress1465) {
CHECK_EQ(transitions_count, transitions_before);
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -3284,7 +3405,7 @@ TEST(Regress2143a) {
CcTest::heap()->AgeInlineCaches();
// Explicitly request GC to perform final marking step and sweeping.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
Handle<JSReceiver> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
CcTest::global()
@@ -3327,7 +3448,7 @@ TEST(Regress2143b) {
CcTest::heap()->AgeInlineCaches();
// Explicitly request GC to perform final marking step and sweeping.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
Handle<JSReceiver> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
CcTest::global()
@@ -3344,7 +3465,7 @@ TEST(ReleaseOverReservedPages) {
if (FLAG_never_compact) return;
i::FLAG_trace_gc = true;
// The optimizer can allocate stuff, messing up the test.
- i::FLAG_crankshaft = false;
+ i::FLAG_opt = false;
i::FLAG_always_opt = false;
// Parallel compaction increases fragmentation, depending on how existing
// memory is distributed. Since this is non-deterministic because of
@@ -3378,14 +3499,14 @@ TEST(ReleaseOverReservedPages) {
// Triggering one GC will cause a lot of garbage to be discovered but
// evenly spread across all allocated pages.
- CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_GE(overall_page_count, old_space->CountTotalPages());
// Triggering subsequent GCs should cause at least half of the pages
// to be released to the OS after at most two cycles.
- CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_GE(overall_page_count, old_space->CountTotalPages());
- CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released to the
@@ -3481,7 +3602,7 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
CHECK(feedback_vector->Get(feedback_helper.slot(slot2))->IsWeakCell());
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(!WeakCell::cast(feedback_vector->Get(feedback_helper.slot(slot1)))
->cleared());
@@ -3538,7 +3659,7 @@ TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
CHECK(vector->Get(FeedbackSlot(0))->IsWeakCell());
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(vector->Get(FeedbackSlot(0))->IsWeakCell());
}
@@ -3560,7 +3681,7 @@ TEST(IncrementalMarkingPreservesMonomorphicIC) {
CheckVectorIC(f, 0, MONOMORPHIC);
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckVectorIC(f, 0, MONOMORPHIC);
}
@@ -3598,7 +3719,7 @@ TEST(IncrementalMarkingPreservesPolymorphicIC) {
// Fire context dispose notification.
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckVectorIC(f, 0, POLYMORPHIC);
}
@@ -3637,7 +3758,7 @@ TEST(ContextDisposeDoesntClearPolymorphicIC) {
// Fire context dispose notification.
CcTest::isolate()->ContextDisposedNotification();
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckVectorIC(f, 0, POLYMORPHIC);
}
@@ -3769,7 +3890,7 @@ TEST(Regress159140) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Prepare several closures that are all eligible for code flushing
// because all reachable ones are not optimized. Make sure that the
@@ -3813,7 +3934,7 @@ TEST(Regress159140) {
// finish the GC to complete code flushing.
heap::SimulateIncrementalMarking(heap);
CompileRun("%OptimizeFunctionOnNextCall(g); g(3);");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Unoptimized code is missing and the deoptimizer will go ballistic.
CompileRun("g('bozo');");
@@ -3829,7 +3950,7 @@ TEST(Regress165495) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Prepare an optimized closure so that the optimized code map will get
// populated. Then age the unoptimized code to trigger code flushing
@@ -3859,7 +3980,7 @@ TEST(Regress165495) {
// Simulate incremental marking so that unoptimized code is flushed
// even though it still is cached in the optimized code map.
heap::SimulateIncrementalMarking(heap);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Make a new closure that will get code installed from the code map.
// Unoptimized code is missing and the deoptimizer will go ballistic.
@@ -3881,7 +4002,7 @@ TEST(Regress169209) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Prepare a shared function info eligible for code flushing for which
// the unoptimized code will be replaced during optimization.
@@ -3939,14 +4060,14 @@ TEST(Regress169209) {
"g(false);");
// Finish garbage collection cycle.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(shared1->code()->gc_metadata() == NULL);
}
TEST(Regress169928) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_crankshaft = false;
+ i::FLAG_opt = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
LocalContext env;
@@ -4068,8 +4189,8 @@ TEST(Regress513507) {
heap->set_allocation_timeout(5);
FLAG_gc_interval = 1000;
for (int i = 0; i < 10; ++i) {
- BailoutId id = BailoutId(i);
- SharedFunctionInfo::AddToOptimizedCodeMap(shared, context, code, id);
+ BailoutId id = BailoutId(i + 1);
+ Context::AddToOSROptimizedCodeCache(context, shared, code, id);
}
}
#endif // DEBUG
@@ -4081,12 +4202,12 @@ TEST(Regress513496) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Prepare an optimized closure containing an inlined function. Then age
// the inlined unoptimized code to trigger code flushing but make sure the
// outer optimized code is kept in the optimized code map.
- Handle<SharedFunctionInfo> shared;
+ Handle<Code> optimized_code;
{
LocalContext context;
HandleScope inner_scope(isolate);
@@ -4114,18 +4235,19 @@ TEST(Regress513496) {
->Get(context.local(), v8_str("f"))
.ToLocalChecked())));
CHECK(f->is_compiled());
- shared = inner_scope.CloseAndEscape(handle(f->shared(), isolate));
+
+ // Lookup the optimized code and keep it alive.
+ Code* result = f->feedback_vector()->optimized_code();
+ optimized_code = inner_scope.CloseAndEscape(handle(result, isolate));
+
CompileRun("f = null");
}
- // Lookup the optimized code and keep it alive.
- Code* result = shared->SearchOptimizedCodeMap(
- isolate->context()->native_context(), BailoutId::None());
- Handle<Code> optimized_code(result, isolate);
// Finish a full GC cycle so that the unoptimized code of 'g' is flushed even
// though the optimized code for 'f' is reachable via the optimized code map.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Make a new closure that will get code installed from the code map.
// Unoptimized code is missing and the deoptimizer will go ballistic.
@@ -4169,7 +4291,7 @@ TEST(LargeObjectSlotRecording) {
}
// Move the evacuation candidate object.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Verify that the pointers in the large object got updated.
for (int i = 0; i < size; i += kStep) {
@@ -4178,10 +4300,9 @@ TEST(LargeObjectSlotRecording) {
}
}
-
-class DummyVisitor : public ObjectVisitor {
+class DummyVisitor : public RootVisitor {
public:
- void VisitPointers(Object** start, Object** end) override {}
+ void VisitRootPointers(Root root, Object** start, Object** end) override {}
};
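
The DummyVisitor hunk above tracks V8's split of root iteration out of ObjectVisitor into a dedicated RootVisitor interface; each visited pointer range now carries a Root tag identifying its source. A hypothetical non-trivial visitor against the same interface, for illustration only:

    // Counts root slots per invocation; the Root tag could be used to
    // filter, e.g., only Root::kGlobalHandles ranges.
    class CountingRootVisitor : public RootVisitor {
     public:
      void VisitRootPointers(Root root, Object** start, Object** end) override {
        count_ += static_cast<int>(end - start);
      }
      int count() const { return count_; }

     private:
      int count_ = 0;
    };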
@@ -4269,14 +4390,14 @@ static int AllocationSitesCount(Heap* heap) {
TEST(EnsureAllocationSiteDependentCodesProcessed) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
GlobalHandles* global_handles = isolate->global_handles();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
// The allocation site at the head of the list is ours.
Handle<AllocationSite> site;
@@ -4329,7 +4450,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// Now make sure that a gc should get rid of the function, even though we
// still have the allocation site alive.
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
// The site still exists because of our global handle, but the code is no
@@ -4340,13 +4461,13 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
TEST(CellsInOptimizedCodeAreWeak) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
{
@@ -4376,7 +4497,7 @@ TEST(CellsInOptimizedCodeAreWeak) {
// Now make sure that a gc should get rid of the function
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(code->marked_for_deoptimization());
@@ -4384,13 +4505,13 @@ TEST(CellsInOptimizedCodeAreWeak) {
TEST(ObjectsInOptimizedCodeAreWeak) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
{
@@ -4418,20 +4539,20 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
// Now make sure that a gc should get rid of the function
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(code->marked_for_deoptimization());
}
TEST(NewSpaceObjectsInOptimizedCode) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft || i::FLAG_turbo) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt || i::FLAG_turbo) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
{
@@ -4479,14 +4600,14 @@ TEST(NewSpaceObjectsInOptimizedCode) {
// Now make sure that a gc should get rid of the function
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(code->marked_for_deoptimization());
}
TEST(NoWeakHashTableLeakWithIncrementalMarking) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
if (!i::FLAG_incremental_marking) return;
i::FLAG_allow_natives_syntax = true;
i::FLAG_compilation_cache = false;
@@ -4501,9 +4622,9 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
// Get a clean slate regarding optimized functions on the heap.
i::Deoptimizer::DeoptimizeAll(isolate);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
for (int i = 0; i < 3; i++) {
heap::SimulateIncrementalMarking(heap);
@@ -4571,7 +4692,7 @@ TEST(NextCodeLinkIsWeak) {
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
CcTest::CollectAllAvailableGarbage();
@@ -4617,7 +4738,7 @@ TEST(NextCodeLinkIsWeak2) {
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
CcTest::CollectAllAvailableGarbage();
Handle<Context> context(Context::cast(heap->native_contexts_list()), isolate);
@@ -4682,7 +4803,7 @@ TEST(WeakFunctionInConstructor) {
}
weak_ic_cleared = false;
garbage.SetWeak(&garbage, &ClearWeakIC, v8::WeakCallbackType::kParameter);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(weak_ic_cleared);
// We've determined the constructor in createObj has had its weak cell
@@ -4694,7 +4815,7 @@ TEST(WeakFunctionInConstructor) {
Object* slot_value = feedback_vector->Get(FeedbackSlot(0));
CHECK(slot_value->IsWeakCell());
if (WeakCell::cast(slot_value)->cleared()) break;
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
Object* slot_value = feedback_vector->Get(FeedbackSlot(0));
@@ -4724,7 +4845,7 @@ void CheckWeakness(const char* source) {
}
weak_ic_cleared = false;
garbage.SetWeak(&garbage, &ClearWeakIC, v8::WeakCallbackType::kParameter);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(weak_ic_cleared);
}
@@ -4931,7 +5052,7 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
@@ -4966,7 +5087,7 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
@@ -5040,8 +5161,8 @@ TEST(WeakCellsWithIncrementalMarking) {
}
// Call collect all twice to make sure that we also cleared
// weak cells that were allocated on black pages.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK_EQ(*survivor, weak_cells[0]->value());
for (int i = 1; i < N; i++) {
CHECK(weak_cells[i]->cleared());
@@ -5088,7 +5209,7 @@ TEST(AddInstructionChangesNewSpacePromotion) {
heap->DisableInlineAllocation();
heap->set_allocation_timeout(1);
g->Call(env.local(), global, 1, args1).ToLocalChecked();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -5421,14 +5542,14 @@ TEST(Regress3877) {
"a.x = new cls();"
"cls.prototype = null;");
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
// The map of a.x keeps prototype alive
CHECK(!weak_prototype->cleared());
// Change the map of a.x and make the previous map garbage collectable.
CompileRun("a.x.__proto__ = {};");
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(weak_prototype->cleared());
}
@@ -5873,7 +5994,7 @@ TEST(ScriptIterator) {
Heap* heap = CcTest::heap();
LocalContext context;
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int script_count = 0;
{
@@ -5899,8 +6020,8 @@ TEST(SharedFunctionInfoIterator) {
Heap* heap = CcTest::heap();
LocalContext context;
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
int sfi_count = 0;
{
@@ -6157,7 +6278,7 @@ TEST(Regress615489) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6185,7 +6306,7 @@ TEST(Regress615489) {
}
CHECK(marking->IsComplete());
intptr_t size_before = heap->SizeOfObjects();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
intptr_t size_after = heap->SizeOfObjects();
// Live size does not increase after garbage collection.
CHECK_LE(size_after, size_before);
@@ -6258,7 +6379,7 @@ TEST(LeftTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6297,7 +6418,7 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6363,7 +6484,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6451,7 +6572,7 @@ TEST(UncommitUnusedLargeObjectMemory) {
array->Shrink(1);
CHECK(array->Size() < size_before);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(chunk->CommittedPhysicalMemory() < committed_memory_before);
size_t shrinked_size =
RoundUp((array->address() - chunk->address()) + array->Size(),
@@ -6532,7 +6653,7 @@ HEAP_TEST(Regress670675) {
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
@@ -6609,26 +6730,5 @@ HEAP_TEST(Regress5831) {
CHECK(chunk->NeverEvacuate());
}
-UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
- // Enable rehashing and create an isolate and context.
- i::FLAG_rehash_snapshot = true;
- for (int i = 1; i < 3; i++) {
- i::FLAG_hash_seed = 1337 * i;
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- {
- v8::Isolate::Scope isolate_scope(isolate);
- CHECK_EQ(1337 * i,
- reinterpret_cast<i::Isolate*>(isolate)->heap()->HashSeed());
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- CHECK(!context.IsEmpty());
- v8::Context::Scope context_scope(context);
- }
- isolate->Dispose();
- }
-}
-
} // namespace internal
} // namespace v8