path: root/deps/v8/src/runtime-profiler.cc
author    Ben Noordhuis <info@bnoordhuis.nl>    2013-04-17 16:10:37 +0200
committer Ben Noordhuis <info@bnoordhuis.nl>    2013-04-17 16:10:37 +0200
commit    9f682265d6631a29457abeb53827d01fa77493c8 (patch)
tree      92a1eec49b1f280931598a72dcf0cca3d795f210 /deps/v8/src/runtime-profiler.cc
parent    951e0b69fa3c8b1a5d708e29de9d6f7d1db79827 (diff)
deps: upgrade v8 to 3.18.0
Diffstat (limited to 'deps/v8/src/runtime-profiler.cc')
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 87
1 file changed, 11 insertions(+), 76 deletions(-)
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 2606f8ab37..752d79c982 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -86,17 +86,6 @@ static const int kMaxSizeEarlyOpt =
5 * FullCodeGenerator::kBackEdgeDistanceUnit;
-Atomic32 RuntimeProfiler::state_ = 0;
-
-// TODO(isolates): Clean up the semaphore when it is no longer required.
-static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
-
-#ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_set_up_ = false;
-#endif
-bool RuntimeProfiler::enabled_ = false;
-
-
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate),
sampler_threshold_(kSamplerThresholdInit),
@@ -110,15 +99,6 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
}
-void RuntimeProfiler::GlobalSetUp() {
- ASSERT(!has_been_globally_set_up_);
- enabled_ = V8::UseCrankshaft() && FLAG_opt;
-#ifdef DEBUG
- has_been_globally_set_up_ = true;
-#endif
-}
-
-
static void GetICCounts(JSFunction* function,
int* ic_with_type_info_count,
int* ic_total_count,
@@ -190,23 +170,22 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// any back edge in any unoptimized frame will trigger on-stack
// replacement for that frame.
if (FLAG_trace_osr) {
- PrintF("[patching stack checks in ");
+ PrintF("[patching back edges in ");
function->PrintName();
PrintF(" for on-stack replacement]\n");
}
- // Get the stack check stub code object to match against. We aren't
+ // Get the interrupt stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
- Code* stack_check_code = NULL;
+ Code* interrupt_code = NULL;
InterruptStub interrupt_stub;
- bool found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_);
+ bool found_code = interrupt_stub.FindCodeInCache(&interrupt_code, isolate_);
if (found_code) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
Code* unoptimized_code = shared->code();
- Deoptimizer::PatchStackCheckCode(unoptimized_code,
- stack_check_code,
- replacement_code);
+ Deoptimizer::PatchInterruptCode(
+ unoptimized_code, interrupt_code, replacement_code);
}
}
@@ -296,9 +275,11 @@ void RuntimeProfiler::OptimizeNow() {
function->IsMarkedForParallelRecompilation() ||
function->IsOptimized())) {
int nesting = shared_code->allow_osr_at_loop_nesting_level();
- if (nesting == 0) AttemptOnStackReplacement(function);
- int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
- shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+ if (nesting < Code::kMaxLoopNestingMarker) {
+ int new_nesting = nesting + 1;
+ shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+ AttemptOnStackReplacement(function);
+ }
}
// Only record top-level code on top of the execution stack and
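Note on the hunk above: the upgrade changes when on-stack replacement is attempted. A minimal standalone sketch of that behavioral difference follows; this is not V8 source, the Code struct is simplified, and Code::kMaxLoopNestingMarker is assumed to be 6 here purely for illustration.

// Simplified model of the loop-nesting change in the diff above (not V8 code).
#include <algorithm>
#include <cstdio>

namespace {

const int kMaxLoopNestingMarker = 6;  // assumed value for illustration

struct Code {
  int allow_osr_at_loop_nesting_level = 0;
};

void AttemptOnStackReplacement() { std::printf("attempt OSR\n"); }

// Old behavior: OSR attempted only on the first tick (nesting == 0);
// the marker was then clamped with Min().
void TickOld(Code* code) {
  int nesting = code->allow_osr_at_loop_nesting_level;
  if (nesting == 0) AttemptOnStackReplacement();
  code->allow_osr_at_loop_nesting_level =
      std::min(nesting + 1, kMaxLoopNestingMarker);
}

// New behavior: the marker is raised first, and OSR is attempted on
// every tick until the marker reaches its maximum.
void TickNew(Code* code) {
  int nesting = code->allow_osr_at_loop_nesting_level;
  if (nesting < kMaxLoopNestingMarker) {
    code->allow_osr_at_loop_nesting_level = nesting + 1;
    AttemptOnStackReplacement();
  }
}

}  // namespace

int main() {
  Code old_style, new_style;
  for (int i = 0; i < 8; ++i) TickOld(&old_style);  // attempts OSR once
  for (int i = 0; i < 8; ++i) TickNew(&new_style);  // attempts OSR six times
  return 0;
}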
@@ -385,13 +366,9 @@ void RuntimeProfiler::OptimizeNow() {
void RuntimeProfiler::SetUp() {
- ASSERT(has_been_globally_set_up_);
if (!FLAG_watch_ic_patching) {
ClearSampleBuffer();
}
- // If the ticker hasn't already started, make sure to do so to get
- // the ticks for the runtime profiler.
- if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}
@@ -431,48 +408,6 @@ void RuntimeProfiler::UpdateSamplesAfterScavenge() {
}
-void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
- // The profiler thread must still be waiting.
- ASSERT(NoBarrier_Load(&state_) >= 0);
- // In IsolateEnteredJS we have already incremented the counter and
- // undid the decrement done by the profiler thread. Increment again
- // to get the right count of active isolates.
- NoBarrier_AtomicIncrement(&state_, 1);
- semaphore.Pointer()->Signal();
-}
-
-
-bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
- Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
- ASSERT(old_state >= -1);
- if (old_state != 0) return false;
- semaphore.Pointer()->Wait();
- return true;
-}
-
-
-void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
- // Do a fake increment. If the profiler is waiting on the semaphore,
- // the returned state is 0, which can be left as an initial state in
- // case profiling is restarted later. If the profiler is not
- // waiting, the increment will prevent it from waiting, but has to
- // be undone after the profiler is stopped.
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
- ASSERT(new_state >= 0);
- if (new_state == 0) {
- // The profiler thread is waiting. Wake it up. It must check for
- // stop conditions before attempting to wait again.
- semaphore.Pointer()->Signal();
- }
- thread->Join();
- // The profiler thread is now stopped. Undo the increment in case it
- // was not waiting.
- if (new_state != 0) {
- NoBarrier_AtomicIncrement(&state_, -1);
- }
-}
-
-
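The comments in the block removed above describe the old profiler-thread handshake: state_ counts isolates currently executing JS, -1 marks a parked profiler thread, and shutdown relies on a "fake increment" to prevent or interrupt the wait. As a rough illustration only, here is a standalone model of that protocol; it is not V8 source, and V8 used its own Atomic32/LazySemaphore primitives rather than the C++20 facilities below.

// Standalone model (not V8 source) of the removed profiler-thread handshake.
#include <atomic>
#include <chrono>
#include <semaphore>
#include <thread>

std::atomic<int> state{0};                // isolates in JS; -1 = profiler parked
std::counting_semaphore<> wakeups{0};
std::atomic<bool> stop_requested{false};

// Profiler side: park only if no isolate is executing JS right now.
bool WaitForSomeIsolateToEnterJS() {
  int expected = 0;
  if (!state.compare_exchange_strong(expected, -1)) return false;
  wakeups.acquire();
  return true;
}

// Isolate side: entering JS increments the count; if the profiler was
// parked (previous value -1), that increment only undoes the profiler's
// decrement, so a second increment restores the true count before waking it.
void IsolateEnteredJS() {
  int previous = state.fetch_add(1);
  if (previous == -1) {
    state.fetch_add(1);
    wakeups.release();
  }
}

// Shutdown: the "fake increment" trick. If the profiler was parked the
// result is 0 and a signal wakes it; otherwise the increment keeps it from
// parking again and is undone once the thread has joined.
void StopProfilerThreadBeforeShutdown(std::thread* profiler) {
  stop_requested.store(true);
  int new_state = state.fetch_add(1) + 1;
  if (new_state == 0) wakeups.release();
  profiler->join();
  if (new_state != 0) state.fetch_sub(1);
}

int main() {
  std::thread profiler([] {
    while (!stop_requested.load()) {
      WaitForSomeIsolateToEnterJS();  // park until an isolate enters JS
    }
  });
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  StopProfilerThreadBeforeShutdown(&profiler);
  return 0;
}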
void RuntimeProfiler::RemoveDeadSamples() {
for (int i = 0; i < kSamplerWindowSize; i++) {
Object* function = sampler_window_[i];