author    Ben Noordhuis <info@bnoordhuis.nl>  2013-07-02 17:11:31 +0200
committer Ben Noordhuis <info@bnoordhuis.nl>  2013-07-06 16:53:06 +0200
commit    704fd8f3745527fc080f96e54e5ec1857c505399 (patch)
tree      bff68e8a731f3618d3e8f1708aa9de194bc1f612 /deps/v8/src/runtime-profiler.cc
parent    eec43351c44c0bec31a83e1a28be15e30722936a (diff)
download  android-node-v8-704fd8f3745527fc080f96e54e5ec1857c505399.tar.gz
          android-node-v8-704fd8f3745527fc080f96e54e5ec1857c505399.tar.bz2
          android-node-v8-704fd8f3745527fc080f96e54e5ec1857c505399.zip
v8: upgrade to v3.20.2
Diffstat (limited to 'deps/v8/src/runtime-profiler.cc')
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 55
1 file changed, 38 insertions(+), 17 deletions(-)
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index c4b79b11b5..bd02a69042 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -80,11 +80,17 @@ STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
+// Maximum size in bytes of generated code for a function to allow OSR.
+static const int kOSRCodeSizeAllowanceBase =
+ 100 * FullCodeGenerator::kCodeSizeMultiplier;
+
+static const int kOSRCodeSizeAllowancePerTick =
+ 3 * FullCodeGenerator::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt =
- 5 * FullCodeGenerator::kBackEdgeDistanceUnit;
+ 5 * FullCodeGenerator::kCodeSizeMultiplier;
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
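The two constants introduced in the hunk above feed the OSR size check added further down in OptimizeNow(): a function's allowance starts at kOSRCodeSizeAllowanceBase and grows by kOSRCodeSizeAllowancePerTick with every profiler tick, so a large unoptimized function eventually becomes OSR-eligible if it stays hot long enough. A minimal standalone sketch of that growth; the multiplier value here is a stand-in, since the real FullCodeGenerator::kCodeSizeMultiplier is platform-dependent:

// Illustrative sketch only. kCodeSizeMultiplier is an assumed placeholder;
// the real constant lives in FullCodeGenerator and differs per architecture.
#include <cstdio>

static const int kCodeSizeMultiplier = 100;  // assumed stand-in value
static const int kOSRCodeSizeAllowanceBase = 100 * kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTick = 3 * kCodeSizeMultiplier;

// Mirrors the allowance computation used by the OSR check in OptimizeNow().
static int OsrAllowance(int profiler_ticks) {
  return kOSRCodeSizeAllowanceBase + profiler_ticks * kOSRCodeSizeAllowancePerTick;
}

int main() {
  // A function whose unoptimized code is 25000 bytes only clears the
  // allowance after it has accumulated enough profiler ticks.
  const int code_size = 25000;
  for (int ticks = 0; ticks <= 60; ticks += 10) {
    std::printf("ticks=%2d allowance=%d eligible=%s\n", ticks, OsrAllowance(ticks),
                code_size <= OsrAllowance(ticks) ? "yes" : "no");
  }
  return 0;
}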
@@ -100,14 +106,13 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
}
-static void GetICCounts(JSFunction* function,
+static void GetICCounts(Code* shared_code,
int* ic_with_type_info_count,
int* ic_total_count,
int* percentage) {
*ic_total_count = 0;
*ic_with_type_info_count = 0;
- Object* raw_info =
- function->shared()->code()->type_feedback_info();
+ Object* raw_info = shared_code->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
*ic_with_type_info_count = info->ic_with_type_info_count();
@@ -128,7 +133,7 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
PrintF(" for recompilation, reason: %s", reason);
if (FLAG_type_info_threshold > 0) {
int typeinfo, total, percentage;
- GetICCounts(function, &typeinfo, &total, &percentage);
+ GetICCounts(function->shared()->code(), &typeinfo, &total, &percentage);
PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
}
PrintF("]\n");
@@ -148,9 +153,6 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
- ASSERT(function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation() ||
- function->IsOptimized());
if (!FLAG_use_osr ||
isolate_->DebuggerHasBreakPoints() ||
function->IsBuiltin()) {
@@ -225,6 +227,8 @@ void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
+ if (isolate_->DebuggerHasBreakPoints()) return;
+
if (FLAG_parallel_recompilation) {
// Take this as opportunity to process the optimizing compiler thread's
// output queue so that it does not unnecessarily keep objects alive.
@@ -268,18 +272,35 @@ void RuntimeProfiler::OptimizeNow() {
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsInRecompileQueue()) continue;
- // Attempt OSR if we are still running unoptimized code even though the
- // the function has long been marked or even already been optimized.
- if (!frame->is_optimized() &&
+ if (FLAG_always_osr &&
+ shared_code->allow_osr_at_loop_nesting_level() == 0) {
+ // Testing mode: always try an OSR compile for every function.
+ for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
+ // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
+ shared_code->set_allow_osr_at_loop_nesting_level(i);
+ AttemptOnStackReplacement(function);
+ }
+ // Fall through and do a normal optimized compile as well.
+ } else if (!frame->is_optimized() &&
(function->IsMarkedForLazyRecompilation() ||
function->IsMarkedForParallelRecompilation() ||
function->IsOptimized())) {
- int nesting = shared_code->allow_osr_at_loop_nesting_level();
- if (nesting < Code::kMaxLoopNestingMarker) {
- int new_nesting = nesting + 1;
- shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
- AttemptOnStackReplacement(function);
+ // Attempt OSR if we are still running unoptimized code even though
+ // the function has long been marked or even already been optimized.
+ int ticks = shared_code->profiler_ticks();
+ int allowance = kOSRCodeSizeAllowanceBase +
+ ticks * kOSRCodeSizeAllowancePerTick;
+ if (shared_code->CodeSize() > allowance) {
+ if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
+ } else {
+ int nesting = shared_code->allow_osr_at_loop_nesting_level();
+ if (nesting < Code::kMaxLoopNestingMarker) {
+ int new_nesting = nesting + 1;
+ shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+ AttemptOnStackReplacement(function);
+ }
}
+ continue;
}
// Only record top-level code on top of the execution stack and
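Taken together, the rewritten block above turns OSR into a size-and-patience trade-off: a function whose unoptimized code still exceeds the current allowance merely accumulates another profiler tick, while a small-enough function gets its permitted back-edge nesting level raised and an OSR attempt. A sketch of that decision flow under simplifying assumptions; CodeInfo is a stand-in for the few fields this logic touches on the real V8 Code object, and the constants are illustrative:

// Sketch under assumptions: 'CodeInfo' stands in for v8::internal::Code.
#include <cstdio>

struct CodeInfo {
  int code_size;
  int profiler_ticks;
  int allow_osr_at_loop_nesting_level;
};

static const int kMaxLoopNestingMarker = 6;  // assumed placeholder

// Returns true if an OSR attempt should be made on this profiler tick.
static bool MaybeAllowOsr(CodeInfo* code, int allowance_base, int allowance_per_tick) {
  int allowance = allowance_base + code->profiler_ticks * allowance_per_tick;
  if (code->code_size > allowance) {
    // Too big for now: just bump the tick counter (saturating at 255).
    if (code->profiler_ticks < 255) code->profiler_ticks++;
    return false;
  }
  if (code->allow_osr_at_loop_nesting_level < kMaxLoopNestingMarker) {
    // Small enough: allow OSR at one more level of loop nesting and try it.
    code->allow_osr_at_loop_nesting_level++;
    return true;
  }
  return false;
}

int main() {
  CodeInfo code = {25000, 0, 0};
  for (int tick = 0; tick < 60; tick++) {
    if (MaybeAllowOsr(&code, 10000, 300)) {
      std::printf("OSR attempt at tick %d, nesting level %d\n",
                  tick, code.allow_osr_at_loop_nesting_level);
    }
  }
  return 0;
}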
@@ -313,7 +334,7 @@ void RuntimeProfiler::OptimizeNow() {
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, total, percentage;
- GetICCounts(function, &typeinfo, &total, &percentage);
+ GetICCounts(shared_code, &typeinfo, &total, &percentage);
if (percentage >= FLAG_type_info_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
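The tail of this last hunk (cut off above) is the ordinary tick-driven path: once a function has collected kProfilerTicksBeforeOptimization ticks, it is optimized only if its type-info percentage also clears FLAG_type_info_threshold. A compact sketch of the gate that is visible here; both constants below are assumed placeholders, not the values V8 actually uses:

// Sketch of the visible decision above; constant values are assumptions.
static const int kProfilerTicksBeforeOptimization = 2;  // assumed
static const int kTypeInfoThreshold = 25;               // stand-in for FLAG_type_info_threshold

static bool ShouldOptimizeNow(int ticks, int typeinfo_percentage) {
  // Enough profiler ticks and enough ICs carrying type feedback.
  return ticks >= kProfilerTicksBeforeOptimization &&
         typeinfo_percentage >= kTypeInfoThreshold;
}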