author    isaacs <i@izs.me>  2012-05-15 19:53:16 -0700
committer isaacs <i@izs.me>  2012-05-16 14:22:33 -0700
commit    3f3f958c14cf4e963a73d6f037ac381c77fe78bb (patch)
tree      391e35b59e76d038534fbd375f1bbe0dc55076cf /deps/v8/src/runtime-profiler.cc
parent    4099d1eebae4e78864a6879c0b9e08f31d48d8cb (diff)
Upgrade V8 to 3.11.1
Diffstat (limited to 'deps/v8/src/runtime-profiler.cc')
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 64
1 file changed, 26 insertions(+), 38 deletions(-)
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 6ed4ff483a..568e48e412 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -65,6 +65,12 @@ static const int kSizeLimit = 1500;
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
+// If a function does not have enough type info (according to
+// FLAG_type_info_threshold), but has seen a huge number of ticks,
+// optimize it as it is.
+static const int kTicksWhenNotEnoughTypeInfo = 100;
+// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
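
The new constant is pinned by the STATIC_ASSERT because the per-function tick count is stored in a single byte. A standalone sketch of the same invariant, using standard C++ static_assert in place of V8's macro (the constant names mirror the patch; the saturating increment is our illustration, not V8 code):

#include <cstdint>

static const int kProfilerTicksBeforeOptimization = 2;
static const int kTicksWhenNotEnoughTypeInfo = 100;

// The tick counter occupies one byte, so any threshold compared
// against it must stay below 256.
static_assert(kTicksWhenNotEnoughTypeInfo < 256,
              "tick threshold must fit in the one-byte counter");

// Illustrative saturating increment for such a one-byte counter.
inline uint8_t IncrementTicks(uint8_t ticks) {
  return ticks == 0xFF ? ticks : static_cast<uint8_t>(ticks + 1);
}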
@@ -88,12 +94,14 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
sampler_ticks_until_threshold_adjustment_(
kSamplerTicksBetweenThresholdAdjustment),
- sampler_window_position_(0) {
+ sampler_window_position_(0),
+ any_ic_changed_(false),
+ code_generated_(false) {
ClearSampleBuffer();
}
-void RuntimeProfiler::GlobalSetup() {
+void RuntimeProfiler::GlobalSetUp() {
ASSERT(!has_been_globally_set_up_);
enabled_ = V8::UseCrankshaft() && FLAG_opt;
#ifdef DEBUG
@@ -103,20 +111,20 @@ void RuntimeProfiler::GlobalSetup() {
static void GetICCounts(JSFunction* function,
- int* ic_with_typeinfo_count,
+ int* ic_with_type_info_count,
int* ic_total_count,
int* percentage) {
*ic_total_count = 0;
- *ic_with_typeinfo_count = 0;
+ *ic_with_type_info_count = 0;
Object* raw_info =
function->shared()->code()->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
- *ic_with_typeinfo_count = info->ic_with_typeinfo_count();
+ *ic_with_type_info_count = info->ic_with_type_info_count();
*ic_total_count = info->ic_total_count();
}
*percentage = *ic_total_count > 0
- ? 100 * *ic_with_typeinfo_count / *ic_total_count
+ ? 100 * *ic_with_type_info_count / *ic_total_count
: 100;
}
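
The ratio computed here is what FLAG_type_info_threshold is later compared against. A self-contained restatement of the rule, with plain ints standing in for TypeFeedbackInfo's counters; note the fallback to 100 when a function has no inline caches at all, so IC-free functions count as fully typed:

#include <cassert>

// Percentage of inline caches carrying type information.
static int TypeInfoPercentage(int ic_with_type_info_count,
                              int ic_total_count) {
  return ic_total_count > 0
             ? 100 * ic_with_type_info_count / ic_total_count
             : 100;  // no ICs at all: treat as fully typed
}

int main() {
  assert(TypeInfoPercentage(3, 4) == 75);
  assert(TypeInfoPercentage(0, 0) == 100);
  return 0;
}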
@@ -173,14 +181,10 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// prepared to generate it, but we don't expect to have to.
bool found_code = false;
Code* stack_check_code = NULL;
-#if defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_ARM) || \
- defined(V8_TARGET_ARCH_MIPS)
if (FLAG_count_based_interrupts) {
InterruptStub interrupt_stub;
found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
} else // NOLINT
-#endif
{ // NOLINT
StackCheckStub check_stub;
found_code = check_stub.FindCodeInCache(&stack_check_code);
@@ -259,13 +263,14 @@ void RuntimeProfiler::OptimizeNow() {
}
}
- if (function->IsMarkedForLazyRecompilation() &&
- function->shared()->code()->kind() == Code::FUNCTION) {
- Code* unoptimized = function->shared()->code();
- int nesting = unoptimized->allow_osr_at_loop_nesting_level();
+ Code* shared_code = function->shared()->code();
+ if (shared_code->kind() != Code::FUNCTION) continue;
+
+ if (function->IsMarkedForLazyRecompilation()) {
+ int nesting = shared_code->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
- unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
+ shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
// Do not record non-optimizable functions.
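
The nesting bookkeeping above is a ratchet: each profiler tick raises the loop-nesting level at which back edges may trigger on-stack replacement, saturating at Code::kMaxLoopNestingMarker. A minimal sketch of just that ratchet (the marker value of 6 is an assumption about this source tree, not shown in the diff):

#include <algorithm>

static const int kMaxLoopNestingMarker = 6;  // assumed value

// At nesting == 0 the caller first attempts OSR directly; afterwards
// the allowed nesting level grows by one per tick, capped at the marker.
inline int NextOsrNestingLevel(int nesting) {
  return std::min(nesting + 1, kMaxLoopNestingMarker);
}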
@@ -283,7 +288,7 @@ void RuntimeProfiler::OptimizeNow() {
}
if (FLAG_watch_ic_patching) {
- int ticks = function->shared()->profiler_ticks();
+ int ticks = shared_code->profiler_ticks();
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, total, percentage;
@@ -292,12 +297,10 @@ void RuntimeProfiler::OptimizeNow() {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
- } else if (ticks >= 100) {
- // If this function does not have enough type info, but has
- // seen a huge number of ticks, optimize it as it is.
+ } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, "not much type info but very hot");
} else {
- function->shared()->set_profiler_ticks(ticks + 1);
+ shared_code->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
@@ -306,20 +309,12 @@ void RuntimeProfiler::OptimizeNow() {
}
}
} else if (!any_ic_changed_ &&
- function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
- } else if (!code_generated_ &&
- !any_ic_changed_ &&
- total_code_generated_ > 0 &&
- total_code_generated_ < 2000) {
- // If no code was generated and no IC was patched since the last tick,
- // but a little code has already been generated since last Reset(),
- // then type info might already be stable and we can optimize now.
- Optimize(function, "stable on startup");
} else {
- function->shared()->set_profiler_ticks(ticks + 1);
+ shared_code->set_profiler_ticks(ticks + 1);
}
} else { // !FLAG_watch_ic_patching
samples[sample_count++] = function;
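
With the "stable on startup" heuristic deleted, the FLAG_watch_ic_patching path reduces to three outcomes: optimize hot-and-stable code, optimize very hot code regardless of type info, or optimistically optimize tiny functions when no IC moved since the last tick. A distilled sketch of that decision tree; Optimize() is a stand-in, type_info_threshold models FLAG_type_info_threshold, and the kMaxSizeEarlyOpt value is assumed rather than shown in these hunks:

#include <cstdio>

// Stand-in for RuntimeProfiler::Optimize().
static void Optimize(const char* reason) {
  std::printf("optimizing: %s\n", reason);
}

// Returns the updated tick count for the function's shared code.
static int DecideAndMaybeOptimize(int ticks,
                                  int type_info_percentage,
                                  int type_info_threshold,
                                  bool any_ic_changed,
                                  int instruction_size) {
  const int kProfilerTicksBeforeOptimization = 2;
  const int kTicksWhenNotEnoughTypeInfo = 100;
  const int kMaxSizeEarlyOpt = 500;  // assumed default

  if (ticks >= kProfilerTicksBeforeOptimization) {
    if (type_info_percentage >= type_info_threshold) {
      Optimize("hot and stable");
    } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
      Optimize("not much type info but very hot");
    } else {
      return ticks + 1;  // not enough type info yet: keep counting
    }
  } else if (!any_ic_changed && instruction_size < kMaxSizeEarlyOpt) {
    Optimize("small function");  // optimistic early optimization
  } else {
    return ticks + 1;  // the removed "stable on startup" case now lands here
  }
  return ticks;
}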
@@ -338,7 +333,6 @@ void RuntimeProfiler::OptimizeNow() {
}
if (FLAG_watch_ic_patching) {
any_ic_changed_ = false;
- code_generated_ = false;
} else { // !FLAG_watch_ic_patching
// Add the collected functions as samples. It's important not to do
// this as part of collecting them because this will interfere with
@@ -351,11 +345,7 @@ void RuntimeProfiler::OptimizeNow() {
void RuntimeProfiler::NotifyTick() {
-#if defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_ARM) || \
- defined(V8_TARGET_ARCH_MIPS)
if (FLAG_count_based_interrupts) return;
-#endif
isolate_->stack_guard()->RequestRuntimeProfilerTick();
}
@@ -372,9 +362,7 @@ void RuntimeProfiler::SetUp() {
void RuntimeProfiler::Reset() {
- if (FLAG_watch_ic_patching) {
- total_code_generated_ = 0;
- } else { // !FLAG_watch_ic_patching
+ if (!FLAG_watch_ic_patching) {
sampler_threshold_ = kSamplerThresholdInit;
sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
sampler_ticks_until_threshold_adjustment_ =