summaryrefslogtreecommitdiff
path: root/deps/v8/src/profiler
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2016-09-06 22:49:51 +0200
committerMichaël Zasso <targos@protonmail.com>2016-09-22 09:51:19 +0200
commitec02b811a8a5c999bab4de312be2d732b7d9d50b (patch)
treeca3068017254f238cf413a451c57a803572983a4 /deps/v8/src/profiler
parentd2eb7ce0105369a9cad82787cb33a665e9bd00ad (diff)
downloadandroid-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.tar.gz
android-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.tar.bz2
android-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.zip
deps: update V8 to 5.4.500.27
Pick up latest commit from the 5.4-lkgr branch. deps: edit V8 gitignore to allow trace event copy deps: update V8 trace event to 315bf1e2d45be7d53346c31cfcc37424a32c30c8 deps: edit V8 gitignore to allow gtest_prod.h copy deps: update V8 gtest to 6f8a66431cb592dad629028a50b3dd418a408c87 PR-URL: https://github.com/nodejs/node/pull/8317 Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl> Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/profiler')
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc25
-rw-r--r--deps/v8/src/profiler/allocation-tracker.h7
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h13
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc369
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h88
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc24
-rw-r--r--deps/v8/src/profiler/heap-profiler.h22
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc338
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h29
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h4
-rw-r--r--deps/v8/src/profiler/profile-generator.cc283
-rw-r--r--deps/v8/src/profiler/profile-generator.h160
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc335
-rw-r--r--deps/v8/src/profiler/profiler-listener.h97
-rw-r--r--deps/v8/src/profiler/sampler.cc898
-rw-r--r--deps/v8/src/profiler/sampler.h139
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc109
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.h50
-rw-r--r--deps/v8/src/profiler/strings-storage.cc22
-rw-r--r--deps/v8/src/profiler/strings-storage.h12
-rw-r--r--deps/v8/src/profiler/tick-sample.cc272
-rw-r--r--deps/v8/src/profiler/tick-sample.h27
22 files changed, 1364 insertions, 1959 deletions
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 791cdf03f0..d094d0ecc6 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -149,11 +149,10 @@ void AddressToTraceMap::Clear() {
void AddressToTraceMap::Print() {
- PrintF("[AddressToTraceMap (%" V8_SIZET_PREFIX V8PRIuPTR "): \n",
- ranges_.size());
+ PrintF("[AddressToTraceMap (%" PRIuS "): \n", ranges_.size());
for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
- PrintF("[%p - %p] => %u\n", it->second.start, it->first,
- it->second.trace_node_id);
+ PrintF("[%p - %p] => %u\n", static_cast<void*>(it->second.start),
+ static_cast<void*>(it->first), it->second.trace_node_id);
}
PrintF("]\n");
}
@@ -191,12 +190,10 @@ void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
delete *info;
}
-
-AllocationTracker::AllocationTracker(
- HeapObjectsMap* ids, StringsStorage* names)
+AllocationTracker::AllocationTracker(HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
- id_to_function_info_index_(HashMap::PointersMatch),
+ id_to_function_info_index_(base::HashMap::PointersMatch),
info_index_for_other_state_(0) {
FunctionInfo* info = new FunctionInfo();
info->name = "(root)";
@@ -231,7 +228,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
Isolate* isolate = heap->isolate();
int length = 0;
- StackTraceFrameIterator it(isolate);
+ JavaScriptFrameIterator it(isolate);
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
SharedFunctionInfo* shared = frame->function()->shared();
@@ -262,7 +259,7 @@ static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
SnapshotObjectId id) {
- HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
+ base::HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
if (entry->value == NULL) {
FunctionInfo* info = new FunctionInfo();
@@ -307,9 +304,8 @@ AllocationTracker::UnresolvedLocation::UnresolvedLocation(
info_(info) {
script_ = Handle<Script>::cast(
script->GetIsolate()->global_handles()->Create(script));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this,
- &HandleWeakScript);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()), this,
+ &HandleWeakScript, v8::WeakCallbackType::kParameter);
}
@@ -327,9 +323,8 @@ void AllocationTracker::UnresolvedLocation::Resolve() {
info_->column = Script::GetColumnNumber(script_, start_position_);
}
-
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
- const v8::WeakCallbackData<v8::Value, void>& data) {
+ const v8::WeakCallbackInfo<void>& data) {
UnresolvedLocation* loc =
reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index 03802a5c66..45bd446714 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -8,8 +8,8 @@
#include <map>
#include "include/v8-profiler.h"
+#include "src/base/hashmap.h"
#include "src/handles.h"
-#include "src/hashmap.h"
#include "src/list.h"
#include "src/vector.h"
@@ -129,8 +129,7 @@ class AllocationTracker {
void Resolve();
private:
- static void HandleWeakScript(
- const v8::WeakCallbackData<v8::Value, void>& data);
+ static void HandleWeakScript(const v8::WeakCallbackInfo<void>& data);
Handle<Script> script_;
int start_position_;
@@ -144,7 +143,7 @@ class AllocationTracker {
AllocationTraceTree trace_tree_;
unsigned allocation_trace_buffer_[kMaxAllocationTraceLength];
List<FunctionInfo*> function_info_list_;
- HashMap id_to_function_info_index_;
+ base::HashMap id_to_function_info_index_;
List<UnresolvedLocation*> unresolved_locations_;
unsigned info_index_for_other_state_;
AddressToTraceMap address_to_trace_;
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index 45e4ccf136..504c3f6e1a 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -35,7 +35,7 @@ void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(start);
- if (entry != NULL) entry->set_deopt_info(deopt_reason, position, pc_offset);
+ if (entry != NULL) entry->set_deopt_info(deopt_reason, position, deopt_id);
}
@@ -50,17 +50,6 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
-TickSample* CpuProfiler::StartTickSample() {
- if (is_profiling_) return processor_->StartTickSample();
- return NULL;
-}
-
-
-void CpuProfiler::FinishTickSample() {
- processor_->FinishTickSample();
-}
-
-
TickSample* ProfilerEventsProcessor::StartTickSample() {
void* address = ticks_buffer_.StartEnqueue();
if (address == NULL) return NULL;
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 47585b7b08..7a0cf9c8bf 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -12,28 +12,49 @@
#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"
-#include "include/v8-profiler.h"
-
namespace v8 {
namespace internal {
static const int kProfilerStackSize = 64 * KB;
+class CpuSampler : public sampler::Sampler {
+ public:
+ CpuSampler(Isolate* isolate, ProfilerEventsProcessor* processor)
+ : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
+ processor_(processor) {}
+
+ void SampleStack(const v8::RegisterState& regs) override {
+ TickSample* sample = processor_->StartTickSample();
+ if (sample == nullptr) return;
+ Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
+ sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame, true);
+ if (is_counting_samples_ && !sample->timestamp.IsNull()) {
+ if (sample->state == JS) ++js_sample_count_;
+ if (sample->state == EXTERNAL) ++external_sample_count_;
+ }
+ processor_->FinishTickSample();
+ }
+
+ private:
+ ProfilerEventsProcessor* processor_;
+};
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
+ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
+ ProfileGenerator* generator,
base::TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
- sampler_(sampler),
+ sampler_(new CpuSampler(isolate, this)),
running_(1),
period_(period),
last_code_event_id_(0),
- last_processed_code_event_id_(0) {}
-
-
-ProfilerEventsProcessor::~ProfilerEventsProcessor() {}
+ last_processed_code_event_id_(0) {
+ sampler_->IncreaseProfilingDepth();
+}
+ProfilerEventsProcessor::~ProfilerEventsProcessor() {
+ sampler_->DecreaseProfilingDepth();
+}
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
event.generic.order = last_code_event_id_.Increment(1);
@@ -49,7 +70,7 @@ void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
regs.sp = fp - fp_to_sp_delta;
regs.fp = fp;
regs.pc = from;
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false);
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false, false);
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -64,7 +85,8 @@ void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
regs.fp = frame->fp();
regs.pc = frame->pc();
}
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats);
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats,
+ false);
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -199,257 +221,23 @@ void CpuProfiler::DeleteProfile(CpuProfile* profile) {
}
}
-
-void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = entry_point;
- rec->entry = profiles_->NewCodeEntry(
- Logger::CALLBACK_TAG,
- profiles_->GetName(name));
- rec->size = 1;
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* code, const char* name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* code, Name* name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* script_name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetFunctionName(shared->DebugName()),
- CodeEntry::kEmptyNamePrefix,
- profiles_->GetName(InferScriptName(script_name, shared)),
- CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
- NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- if (info) {
- rec->entry->set_inlined_function_infos(info->inlined_function_infos());
- }
- rec->entry->FillFunctionInfo(shared);
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* abstract_code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* script_name,
- int line, int column) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = abstract_code->address();
- Script* script = Script::cast(shared->script());
- JITLineInfoTable* line_table = NULL;
- if (script) {
- if (abstract_code->IsCode()) {
- Code* code = abstract_code->GetCode();
- int start_position = shared->start_position();
- int end_position = shared->end_position();
- line_table = new JITLineInfoTable();
- for (RelocIterator it(code); !it.done(); it.next()) {
- RelocInfo* reloc_info = it.rinfo();
- if (!RelocInfo::IsPosition(reloc_info->rmode())) continue;
- int position = static_cast<int>(reloc_info->data());
- // TODO(alph): in case of inlining the position may correspond
- // to an inlined function source code. Do not collect positions
- // that fall beyond the function source code. There's however a
- // chance the inlined function has similar positions but in another
- // script. So the proper fix is to store script_id in some form
- // along with the inlined function positions.
- if (position < start_position || position >= end_position) continue;
- int pc_offset = static_cast<int>(reloc_info->pc() - code->address());
- int line_number = script->GetLineNumber(position) + 1;
- line_table->SetPosition(pc_offset, line_number);
- }
- } else {
- BytecodeArray* bytecode = abstract_code->GetBytecodeArray();
- line_table = new JITLineInfoTable();
- interpreter::SourcePositionTableIterator it(
- bytecode->source_position_table());
- for (; !it.done(); it.Advance()) {
- int line_number = script->GetLineNumber(it.source_position()) + 1;
- int pc_offset = it.bytecode_offset() + BytecodeArray::kHeaderSize;
- line_table->SetPosition(pc_offset, line_number);
- }
- }
- }
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetFunctionName(shared->DebugName()),
- CodeEntry::kEmptyNamePrefix,
- profiles_->GetName(InferScriptName(script_name, shared)), line, column,
- line_table, abstract_code->instruction_start());
- RecordInliningInfo(rec->entry, abstract_code);
- if (info) {
- rec->entry->set_inlined_function_infos(info->inlined_function_infos());
- }
- rec->entry->FillFunctionInfo(shared);
- rec->size = abstract_code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* code, int args_count) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetName(args_count), "args_count: ",
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeMoveEvent(AbstractCode* from, Address to) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
- CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->from = from->address();
- rec->to = to;
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
- CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
- rec->start = code->address();
- rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
- CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
- Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
- rec->start = code->address();
- rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
- rec->position = info.position;
- rec->pc_offset = pc - code->instruction_start();
- processor_->Enqueue(evt_rec);
- processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
-}
-
-void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = entry_point;
- rec->entry = profiles_->NewCodeEntry(
- Logger::CALLBACK_TAG,
- profiles_->GetName(name),
- "get ");
- rec->size = 1;
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- Logger::REG_EXP_TAG, profiles_->GetName(source), "RegExp: ",
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-
-void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = entry_point;
- rec->entry = profiles_->NewCodeEntry(
- Logger::CALLBACK_TAG,
- profiles_->GetName(name),
- "set ");
- rec->size = 1;
- processor_->Enqueue(evt_rec);
-}
-
-Name* CpuProfiler::InferScriptName(Name* name, SharedFunctionInfo* info) {
- if (name->IsString() && String::cast(name)->length()) return name;
- if (!info->script()->IsScript()) return name;
- Object* source_url = Script::cast(info->script())->source_url();
- return source_url->IsName() ? Name::cast(source_url) : name;
-}
-
-void CpuProfiler::RecordInliningInfo(CodeEntry* entry,
- AbstractCode* abstract_code) {
- if (!abstract_code->IsCode()) return;
- Code* code = abstract_code->GetCode();
- if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
- DeoptimizationInputData* deopt_input_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int deopt_count = deopt_input_data->DeoptCount();
- for (int i = 0; i < deopt_count; i++) {
- int pc_offset = deopt_input_data->Pc(i)->value();
- if (pc_offset == -1) continue;
- int translation_index = deopt_input_data->TranslationIndex(i)->value();
- TranslationIterator it(deopt_input_data->TranslationByteArray(),
- translation_index);
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK_EQ(Translation::BEGIN, opcode);
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- int depth = 0;
- std::vector<CodeEntry*> inline_stack;
- while (it.HasNext() &&
- Translation::BEGIN !=
- (opcode = static_cast<Translation::Opcode>(it.Next()))) {
- if (opcode != Translation::JS_FRAME &&
- opcode != Translation::INTERPRETED_FRAME) {
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- continue;
- }
- it.Next(); // Skip ast_id
- int shared_info_id = it.Next();
- it.Next(); // Skip height
- SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
- deopt_input_data->LiteralArray()->get(shared_info_id));
- if (!depth++) continue; // Skip the current function itself.
- CodeEntry* inline_entry = new CodeEntry(
- entry->tag(), profiles_->GetFunctionName(shared_info->DebugName()),
- CodeEntry::kEmptyNamePrefix, entry->resource_name(),
- CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- inline_entry->FillFunctionInfo(shared_info);
- inline_stack.push_back(inline_entry);
- }
- if (!inline_stack.empty()) {
- entry->AddInlineStack(pc_offset, inline_stack);
- DCHECK(inline_stack.empty());
+void CpuProfiler::CodeEventHandler(const CodeEventsContainer& evt_rec) {
+ switch (evt_rec.generic.type) {
+ case CodeEventRecord::CODE_CREATION:
+ case CodeEventRecord::CODE_MOVE:
+ case CodeEventRecord::CODE_DISABLE_OPT:
+ processor_->Enqueue(evt_rec);
+ break;
+ case CodeEventRecord::CODE_DEOPT: {
+ const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+ Address pc = reinterpret_cast<Address>(rec->pc);
+ int fp_to_sp_delta = rec->fp_to_sp_delta;
+ processor_->Enqueue(evt_rec);
+ processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
+ break;
}
+ default:
+ UNREACHABLE();
}
}
@@ -457,15 +245,12 @@ CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
sampling_interval_(base::TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
- profiles_(new CpuProfilesCollection(isolate->heap())),
- generator_(NULL),
- processor_(NULL),
+ profiles_(new CpuProfilesCollection(isolate)),
is_profiling_(false) {
+ profiles_->set_cpu_profiler(this);
}
-
-CpuProfiler::CpuProfiler(Isolate* isolate,
- CpuProfilesCollection* test_profiles,
+CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
@@ -475,28 +260,25 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
generator_(test_generator),
processor_(test_processor),
is_profiling_(false) {
+ profiles_->set_cpu_profiler(this);
}
-
CpuProfiler::~CpuProfiler() {
DCHECK(!is_profiling_);
- delete profiles_;
}
-
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
DCHECK(!is_profiling_);
sampling_interval_ = value;
}
-
void CpuProfiler::ResetProfiles() {
- delete profiles_;
- profiles_ = new CpuProfilesCollection(isolate()->heap());
+ profiles_.reset(new CpuProfilesCollection(isolate_));
+ profiles_->set_cpu_profiler(this);
}
void CpuProfiler::CollectSample() {
- if (processor_ != NULL) {
+ if (processor_) {
processor_->AddCurrentStack(isolate_);
}
}
@@ -515,7 +297,7 @@ void CpuProfiler::StartProfiling(String* title, bool record_samples) {
void CpuProfiler::StartProcessorIfNotStarted() {
- if (processor_ != NULL) {
+ if (processor_) {
processor_->AddCurrentStack(isolate_);
return;
}
@@ -523,11 +305,14 @@ void CpuProfiler::StartProcessorIfNotStarted() {
// Disable logging when using the new implementation.
saved_is_logging_ = logger->is_logging_;
logger->is_logging_ = false;
- generator_ = new ProfileGenerator(profiles_);
- Sampler* sampler = logger->sampler();
- processor_ = new ProfilerEventsProcessor(
- generator_, sampler, sampling_interval_);
+ generator_.reset(new ProfileGenerator(profiles_.get()));
+ processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
+ sampling_interval_));
+ logger->SetUpProfilerListener();
+ ProfilerListener* profiler_listener = logger->profiler_listener();
+ profiler_listener->AddObserver(this);
is_profiling_ = true;
+ isolate_->set_is_profiling(true);
// Enumerate stuff we already have in the heap.
DCHECK(isolate_->heap()->HasBeenSetUp());
if (!FLAG_prof_browser_mode) {
@@ -537,18 +322,16 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->LogAccessorCallbacks();
LogBuiltins();
// Enable stack sampling.
- sampler->SetHasProcessingThread(true);
- sampler->IncreaseProfilingDepth();
processor_->AddCurrentStack(isolate_);
processor_->StartSynchronously();
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
- if (!is_profiling_) return NULL;
+ if (!is_profiling_) return nullptr;
StopProcessorIfLastProfile(title);
CpuProfile* result = profiles_->StopProfiling(title);
- if (result != NULL) {
+ if (result) {
result->Print();
}
return result;
@@ -556,7 +339,7 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
CpuProfile* CpuProfiler::StopProfiling(String* title) {
- if (!is_profiling_) return NULL;
+ if (!is_profiling_) return nullptr;
const char* profile_title = profiles_->GetName(title);
StopProcessorIfLastProfile(profile_title);
return profiles_->StopProfiling(profile_title);
@@ -564,21 +347,22 @@ CpuProfile* CpuProfiler::StopProfiling(String* title) {
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
- if (profiles_->IsLastProfile(title)) StopProcessor();
+ if (profiles_->IsLastProfile(title)) {
+ StopProcessor();
+ }
}
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
- Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
is_profiling_ = false;
+ isolate_->set_is_profiling(false);
+ ProfilerListener* profiler_listener = logger->profiler_listener();
+ profiler_listener->RemoveObserver(this);
processor_->StopSynchronously();
- delete processor_;
- delete generator_;
- processor_ = NULL;
- generator_ = NULL;
- sampler->SetHasProcessingThread(false);
- sampler->DecreaseProfilingDepth();
+ logger->TearDownProfilerListener();
+ processor_.reset();
+ generator_.reset();
logger->is_logging_ = saved_is_logging_;
}
@@ -596,6 +380,5 @@ void CpuProfiler::LogBuiltins() {
}
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index a04ee3c3a8..e3df609f89 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -5,14 +5,19 @@
#ifndef V8_PROFILER_CPU_PROFILER_H_
#define V8_PROFILER_CPU_PROFILER_H_
+#include <memory>
+
#include "src/allocation.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
+#include "src/isolate.h"
+#include "src/libsampler/sampler.h"
#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
-#include "src/profiler/sampler.h"
+#include "src/profiler/profiler-listener.h"
+#include "src/profiler/tick-sample.h"
namespace v8 {
namespace internal {
@@ -20,7 +25,6 @@ namespace internal {
// Forward declarations.
class CodeEntry;
class CodeMap;
-class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
@@ -81,7 +85,9 @@ class CodeDeoptEventRecord : public CodeEventRecord {
Address start;
const char* deopt_reason;
SourcePosition position;
- size_t pc_offset;
+ int deopt_id;
+ void* pc;
+ int fp_to_sp_delta;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -127,8 +133,7 @@ class CodeEventsContainer {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public base::Thread {
public:
- ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
+ ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
base::TimeDelta period);
virtual ~ProfilerEventsProcessor();
@@ -154,6 +159,8 @@ class ProfilerEventsProcessor : public base::Thread {
void* operator new(size_t size);
void operator delete(void* ptr);
+ sampler::Sampler* sampler() { return sampler_.get(); }
+
private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
@@ -166,7 +173,7 @@ class ProfilerEventsProcessor : public base::Thread {
SampleProcessingResult ProcessOneSample();
ProfileGenerator* generator_;
- Sampler* sampler_;
+ std::unique_ptr<sampler::Sampler> sampler_;
base::Atomic32 running_;
const base::TimeDelta period_; // Samples & code events processing period.
LockedQueue<CodeEventsContainer> events_buffer_;
@@ -176,28 +183,15 @@ class ProfilerEventsProcessor : public base::Thread {
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- AtomicNumber<unsigned> last_code_event_id_;
+ base::AtomicNumber<unsigned> last_code_event_id_;
unsigned last_processed_code_event_id_;
};
-
-#define PROFILE(IsolateGetter, Call) \
- do { \
- Isolate* cpu_profiler_isolate = (IsolateGetter); \
- v8::internal::Logger* logger = cpu_profiler_isolate->logger(); \
- CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \
- if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
- logger->Call; \
- } \
- } while (false)
-
-
-class CpuProfiler : public CodeEventListener {
+class CpuProfiler : public CodeEventObserver {
public:
explicit CpuProfiler(Isolate* isolate);
- CpuProfiler(Isolate* isolate,
- CpuProfilesCollection* test_collection,
+ CpuProfiler(Isolate* isolate, CpuProfilesCollection* profiles,
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor);
@@ -214,42 +208,12 @@ class CpuProfiler : public CodeEventListener {
void DeleteAllProfiles();
void DeleteProfile(CpuProfile* profile);
- // Invoked from stack sampler (thread or signal handler.)
- inline TickSample* StartTickSample();
- inline void FinishTickSample();
+ void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
- // Must be called via PROFILE macro, otherwise will crash when
- // profiling is not enabled.
- void CallbackEvent(Name* name, Address entry_point) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- const char* comment) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- Name* name) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
- Name* script_name) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
- Name* script_name, int line, int column) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- int args_count) override;
- void CodeMovingGCEvent() override {}
- void CodeMoveEvent(AbstractCode* from, Address to) override;
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override;
- void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
- void GetterCallbackEvent(Name* name, Address entry_point) override;
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
- void SetterCallbackEvent(Name* name, Address entry_point) override;
- void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
-
- INLINE(bool is_profiling() const) { return is_profiling_; }
- bool* is_profiling_address() {
- return &is_profiling_;
- }
+ bool is_profiling() const { return is_profiling_; }
- ProfileGenerator* generator() const { return generator_; }
- ProfilerEventsProcessor* processor() const { return processor_; }
+ ProfileGenerator* generator() const { return generator_.get(); }
+ ProfilerEventsProcessor* processor() const { return processor_.get(); }
Isolate* isolate() const { return isolate_; }
private:
@@ -258,14 +222,12 @@ class CpuProfiler : public CodeEventListener {
void StopProcessor();
void ResetProfiles();
void LogBuiltins();
- void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
- Name* InferScriptName(Name* name, SharedFunctionInfo* info);
- Isolate* isolate_;
+ Isolate* const isolate_;
base::TimeDelta sampling_interval_;
- CpuProfilesCollection* profiles_;
- ProfileGenerator* generator_;
- ProfilerEventsProcessor* processor_;
+ std::unique_ptr<CpuProfilesCollection> profiles_;
+ std::unique_ptr<ProfileGenerator> generator_;
+ std::unique_ptr<ProfilerEventsProcessor> processor_;
bool saved_is_logging_;
bool is_profiling_;
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 1305cae66e..2df28a7958 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -34,7 +34,7 @@ HeapProfiler::~HeapProfiler() {
void HeapProfiler::DeleteAllSnapshots() {
snapshots_.Iterate(DeleteHeapSnapshot);
snapshots_.Clear();
- names_.Reset(new StringsStorage(heap()));
+ names_.reset(new StringsStorage(heap()));
}
@@ -84,20 +84,20 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
return result;
}
-
-bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
- int stack_depth) {
+bool HeapProfiler::StartSamplingHeapProfiler(
+ uint64_t sample_interval, int stack_depth,
+ v8::HeapProfiler::SamplingFlags flags) {
if (sampling_heap_profiler_.get()) {
return false;
}
- sampling_heap_profiler_.Reset(new SamplingHeapProfiler(
- heap(), names_.get(), sample_interval, stack_depth));
+ sampling_heap_profiler_.reset(new SamplingHeapProfiler(
+ heap(), names_.get(), sample_interval, stack_depth, flags));
return true;
}
void HeapProfiler::StopSamplingHeapProfiler() {
- sampling_heap_profiler_.Reset(nullptr);
+ sampling_heap_profiler_.reset();
}
@@ -115,7 +115,7 @@ void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
is_tracking_object_moves_ = true;
DCHECK(!is_tracking_allocations());
if (track_allocations) {
- allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get()));
+ allocation_tracker_.reset(new AllocationTracker(ids_.get(), names_.get()));
heap()->DisableInlineAllocation();
heap()->isolate()->debug()->feature_tracker()->Track(
DebugFeatureTracker::kAllocationTracking);
@@ -132,7 +132,7 @@ SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream,
void HeapProfiler::StopHeapObjectsTracking() {
ids_->StopHeapObjectsTracking();
if (is_tracking_allocations()) {
- allocation_tracker_.Reset(NULL);
+ allocation_tracker_.reset();
heap()->EnableInlineAllocation();
}
}
@@ -170,7 +170,7 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
base::LockGuard<base::Mutex> guard(&profiler_mutex_);
bool known_object = ids_->MoveObject(from, to, size);
- if (!known_object && !allocation_tracker_.is_empty()) {
+ if (!known_object && allocation_tracker_) {
allocation_tracker_->address_to_trace()->MoveObject(from, to, size);
}
}
@@ -178,7 +178,7 @@ void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
void HeapProfiler::AllocationEvent(Address addr, int size) {
DisallowHeapAllocation no_allocation;
- if (!allocation_tracker_.is_empty()) {
+ if (allocation_tracker_) {
allocation_tracker_->AllocationEvent(addr, size);
}
}
@@ -214,7 +214,7 @@ Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
void HeapProfiler::ClearHeapObjectMap() {
- ids_.Reset(new HeapObjectsMap(heap()));
+ ids_.reset(new HeapObjectsMap(heap()));
if (!is_tracking_allocations()) is_tracking_object_moves_ = false;
}
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 32e143c74f..3e1dcb54f9 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -5,7 +5,8 @@
#ifndef V8_PROFILER_HEAP_PROFILER_H_
#define V8_PROFILER_HEAP_PROFILER_H_
-#include "src/base/smart-pointers.h"
+#include <memory>
+
#include "src/isolate.h"
#include "src/list.h"
@@ -30,9 +31,10 @@ class HeapProfiler {
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
- bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth);
+ bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth,
+ v8::HeapProfiler::SamplingFlags);
void StopSamplingHeapProfiler();
- bool is_sampling_allocations() { return !sampling_heap_profiler_.is_empty(); }
+ bool is_sampling_allocations() { return !!sampling_heap_profiler_; }
AllocationProfile* GetAllocationProfile();
void StartHeapObjectsTracking(bool track_allocations);
@@ -65,9 +67,7 @@ class HeapProfiler {
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
- bool is_tracking_allocations() const {
- return !allocation_tracker_.is_empty();
- }
+ bool is_tracking_allocations() const { return !!allocation_tracker_; }
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
void ClearHeapObjectMap();
@@ -78,14 +78,16 @@ class HeapProfiler {
Heap* heap() const;
// Mapping from HeapObject addresses to objects' uids.
- base::SmartPointer<HeapObjectsMap> ids_;
+ std::unique_ptr<HeapObjectsMap> ids_;
List<HeapSnapshot*> snapshots_;
- base::SmartPointer<StringsStorage> names_;
+ std::unique_ptr<StringsStorage> names_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
- base::SmartPointer<AllocationTracker> allocation_tracker_;
+ std::unique_ptr<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
- base::SmartPointer<SamplingHeapProfiler> sampling_heap_profiler_;
+ std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapProfiler);
};
} // namespace internal
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 748f3074a1..9273168f80 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -80,8 +80,8 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
- base::OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ", self_size(), id(), indent,
- ' ', prefix, edge_name);
+ base::OS::Print("%6" PRIuS " @%6u %*c %s%s: ", self_size(), id(), indent, ' ',
+ prefix, edge_name);
if (type() != kString) {
base::OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -392,7 +392,7 @@ bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
entries_.at(to_entry_info_index).addr = NULL;
}
} else {
- HashMap::Entry* to_entry =
+ base::HashMap::Entry* to_entry =
entries_map_.LookupOrInsert(to, ComputePointerHash(to));
if (to_entry->value != NULL) {
// We found the existing entry with to address for an old object.
@@ -412,10 +412,8 @@ bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
// object is migrated.
if (FLAG_heap_profiler_trace_objects) {
PrintF("Move object from %p to %p old size %6d new size %6d\n",
- from,
- to,
- entries_.at(from_entry_info_index).size,
- object_size);
+ static_cast<void*>(from), static_cast<void*>(to),
+ entries_.at(from_entry_info_index).size, object_size);
}
entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
@@ -430,7 +428,8 @@ void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
- HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr));
+ base::HashMap::Entry* entry =
+ entries_map_.Lookup(addr, ComputePointerHash(addr));
if (entry == NULL) return 0;
int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_.at(entry_index);
@@ -443,7 +442,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
unsigned int size,
bool accessed) {
DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- HashMap::Entry* entry =
+ base::HashMap::Entry* entry =
entries_map_.LookupOrInsert(addr, ComputePointerHash(addr));
if (entry->value != NULL) {
int entry_index =
@@ -452,9 +451,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
entry_info.accessed = accessed;
if (FLAG_heap_profiler_trace_objects) {
PrintF("Update object size : %p with old size %d and new size %d\n",
- addr,
- entry_info.size,
- size);
+ static_cast<void*>(addr), entry_info.size, size);
}
entry_info.size = size;
return entry_info.id;
@@ -487,9 +484,8 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
FindOrAddEntry(obj->address(), obj->Size());
if (FLAG_heap_profiler_trace_objects) {
PrintF("Update object : %p %6d. Next address is %p\n",
- obj->address(),
- obj->Size(),
- obj->address() + obj->Size());
+ static_cast<void*>(obj->address()), obj->Size(),
+ static_cast<void*>(obj->address() + obj->Size()));
}
}
RemoveDeadEntries();
@@ -517,20 +513,16 @@ struct HeapObjectInfo {
void Print() const {
if (expected_size == 0) {
PrintF("Untracked object : %p %6d. Next address is %p\n",
- obj->address(),
- obj->Size(),
- obj->address() + obj->Size());
+ static_cast<void*>(obj->address()), obj->Size(),
+ static_cast<void*>(obj->address() + obj->Size()));
} else if (obj->Size() != expected_size) {
- PrintF("Wrong size %6d: %p %6d. Next address is %p\n",
- expected_size,
- obj->address(),
- obj->Size(),
- obj->address() + obj->Size());
+ PrintF("Wrong size %6d: %p %6d. Next address is %p\n", expected_size,
+ static_cast<void*>(obj->address()), obj->Size(),
+ static_cast<void*>(obj->address() + obj->Size()));
} else {
PrintF("Good object : %p %6d. Next address is %p\n",
- obj->address(),
- expected_size,
- obj->address() + obj->Size());
+ static_cast<void*>(obj->address()), expected_size,
+ static_cast<void*>(obj->address() + obj->Size()));
}
}
};
@@ -554,7 +546,7 @@ int HeapObjectsMap::FindUntrackedObjects() {
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
- HashMap::Entry* entry =
+ base::HashMap::Entry* entry =
entries_map_.Lookup(obj->address(), ComputePointerHash(obj->address()));
if (entry == NULL) {
++untracked;
@@ -674,7 +666,7 @@ void HeapObjectsMap::RemoveDeadEntries() {
entries_.at(first_free_entry) = entry_info;
}
entries_.at(first_free_entry).accessed = false;
- HashMap::Entry* entry = entries_map_.Lookup(
+ base::HashMap::Entry* entry = entries_map_.Lookup(
entry_info.addr, ComputePointerHash(entry_info.addr));
DCHECK(entry);
entry->value = reinterpret_cast<void*>(first_free_entry);
@@ -707,37 +699,28 @@ SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
size_t HeapObjectsMap::GetUsedMemorySize() const {
- return
- sizeof(*this) +
- sizeof(HashMap::Entry) * entries_map_.capacity() +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(time_intervals_);
-}
-
-
-HeapEntriesMap::HeapEntriesMap()
- : entries_(HashMap::PointersMatch) {
+ return sizeof(*this) +
+ sizeof(base::HashMap::Entry) * entries_map_.capacity() +
+ GetMemoryUsedByList(entries_) + GetMemoryUsedByList(time_intervals_);
}
+HeapEntriesMap::HeapEntriesMap() : entries_(base::HashMap::PointersMatch) {}
int HeapEntriesMap::Map(HeapThing thing) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
+ base::HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
if (cache_entry == NULL) return HeapEntry::kNoEntry;
return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
}
void HeapEntriesMap::Pair(HeapThing thing, int entry) {
- HashMap::Entry* cache_entry = entries_.LookupOrInsert(thing, Hash(thing));
+ base::HashMap::Entry* cache_entry =
+ entries_.LookupOrInsert(thing, Hash(thing));
DCHECK(cache_entry->value == NULL);
cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
}
-
-HeapObjectsSet::HeapObjectsSet()
- : entries_(HashMap::PointersMatch) {
-}
-
+HeapObjectsSet::HeapObjectsSet() : entries_(base::HashMap::PointersMatch) {}
void HeapObjectsSet::Clear() {
entries_.Clear();
@@ -760,7 +743,7 @@ void HeapObjectsSet::Insert(Object* obj) {
const char* HeapObjectsSet::GetTag(Object* obj) {
HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
+ base::HashMap::Entry* cache_entry =
entries_.Lookup(object, HeapEntriesMap::Hash(object));
return cache_entry != NULL
? reinterpret_cast<const char*>(cache_entry->value)
@@ -768,10 +751,10 @@ const char* HeapObjectsSet::GetTag(Object* obj) {
}
-void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
+V8_NOINLINE void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
if (!obj->IsHeapObject()) return;
HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
+ base::HashMap::Entry* cache_entry =
entries_.LookupOrInsert(object, HeapEntriesMap::Hash(object));
cache_entry->value = const_cast<char*>(tag);
}
@@ -1003,8 +986,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
- intptr_t index =
- static_cast<intptr_t>(p - HeapObject::RawField(parent_obj_, 0));
+ int index = static_cast<int>(p - HeapObject::RawField(parent_obj_, 0));
++next_index_;
// |p| could be outside of the object, e.g., while visiting RelocInfo of
// code objects.
@@ -1012,7 +994,8 @@ class IndexedReferencesExtractor : public ObjectVisitor {
generator_->marks_[index] = false;
continue;
}
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p);
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p,
+ index * kPointerSize);
}
}
@@ -1058,14 +1041,14 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
ExtractAccessorInfoReferences(entry, AccessorInfo::cast(obj));
} else if (obj->IsAccessorPair()) {
ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
- } else if (obj->IsCodeCache()) {
- ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
} else if (obj->IsCode()) {
ExtractCodeReferences(entry, Code::cast(obj));
} else if (obj->IsBox()) {
ExtractBoxReferences(entry, Box::cast(obj));
} else if (obj->IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
+ } else if (obj->IsWeakCell()) {
+ ExtractWeakCellReferences(entry, WeakCell::cast(obj));
} else if (obj->IsPropertyCell()) {
ExtractPropertyCellReferences(entry, PropertyCell::cast(obj));
} else if (obj->IsAllocationSite()) {
@@ -1108,9 +1091,11 @@ void V8HeapExplorer::ExtractJSObjectReferences(
TagObject(js_fun->bound_arguments(), "(bound arguments)");
SetInternalReference(js_fun, entry, "bindings", js_fun->bound_arguments(),
JSBoundFunction::kBoundArgumentsOffset);
- SetNativeBindReference(js_obj, entry, "bound_this", js_fun->bound_this());
- SetNativeBindReference(js_obj, entry, "bound_function",
- js_fun->bound_target_function());
+ SetInternalReference(js_obj, entry, "bound_this", js_fun->bound_this(),
+ JSBoundFunction::kBoundThisOffset);
+ SetInternalReference(js_obj, entry, "bound_function",
+ js_fun->bound_target_function(),
+ JSBoundFunction::kBoundTargetFunctionOffset);
FixedArray* bindings = js_fun->bound_arguments();
for (int i = 0; i < bindings->length(); i++) {
const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
@@ -1119,7 +1104,7 @@ void V8HeapExplorer::ExtractJSObjectReferences(
} else if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
Object* proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole()) {
+ if (!proto_or_map->IsTheHole(heap_->isolate())) {
if (!proto_or_map->IsMap()) {
SetPropertyReference(
obj, entry,
@@ -1147,9 +1132,6 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetInternalReference(js_fun, entry,
"context", js_fun->context(),
JSFunction::kContextOffset);
- SetWeakReference(js_fun, entry,
- "next_function_link", js_fun->next_function_link(),
- JSFunction::kNextFunctionLinkOffset);
// Ensure no new weak references appeared in JSFunction.
STATIC_ASSERT(JSFunction::kCodeEntryOffset ==
JSFunction::kNonWeakFieldsEndOffset);
@@ -1211,16 +1193,16 @@ void V8HeapExplorer::ExtractJSCollectionReferences(int entry,
JSCollection::kTableOffset);
}
-
-void V8HeapExplorer::ExtractJSWeakCollectionReferences(
- int entry, JSWeakCollection* collection) {
- MarkAsWeakContainer(collection->table());
- SetInternalReference(collection, entry,
- "table", collection->table(),
+void V8HeapExplorer::ExtractJSWeakCollectionReferences(int entry,
+ JSWeakCollection* obj) {
+ if (obj->table()->IsHashTable()) {
+ ObjectHashTable* table = ObjectHashTable::cast(obj->table());
+ TagFixedArraySubType(table, JS_WEAK_COLLECTION_SUB_TYPE);
+ }
+ SetInternalReference(obj, entry, "table", obj->table(),
JSWeakCollection::kTableOffset);
}
-
void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
if (context == context->declaration_context()) {
ScopeInfo* scope_info = context->closure()->shared()->scope_info();
@@ -1264,7 +1246,6 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
optimized_functions_list);
EXTRACT_CONTEXT_FIELD(OPTIMIZED_CODE_LIST, unused, optimized_code_list);
EXTRACT_CONTEXT_FIELD(DEOPTIMIZED_CODE_LIST, unused, deoptimized_code_list);
- EXTRACT_CONTEXT_FIELD(NEXT_CONTEXT_LINK, unused, next_context_link);
#undef EXTRACT_CONTEXT_FIELD
STATIC_ASSERT(Context::OPTIMIZED_FUNCTIONS_LIST ==
Context::FIRST_WEAK_SLOT);
@@ -1282,19 +1263,9 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
raw_transitions_or_prototype_info)) {
TransitionArray* transitions =
TransitionArray::cast(raw_transitions_or_prototype_info);
- int transitions_entry = GetEntry(transitions)->index();
-
- if (map->CanTransition()) {
- if (transitions->HasPrototypeTransitions()) {
- FixedArray* prototype_transitions =
- transitions->GetPrototypeTransitions();
- MarkAsWeakContainer(prototype_transitions);
- TagObject(prototype_transitions, "(prototype transitions");
- SetInternalReference(transitions, transitions_entry,
- "prototype_transitions", prototype_transitions);
- }
- // TODO(alph): transitions keys are strong links.
- MarkAsWeakContainer(transitions);
+ if (map->CanTransition() && transitions->HasPrototypeTransitions()) {
+ TagObject(transitions->GetPrototypeTransitions(),
+ "(prototype transitions)");
}
TagObject(transitions, "(transition array)");
@@ -1314,16 +1285,19 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
}
DescriptorArray* descriptors = map->instance_descriptors();
TagObject(descriptors, "(map descriptors)");
- SetInternalReference(map, entry,
- "descriptors", descriptors,
+ SetInternalReference(map, entry, "descriptors", descriptors,
Map::kDescriptorsOffset);
-
- MarkAsWeakContainer(map->code_cache());
- SetInternalReference(map, entry,
- "code_cache", map->code_cache(),
+ SetInternalReference(map, entry, "code_cache", map->code_cache(),
Map::kCodeCacheOffset);
- SetInternalReference(map, entry,
- "prototype", map->prototype(), Map::kPrototypeOffset);
+ SetInternalReference(map, entry, "prototype", map->prototype(),
+ Map::kPrototypeOffset);
+#if V8_DOUBLE_FIELDS_UNBOXING
+ if (FLAG_unbox_double_fields) {
+ SetInternalReference(map, entry, "layout_descriptor",
+ map->layout_descriptor(),
+ Map::kLayoutDescriptorOffset);
+ }
+#endif
Object* constructor_or_backpointer = map->constructor_or_backpointer();
if (constructor_or_backpointer->IsMap()) {
TagObject(constructor_or_backpointer, "(back pointer)");
@@ -1334,10 +1308,11 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
Map::kConstructorOrBackPointerOffset);
}
TagObject(map->dependent_code(), "(dependent code)");
- MarkAsWeakContainer(map->dependent_code());
- SetInternalReference(map, entry,
- "dependent_code", map->dependent_code(),
+ SetInternalReference(map, entry, "dependent_code", map->dependent_code(),
Map::kDependentCodeOffset);
+ TagObject(map->weak_cell_cache(), "(weak cell)");
+ SetInternalReference(map, entry, "weak_cell_cache", map->weak_cell_cache(),
+ Map::kWeakCellCacheOffset);
}
@@ -1389,9 +1364,9 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"optimized_code_map", shared->optimized_code_map(),
SharedFunctionInfo::kOptimizedCodeMapOffset);
- SetInternalReference(obj, entry,
- "feedback_vector", shared->feedback_vector(),
- SharedFunctionInfo::kFeedbackVectorOffset);
+ SetInternalReference(obj, entry, "feedback_metadata",
+ shared->feedback_metadata(),
+ SharedFunctionInfo::kFeedbackMetadataOffset);
}
@@ -1444,19 +1419,6 @@ void V8HeapExplorer::ExtractAccessorPairReferences(
}
-void V8HeapExplorer::ExtractCodeCacheReferences(
- int entry, CodeCache* code_cache) {
- TagObject(code_cache->default_cache(), "(default code cache)");
- SetInternalReference(code_cache, entry,
- "default_cache", code_cache->default_cache(),
- CodeCache::kDefaultCacheOffset);
- TagObject(code_cache->normal_type_cache(), "(code type cache)");
- SetInternalReference(code_cache, entry,
- "type_cache", code_cache->normal_type_cache(),
- CodeCache::kNormalTypeCacheOffset);
-}
-
-
void V8HeapExplorer::TagBuiltinCodeObject(Code* code, const char* name) {
TagObject(code, names_->GetFormatted("(%s builtin)", name));
}
@@ -1484,37 +1446,38 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry,
"deoptimization_data", code->deoptimization_data(),
Code::kDeoptimizationDataOffset);
+ TagObject(code->source_position_table(), "(source position table)");
+ SetInternalReference(code, entry, "source_position_table",
+ code->source_position_table(),
+ Code::kSourcePositionTableOffset);
if (code->kind() == Code::FUNCTION) {
- SetInternalReference(code, entry,
- "type_feedback_info", code->type_feedback_info(),
+ SetInternalReference(code, entry, "type_feedback_info",
+ code->type_feedback_info(),
Code::kTypeFeedbackInfoOffset);
}
- SetInternalReference(code, entry,
- "gc_metadata", code->gc_metadata(),
+ SetInternalReference(code, entry, "gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
- SetWeakReference(code, entry,
- "next_code_link", code->next_code_link(),
- Code::kNextCodeLinkOffset);
- }
}
-
void V8HeapExplorer::ExtractBoxReferences(int entry, Box* box) {
SetInternalReference(box, entry, "value", box->value(), Box::kValueOffset);
}
-
void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
}
+void V8HeapExplorer::ExtractWeakCellReferences(int entry, WeakCell* weak_cell) {
+ TagObject(weak_cell, "(weak cell)");
+ SetWeakReference(weak_cell, entry, "value", weak_cell->value(),
+ WeakCell::kValueOffset);
+}
void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
PropertyCell* cell) {
SetInternalReference(cell, entry, "value", cell->value(),
PropertyCell::kValueOffset);
- MarkAsWeakContainer(cell->dependent_code());
+ TagObject(cell->dependent_code(), "(dependent code)");
SetInternalReference(cell, entry, "dependent_code", cell->dependent_code(),
PropertyCell::kDependentCodeOffset);
}
@@ -1526,7 +1489,7 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
AllocationSite::kTransitionInfoOffset);
SetInternalReference(site, entry, "nested_site", site->nested_site(),
AllocationSite::kNestedSiteOffset);
- MarkAsWeakContainer(site->dependent_code());
+ TagObject(site->dependent_code(), "(dependent code)");
SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
AllocationSite::kDependentCodeOffset);
// Do not visit weak_next as it is not visited by the StaticVisitor,
@@ -1558,7 +1521,7 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(
// Setup a reference to a native memory backing_store object.
if (!buffer->backing_store())
return;
- size_t data_size = NumberToSize(heap_->isolate(), buffer->byte_length());
+ size_t data_size = NumberToSize(buffer->byte_length());
JSArrayBufferDataEntryAllocator allocator(data_size, this);
HeapEntry* data_entry =
filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
@@ -1566,22 +1529,36 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(
entry, "backing_store", data_entry);
}
-
void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
- bool is_weak = weak_containers_.Contains(array);
- for (int i = 0, l = array->length(); i < l; ++i) {
- if (is_weak) {
- SetWeakReference(array, entry,
- i, array->get(i), array->OffsetOfElementAt(i));
- } else {
- SetInternalReference(array, entry,
- i, array->get(i), array->OffsetOfElementAt(i));
+ auto it = array_types_.find(array);
+ if (it == array_types_.end()) {
+ for (int i = 0, l = array->length(); i < l; ++i) {
+ SetInternalReference(array, entry, i, array->get(i),
+ array->OffsetOfElementAt(i));
}
+ return;
}
-}
+ switch (it->second) {
+ case JS_WEAK_COLLECTION_SUB_TYPE:
+ for (int i = 0, l = array->length(); i < l; ++i) {
+ SetWeakReference(array, entry, i, array->get(i),
+ array->OffsetOfElementAt(i));
+ }
+ break;
+ // TODO(alph): Add special processing for other types of FixedArrays.
+
+ default:
+ for (int i = 0, l = array->length(); i < l; ++i) {
+ SetInternalReference(array, entry, i, array->get(i),
+ array->OffsetOfElementAt(i));
+ }
+ break;
+ }
+}
void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
+ Isolate* isolate = js_obj->GetIsolate();
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
int real_size = js_obj->map()->NumberOfOwnDescriptors();
@@ -1598,14 +1575,8 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
- if (k != heap_->hidden_properties_symbol()) {
- SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
- value, NULL, field_offset);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value,
- field_offset);
- }
+ SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
+ value, NULL, field_offset);
break;
}
case kDescriptor:
@@ -1621,15 +1592,10 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
+ if (dictionary->IsKey(isolate, k)) {
DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
Object* value = cell->value();
- if (k == heap_->hidden_properties_symbol()) {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- continue;
- }
PropertyDetails details = cell->property_details();
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
Name::cast(k), value);
@@ -1640,13 +1606,8 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
+ if (dictionary->IsKey(isolate, k)) {
Object* value = dictionary->ValueAt(i);
- if (k == heap_->hidden_properties_symbol()) {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- continue;
- }
PropertyDetails details = dictionary->DetailsAt(i);
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
Name::cast(k), value);
@@ -1675,13 +1636,14 @@ void V8HeapExplorer::ExtractAccessorPairProperty(JSObject* js_obj, int entry,
void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
+ Isolate* isolate = js_obj->GetIsolate();
if (js_obj->HasFastObjectElements()) {
FixedArray* elements = FixedArray::cast(js_obj->elements());
int length = js_obj->IsJSArray() ?
Smi::cast(JSArray::cast(js_obj)->length())->value() :
elements->length();
for (int i = 0; i < length; ++i) {
- if (!elements->get(i)->IsTheHole()) {
+ if (!elements->get(i)->IsTheHole(isolate)) {
SetElementReference(js_obj, entry, i, elements->get(i));
}
}
@@ -1690,7 +1652,7 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
+ if (dictionary->IsKey(isolate, k)) {
DCHECK(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
@@ -1877,6 +1839,23 @@ bool V8HeapExplorer::IsEssentialObject(Object* object) {
object != heap_->two_pointer_filler_map();
}
+bool V8HeapExplorer::IsEssentialHiddenReference(Object* parent,
+ int field_offset) {
+ if (parent->IsAllocationSite() &&
+ field_offset == AllocationSite::kWeakNextOffset)
+ return false;
+ if (parent->IsJSFunction() &&
+ field_offset == JSFunction::kNextFunctionLinkOffset)
+ return false;
+ if (parent->IsCode() && field_offset == Code::kNextCodeLinkOffset)
+ return false;
+ if (parent->IsContext() &&
+ field_offset == Context::OffsetOfElementAt(Context::NEXT_CONTEXT_LINK))
+ return false;
+ if (parent->IsWeakCell() && field_offset == WeakCell::kNextOffset)
+ return false;
+ return true;
+}
void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
int parent_entry,
@@ -1968,17 +1947,14 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
MarkVisitedField(parent_obj, field_offset);
}
-
void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj) {
+ int parent_entry, int index,
+ Object* child_obj, int field_offset) {
DCHECK(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL && IsEssentialObject(child_obj)) {
- filler_->SetIndexedReference(HeapGraphEdge::kHidden,
- parent_entry,
- index,
+ if (child_entry != nullptr && IsEssentialObject(child_obj) &&
+ IsEssentialHiddenReference(parent_obj, field_offset)) {
+ filler_->SetIndexedReference(HeapGraphEdge::kHidden, parent_entry, index,
child_entry);
}
}
@@ -2165,14 +2141,12 @@ void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
}
}
-
-void V8HeapExplorer::MarkAsWeakContainer(Object* object) {
- if (IsEssentialObject(object) && object->IsFixedArray()) {
- weak_containers_.Insert(object);
- }
+void V8HeapExplorer::TagFixedArraySubType(const FixedArray* array,
+ FixedArraySubInstanceType type) {
+ DCHECK(array_types_.find(array) == array_types_.end());
+ array_types_[array] = type;
}
-
class GlobalObjectsEnumerator : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) override {
@@ -2262,9 +2236,9 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
intptr_t elements = info->GetElementCount();
intptr_t size = info->GetSizeInBytes();
const char* name = elements != -1
- ? names_->GetFormatted(
- "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : names_->GetCopy(info->GetLabel());
+ ? names_->GetFormatted("%s / %" V8PRIdPTR " entries",
+ info->GetLabel(), elements)
+ : names_->GetCopy(info->GetLabel());
return snapshot_->AddEntry(
entries_type_,
name,
@@ -2292,8 +2266,7 @@ NativeObjectsExplorer::NativeObjectsExplorer(
NativeObjectsExplorer::~NativeObjectsExplorer() {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
+ for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
p = objects_by_info_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
@@ -2302,8 +2275,7 @@ NativeObjectsExplorer::~NativeObjectsExplorer() {
reinterpret_cast<List<HeapObject*>* >(p->value);
delete objects;
}
- for (HashMap::Entry* p = native_groups_.Start();
- p != NULL;
+ for (base::HashMap::Entry* p = native_groups_.Start(); p != NULL;
p = native_groups_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
@@ -2375,7 +2347,8 @@ void NativeObjectsExplorer::FillImplicitReferences() {
List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
v8::RetainedObjectInfo* info) {
- HashMap::Entry* entry = objects_by_info_.LookupOrInsert(info, InfoHash(info));
+ base::HashMap::Entry* entry =
+ objects_by_info_.LookupOrInsert(info, InfoHash(info));
if (entry->value != NULL) {
info->Dispose();
} else {
@@ -2391,8 +2364,7 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
FillRetainedObjects();
FillImplicitReferences();
if (EstimateObjectsCount() > 0) {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
+ for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
p = objects_by_info_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
@@ -2444,7 +2416,7 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
label_copy,
static_cast<int>(strlen(label_copy)),
isolate_->heap()->HashSeed());
- HashMap::Entry* entry =
+ base::HashMap::Entry* entry =
native_groups_.LookupOrInsert(const_cast<char*>(label_copy), hash);
if (entry->value == NULL) {
entry->value = new NativeGroupRetainedObjectInfo(label);
@@ -2490,8 +2462,7 @@ void NativeObjectsExplorer::SetWrapperNativeReferences(
void NativeObjectsExplorer::SetRootNativeRootsReference() {
- for (HashMap::Entry* entry = native_groups_.Start();
- entry;
+ for (base::HashMap::Entry* entry = native_groups_.Start(); entry;
entry = native_groups_.Next(entry)) {
NativeGroupRetainedObjectInfo* group_info =
static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
@@ -2759,7 +2730,7 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
- HashMap::Entry* cache_entry =
+ base::HashMap::Entry* cache_entry =
strings_.LookupOrInsert(const_cast<char*>(s), StringHash(s));
if (cache_entry->value == NULL) {
cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
@@ -3144,8 +3115,7 @@ void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
void HeapSnapshotJSONSerializer::SerializeStrings() {
ScopedVector<const unsigned char*> sorted_strings(
strings_.occupancy() + 1);
- for (HashMap::Entry* entry = strings_.Start();
- entry != NULL;
+ for (base::HashMap::Entry* entry = strings_.Start(); entry != NULL;
entry = strings_.Next(entry)) {
int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value));
sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key);
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 857f2401bf..b870fbe324 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -5,6 +5,8 @@
#ifndef V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
#define V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
+#include <unordered_map>
+
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/objects.h"
@@ -259,7 +261,7 @@ class HeapObjectsMap {
};
SnapshotObjectId next_id_;
- HashMap entries_map_;
+ base::HashMap entries_map_;
List<EntryInfo> entries_;
List<TimeInterval> time_intervals_;
Heap* heap_;
@@ -297,7 +299,7 @@ class HeapEntriesMap {
v8::internal::kZeroHashSeed);
}
- HashMap entries_;
+ base::HashMap entries_;
friend class HeapObjectsSet;
@@ -316,7 +318,7 @@ class HeapObjectsSet {
bool is_empty() const { return entries_.occupancy() == 0; }
private:
- HashMap entries_;
+ base::HashMap entries_;
DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
};
@@ -382,10 +384,10 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractScriptReferences(int entry, Script* script);
void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
- void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
void ExtractCodeReferences(int entry, Code* code);
void ExtractBoxReferences(int entry, Box* box);
void ExtractCellReferences(int entry, Cell* cell);
+ void ExtractWeakCellReferences(int entry, WeakCell* weak_cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
@@ -397,6 +399,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractInternalReferences(JSObject* js_obj, int entry);
bool IsEssentialObject(Object* object);
+ bool IsEssentialHiddenReference(Object* parent, int field_offset);
+
void SetContextReference(HeapObject* parent_obj,
int parent,
String* reference_name,
@@ -420,10 +424,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
int index,
Object* child,
int field_offset = -1);
- void SetHiddenReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
+ void SetHiddenReference(HeapObject* parent_obj, int parent, int index,
+ Object* child, int field_offset);
void SetWeakReference(HeapObject* parent_obj,
int parent,
const char* reference_name,
@@ -453,7 +455,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
const char* GetStrongGcSubrootName(Object* object);
void TagObject(Object* obj, const char* tag);
- void MarkAsWeakContainer(Object* object);
+ void TagFixedArraySubType(const FixedArray* array,
+ FixedArraySubInstanceType type);
HeapEntry* GetEntry(Object* obj);
@@ -466,7 +469,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
HeapObjectsSet user_roots_;
- HeapObjectsSet weak_containers_;
+ std::unordered_map<const FixedArray*, FixedArraySubInstanceType> array_types_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
std::vector<bool> marks_;
@@ -522,8 +525,8 @@ class NativeObjectsExplorer {
bool embedder_queried_;
HeapObjectsSet in_groups_;
// RetainedObjectInfo* -> List<HeapObject*>*
- HashMap objects_by_info_;
- HashMap native_groups_;
+ base::HashMap objects_by_info_;
+ base::HashMap native_groups_;
HeapEntriesAllocator* synthetic_entries_allocator_;
HeapEntriesAllocator* native_entries_allocator_;
// Used during references extraction.
@@ -610,7 +613,7 @@ class HeapSnapshotJSONSerializer {
static const int kNodeFieldsCount;
HeapSnapshot* snapshot_;
- HashMap strings_;
+ base::HashMap strings_;
int next_node_id_;
int next_string_id_;
OutputStreamWriter* writer_;
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 85edce2663..c50964d990 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
+CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix, const char* resource_name,
int line_number, int column_number,
JITLineInfoTable* line_info, Address instruction_start)
@@ -26,10 +26,10 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
bailout_reason_(kEmptyBailoutReason),
deopt_reason_(kNoDeoptReason),
deopt_position_(SourcePosition::Unknown()),
+ deopt_id_(kNoDeoptimizationId),
line_info_(line_info),
instruction_start_(instruction_start) {}
-
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
: tree_(tree),
entry_(entry),
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index abcd9e5d88..583ef0f4e3 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -4,13 +4,12 @@
#include "src/profiler/profile-generator.h"
-#include "src/ast/scopeinfo.h"
+#include "src/base/adapters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
-#include "src/profiler/sampler.h"
-#include "src/splay-tree-inl.h"
#include "src/unicode.h"
namespace v8 {
@@ -47,6 +46,41 @@ const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";
+const char* const CodeEntry::kProgramEntryName = "(program)";
+const char* const CodeEntry::kIdleEntryName = "(idle)";
+const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
+const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
+ CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
+ CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
+ CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry,
+ CodeEntry::UnresolvedEntryCreateTrait>::type
+ CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
+ return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kProgramEntryName);
+}
+
+CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
+ return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kIdleEntryName);
+}
+
+CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
+ return new CodeEntry(Logger::BUILTIN_TAG,
+ CodeEntry::kGarbageCollectorEntryName);
+}
+
+CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
+ return new CodeEntry(Logger::FUNCTION_TAG,
+ CodeEntry::kUnresolvedFunctionName);
+}
CodeEntry::~CodeEntry() {
delete line_info_;
@@ -93,7 +127,7 @@ bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
void CodeEntry::SetBuiltinId(Builtins::Name id) {
- bit_field_ = TagField::update(bit_field_, Logger::BUILTIN_TAG);
+ bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
bit_field_ = BuiltinIdField::update(bit_field_, id);
}
@@ -118,6 +152,19 @@ const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
return it != inline_locations_.end() ? &it->second : NULL;
}
+void CodeEntry::AddDeoptInlinedFrames(
+ int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
+ // It's better to use std::move to place the vector into the map,
+ // but it's not supported by the current stdlibc++ on MacOS.
+ deopt_inlined_frames_
+ .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
+ .first->second.swap(inlined_frames);
+}
+
+bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
+ return deopt_inlined_frames_.find(deopt_id) != deopt_inlined_frames_.end();
+}
+
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
if (!shared->script()->IsScript()) return;
Script* script = Script::cast(shared->script());
@@ -131,30 +178,20 @@ CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
CpuProfileDeoptInfo info;
info.deopt_reason = deopt_reason_;
- if (inlined_function_infos_.empty()) {
+ DCHECK_NE(kNoDeoptimizationId, deopt_id_);
+ if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
info.stack.push_back(CpuProfileDeoptFrame(
{script_id_, position_ + deopt_position_.position()}));
- return info;
- }
- // Copy the only branch from the inlining tree where the deopt happened.
- SourcePosition position = deopt_position_;
- int inlining_id = InlinedFunctionInfo::kNoParentId;
- for (size_t i = 0; i < inlined_function_infos_.size(); ++i) {
- InlinedFunctionInfo& current_info = inlined_function_infos_.at(i);
- if (std::binary_search(current_info.deopt_pc_offsets.begin(),
- current_info.deopt_pc_offsets.end(), pc_offset_)) {
- inlining_id = static_cast<int>(i);
- break;
+ } else {
+ size_t deopt_position = deopt_position_.raw();
+ // Copy stack of inlined frames where the deopt happened.
+ std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
+ for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
+ info.stack.push_back(CpuProfileDeoptFrame(
+ {inlined_frame.script_id, deopt_position + inlined_frame.position}));
+ deopt_position = 0; // Done with innermost frame.
}
}
- while (inlining_id != InlinedFunctionInfo::kNoParentId) {
- InlinedFunctionInfo& inlined_info = inlined_function_infos_.at(inlining_id);
- info.stack.push_back(
- CpuProfileDeoptFrame({inlined_info.script_id,
- inlined_info.start_position + position.raw()}));
- position = inlined_info.inline_position;
- inlining_id = inlined_info.parent_id;
- }
return info;
}
@@ -166,14 +203,15 @@ void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
- HashMap::Entry* map_entry = children_.Lookup(entry, CodeEntryHash(entry));
+ base::HashMap::Entry* map_entry =
+ children_.Lookup(entry, CodeEntryHash(entry));
return map_entry != NULL ?
reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}
ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
- HashMap::Entry* map_entry =
+ base::HashMap::Entry* map_entry =
children_.LookupOrInsert(entry, CodeEntryHash(entry));
ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
if (node == NULL) {
@@ -190,7 +228,7 @@ void ProfileNode::IncrementLineTicks(int src_line) {
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
// Increment a hit counter of a certain source line.
// Add a new source line if not found.
- HashMap::Entry* e =
+ base::HashMap::Entry* e =
line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
DCHECK(e);
e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
@@ -208,7 +246,7 @@ bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
v8::CpuProfileNode::LineTick* entry = entries;
- for (HashMap::Entry* p = line_ticks_.Start(); p != NULL;
+ for (base::HashMap::Entry *p = line_ticks_.Start(); p != NULL;
p = line_ticks_.Next(p), entry++) {
entry->line =
static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
@@ -229,12 +267,13 @@ void ProfileNode::Print(int indent) {
base::OS::Print("\n");
for (size_t i = 0; i < deopt_infos_.size(); ++i) {
CpuProfileDeoptInfo& info = deopt_infos_[i];
- base::OS::Print(
- "%*s;;; deopted at script_id: %d position: %d with reason '%s'.\n",
- indent + 10, "", info.stack[0].script_id, info.stack[0].position,
- info.deopt_reason);
+ base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
+ " with reason '%s'.\n",
+ indent + 10, "", info.stack[0].script_id,
+ info.stack[0].position, info.deopt_reason);
for (size_t index = 1; index < info.stack.size(); ++index) {
- base::OS::Print("%*s;;; Inline point: script_id %d position: %d.\n",
+ base::OS::Print("%*s;;; Inline point: script_id %d position: %" PRIuS
+ ".\n",
indent + 10, "", info.stack[index].script_id,
info.stack[index].position);
}
@@ -245,8 +284,7 @@ void ProfileNode::Print(int indent) {
base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
bailout_reason);
}
- for (HashMap::Entry* p = children_.Start();
- p != NULL;
+ for (base::HashMap::Entry* p = children_.Start(); p != NULL;
p = children_.Next(p)) {
reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
}
@@ -264,16 +302,14 @@ class DeleteNodesCallback {
void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
-
ProfileTree::ProfileTree(Isolate* isolate)
- : root_entry_(Logger::FUNCTION_TAG, "(root)"),
+ : root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
next_node_id_(1),
root_(new ProfileNode(this, &root_entry_)),
isolate_(isolate),
next_function_id_(1),
function_ids_(ProfileNode::CodeEntriesMatch) {}
-
ProfileTree::~ProfileTree() {
DeleteNodesCallback cb;
TraverseDepthFirst(&cb);
@@ -282,7 +318,7 @@ ProfileTree::~ProfileTree() {
unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
CodeEntry* code_entry = node->entry();
- HashMap::Entry* entry =
+ base::HashMap::Entry* entry =
function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
if (!entry->value) {
entry->value = reinterpret_cast<void*>(next_function_id_++);
@@ -361,12 +397,13 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
}
-
-CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
+CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
+ bool record_samples)
: title_(title),
record_samples_(record_samples),
start_time_(base::TimeTicks::HighResolutionNow()),
- top_down_(isolate) {}
+ top_down_(profiler->isolate()),
+ profiler_(profiler) {}
void CpuProfile::AddPath(base::TimeTicks timestamp,
const std::vector<CodeEntry*>& path, int src_line,
@@ -379,92 +416,60 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
}
}
-
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
end_time_ = base::TimeTicks::HighResolutionNow();
}
-
void CpuProfile::Print() {
base::OS::Print("[Top down]:\n");
top_down_.Print();
}
-
-CodeMap::~CodeMap() {}
-
-
-const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
-
-
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
DeleteAllCoveredCode(addr, addr + size);
- CodeTree::Locator locator;
- tree_.Insert(addr, &locator);
- locator.set_value(CodeEntryInfo(entry, size));
+ code_map_.insert({addr, CodeEntryInfo(entry, size)});
}
-
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
- List<Address> to_delete;
- Address addr = end - 1;
- while (addr >= start) {
- CodeTree::Locator locator;
- if (!tree_.FindGreatestLessThan(addr, &locator)) break;
- Address start2 = locator.key(), end2 = start2 + locator.value().size;
- if (start2 < end && start < end2) to_delete.Add(start2);
- addr = start2 - 1;
+ auto left = code_map_.upper_bound(start);
+ if (left != code_map_.begin()) {
+ --left;
+ if (left->first + left->second.size <= start) ++left;
}
- for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
+ auto right = left;
+ while (right != code_map_.end() && right->first < end) ++right;
+ code_map_.erase(left, right);
}
-
CodeEntry* CodeMap::FindEntry(Address addr) {
- CodeTree::Locator locator;
- if (tree_.FindGreatestLessThan(addr, &locator)) {
- // locator.key() <= addr. Need to check that addr is within entry.
- const CodeEntryInfo& entry = locator.value();
- if (addr < (locator.key() + entry.size)) {
- return entry.entry;
- }
- }
- return NULL;
+ auto it = code_map_.upper_bound(addr);
+ if (it == code_map_.begin()) return nullptr;
+ --it;
+ Address end_address = it->first + it->second.size;
+ return addr < end_address ? it->second.entry : nullptr;
}
-
void CodeMap::MoveCode(Address from, Address to) {
if (from == to) return;
- CodeTree::Locator locator;
- if (!tree_.Find(from, &locator)) return;
- CodeEntryInfo entry = locator.value();
- tree_.Remove(from);
- AddCode(to, entry.entry, entry.size);
+ auto it = code_map_.find(from);
+ if (it == code_map_.end()) return;
+ CodeEntryInfo info = it->second;
+ code_map_.erase(it);
+ AddCode(to, info.entry, info.size);
}
-
-void CodeMap::CodeTreePrinter::Call(
- const Address& key, const CodeMap::CodeEntryInfo& value) {
- base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
-}
-
-
void CodeMap::Print() {
- CodeTreePrinter printer;
- tree_.ForEach(&printer);
+ for (auto it = code_map_.begin(); it != code_map_.end(); ++it) {
+ base::OS::Print("%p %5d %s\n", static_cast<void*>(it->first),
+ it->second.size, it->second.entry->name());
+ }
}
-
-CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
- : function_and_resource_names_(heap),
- isolate_(heap->isolate()),
+CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
+ : resource_names_(isolate->heap()),
+ profiler_(nullptr),
current_profiles_semaphore_(1) {}
-
-static void DeleteCodeEntry(CodeEntry** entry_ptr) {
- delete *entry_ptr;
-}
-
-
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
delete *profile_ptr;
}
@@ -473,7 +478,6 @@ static void DeleteCpuProfile(CpuProfile** profile_ptr) {
CpuProfilesCollection::~CpuProfilesCollection() {
finished_profiles_.Iterate(DeleteCpuProfile);
current_profiles_.Iterate(DeleteCpuProfile);
- code_entries_.Iterate(DeleteCodeEntry);
}
@@ -492,7 +496,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
return true;
}
}
- current_profiles_.Add(new CpuProfile(isolate_, title, record_samples));
+ current_profiles_.Add(new CpuProfile(profiler_, title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -550,43 +554,8 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
current_profiles_semaphore_.Signal();
}
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(
- Logger::LogEventsAndTags tag, const char* name, const char* name_prefix,
- const char* resource_name, int line_number, int column_number,
- JITLineInfoTable* line_info, Address instruction_start) {
- CodeEntry* code_entry =
- new CodeEntry(tag, name, name_prefix, resource_name, line_number,
- column_number, line_info, instruction_start);
- code_entries_.Add(code_entry);
- return code_entry;
-}
-
-
-const char* const ProfileGenerator::kProgramEntryName =
- "(program)";
-const char* const ProfileGenerator::kIdleEntryName =
- "(idle)";
-const char* const ProfileGenerator::kGarbageCollectorEntryName =
- "(garbage collector)";
-const char* const ProfileGenerator::kUnresolvedFunctionName =
- "(unresolved function)";
-
-
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
- : profiles_(profiles),
- program_entry_(
- profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
- idle_entry_(
- profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
- gc_entry_(
- profiles->NewCodeEntry(Logger::BUILTIN_TAG,
- kGarbageCollectorEntryName)),
- unresolved_entry_(
- profiles->NewCodeEntry(Logger::FUNCTION_TAG,
- kUnresolvedFunctionName)) {
-}
-
+ : profiles_(profiles) {}
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
std::vector<CodeEntry*> entries;
@@ -602,30 +571,29 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
bool src_line_not_found = true;
- if (sample.pc != NULL) {
- if (sample.has_external_callback && sample.state == EXTERNAL &&
- sample.top_frame_type == StackFrame::EXIT) {
+ if (sample.pc != nullptr) {
+ if (sample.has_external_callback && sample.state == EXTERNAL) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
- entries.push_back(code_map_.FindEntry(sample.external_callback_entry));
+ entries.push_back(code_map_.FindEntry(
+ reinterpret_cast<Address>(sample.external_callback_entry)));
} else {
- CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
+ CodeEntry* pc_entry =
+ code_map_.FindEntry(reinterpret_cast<Address>(sample.pc));
// If there is no pc_entry we're likely in native code.
// Find out, if top of stack was pointing inside a JS function
// meaning that we have encountered a frameless invocation.
- if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
- sample.top_frame_type == StackFrame::INTERPRETED ||
- sample.top_frame_type == StackFrame::OPTIMIZED)) {
- pc_entry = code_map_.FindEntry(sample.tos);
+ if (!pc_entry && !sample.has_external_callback) {
+ pc_entry = code_map_.FindEntry(reinterpret_cast<Address>(sample.tos));
}
// If pc is in the function code before it set up stack frame or after the
// frame was destroyed SafeStackFrameIterator incorrectly thinks that
// ebp contains return address of the current function and skips caller's
// frame. Check for this case and just skip such samples.
if (pc_entry) {
- int pc_offset =
- static_cast<int>(sample.pc - pc_entry->instruction_start());
+ int pc_offset = static_cast<int>(reinterpret_cast<Address>(sample.pc) -
+ pc_entry->instruction_start());
src_line = pc_entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
src_line = pc_entry->line_number();
@@ -641,22 +609,21 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// In the latter case we know the caller for sure but in the
// former case we don't so we simply replace the frame with
// 'unresolved' entry.
- if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
- entries.push_back(unresolved_entry_);
+ if (!sample.has_external_callback) {
+ entries.push_back(CodeEntry::unresolved_entry());
}
}
}
}
- for (const Address *stack_pos = sample.stack,
- *stack_end = stack_pos + sample.frames_count;
- stack_pos != stack_end; ++stack_pos) {
- CodeEntry* entry = code_map_.FindEntry(*stack_pos);
+ for (unsigned i = 0; i < sample.frames_count; ++i) {
+ Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
+ CodeEntry* entry = code_map_.FindEntry(stack_pos);
if (entry) {
// Find out if the entry has an inlining stack associated.
int pc_offset =
- static_cast<int>(*stack_pos - entry->instruction_start());
+ static_cast<int>(stack_pos - entry->instruction_start());
const std::vector<CodeEntry*>* inline_stack =
entry->GetInlineStack(pc_offset);
if (inline_stack) {
@@ -699,7 +666,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
switch (tag) {
case GC:
- return gc_entry_;
+ return CodeEntry::gc_entry();
case JS:
case COMPILER:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
@@ -707,9 +674,9 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
// one bucket.
case OTHER:
case EXTERNAL:
- return program_entry_;
+ return CodeEntry::program_entry();
case IDLE:
- return idle_entry_;
+ return CodeEntry::idle_entry();
default: return NULL;
}
}
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 194b490929..b785eaaf5f 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -6,15 +6,16 @@
#define V8_PROFILER_PROFILE_GENERATOR_H_
#include <map>
-#include "include/v8-profiler.h"
#include "src/allocation.h"
+#include "src/base/hashmap.h"
#include "src/compiler.h"
-#include "src/hashmap.h"
#include "src/profiler/strings-storage.h"
namespace v8 {
namespace internal {
+struct TickSample;
+
// Provides a mapping from the offsets within generated code to
// the source line.
class JITLineInfoTable : public Malloced {
@@ -38,7 +39,7 @@ class JITLineInfoTable : public Malloced {
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
- inline CodeEntry(Logger::LogEventsAndTags tag, const char* name,
+ inline CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix = CodeEntry::kEmptyNamePrefix,
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
@@ -47,6 +48,13 @@ class CodeEntry {
Address instruction_start = NULL);
~CodeEntry();
+ // Container describing inlined frames at eager deopt points. Is eventually
+ // being translated into v8::CpuProfileDeoptFrame by the profiler.
+ struct DeoptInlinedFrame {
+ int position;
+ int script_id;
+ };
+
const char* name_prefix() const { return name_prefix_; }
bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
const char* name() const { return name_; }
@@ -64,31 +72,22 @@ class CodeEntry {
const char* bailout_reason() const { return bailout_reason_; }
void set_deopt_info(const char* deopt_reason, SourcePosition position,
- size_t pc_offset) {
- DCHECK(deopt_position_.IsUnknown());
+ int deopt_id) {
+ DCHECK(!has_deopt_info());
deopt_reason_ = deopt_reason;
deopt_position_ = position;
- pc_offset_ = pc_offset;
+ deopt_id_ = deopt_id;
}
CpuProfileDeoptInfo GetDeoptInfo();
- const char* deopt_reason() const { return deopt_reason_; }
- SourcePosition deopt_position() const { return deopt_position_; }
- bool has_deopt_info() const { return !deopt_position_.IsUnknown(); }
+ bool has_deopt_info() const { return deopt_id_ != kNoDeoptimizationId; }
void clear_deopt_info() {
deopt_reason_ = kNoDeoptReason;
deopt_position_ = SourcePosition::Unknown();
+ deopt_id_ = kNoDeoptimizationId;
}
void FillFunctionInfo(SharedFunctionInfo* shared);
- void set_inlined_function_infos(
- const std::vector<InlinedFunctionInfo>& infos) {
- inlined_function_infos_ = infos;
- }
- const std::vector<InlinedFunctionInfo> inlined_function_infos() {
- return inlined_function_infos_;
- }
-
void SetBuiltinId(Builtins::Name id);
Builtins::Name builtin_id() const {
return BuiltinIdField::decode(bit_field_);
@@ -102,17 +101,60 @@ class CodeEntry {
void AddInlineStack(int pc_offset, std::vector<CodeEntry*>& inline_stack);
const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
+ void AddDeoptInlinedFrames(int deopt_id, std::vector<DeoptInlinedFrame>&);
+ bool HasDeoptInlinedFramesFor(int deopt_id) const;
+
Address instruction_start() const { return instruction_start_; }
- Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
+ CodeEventListener::LogEventsAndTags tag() const {
+ return TagField::decode(bit_field_);
+ }
static const char* const kEmptyNamePrefix;
static const char* const kEmptyResourceName;
static const char* const kEmptyBailoutReason;
static const char* const kNoDeoptReason;
+ static const char* const kProgramEntryName;
+ static const char* const kIdleEntryName;
+ static const char* const kGarbageCollectorEntryName;
+ // Used to represent frames for which we have no reliable way to
+ // detect function.
+ static const char* const kUnresolvedFunctionName;
+
+ V8_INLINE static CodeEntry* program_entry() {
+ return kProgramEntry.Pointer();
+ }
+ V8_INLINE static CodeEntry* idle_entry() { return kIdleEntry.Pointer(); }
+ V8_INLINE static CodeEntry* gc_entry() { return kGCEntry.Pointer(); }
+ V8_INLINE static CodeEntry* unresolved_entry() {
+ return kUnresolvedEntry.Pointer();
+ }
+
private:
+ struct ProgramEntryCreateTrait {
+ static CodeEntry* Create();
+ };
+ struct IdleEntryCreateTrait {
+ static CodeEntry* Create();
+ };
+ struct GCEntryCreateTrait {
+ static CodeEntry* Create();
+ };
+ struct UnresolvedEntryCreateTrait {
+ static CodeEntry* Create();
+ };
+
+ static base::LazyDynamicInstance<CodeEntry, ProgramEntryCreateTrait>::type
+ kProgramEntry;
+ static base::LazyDynamicInstance<CodeEntry, IdleEntryCreateTrait>::type
+ kIdleEntry;
+ static base::LazyDynamicInstance<CodeEntry, GCEntryCreateTrait>::type
+ kGCEntry;
+ static base::LazyDynamicInstance<CodeEntry, UnresolvedEntryCreateTrait>::type
+ kUnresolvedEntry;
+
class TagField : public BitField<Logger::LogEventsAndTags, 0, 8> {};
- class BuiltinIdField : public BitField<Builtins::Name, 8, 8> {};
+ class BuiltinIdField : public BitField<Builtins::Name, 8, 24> {};
uint32_t bit_field_;
const char* name_prefix_;
@@ -125,13 +167,12 @@ class CodeEntry {
const char* bailout_reason_;
const char* deopt_reason_;
SourcePosition deopt_position_;
- size_t pc_offset_;
+ int deopt_id_;
JITLineInfoTable* line_info_;
Address instruction_start_;
// Should be an unordered_map, but it doesn't currently work on Win & MacOS.
std::map<int, std::vector<CodeEntry*>> inline_locations_;
-
- std::vector<InlinedFunctionInfo> inlined_function_infos_;
+ std::map<int, std::vector<DeoptInlinedFrame>> deopt_inlined_frames_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@@ -179,10 +220,10 @@ class ProfileNode {
CodeEntry* entry_;
unsigned self_ticks_;
// Mapping from CodeEntry* to ProfileNode*
- HashMap children_;
+ base::HashMap children_;
List<ProfileNode*> children_list_;
unsigned id_;
- HashMap line_ticks_;
+ base::HashMap line_ticks_;
std::vector<CpuProfileDeoptInfo> deopt_infos_;
@@ -219,7 +260,7 @@ class ProfileTree {
Isolate* isolate_;
unsigned next_function_id_;
- HashMap function_ids_;
+ base::HashMap function_ids_;
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
@@ -227,7 +268,7 @@ class ProfileTree {
class CpuProfile {
public:
- CpuProfile(Isolate* isolate, const char* title, bool record_samples);
+ CpuProfile(CpuProfiler* profiler, const char* title, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
@@ -245,6 +286,7 @@ class CpuProfile {
base::TimeTicks start_time() const { return start_time_; }
base::TimeTicks end_time() const { return end_time_; }
+ CpuProfiler* cpu_profiler() const { return profiler_; }
void UpdateTicksScale();
@@ -258,20 +300,18 @@ class CpuProfile {
List<ProfileNode*> samples_;
List<base::TimeTicks> timestamps_;
ProfileTree top_down_;
+ CpuProfiler* const profiler_;
DISALLOW_COPY_AND_ASSIGN(CpuProfile);
};
-
class CodeMap {
public:
CodeMap() {}
- ~CodeMap();
+
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
CodeEntry* FindEntry(Address addr);
- int GetSharedId(Address addr);
-
void Print();
private:
@@ -282,61 +322,26 @@ class CodeMap {
unsigned size;
};
- struct CodeTreeConfig {
- typedef Address Key;
- typedef CodeEntryInfo Value;
- static const Key kNoKey;
- static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
- static int Compare(const Key& a, const Key& b) {
- return a < b ? -1 : (a > b ? 1 : 0);
- }
- };
- typedef SplayTree<CodeTreeConfig> CodeTree;
-
- class CodeTreePrinter {
- public:
- void Call(const Address& key, const CodeEntryInfo& value);
- };
-
void DeleteAllCoveredCode(Address start, Address end);
- CodeTree tree_;
+ std::map<Address, CodeEntryInfo> code_map_;
DISALLOW_COPY_AND_ASSIGN(CodeMap);
};
-
class CpuProfilesCollection {
public:
- explicit CpuProfilesCollection(Heap* heap);
+ explicit CpuProfilesCollection(Isolate* isolate);
~CpuProfilesCollection();
+ void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
bool StartProfiling(const char* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
List<CpuProfile*>* profiles() { return &finished_profiles_; }
- const char* GetName(Name* name) {
- return function_and_resource_names_.GetName(name);
- }
- const char* GetName(int args_count) {
- return function_and_resource_names_.GetName(args_count);
- }
- const char* GetFunctionName(Name* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- const char* GetFunctionName(const char* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
+ const char* GetName(Name* name) { return resource_names_.GetName(name); }
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
- CodeEntry* NewCodeEntry(
- Logger::LogEventsAndTags tag, const char* name,
- const char* name_prefix = CodeEntry::kEmptyNamePrefix,
- const char* resource_name = CodeEntry::kEmptyResourceName,
- int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
- int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
- JITLineInfoTable* line_info = NULL, Address instruction_start = NULL);
-
// Called from profile generator thread.
void AddPathToCurrentProfiles(base::TimeTicks timestamp,
const std::vector<CodeEntry*>& path,
@@ -346,11 +351,9 @@ class CpuProfilesCollection {
static const int kMaxSimultaneousProfiles = 100;
private:
- StringsStorage function_and_resource_names_;
- List<CodeEntry*> code_entries_;
+ StringsStorage resource_names_;
List<CpuProfile*> finished_profiles_;
-
- Isolate* isolate_;
+ CpuProfiler* profiler_;
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
@@ -368,22 +371,11 @@ class ProfileGenerator {
CodeMap* code_map() { return &code_map_; }
- static const char* const kProgramEntryName;
- static const char* const kIdleEntryName;
- static const char* const kGarbageCollectorEntryName;
- // Used to represent frames for which we have no reliable way to
- // detect function.
- static const char* const kUnresolvedFunctionName;
-
private:
CodeEntry* EntryForVMState(StateTag tag);
CpuProfilesCollection* profiles_;
CodeMap code_map_;
- CodeEntry* program_entry_;
- CodeEntry* idle_entry_;
- CodeEntry* gc_entry_;
- CodeEntry* unresolved_entry_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
new file mode 100644
index 0000000000..7ce874e6c1
--- /dev/null
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -0,0 +1,335 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/profiler-listener.h"
+
+#include "src/deoptimizer.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/profiler/profile-generator-inl.h"
+#include "src/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+
+ProfilerListener::ProfilerListener(Isolate* isolate)
+ : function_and_resource_names_(isolate->heap()) {}
+
+ProfilerListener::~ProfilerListener() {
+ for (auto code_entry : code_entries_) {
+ delete code_entry;
+ }
+}
+
+void ProfilerListener::CallbackEvent(Name* name, Address entry_point) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry = NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name));
+ rec->size = 1;
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, const char* name) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
+ CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, Name* name) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
+ CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code,
+ SharedFunctionInfo* shared,
+ Name* script_name) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
+ GetName(InferScriptName(script_name, shared)),
+ CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+ NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
+ rec->entry->FillFunctionInfo(shared);
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* abstract_code,
+ SharedFunctionInfo* shared,
+ Name* script_name, int line,
+ int column) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = abstract_code->address();
+ Script* script = Script::cast(shared->script());
+ JITLineInfoTable* line_table = NULL;
+ if (script) {
+ line_table = new JITLineInfoTable();
+ int offset = abstract_code->IsCode() ? Code::kHeaderSize
+ : BytecodeArray::kHeaderSize;
+ int start_position = shared->start_position();
+ int end_position = shared->end_position();
+ for (SourcePositionTableIterator it(abstract_code->source_position_table());
+ !it.done(); it.Advance()) {
+ int position = it.source_position();
+ // TODO(alph): in case of inlining the position may correspond to an
+ // inlined function source code. Do not collect positions that fall
+ // beyond the function source code. There's however a chance the
+ // inlined function has similar positions but in another script. So
+ // the proper fix is to store script_id in some form along with the
+ // inlined function positions.
+ if (position < start_position || position >= end_position) continue;
+ int line_number = script->GetLineNumber(position) + 1;
+ int pc_offset = it.code_offset() + offset;
+ line_table->SetPosition(pc_offset, line_number);
+ }
+ }
+ rec->entry = NewCodeEntry(
+ tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
+ GetName(InferScriptName(script_name, shared)), line, column, line_table,
+ abstract_code->instruction_start());
+ RecordInliningInfo(rec->entry, abstract_code);
+ RecordDeoptInlinedFrames(rec->entry, abstract_code);
+ rec->entry->FillFunctionInfo(shared);
+ rec->size = abstract_code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, int args_count) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ tag, GetName(args_count), "args_count: ", CodeEntry::kEmptyResourceName,
+ CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+ NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeMoveEvent(AbstractCode* from, Address to) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
+ CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
+ rec->from = from->address();
+ rec->to = to;
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
+ CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
+ rec->start = code->address();
+ rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeDeoptEvent(Code* code, Address pc,
+ int fp_to_sp_delta) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
+ CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
+ rec->start = code->address();
+ rec->deopt_reason = DeoptimizeReasonToString(info.deopt_reason);
+ rec->position = info.position;
+ rec->deopt_id = info.deopt_id;
+ rec->pc = reinterpret_cast<void*>(pc);
+ rec->fp_to_sp_delta = fp_to_sp_delta;
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::GetterCallbackEvent(Name* name, Address entry_point) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry =
+ NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name), "get ");
+ rec->size = 1;
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
+ String* source) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ CodeEventListener::REG_EXP_TAG, GetName(source), "RegExp: ",
+ CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry =
+ NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name), "set ");
+ rec->size = 1;
+ DispatchCodeEvent(evt_rec);
+}
+
+Name* ProfilerListener::InferScriptName(Name* name, SharedFunctionInfo* info) {
+ if (name->IsString() && String::cast(name)->length()) return name;
+ if (!info->script()->IsScript()) return name;
+ Object* source_url = Script::cast(info->script())->source_url();
+ return source_url->IsName() ? Name::cast(source_url) : name;
+}
+
+void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
+ AbstractCode* abstract_code) {
+ if (!abstract_code->IsCode()) return;
+ Code* code = abstract_code->GetCode();
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
+ DeoptimizationInputData* deopt_input_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int deopt_count = deopt_input_data->DeoptCount();
+ for (int i = 0; i < deopt_count; i++) {
+ int pc_offset = deopt_input_data->Pc(i)->value();
+ if (pc_offset == -1) continue;
+ int translation_index = deopt_input_data->TranslationIndex(i)->value();
+ TranslationIterator it(deopt_input_data->TranslationByteArray(),
+ translation_index);
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, opcode);
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ int depth = 0;
+ std::vector<CodeEntry*> inline_stack;
+ while (it.HasNext() &&
+ Translation::BEGIN !=
+ (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+ if (opcode != Translation::JS_FRAME &&
+ opcode != Translation::INTERPRETED_FRAME) {
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ continue;
+ }
+ it.Next(); // Skip ast_id
+ int shared_info_id = it.Next();
+ it.Next(); // Skip height
+ SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
+ deopt_input_data->LiteralArray()->get(shared_info_id));
+ if (!depth++) continue; // Skip the current function itself.
+ CodeEntry* inline_entry = new CodeEntry(
+ entry->tag(), GetFunctionName(shared_info->DebugName()),
+ CodeEntry::kEmptyNamePrefix, entry->resource_name(),
+ CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ inline_entry->FillFunctionInfo(shared_info);
+ inline_stack.push_back(inline_entry);
+ }
+ if (!inline_stack.empty()) {
+ entry->AddInlineStack(pc_offset, inline_stack);
+ DCHECK(inline_stack.empty());
+ }
+ }
+}
+
+void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
+ AbstractCode* abstract_code) {
+ if (abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION) return;
+ Code* code = abstract_code->GetCode();
+ DeoptimizationInputData* deopt_input_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int const mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
+ for (RelocIterator rit(code, mask); !rit.done(); rit.next()) {
+ RelocInfo* reloc_info = rit.rinfo();
+ DCHECK(RelocInfo::IsDeoptId(reloc_info->rmode()));
+ int deopt_id = static_cast<int>(reloc_info->data());
+ int translation_index =
+ deopt_input_data->TranslationIndex(deopt_id)->value();
+ TranslationIterator it(deopt_input_data->TranslationByteArray(),
+ translation_index);
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, opcode);
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ std::vector<CodeEntry::DeoptInlinedFrame> inlined_frames;
+ while (it.HasNext() &&
+ Translation::BEGIN !=
+ (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+ if (opcode != Translation::JS_FRAME &&
+ opcode != Translation::INTERPRETED_FRAME) {
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ continue;
+ }
+ BailoutId ast_id = BailoutId(it.Next());
+ int shared_info_id = it.Next();
+ it.Next(); // Skip height
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(
+ deopt_input_data->LiteralArray()->get(shared_info_id));
+ int source_position;
+ if (opcode == Translation::INTERPRETED_FRAME) {
+ source_position =
+ Deoptimizer::ComputeSourcePositionFromBytecodeArray(shared, ast_id);
+ } else {
+ DCHECK(opcode == Translation::JS_FRAME);
+ source_position =
+ Deoptimizer::ComputeSourcePositionFromBaselineCode(shared, ast_id);
+ }
+ int script_id = v8::UnboundScript::kNoScriptId;
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
+ script_id = script->id();
+ }
+ CodeEntry::DeoptInlinedFrame frame = {source_position, script_id};
+ inlined_frames.push_back(frame);
+ }
+ if (!inlined_frames.empty() && !entry->HasDeoptInlinedFramesFor(deopt_id)) {
+ entry->AddDeoptInlinedFrames(deopt_id, inlined_frames);
+ DCHECK(inlined_frames.empty());
+ }
+ }
+}
+
+CodeEntry* ProfilerListener::NewCodeEntry(
+ CodeEventListener::LogEventsAndTags tag, const char* name,
+ const char* name_prefix, const char* resource_name, int line_number,
+ int column_number, JITLineInfoTable* line_info, Address instruction_start) {
+ CodeEntry* code_entry =
+ new CodeEntry(tag, name, name_prefix, resource_name, line_number,
+ column_number, line_info, instruction_start);
+ code_entries_.push_back(code_entry);
+ return code_entry;
+}
+
+void ProfilerListener::AddObserver(CodeEventObserver* observer) {
+ if (std::find(observers_.begin(), observers_.end(), observer) !=
+ observers_.end())
+ return;
+ observers_.push_back(observer);
+}
+
+void ProfilerListener::RemoveObserver(CodeEventObserver* observer) {
+ auto it = std::find(observers_.begin(), observers_.end(), observer);
+ if (it == observers_.end()) return;
+ observers_.erase(it);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
new file mode 100644
index 0000000000..7e24ceaa86
--- /dev/null
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -0,0 +1,97 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_PROFILER_LISTENER_H_
+#define V8_PROFILER_PROFILER_LISTENER_H_
+
+#include <vector>
+
+#include "src/code-events.h"
+#include "src/profiler/profile-generator.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeEventsContainer;
+
+class CodeEventObserver {
+ public:
+ virtual void CodeEventHandler(const CodeEventsContainer& evt_rec) = 0;
+ virtual ~CodeEventObserver() {}
+};
+
+class ProfilerListener : public CodeEventListener {
+ public:
+ explicit ProfilerListener(Isolate* isolate);
+ ~ProfilerListener() override;
+
+ void CallbackEvent(Name* name, Address entry_point) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, const char* comment) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, Name* name) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, SharedFunctionInfo* shared,
+ Name* script_name) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, SharedFunctionInfo* shared,
+ Name* script_name, int line, int column) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, int args_count) override;
+ void CodeMovingGCEvent() override {}
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override;
+ void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) override;
+ void GetterCallbackEvent(Name* name, Address entry_point) override;
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void SetterCallbackEvent(Name* name, Address entry_point) override;
+ void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+
+ CodeEntry* NewCodeEntry(
+ CodeEventListener::LogEventsAndTags tag, const char* name,
+ const char* name_prefix = CodeEntry::kEmptyNamePrefix,
+ const char* resource_name = CodeEntry::kEmptyResourceName,
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
+ JITLineInfoTable* line_info = NULL, Address instruction_start = NULL);
+
+ void AddObserver(CodeEventObserver* observer);
+ void RemoveObserver(CodeEventObserver* observer);
+ V8_INLINE bool HasObservers() { return !observers_.empty(); }
+
+ const char* GetName(Name* name) {
+ return function_and_resource_names_.GetName(name);
+ }
+ const char* GetName(int args_count) {
+ return function_and_resource_names_.GetName(args_count);
+ }
+ const char* GetFunctionName(Name* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
+ const char* GetFunctionName(const char* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
+
+ private:
+ void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
+ void RecordDeoptInlinedFrames(CodeEntry* entry, AbstractCode* abstract_code);
+ Name* InferScriptName(Name* name, SharedFunctionInfo* info);
+ V8_INLINE void DispatchCodeEvent(const CodeEventsContainer& evt_rec) {
+ for (auto observer : observers_) {
+ observer->CodeEventHandler(evt_rec);
+ }
+ }
+
+ StringsStorage function_and_resource_names_;
+ std::vector<CodeEntry*> code_entries_;
+ std::vector<CodeEventObserver*> observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProfilerListener);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_PROFILER_LISTENER_H_
diff --git a/deps/v8/src/profiler/sampler.cc b/deps/v8/src/profiler/sampler.cc
deleted file mode 100644
index a34042453c..0000000000
--- a/deps/v8/src/profiler/sampler.cc
+++ /dev/null
@@ -1,898 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/profiler/sampler.h"
-
-#if V8_OS_POSIX && !V8_OS_CYGWIN
-
-#define USE_SIGNALS
-
-#include <errno.h>
-#include <pthread.h>
-#include <signal.h>
-#include <sys/time.h>
-
-#if !V8_OS_QNX && !V8_OS_NACL && !V8_OS_AIX
-#include <sys/syscall.h> // NOLINT
-#endif
-
-#if V8_OS_MACOSX
-#include <mach/mach.h>
-// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
-// and is a typedef for struct sigcontext. There is no uc_mcontext.
-#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \
- !V8_OS_OPENBSD && !V8_OS_NACL
-#include <ucontext.h>
-#endif
-
-#include <unistd.h>
-
-// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
- !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h> // NOLINT
-#endif
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-#include "src/base/win32-headers.h"
-
-#endif
-
-#include "src/base/platform/platform.h"
-#include "src/flags.h"
-#include "src/frames-inl.h"
-#include "src/log.h"
-#include "src/profiler/cpu-profiler-inl.h"
-#include "src/simulator.h"
-#include "src/v8threads.h"
-#include "src/vm-state-inl.h"
-
-
-#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-// Not all versions of Android's C library provide ucontext_t.
-// Detect this and provide custom but compatible definitions. Note that these
-// follow the GLibc naming convention to access register values from
-// mcontext_t.
-//
-// See http://code.google.com/p/android/issues/detail?id=34784
-
-#if defined(__arm__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__aarch64__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
- uint64_t uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__mips__)
-// MIPS version of sigcontext, for Android bionic.
-typedef struct {
- uint32_t regmask;
- uint32_t status;
- uint64_t pc;
- uint64_t gregs[32];
- uint64_t fpregs[32];
- uint32_t acx;
- uint32_t fpc_csr;
- uint32_t fpc_eir;
- uint32_t used_math;
- uint32_t dsp;
- uint64_t mdhi;
- uint64_t mdlo;
- uint32_t hi1;
- uint32_t lo1;
- uint32_t hi2;
- uint32_t lo2;
- uint32_t hi3;
- uint32_t lo3;
-} mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__i386__)
-// x86 version for Android.
-typedef struct {
- uint32_t gregs[19];
- void* fpregs;
- uint32_t oldmask;
- uint32_t cr2;
-} mcontext_t;
-
-typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
-
-#elif defined(__x86_64__)
-// x64 version for Android.
-typedef struct {
- uint64_t gregs[23];
- void* fpregs;
- uint64_t __reserved1[8];
-} mcontext_t;
-
-typedef struct ucontext {
- uint64_t uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
-#endif
-
-#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-class PlatformDataCommon : public Malloced {
- public:
- PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {}
- ThreadId profiled_thread_id() { return profiled_thread_id_; }
-
- protected:
- ~PlatformDataCommon() {}
-
- private:
- ThreadId profiled_thread_id_;
-};
-
-
-bool IsSamePage(byte* ptr1, byte* ptr2) {
- const uint32_t kPageSize = 4096;
- uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
- return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
- (reinterpret_cast<uintptr_t>(ptr2) & mask);
-}
-
-
-// Check if the code at specified address could potentially be a
-// frame setup code.
-bool IsNoFrameRegion(Address address) {
- struct Pattern {
- int bytes_count;
- byte bytes[8];
- int offsets[4];
- };
- byte* pc = reinterpret_cast<byte*>(address);
- static Pattern patterns[] = {
-#if V8_HOST_ARCH_IA32
- // push %ebp
- // mov %esp,%ebp
- {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
- // pop %ebp
- // ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
- // pop %ebp
- // ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
-#elif V8_HOST_ARCH_X64
- // pushq %rbp
- // movq %rsp,%rbp
- {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
- // popq %rbp
- // ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
- // popq %rbp
- // ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
-#endif
- {0, {}, {}}
- };
- for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
- for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
- int offset = *offset_ptr;
- if (!offset || IsSamePage(pc, pc - offset)) {
- MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
- if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
- return true;
- } else {
- // It is not safe to examine bytes on another page as it might not be
- // allocated thus causing a SEGFAULT.
- // Check the pattern part that's on the same page and
- // pessimistically assume it could be the entire pattern match.
- MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
- if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
- return true;
- }
- }
- }
- return false;
-}
-
-} // namespace
-
-#if defined(USE_SIGNALS)
-
-class Sampler::PlatformData : public PlatformDataCommon {
- public:
- PlatformData() : vm_tid_(pthread_self()) {}
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-// ----------------------------------------------------------------------------
-// Win32 profiler support. On Cygwin we use the same sampler implementation as
-// on Win32.
-
-class Sampler::PlatformData : public PlatformDataCommon {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData()
- : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-#endif
-
-
-#if defined(USE_SIMULATOR)
-class SimulatorHelper {
- public:
- inline bool Init(Isolate* isolate) {
- simulator_ = isolate->thread_local_top()->simulator_;
- // Check if there is active simulator.
- return simulator_ != NULL;
- }
-
- inline void FillRegisters(v8::RegisterState* state) {
-#if V8_TARGET_ARCH_ARM
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::sp));
- state->fp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::r11));
-#elif V8_TARGET_ARCH_ARM64
- if (simulator_->sp() == 0 || simulator_->fp() == 0) {
- // It's possible that the simulator is interrupted while it is updating
- // the sp or fp register. ARM64 simulator does this in two steps:
- // first setting it to zero and then setting it to a new value.
- // Bailout if sp/fp doesn't contain the new value.
- //
- // FIXME: The above doesn't really solve the issue.
- // If a 64-bit target is executed on a 32-bit host even the final
- // write is non-atomic, so it might obtain a half of the result.
- // Moreover as long as the register set code uses memcpy (as of now),
- // it is not guaranteed to be atomic even when both host and target
- // are of same bitness.
- return;
- }
- state->pc = reinterpret_cast<Address>(simulator_->pc());
- state->sp = reinterpret_cast<Address>(simulator_->sp());
- state->fp = reinterpret_cast<Address>(simulator_->fp());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::sp));
- state->fp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::fp));
-#elif V8_TARGET_ARCH_PPC
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
- state->fp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
-#elif V8_TARGET_ARCH_S390
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
- state->fp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
-#endif
- }
-
- private:
- Simulator* simulator_;
-};
-#endif // USE_SIMULATOR
-
-
-#if defined(USE_SIGNALS)
-
-class SignalHandler : public AllStatic {
- public:
- static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
- static void TearDown() { delete mutex_; mutex_ = NULL; }
-
- static void IncreaseSamplerCount() {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
- if (++client_count_ == 1) Install();
- }
-
- static void DecreaseSamplerCount() {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
- if (--client_count_ == 0) Restore();
- }
-
- static bool Installed() {
- return signal_handler_installed_;
- }
-
- private:
- static void Install() {
-#if !V8_OS_NACL
- struct sigaction sa;
- sa.sa_sigaction = &HandleProfilerSignal;
- sigemptyset(&sa.sa_mask);
-#if V8_OS_QNX
- sa.sa_flags = SA_SIGINFO;
-#else
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
-#endif
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-#endif
- }
-
- static void Restore() {
-#if !V8_OS_NACL
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
-#endif
- }
-
-#if !V8_OS_NACL
- static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
-#endif
- // Protects the process wide state below.
- static base::Mutex* mutex_;
- static int client_count_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-};
-
-
-base::Mutex* SignalHandler::mutex_ = NULL;
-int SignalHandler::client_count_ = 0;
-struct sigaction SignalHandler::old_signal_handler_;
-bool SignalHandler::signal_handler_installed_ = false;
-
-
-// As Native Client does not support signal handling, profiling is disabled.
-#if !V8_OS_NACL
-void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
- void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UnsafeCurrent();
- if (isolate == NULL || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL) return;
-
- v8::RegisterState state;
-
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- if (!helper.Init(isolate)) return;
- helper.FillRegisters(&state);
- // It possible that the simulator is interrupted while it is updating
- // the sp or fp register. ARM64 simulator does this in two steps:
- // first setting it to zero and then setting it to the new value.
- // Bailout if sp/fp doesn't contain the new value.
- if (state.sp == 0 || state.fp == 0) return;
-#else
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
- mcontext_t& mcontext = ucontext->uc_mcontext;
-#endif
-#if V8_OS_LINUX
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-#elif V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-#elif V8_HOST_ARCH_ARM
-#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
- // Old GLibc ARM versions used a gregs[] array to access the register
- // values from mcontext_t.
- state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-#else
- state.pc = reinterpret_cast<Address>(mcontext.arm_pc);
- state.sp = reinterpret_cast<Address>(mcontext.arm_sp);
- state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
-#elif V8_HOST_ARCH_ARM64
- state.pc = reinterpret_cast<Address>(mcontext.pc);
- state.sp = reinterpret_cast<Address>(mcontext.sp);
- // FP is an alias for x29.
- state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
-#elif V8_HOST_ARCH_MIPS
- state.pc = reinterpret_cast<Address>(mcontext.pc);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#elif V8_HOST_ARCH_MIPS64
- state.pc = reinterpret_cast<Address>(mcontext.pc);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#elif V8_HOST_ARCH_PPC
- state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
- state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
- state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
-#elif V8_HOST_ARCH_S390
-#if V8_TARGET_ARCH_32_BIT
- // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
- // mode. This bit needs to be masked out to resolve actual address.
- state.pc =
- reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
-#else
- state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr);
-#endif // V8_TARGET_ARCH_32_BIT
- state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]);
- state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_MACOSX
-#if V8_HOST_ARCH_X64
-#if __DARWIN_UNIX03
- state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
- state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp);
- state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp);
-#else // !__DARWIN_UNIX03
- state.pc = reinterpret_cast<Address>(mcontext->ss.rip);
- state.sp = reinterpret_cast<Address>(mcontext->ss.rsp);
- state.fp = reinterpret_cast<Address>(mcontext->ss.rbp);
-#endif // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
-#if __DARWIN_UNIX03
- state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip);
- state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp);
- state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp);
-#else // !__DARWIN_UNIX03
- state.pc = reinterpret_cast<Address>(mcontext->ss.eip);
- state.sp = reinterpret_cast<Address>(mcontext->ss.esp);
- state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
-#endif // __DARWIN_UNIX03
-#endif // V8_HOST_ARCH_IA32
-#elif V8_OS_FREEBSD
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
- state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
- state.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(mcontext.mc_rip);
- state.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- state.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
- state.pc = reinterpret_cast<Address>(mcontext.mc_r15);
- state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
- state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_NETBSD
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
- state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
- state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
-#elif V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
- state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
- state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_OPENBSD
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
- state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
- state.fp = reinterpret_cast<Address>(ucontext->sc_ebp);
-#elif V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(ucontext->sc_rip);
- state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
- state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_SOLARIS
- state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-#elif V8_OS_QNX
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(mcontext.cpu.eip);
- state.sp = reinterpret_cast<Address>(mcontext.cpu.esp);
- state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
-#elif V8_HOST_ARCH_ARM
- state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
- state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
- state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_AIX
- state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar);
- state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]);
- state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]);
-#endif // V8_OS_AIX
-#endif // USE_SIMULATOR
- sampler->SampleStack(state);
-}
-#endif // V8_OS_NACL
-
-#endif
-
-
-class SamplerThread : public base::Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
- static void TearDown() { delete mutex_; mutex_ = NULL; }
-
- static void AddActiveSampler(Sampler* sampler) {
- bool need_to_start = false;
- base::LockGuard<base::Mutex> lock_guard(mutex_);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SamplerThread(sampler->interval());
- need_to_start = true;
- }
-
- DCHECK(sampler->IsActive());
- DCHECK(!instance_->active_samplers_.Contains(sampler));
- DCHECK(instance_->interval_ == sampler->interval());
- instance_->active_samplers_.Add(sampler);
-
- if (need_to_start) instance_->StartSynchronously();
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- SamplerThread* instance_to_remove = NULL;
- {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
-
- DCHECK(sampler->IsActive());
- bool removed = instance_->active_samplers_.RemoveElement(sampler);
- DCHECK(removed);
- USE(removed);
-
- // We cannot delete the instance immediately as we need to Join() the
- // thread but we are holding mutex_ and the thread may try to acquire it.
- if (instance_->active_samplers_.is_empty()) {
- instance_to_remove = instance_;
- instance_ = NULL;
- }
- }
-
- if (!instance_to_remove) return;
- instance_to_remove->Join();
- delete instance_to_remove;
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- while (true) {
- {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
- if (active_samplers_.is_empty()) break;
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- for (int i = 0; i < active_samplers_.length(); ++i) {
- Sampler* sampler = active_samplers_.at(i);
- if (!sampler->IsProfiling()) continue;
- sampler->DoSample();
- }
- }
- base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
- }
- }
-
- private:
- // Protects the process wide state below.
- static base::Mutex* mutex_;
- static SamplerThread* instance_;
-
- const int interval_;
- List<Sampler*> active_samplers_;
-
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-base::Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-//
-// StackTracer implementation
-//
-DISABLE_ASAN void TickSample::Init(Isolate* isolate,
- const v8::RegisterState& regs,
- RecordCEntryFrame record_c_entry_frame,
- bool update_stats) {
- timestamp = base::TimeTicks::HighResolutionNow();
- pc = reinterpret_cast<Address>(regs.pc);
- state = isolate->current_vm_state();
- this->update_stats = update_stats;
-
- // Avoid collecting traces while doing GC.
- if (state == GC) return;
-
- Address js_entry_sp = isolate->js_entry_sp();
- if (js_entry_sp == 0) return; // Not executing JS now.
-
- if (pc && IsNoFrameRegion(pc)) {
- // Can't collect stack. Mark the sample as spoiled.
- timestamp = base::TimeTicks();
- pc = 0;
- return;
- }
-
- ExternalCallbackScope* scope = isolate->external_callback_scope();
- Address handler = Isolate::handler(isolate->thread_local_top());
- // If there is a handler on top of the external callback scope then
- // we have already entrered JavaScript again and the external callback
- // is not the top function.
- if (scope && scope->scope_address() < handler) {
- external_callback_entry = *scope->callback_entrypoint_address();
- has_external_callback = true;
- } else {
- // sp register may point at an arbitrary place in memory, make
- // sure MSAN doesn't complain about it.
- MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
- // Sample potential return address value for frameless invocation of
- // stubs (we'll figure out later, if this value makes sense).
- tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
- has_external_callback = false;
- }
-
- SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
- reinterpret_cast<Address>(regs.sp), js_entry_sp);
- top_frame_type = it.top_frame_type();
-
- SampleInfo info;
- GetStackSample(isolate, regs, record_c_entry_frame,
- reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
- frames_count = static_cast<unsigned>(info.frames_count);
- if (!frames_count) {
- // It is executing JS but failed to collect a stack trace.
- // Mark the sample as spoiled.
- timestamp = base::TimeTicks();
- pc = 0;
- }
-}
-
-
-void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
- RecordCEntryFrame record_c_entry_frame,
- void** frames, size_t frames_limit,
- v8::SampleInfo* sample_info) {
- sample_info->frames_count = 0;
- sample_info->vm_state = isolate->current_vm_state();
- if (sample_info->vm_state == GC) return;
-
- Address js_entry_sp = isolate->js_entry_sp();
- if (js_entry_sp == 0) return; // Not executing JS now.
-
- SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
- reinterpret_cast<Address>(regs.sp), js_entry_sp);
- size_t i = 0;
- if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() &&
- it.top_frame_type() == StackFrame::EXIT) {
- frames[i++] = isolate->c_function();
- }
- while (!it.done() && i < frames_limit) {
- if (it.frame()->is_interpreted()) {
- // For interpreted frames use the bytecode array pointer as the pc.
- InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
- // Since the sampler can interrupt execution at any point the
- // bytecode_array might be garbage, so don't dereference it.
- Address bytecode_array =
- reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
- frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
- frame->GetBytecodeOffset();
- } else {
- frames[i++] = it.frame()->pc();
- }
- it.Advance();
- }
- sample_info->frames_count = i;
-}
-
-
-void Sampler::SetUp() {
-#if defined(USE_SIGNALS)
- SignalHandler::SetUp();
-#endif
- SamplerThread::SetUp();
-}
-
-
-void Sampler::TearDown() {
- SamplerThread::TearDown();
-#if defined(USE_SIGNALS)
- SignalHandler::TearDown();
-#endif
-}
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- has_processing_thread_(false),
- active_(false),
- is_counting_samples_(false),
- js_sample_count_(0),
- external_sample_count_(0) {
- data_ = new PlatformData;
-}
-
-Sampler::~Sampler() {
- DCHECK(!IsActive());
- delete data_;
-}
-
-void Sampler::Start() {
- DCHECK(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- DCHECK(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-void Sampler::IncreaseProfilingDepth() {
- base::NoBarrier_AtomicIncrement(&profiling_, 1);
-#if defined(USE_SIGNALS)
- SignalHandler::IncreaseSamplerCount();
-#endif
-}
-
-
-void Sampler::DecreaseProfilingDepth() {
-#if defined(USE_SIGNALS)
- SignalHandler::DecreaseSamplerCount();
-#endif
- base::NoBarrier_AtomicIncrement(&profiling_, -1);
-}
-
-
-void Sampler::SampleStack(const v8::RegisterState& state) {
- TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
- TickSample sample_obj;
- if (sample == NULL) sample = &sample_obj;
- sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
- if (is_counting_samples_ && !sample->timestamp.IsNull()) {
- if (sample->state == JS) ++js_sample_count_;
- if (sample->state == EXTERNAL) ++external_sample_count_;
- }
- Tick(sample);
- if (sample != &sample_obj) {
- isolate_->cpu_profiler()->FinishTickSample();
- }
-}
-
-
-#if defined(USE_SIGNALS)
-
-void Sampler::DoSample() {
- if (!SignalHandler::Installed()) return;
- pthread_kill(platform_data()->vm_tid(), SIGPROF);
-}
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-void Sampler::DoSample() {
- HANDLE profiled_thread = platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- if (!helper.Init(isolate())) return;
-#endif
-
- const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
- v8::RegisterState state;
-#if defined(USE_SIMULATOR)
- helper.FillRegisters(&state);
-#else
-#if V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(context.Rip);
- state.sp = reinterpret_cast<Address>(context.Rsp);
- state.fp = reinterpret_cast<Address>(context.Rbp);
-#else
- state.pc = reinterpret_cast<Address>(context.Eip);
- state.sp = reinterpret_cast<Address>(context.Esp);
- state.fp = reinterpret_cast<Address>(context.Ebp);
-#endif
-#endif // USE_SIMULATOR
- SampleStack(state);
- }
- ResumeThread(profiled_thread);
-}
-
-#endif // USE_SIGNALS
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/profiler/sampler.h b/deps/v8/src/profiler/sampler.h
deleted file mode 100644
index dcd1255d75..0000000000
--- a/deps/v8/src/profiler/sampler.h
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PROFILER_SAMPLER_H_
-#define V8_PROFILER_SAMPLER_H_
-
-#include "include/v8.h"
-
-#include "src/base/atomicops.h"
-#include "src/base/platform/time.h"
-#include "src/frames.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-
-// ----------------------------------------------------------------------------
-// Sampler
-//
-// A sampler periodically samples the state of the VM and optionally
-// (if used for profiling) the program counter and stack pointer for
-// the thread that created it.
-
-// TickSample captures the information collected for each sample.
-struct TickSample {
- // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
- // include the runtime function we're calling. Externally exposed tick
- // samples don't care.
- enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
-
- TickSample()
- : state(OTHER),
- pc(NULL),
- external_callback_entry(NULL),
- frames_count(0),
- has_external_callback(false),
- update_stats(true),
- top_frame_type(StackFrame::NONE) {}
- void Init(Isolate* isolate, const v8::RegisterState& state,
- RecordCEntryFrame record_c_entry_frame, bool update_stats);
- static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
- RecordCEntryFrame record_c_entry_frame,
- void** frames, size_t frames_limit,
- v8::SampleInfo* sample_info);
- StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- union {
- Address tos; // Top stack value (*sp).
- Address external_callback_entry;
- };
- static const unsigned kMaxFramesCountLog2 = 8;
- static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
- Address stack[kMaxFramesCount]; // Call stack.
- base::TimeTicks timestamp;
- unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
- bool has_external_callback : 1;
- bool update_stats : 1; // Whether the sample should update aggregated stats.
- StackFrame::Type top_frame_type : 5;
-};
-
-class Sampler {
- public:
- // Initializes the Sampler support. Called once at VM startup.
- static void SetUp();
- static void TearDown();
-
- // Initialize sampler.
- Sampler(Isolate* isolate, int interval);
- virtual ~Sampler();
-
- Isolate* isolate() const { return isolate_; }
- int interval() const { return interval_; }
-
- // Performs stack sampling.
- void SampleStack(const v8::RegisterState& regs);
-
- // Start and stop sampler.
- void Start();
- void Stop();
-
- // Whether the sampling thread should use this Sampler for CPU profiling?
- bool IsProfiling() const {
- return base::NoBarrier_Load(&profiling_) > 0 &&
- !base::NoBarrier_Load(&has_processing_thread_);
- }
- void IncreaseProfilingDepth();
- void DecreaseProfilingDepth();
-
- // Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return base::NoBarrier_Load(&active_); }
-
- void DoSample();
- // If true next sample must be initiated on the profiler event processor
- // thread right after latest sample is processed.
- void SetHasProcessingThread(bool value) {
- base::NoBarrier_Store(&has_processing_thread_, value);
- }
-
- // Used in tests to make sure that stack sampling is performed.
- unsigned js_sample_count() const { return js_sample_count_; }
- unsigned external_sample_count() const { return external_sample_count_; }
- void StartCountingSamples() {
- js_sample_count_ = 0;
- external_sample_count_ = 0;
- is_counting_samples_ = true;
- }
-
- class PlatformData;
- PlatformData* platform_data() const { return data_; }
-
- protected:
- // This method is called for each sampling period with the current
- // program counter.
- virtual void Tick(TickSample* sample) = 0;
-
- private:
- void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
-
- Isolate* isolate_;
- const int interval_;
- base::Atomic32 profiling_;
- base::Atomic32 has_processing_thread_;
- base::Atomic32 active_;
- PlatformData* data_; // Platform specific data.
- // Counts stack samples taken in various VM states.
- bool is_counting_samples_;
- unsigned js_sample_count_;
- unsigned external_sample_count_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PROFILER_SAMPLER_H_
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index a32cae3ef9..b4361ee849 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -7,6 +7,7 @@
#include <stdint.h>
#include <memory>
#include "src/api.h"
+#include "src/base/ieee754.h"
#include "src/base/utils/random-number-generator.h"
#include "src/frames-inl.h"
#include "src/heap/heap.h"
@@ -27,7 +28,7 @@ intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
return static_cast<intptr_t>(rate);
}
double u = random_->NextDouble();
- double next = (-std::log(u)) * rate;
+ double next = (-base::ieee754::log(u)) * rate;
return next < kPointerSize
? kPointerSize
: (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
@@ -47,8 +48,9 @@ v8::AllocationProfile::Allocation SamplingHeapProfiler::ScaleSample(
return {size, static_cast<unsigned int>(count * scale + 0.5)};
}
-SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
- uint64_t rate, int stack_depth)
+SamplingHeapProfiler::SamplingHeapProfiler(
+ Heap* heap, StringsStorage* names, uint64_t rate, int stack_depth,
+ v8::HeapProfiler::SamplingFlags flags)
: isolate_(heap->isolate()),
heap_(heap),
new_space_observer_(new SamplingAllocationObserver(
@@ -58,14 +60,15 @@ SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
heap_, static_cast<intptr_t>(rate), rate, this,
heap->isolate()->random_number_generator())),
names_(names),
- profile_root_("(root)", v8::UnboundScript::kNoScriptId, 0),
+ profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0),
samples_(),
stack_depth_(stack_depth),
- rate_(rate) {
+ rate_(rate),
+ flags_(flags) {
CHECK_GT(rate_, 0);
heap->new_space()->AddAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
if (space != heap->new_space()) {
space->AddAllocationObserver(other_spaces_observer_.get());
}
@@ -76,7 +79,7 @@ SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
SamplingHeapProfiler::~SamplingHeapProfiler() {
heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
if (space != heap_->new_space()) {
space->RemoveAllocationObserver(other_spaces_observer_.get());
}
@@ -109,6 +112,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
Sample* sample = new Sample(size, node, loc, this);
samples_.insert(sample);
sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+ sample->global.MarkIndependent();
}
void SamplingHeapProfiler::OnWeakCallback(
@@ -117,22 +121,34 @@ void SamplingHeapProfiler::OnWeakCallback(
AllocationNode* node = sample->owner;
DCHECK(node->allocations_[sample->size] > 0);
node->allocations_[sample->size]--;
+ if (node->allocations_[sample->size] == 0) {
+ node->allocations_.erase(sample->size);
+ while (node->allocations_.empty() && node->children_.empty() &&
+ node->parent_ && !node->parent_->pinned_) {
+ AllocationNode* parent = node->parent_;
+ AllocationNode::FunctionId id = AllocationNode::function_id(
+ node->script_id_, node->script_position_, node->name_);
+ parent->children_.erase(id);
+ delete node;
+ node = parent;
+ }
+ }
sample->profiler->samples_.erase(sample);
delete sample;
}
-SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
- AllocationNode* parent, const char* name, int script_id,
- int start_position) {
- for (AllocationNode* child : parent->children_) {
- if (child->script_id_ == script_id &&
- child->script_position_ == start_position &&
- strcmp(child->name_, name) == 0) {
- return child;
- }
+SamplingHeapProfiler::AllocationNode*
+SamplingHeapProfiler::AllocationNode::FindOrAddChildNode(const char* name,
+ int script_id,
+ int start_position) {
+ FunctionId id = function_id(script_id, start_position, name);
+ auto it = children_.find(id);
+ if (it != children_.end()) {
+ DCHECK(strcmp(it->second->name_, name) == 0);
+ return it->second;
}
- AllocationNode* child = new AllocationNode(name, script_id, start_position);
- parent->children_.push_back(child);
+ auto child = new AllocationNode(this, name, script_id, start_position);
+ children_.insert(std::make_pair(id, child));
return child;
}
@@ -140,7 +156,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
AllocationNode* node = &profile_root_;
std::vector<SharedFunctionInfo*> stack;
- StackTraceFrameIterator it(isolate_);
+ JavaScriptFrameIterator it(isolate_);
int frames_captured = 0;
while (!it.done() && frames_captured < stack_depth_) {
JavaScriptFrame* frame = it.frame();
@@ -173,7 +189,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
name = "(JS)";
break;
}
- return FindOrAddChildNode(node, name, v8::UnboundScript::kNoScriptId, 0);
+ return node->FindOrAddChildNode(name, v8::UnboundScript::kNoScriptId, 0);
}
// We need to process the stack in reverse order as the top of the stack is
@@ -186,14 +202,17 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
Script* script = Script::cast(shared->script());
script_id = script->id();
}
- node = FindOrAddChildNode(node, name, script_id, shared->start_position());
+ node = node->FindOrAddChildNode(name, script_id, shared->start_position());
}
return node;
}
v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
- const std::map<int, Script*>& scripts) {
+ const std::map<int, Handle<Script>>& scripts) {
+ // By pinning the node we make sure its children won't get disposed if
+ // a GC kicks in during the tree retrieval.
+ node->pinned_ = true;
Local<v8::String> script_name =
ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(""));
int line = v8::AllocationProfile::kNoLineNumberInfo;
@@ -203,23 +222,22 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
if (node->script_id_ != v8::UnboundScript::kNoScriptId &&
scripts.find(node->script_id_) != scripts.end()) {
// Cannot use std::map<T>::at because it is not available on android.
- auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
- Script* script = non_const_scripts[node->script_id_];
- if (script) {
+ auto non_const_scripts =
+ const_cast<std::map<int, Handle<Script>>&>(scripts);
+ Handle<Script> script = non_const_scripts[node->script_id_];
+ if (!script.is_null()) {
if (script->name()->IsName()) {
Name* name = Name::cast(script->name());
script_name = ToApiHandle<v8::String>(
isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
}
- Handle<Script> script_handle(script);
- line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
- column =
- 1 + Script::GetColumnNumber(script_handle, node->script_position_);
- }
- for (auto alloc : node->allocations_) {
- allocations.push_back(ScaleSample(alloc.first, alloc.second));
+ line = 1 + Script::GetLineNumber(script, node->script_position_);
+ column = 1 + Script::GetColumnNumber(script, node->script_position_);
}
}
+ for (auto alloc : node->allocations_) {
+ allocations.push_back(ScaleSample(alloc.first, alloc.second));
+ }
profile->nodes().push_back(v8::AllocationProfile::Node(
{ToApiHandle<v8::String>(
@@ -227,35 +245,34 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
script_name, node->script_id_, node->script_position_, line, column,
std::vector<v8::AllocationProfile::Node*>(), allocations}));
v8::AllocationProfile::Node* current = &profile->nodes().back();
- size_t child_len = node->children_.size();
- // The children vector may have nodes appended to it during translation
+ // The children map may have nodes inserted into it during translation
// because the translation may allocate strings on the JS heap that have
- // the potential to be sampled. We cache the length of the vector before
- // iteration so that nodes appended to the vector during iteration are
- // not processed.
- for (size_t i = 0; i < child_len; i++) {
+ // the potential to be sampled. That's ok since map iterators are not
+ // invalidated upon std::map insertion.
+ for (auto it : node->children_) {
current->children.push_back(
- TranslateAllocationNode(profile, node->children_[i], scripts));
+ TranslateAllocationNode(profile, it.second, scripts));
}
+ node->pinned_ = false;
return current;
}
v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
+ if (flags_ & v8::HeapProfiler::kSamplingForceGC) {
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ "SamplingHeapProfiler");
+ }
// To resolve positions to line/column numbers, we will need to look up
// scripts. Build a map to allow fast mapping from script id to script.
- std::map<int, Script*> scripts;
+ std::map<int, Handle<Script>> scripts;
{
Script::Iterator iterator(isolate_);
- Script* script;
- while ((script = iterator.Next())) {
- scripts[script->id()] = script;
+ while (Script* script = iterator.Next()) {
+ scripts[script->id()] = handle(script);
}
}
-
auto profile = new v8::internal::AllocationProfile();
-
TranslateAllocationNode(profile, &profile_root_, scripts);
-
return profile;
}
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.h b/deps/v8/src/profiler/sampling-heap-profiler.h
index 0b538b070c..07840244ce 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.h
+++ b/deps/v8/src/profiler/sampling-heap-profiler.h
@@ -7,6 +7,7 @@
#include <deque>
#include <map>
+#include <memory>
#include <set>
#include "include/v8-profiler.h"
#include "src/heap/heap.h"
@@ -41,7 +42,7 @@ class AllocationProfile : public v8::AllocationProfile {
class SamplingHeapProfiler {
public:
SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
- int stack_depth);
+ int stack_depth, v8::HeapProfiler::SamplingFlags flags);
~SamplingHeapProfiler();
v8::AllocationProfile* GetAllocationProfile();
@@ -71,23 +72,47 @@ class SamplingHeapProfiler {
class AllocationNode {
public:
- AllocationNode(const char* const name, int script_id,
- const int start_position)
- : script_id_(script_id),
+ AllocationNode(AllocationNode* parent, const char* name, int script_id,
+ int start_position)
+ : parent_(parent),
+ script_id_(script_id),
script_position_(start_position),
- name_(name) {}
+ name_(name),
+ pinned_(false) {}
~AllocationNode() {
for (auto child : children_) {
- delete child;
+ delete child.second;
}
}
private:
+ typedef uint64_t FunctionId;
+ static FunctionId function_id(int script_id, int start_position,
+ const char* name) {
+ // script_id == kNoScriptId case:
+ // Use function name pointer as an id. Names derived from VM state
+ // must not collide with the builtin names. The least significant bit
+ // of the id is set to 1.
+ if (script_id == v8::UnboundScript::kNoScriptId) {
+ return reinterpret_cast<intptr_t>(name) | 1;
+ }
+ // script_id != kNoScriptId case:
+ // Use script_id, start_position pair to uniquely identify the node.
+ // The least significant bit of the id is set to 0.
+ DCHECK(static_cast<unsigned>(start_position) < (1u << 31));
+ return (static_cast<uint64_t>(script_id) << 32) + (start_position << 1);
+ }
+ AllocationNode* FindOrAddChildNode(const char* name, int script_id,
+ int start_position);
+ // TODO(alph): make use of unordered_map's here. Pay attention to
+ // iterator invalidation during TranslateAllocationNode.
std::map<size_t, unsigned int> allocations_;
- std::vector<AllocationNode*> children_;
+ std::map<FunctionId, AllocationNode*> children_;
+ AllocationNode* const parent_;
const int script_id_;
const int script_position_;
const char* const name_;
+ bool pinned_;
friend class SamplingHeapProfiler;
@@ -110,24 +135,25 @@ class SamplingHeapProfiler {
// loaded scripts keyed by their script id.
v8::AllocationProfile::Node* TranslateAllocationNode(
AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
- const std::map<int, Script*>& scripts);
+ const std::map<int, Handle<Script>>& scripts);
v8::AllocationProfile::Allocation ScaleSample(size_t size,
unsigned int count);
AllocationNode* AddStack();
- AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
- int script_id, int start_position);
Isolate* const isolate_;
Heap* const heap_;
- base::SmartPointer<SamplingAllocationObserver> new_space_observer_;
- base::SmartPointer<SamplingAllocationObserver> other_spaces_observer_;
+ std::unique_ptr<SamplingAllocationObserver> new_space_observer_;
+ std::unique_ptr<SamplingAllocationObserver> other_spaces_observer_;
StringsStorage* const names_;
AllocationNode profile_root_;
std::set<Sample*> samples_;
const int stack_depth_;
const uint64_t rate_;
+ v8::HeapProfiler::SamplingFlags flags_;
friend class SamplingAllocationObserver;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplingHeapProfiler);
};
class SamplingAllocationObserver : public AllocationObserver {
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 9f095b8866..edb01b5fd0 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -4,7 +4,8 @@
#include "src/profiler/strings-storage.h"
-#include "src/base/smart-pointers.h"
+#include <memory>
+
#include "src/objects-inl.h"
namespace v8 {
@@ -22,7 +23,8 @@ StringsStorage::StringsStorage(Heap* heap)
StringsStorage::~StringsStorage() {
- for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+ for (base::HashMap::Entry* p = names_.Start(); p != NULL;
+ p = names_.Next(p)) {
DeleteArray(reinterpret_cast<const char*>(p->value));
}
}
@@ -30,7 +32,7 @@ StringsStorage::~StringsStorage() {
const char* StringsStorage::GetCopy(const char* src) {
int len = static_cast<int>(strlen(src));
- HashMap::Entry* entry = GetEntry(src, len);
+ base::HashMap::Entry* entry = GetEntry(src, len);
if (entry->value == NULL) {
Vector<char> dst = Vector<char>::New(len + 1);
StrNCpy(dst, src, len);
@@ -52,7 +54,7 @@ const char* StringsStorage::GetFormatted(const char* format, ...) {
const char* StringsStorage::AddOrDisposeString(char* str, int len) {
- HashMap::Entry* entry = GetEntry(str, len);
+ base::HashMap::Entry* entry = GetEntry(str, len);
if (entry->value == NULL) {
// New entry added.
entry->key = str;
@@ -80,9 +82,9 @@ const char* StringsStorage::GetName(Name* name) {
String* str = String::cast(name);
int length = Min(kMaxNameSize, str->length());
int actual_length = 0;
- base::SmartArrayPointer<char> data = str->ToCString(
+ std::unique_ptr<char[]> data = str->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
- return AddOrDisposeString(data.Detach(), actual_length);
+ return AddOrDisposeString(data.release(), actual_length);
} else if (name->IsSymbol()) {
return "<symbol>";
}
@@ -107,15 +109,15 @@ const char* StringsStorage::GetFunctionName(const char* name) {
size_t StringsStorage::GetUsedMemorySize() const {
size_t size = sizeof(*this);
- size += sizeof(HashMap::Entry) * names_.capacity();
- for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+ size += sizeof(base::HashMap::Entry) * names_.capacity();
+ for (base::HashMap::Entry* p = names_.Start(); p != NULL;
+ p = names_.Next(p)) {
size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
}
return size;
}
-
-HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
+base::HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
return names_.LookupOrInsert(const_cast<char*>(str), hash);
}
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index 7164caef63..f98aa5e038 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -5,8 +5,11 @@
#ifndef V8_PROFILER_STRINGS_STORAGE_H_
#define V8_PROFILER_STRINGS_STORAGE_H_
+#include <stdarg.h>
+
#include "src/allocation.h"
-#include "src/hashmap.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/hashmap.h"
namespace v8 {
namespace internal {
@@ -19,7 +22,8 @@ class StringsStorage {
~StringsStorage();
const char* GetCopy(const char* src);
- const char* GetFormatted(const char* format, ...);
+ PRINTF_FORMAT(2, 3) const char* GetFormatted(const char* format, ...);
+ PRINTF_FORMAT(2, 0)
const char* GetVFormatted(const char* format, va_list args);
const char* GetName(Name* name);
const char* GetName(int index);
@@ -32,10 +36,10 @@ class StringsStorage {
static bool StringsMatch(void* key1, void* key2);
const char* AddOrDisposeString(char* str, int len);
- HashMap::Entry* GetEntry(const char* str, int len);
+ base::HashMap::Entry* GetEntry(const char* str, int len);
uint32_t hash_seed_;
- HashMap names_;
+ base::HashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
};
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
new file mode 100644
index 0000000000..ecb2bf46f7
--- /dev/null
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -0,0 +1,272 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/tick-sample.h"
+
+#include "include/v8-profiler.h"
+#include "src/frames-inl.h"
+#include "src/msan.h"
+#include "src/simulator.h"
+#include "src/vm-state-inl.h"
+
+namespace v8 {
+namespace {
+
+bool IsSamePage(i::byte* ptr1, i::byte* ptr2) {
+ const uint32_t kPageSize = 4096;
+ uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
+ return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
+ (reinterpret_cast<uintptr_t>(ptr2) & mask);
+}
+
+// Check if the code at specified address could potentially be a
+// frame setup code.
+bool IsNoFrameRegion(i::Address address) {
+ struct Pattern {
+ int bytes_count;
+ i::byte bytes[8];
+ int offsets[4];
+ };
+ i::byte* pc = reinterpret_cast<i::byte*>(address);
+ static Pattern patterns[] = {
+#if V8_HOST_ARCH_IA32
+ // push %ebp
+ // mov %esp,%ebp
+ {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
+ // pop %ebp
+ // ret N
+ {2, {0x5d, 0xc2}, {0, 1, -1}},
+ // pop %ebp
+ // ret
+ {2, {0x5d, 0xc3}, {0, 1, -1}},
+#elif V8_HOST_ARCH_X64
+ // pushq %rbp
+ // movq %rsp,%rbp
+ {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
+ // popq %rbp
+ // ret N
+ {2, {0x5d, 0xc2}, {0, 1, -1}},
+ // popq %rbp
+ // ret
+ {2, {0x5d, 0xc3}, {0, 1, -1}},
+#endif
+ {0, {}, {}}
+ };
+ for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
+ for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
+ int offset = *offset_ptr;
+ if (!offset || IsSamePage(pc, pc - offset)) {
+ MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
+ if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
+ return true;
+ } else {
+ // It is not safe to examine bytes on another page as it might not be
+ // allocated thus causing a SEGFAULT.
+ // Check the pattern part that's on the same page and
+ // pessimistically assume it could be the entire pattern match.
+ MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
+ if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+namespace internal {
+namespace {
+
+#if defined(USE_SIMULATOR)
+class SimulatorHelper {
+ public:
+ // Returns true if register values were successfully retrieved
+ // from the simulator, otherwise returns false.
+ static bool FillRegisters(Isolate* isolate, v8::RegisterState* state);
+};
+
+bool SimulatorHelper::FillRegisters(Isolate* isolate,
+ v8::RegisterState* state) {
+ Simulator* simulator = isolate->thread_local_top()->simulator_;
+ // Check if there is active simulator.
+ if (simulator == NULL) return false;
+#if V8_TARGET_ARCH_ARM
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp =
+ reinterpret_cast<Address>(simulator->get_register(Simulator::r11));
+#elif V8_TARGET_ARCH_ARM64
+ state->pc = reinterpret_cast<Address>(simulator->pc());
+ state->sp = reinterpret_cast<Address>(simulator->sp());
+ state->fp = reinterpret_cast<Address>(simulator->fp());
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_PPC
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_S390
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#endif
+ if (state->sp == 0 || state->fp == 0) {
+ // It is possible that the simulator is interrupted while it is updating
+ // the sp or fp register. ARM64 simulator does this in two steps:
+ // first setting it to zero and then setting it to the new value.
+ // Bailout if sp/fp doesn't contain the new value.
+ //
+ // FIXME: The above doesn't really solve the issue.
+ // If a 64-bit target is executed on a 32-bit host even the final
+ // write is non-atomic, so it might obtain a half of the result.
+ // Moreover as long as the register set code uses memcpy (as of now),
+ // it is not guaranteed to be atomic even when both host and target
+ // are of same bitness.
+ return false;
+ }
+ return true;
+}
+#endif // USE_SIMULATOR
+
+} // namespace
+} // namespace internal
+
+//
+// StackTracer implementation
+//
+DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
+ const RegisterState& reg_state,
+ RecordCEntryFrame record_c_entry_frame,
+ bool update_stats,
+ bool use_simulator_reg_state) {
+ this->update_stats = update_stats;
+ SampleInfo info;
+ RegisterState regs = reg_state;
+ if (!GetStackSample(v8_isolate, &regs, record_c_entry_frame, stack,
+ kMaxFramesCount, &info, use_simulator_reg_state)) {
+ // It is executing JS but failed to collect a stack trace.
+ // Mark the sample as spoiled.
+ pc = nullptr;
+ return;
+ }
+
+ state = info.vm_state;
+ pc = regs.pc;
+ frames_count = static_cast<unsigned>(info.frames_count);
+ has_external_callback = info.external_callback_entry != nullptr;
+ if (has_external_callback) {
+ external_callback_entry = info.external_callback_entry;
+ } else if (frames_count) {
+ // sp register may point at an arbitrary place in memory, make
+ // sure MSAN doesn't complain about it.
+ MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(void*));
+ // Sample potential return address value for frameless invocation of
+ // stubs (we'll figure out later, if this value makes sense).
+ tos = i::Memory::Address_at(reinterpret_cast<i::Address>(regs.sp));
+ } else {
+ tos = nullptr;
+ }
+}
+
+bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
+ RecordCEntryFrame record_c_entry_frame,
+ void** frames, size_t frames_limit,
+ v8::SampleInfo* sample_info,
+ bool use_simulator_reg_state) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ sample_info->frames_count = 0;
+ sample_info->vm_state = isolate->current_vm_state();
+ sample_info->external_callback_entry = nullptr;
+ if (sample_info->vm_state == GC) return true;
+
+ i::Address js_entry_sp = isolate->js_entry_sp();
+ if (js_entry_sp == nullptr) return true; // Not executing JS now.
+
+#if defined(USE_SIMULATOR)
+ if (use_simulator_reg_state) {
+ if (!i::SimulatorHelper::FillRegisters(isolate, regs)) return false;
+ }
+#else
+ USE(use_simulator_reg_state);
+#endif
+ DCHECK(regs->sp);
+
+ if (regs->pc && IsNoFrameRegion(static_cast<i::Address>(regs->pc))) {
+ // The frame is not setup, so it'd be hard to iterate the stack. Bailout.
+ return false;
+ }
+
+ i::ExternalCallbackScope* scope = isolate->external_callback_scope();
+ i::Address handler = i::Isolate::handler(isolate->thread_local_top());
+ // If there is a handler on top of the external callback scope then
+ // we have already entered JavaScript again and the external callback
+ // is not the top function.
+ if (scope && scope->scope_address() < handler) {
+ i::Address* external_callback_entry_ptr =
+ scope->callback_entrypoint_address();
+ sample_info->external_callback_entry =
+ external_callback_entry_ptr == nullptr ? nullptr
+ : *external_callback_entry_ptr;
+ }
+
+ i::SafeStackFrameIterator it(isolate, reinterpret_cast<i::Address>(regs->fp),
+ reinterpret_cast<i::Address>(regs->sp),
+ js_entry_sp);
+
+ // If at this point iterator does not see any frames,
+ // it usually means something is wrong with the FP,
+ // e.g. it is used as a general purpose register in the function.
+ // Bailout.
+ if (it.done()) return false;
+
+ size_t i = 0;
+ if (record_c_entry_frame == kIncludeCEntryFrame &&
+ (it.top_frame_type() == internal::StackFrame::EXIT ||
+ it.top_frame_type() == internal::StackFrame::BUILTIN_EXIT)) {
+ frames[i++] = isolate->c_function();
+ }
+ for (; !it.done() && i < frames_limit; it.Advance()) {
+ if (!it.frame()->is_interpreted()) {
+ frames[i++] = it.frame()->pc();
+ continue;
+ }
+ // For interpreted frames use the bytecode array pointer as the pc.
+ i::InterpretedFrame* frame = static_cast<i::InterpretedFrame*>(it.frame());
+ // Since the sampler can interrupt execution at any point the
+ // bytecode_array might be garbage, so don't dereference it.
+ i::Address bytecode_array =
+ reinterpret_cast<i::Address>(frame->GetBytecodeArray()) -
+ i::kHeapObjectTag;
+ frames[i++] = bytecode_array + i::BytecodeArray::kHeaderSize +
+ frame->GetBytecodeOffset();
+ }
+ sample_info->frames_count = i;
+ return true;
+}
+
+namespace internal {
+
+void TickSample::Init(Isolate* isolate, const v8::RegisterState& state,
+ RecordCEntryFrame record_c_entry_frame, bool update_stats,
+ bool use_simulator_reg_state) {
+ v8::TickSample::Init(reinterpret_cast<v8::Isolate*>(isolate), state,
+ record_c_entry_frame, update_stats,
+ use_simulator_reg_state);
+ if (pc == nullptr) return;
+ timestamp = base::TimeTicks::HighResolutionNow();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/profiler/tick-sample.h b/deps/v8/src/profiler/tick-sample.h
new file mode 100644
index 0000000000..819b862388
--- /dev/null
+++ b/deps/v8/src/profiler/tick-sample.h
@@ -0,0 +1,27 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_TICK_SAMPLE_H_
+#define V8_PROFILER_TICK_SAMPLE_H_
+
+#include "include/v8-profiler.h"
+#include "src/base/platform/time.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+struct TickSample : public v8::TickSample {
+ void Init(Isolate* isolate, const v8::RegisterState& state,
+ RecordCEntryFrame record_c_entry_frame, bool update_stats,
+ bool use_simulator_reg_state = true);
+ base::TimeTicks timestamp;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_TICK_SAMPLE_H_