-rw-r--r--   doc/api/cli.md                       14
-rw-r--r--   doc/api/debugger.md                   2
-rw-r--r--   doc/api/tracing.md                   19
-rw-r--r--   node.gyp                              5
-rw-r--r--   src/node.cc                         112
-rw-r--r--   src/node.h                            1
-rw-r--r--   src/tracing/agent.cc                 72
-rw-r--r--   src/tracing/agent.h                  31
-rw-r--r--   src/tracing/node_trace_buffer.cc    177
-rw-r--r--   src/tracing/node_trace_buffer.h      89
-rw-r--r--   src/tracing/node_trace_writer.cc    185
-rw-r--r--   src/tracing/node_trace_writer.h      74
-rw-r--r--   src/tracing/trace_event.cc           17
-rw-r--r--   src/tracing/trace_event.h          1710
-rw-r--r--   test/parallel/test-trace-event.js    35
15 files changed, 2503 insertions, 40 deletions
diff --git a/doc/api/cli.md b/doc/api/cli.md
index d868d70533..2000c2b333 100644
--- a/doc/api/cli.md
+++ b/doc/api/cli.md
@@ -129,6 +129,20 @@ added: v2.1.0
Prints a stack trace whenever synchronous I/O is detected after the first turn
of the event loop.
+### `--trace-events-enabled`
+<!-- YAML
+added: REPLACEME
+-->
+
+Enables the collection of trace event tracing information.
+
+### `--trace-event-categories`
+<!-- YAML
+added: REPLACEME
+-->
+
+A comma-separated list of categories that should be traced when trace event
+tracing is enabled using `--trace-events-enabled`.
### `--zero-fill-buffers`
<!-- YAML
diff --git a/doc/api/debugger.md b/doc/api/debugger.md
index c8a61d2ce7..e81a0e46ea 100644
--- a/doc/api/debugger.md
+++ b/doc/api/debugger.md
@@ -201,4 +201,4 @@ To start debugging, open the following URL in Chrome:
chrome-devtools://devtools/remote/serve_file/@60cd6e859b9f557d2312f5bf532f6aec5f284980/inspector.html?experiments=true&v8only=true&ws=localhost:9229/node
```
-[TCP-based protocol]: https://github.com/v8/v8/wiki/Debugging-Protocol
+[TCP-based protocol]: https://github.com/v8/v8/wiki/Debugging-Protocol
\ No newline at end of file
diff --git a/doc/api/tracing.md b/doc/api/tracing.md
new file mode 100644
index 0000000000..28e488201e
--- /dev/null
+++ b/doc/api/tracing.md
@@ -0,0 +1,19 @@
+# Tracing
+
+Trace Event provides a mechanism to centralize tracing information generated by
+V8, Node core, and userspace code.
+
+Tracing can be enabled by passing the `--trace-events-enabled` flag when starting a
+Node.js application.
+
+The set of categories for which traces are recorded can be specified using the
+`--trace-event-categories` flag followed by a list of comma-separated
+category names. By default, the `node` and `v8` categories are enabled.
+
+```txt
+node --trace-events-enabled --trace-event-categories v8,node server.js
+```
+
+Running Node.js with tracing enabled will produce log files that can be opened
+in the [`chrome://tracing`](https://www.chromium.org/developers/how-tos/trace-event-profiling-tool)
+tab of Chrome.
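
On the C++ side, events in these categories are emitted through the macros this
change adds in `src/tracing/trace_event.h` (below). A minimal sketch of
core-side usage; the function is hypothetical and the snippet only compiles
inside the Node.js source tree, where the header and the tracing runtime exist:

```cpp
// Hypothetical core-internal usage of the macros defined in this patch.
#include "tracing/trace_event.h"

void DoSomethingCostly() {
  // Records a matched pair of BEGIN/END events in the "node" category,
  // scoped to this function body. It is a no-op unless tracing is enabled
  // and the "node" category is included.
  TRACE_EVENT0("node", "DoSomethingCostly");
  // ... expensive work ...
}
```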
diff --git a/node.gyp b/node.gyp
index 8f5510d44c..5669091c90 100644
--- a/node.gyp
+++ b/node.gyp
@@ -140,6 +140,10 @@
],
'sources': [
+ 'src/tracing/agent.cc',
+ 'src/tracing/node_trace_buffer.cc',
+ 'src/tracing/node_trace_writer.cc',
+ 'src/tracing/trace_event.cc',
'src/debug-agent.cc',
'src/async-wrap.cc',
'src/env.cc',
@@ -217,6 +221,7 @@
'src/stream_base.h',
'src/stream_base-inl.h',
'src/stream_wrap.h',
+ 'src/tracing/trace_event.h',
'src/tree.h',
'src/util.h',
'src/util-inl.h',
diff --git a/src/node.cc b/src/node.cc
index 3147a90d3e..1302dfed16 100644
--- a/src/node.cc
+++ b/src/node.cc
@@ -38,6 +38,7 @@
#include "req-wrap.h"
#include "req-wrap-inl.h"
#include "string_bytes.h"
+#include "tracing/agent.h"
#include "util.h"
#include "uv.h"
#if NODE_USE_V8_PLATFORM
@@ -150,6 +151,8 @@ static node_module* modpending;
static node_module* modlist_builtin;
static node_module* modlist_linked;
static node_module* modlist_addon;
+static bool trace_enabled = false;
+static const char* trace_enabled_categories = nullptr;
#if defined(NODE_HAVE_I18N_SUPPORT)
// Path to ICU data (for i18n / Intl)
@@ -186,6 +189,7 @@ static uv_async_t dispatch_debug_messages_async;
static Mutex node_isolate_mutex;
static v8::Isolate* node_isolate;
+static tracing::Agent* tracing_agent;
static node::DebugOptions debug_options;
@@ -194,6 +198,7 @@ static struct {
void Initialize(int thread_pool_size) {
platform_ = v8::platform::CreateDefaultPlatform(thread_pool_size);
V8::InitializePlatform(platform_);
+ tracing::TraceEventHelper::SetCurrentPlatform(platform_);
}
void PumpMessageLoop(Isolate* isolate) {
@@ -3368,6 +3373,9 @@ void SetupProcessObject(Environment* env,
void SignalExit(int signo) {
uv_tty_reset_mode();
+ if (trace_enabled) {
+ tracing_agent->Stop();
+ }
#ifdef __FreeBSD__
// FreeBSD has a nasty bug, see RegisterSignalHandler for details
struct sigaction sa;
@@ -3463,64 +3471,70 @@ static void PrintHelp() {
" node debug script.js [arguments] \n"
"\n"
"Options:\n"
- " -v, --version print Node.js version\n"
- " -e, --eval script evaluate script\n"
- " -p, --print evaluate script and print result\n"
- " -c, --check syntax check script without executing\n"
- " -i, --interactive always enter the REPL even if stdin\n"
- " does not appear to be a terminal\n"
- " -r, --require module to preload (option can be repeated)\n"
- " --no-deprecation silence deprecation warnings\n"
- " --trace-deprecation show stack traces on deprecations\n"
- " --throw-deprecation throw an exception anytime a deprecated "
+ " -v, --version print Node.js version\n"
+ " -e, --eval script evaluate script\n"
+ " -p, --print evaluate script and print result\n"
+ " -c, --check syntax check script without executing\n"
+ " -i, --interactive always enter the REPL even if stdin\n"
+ " does not appear to be a terminal\n"
+ " -r, --require module to preload (option can be "
+ "repeated)\n"
+ " --no-deprecation silence deprecation warnings\n"
+ " --trace-deprecation show stack traces on deprecations\n"
+ " --throw-deprecation throw an exception anytime a deprecated "
"function is used\n"
- " --no-warnings silence all process warnings\n"
- " --trace-warnings show stack traces on process warnings\n"
- " --trace-sync-io show stack trace when use of sync IO\n"
- " is detected after the first tick\n"
- " --track-heap-objects track heap object allocations for heap "
+ " --no-warnings silence all process warnings\n"
+ " --trace-warnings show stack traces on process warnings\n"
+ " --trace-sync-io show stack trace when use of sync IO\n"
+ " is detected after the first tick\n"
+ " --trace-events-enabled track trace events\n"
+ " --trace-event-categories comma separated list of trace event\n"
+ " categories to record\n"
+ " --track-heap-objects track heap object allocations for heap "
"snapshots\n"
- " --prof-process process v8 profiler output generated\n"
- " using --prof\n"
- " --zero-fill-buffers automatically zero-fill all newly allocated\n"
- " Buffer and SlowBuffer instances\n"
- " --v8-options print v8 command line options\n"
- " --v8-pool-size=num set v8's thread pool size\n"
+ " --prof-process process v8 profiler output generated\n"
+ " using --prof\n"
+ " --zero-fill-buffers automatically zero-fill all newly "
+ "allocated\n"
+ " Buffer and SlowBuffer instances\n"
+ " --v8-options print v8 command line options\n"
+ " --v8-pool-size=num set v8's thread pool size\n"
#if HAVE_OPENSSL
- " --tls-cipher-list=val use an alternative default TLS cipher list\n"
+ " --tls-cipher-list=val use an alternative default TLS cipher "
+ "list\n"
#if NODE_FIPS_MODE
- " --enable-fips enable FIPS crypto at startup\n"
- " --force-fips force FIPS crypto (cannot be disabled)\n"
+ " --enable-fips enable FIPS crypto at startup\n"
+ " --force-fips force FIPS crypto (cannot be disabled)\n"
#endif /* NODE_FIPS_MODE */
- " --openssl-config=path load OpenSSL configuration file from the\n"
- " specified path\n"
+ " --openssl-config=path load OpenSSL configuration file from the\n"
+ " specified path\n"
#endif /* HAVE_OPENSSL */
#if defined(NODE_HAVE_I18N_SUPPORT)
- " --icu-data-dir=dir set ICU data load path to dir\n"
- " (overrides NODE_ICU_DATA)\n"
+ " --icu-data-dir=dir set ICU data load path to dir\n"
+ " (overrides NODE_ICU_DATA)\n"
#if !defined(NODE_HAVE_SMALL_ICU)
- " note: linked-in ICU data is\n"
- " present.\n"
+ " note: linked-in ICU data is\n"
+ " present.\n"
#endif
- " --preserve-symlinks preserve symbolic links when resolving\n"
- " and caching modules.\n"
+ " --preserve-symlinks preserve symbolic links when resolving\n"
+ " and caching modules.\n"
#endif
"\n"
"Environment variables:\n"
#ifdef _WIN32
- "NODE_PATH ';'-separated list of directories\n"
+ "NODE_PATH ';'-separated list of directories\n"
#else
- "NODE_PATH ':'-separated list of directories\n"
+ "NODE_PATH ':'-separated list of directories\n"
#endif
- " prefixed to the module search path.\n"
- "NODE_DISABLE_COLORS set to 1 to disable colors in the REPL\n"
+ " prefixed to the module search path.\n"
+ "NODE_DISABLE_COLORS set to 1 to disable colors in the REPL\n"
#if defined(NODE_HAVE_I18N_SUPPORT)
- "NODE_ICU_DATA data path for ICU (Intl object) data\n"
+ "NODE_ICU_DATA data path for ICU (Intl object) data\n"
#if !defined(NODE_HAVE_SMALL_ICU)
- " (will extend linked-in data)\n"
+ " (will extend linked-in data)\n"
#endif
#endif
- "NODE_REPL_HISTORY path to the persistent REPL history file\n"
+ "NODE_REPL_HISTORY path to the persistent REPL history file\n"
"\n"
"Documentation can be found at https://nodejs.org/\n");
}
@@ -3626,6 +3640,16 @@ static void ParseArgs(int* argc,
trace_deprecation = true;
} else if (strcmp(arg, "--trace-sync-io") == 0) {
trace_sync_io = true;
+ } else if (strcmp(arg, "--trace-events-enabled") == 0) {
+ trace_enabled = true;
+ } else if (strcmp(arg, "--trace-event-categories") == 0) {
+ const char* categories = argv[index + 1];
+ if (categories == nullptr) {
+ fprintf(stderr, "%s: %s requires an argument\n", argv[0], arg);
+ exit(9);
+ }
+ args_consumed += 1;
+ trace_enabled_categories = categories;
} else if (strcmp(arg, "--track-heap-objects") == 0) {
track_heap_objects = true;
} else if (strcmp(arg, "--throw-deprecation") == 0) {
@@ -4454,10 +4478,20 @@ int Start(int argc, char** argv) {
#endif
v8_platform.Initialize(v8_thread_pool_size);
+ // Enable tracing when argv has --trace-events-enabled.
+ if (trace_enabled) {
+ fprintf(stderr, "Warning: Trace event is an experimental feature "
+ "and could change at any time.\n");
+ tracing_agent = new tracing::Agent();
+ tracing_agent->Start(v8_platform.platform_, trace_enabled_categories);
+ }
V8::Initialize();
v8_initialized = true;
const int exit_code =
Start(uv_default_loop(), argc, argv, exec_argc, exec_argv);
+ if (trace_enabled) {
+ tracing_agent->Stop();
+ }
v8_initialized = false;
V8::Dispose();
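
The `--trace-event-categories` branch above consumes the next `argv` slot as
the flag's value. The same two-token pattern, reduced to a standalone program
with hypothetical names (the exit code 9 mirrors the patch):

```cpp
// Standalone sketch of value-consuming flag parsing, as in ParseArgs() above.
#include <cstdio>
#include <cstdlib>
#include <cstring>

int main(int argc, char** argv) {
  const char* categories = nullptr;
  for (int i = 1; i < argc; i++) {
    if (strcmp(argv[i], "--trace-event-categories") == 0) {
      if (i + 1 >= argc) {  // no following token to use as the value
        fprintf(stderr, "%s: %s requires an argument\n", argv[0], argv[i]);
        exit(9);
      }
      categories = argv[++i];  // consume the following token as the value
    }
  }
  printf("categories: %s\n", categories ? categories : "(default: v8,node)");
  return 0;
}
```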
diff --git a/src/node.h b/src/node.h
index 873551fa33..098339da1a 100644
--- a/src/node.h
+++ b/src/node.h
@@ -41,6 +41,7 @@
#include "v8.h" // NOLINT(build/include_order)
#include "node_version.h" // NODE_MODULE_VERSION
+#include "tracing/trace_event.h"
#define NODE_MAKE_VERSION(major, minor, patch) \
((major) * 0x1000 + (minor) * 0x100 + (patch))
diff --git a/src/tracing/agent.cc b/src/tracing/agent.cc
new file mode 100644
index 0000000000..97a3e11a2c
--- /dev/null
+++ b/src/tracing/agent.cc
@@ -0,0 +1,72 @@
+#include "tracing/agent.h"
+
+#include <sstream>
+#include <string>
+
+#include "env-inl.h"
+#include "libplatform/libplatform.h"
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceConfig;
+
+Agent::Agent() {}
+
+void Agent::Start(v8::Platform* platform, const char* enabled_categories) {
+ platform_ = platform;
+
+ int err = uv_loop_init(&tracing_loop_);
+ CHECK_EQ(err, 0);
+
+ NodeTraceWriter* trace_writer = new NodeTraceWriter(&tracing_loop_);
+ TraceBuffer* trace_buffer = new NodeTraceBuffer(
+ NodeTraceBuffer::kBufferChunks, trace_writer, &tracing_loop_);
+
+ tracing_controller_ = new TracingController();
+
+ TraceConfig* trace_config = new TraceConfig();
+ if (enabled_categories) {
+ std::stringstream category_list(enabled_categories);
+ while (category_list.good()) {
+ std::string category;
+ getline(category_list, category, ',');
+ trace_config->AddIncludedCategory(category.c_str());
+ }
+ } else {
+ trace_config->AddIncludedCategory("v8");
+ trace_config->AddIncludedCategory("node");
+ }
+
+ // This thread should be created *after* async handles are created
+ // (within NodeTraceWriter and NodeTraceBuffer constructors).
+ // Otherwise the thread could shut down prematurely.
+ err = uv_thread_create(&thread_, ThreadCb, this);
+ CHECK_EQ(err, 0);
+
+ tracing_controller_->Initialize(trace_buffer);
+ tracing_controller_->StartTracing(trace_config);
+ v8::platform::SetTracingController(platform, tracing_controller_);
+}
+
+void Agent::Stop() {
+ if (!IsStarted()) {
+ return;
+ }
+ // Perform final Flush on TraceBuffer. We don't want the tracing controller
+ // to flush the buffer again on destruction of the V8::Platform.
+ tracing_controller_->StopTracing();
+ delete tracing_controller_;
+ // Thread should finish when the tracing loop is stopped.
+ uv_thread_join(&thread_);
+ v8::platform::SetTracingController(platform_, nullptr);
+}
+
+// static
+void Agent::ThreadCb(void* arg) {
+ Agent* agent = static_cast<Agent*>(arg);
+ uv_run(&agent->tracing_loop_, UV_RUN_DEFAULT);
+}
+
+} // namespace tracing
+} // namespace node
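
The category-list handling in `Agent::Start()` is plain `std::stringstream`
splitting on commas. The same technique in isolation, as a sketch rather than
Node code:

```cpp
// Splits a comma-separated category list the way Agent::Start() does.
#include <iostream>
#include <sstream>
#include <string>

int main() {
  std::stringstream category_list("v8,node,custom");
  while (category_list.good()) {
    std::string category;
    std::getline(category_list, category, ',');  // read up to the next comma
    std::cout << "category: " << category << '\n';
  }
  return 0;
}
```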
diff --git a/src/tracing/agent.h b/src/tracing/agent.h
new file mode 100644
index 0000000000..098f955192
--- /dev/null
+++ b/src/tracing/agent.h
@@ -0,0 +1,31 @@
+#ifndef SRC_TRACING_AGENT_H_
+#define SRC_TRACING_AGENT_H_
+
+#include "tracing/node_trace_buffer.h"
+#include "tracing/node_trace_writer.h"
+#include "uv.h"
+#include "v8.h"
+
+namespace node {
+namespace tracing {
+
+class Agent {
+ public:
+ explicit Agent();
+ void Start(v8::Platform* platform, const char* enabled_categories);
+ void Stop();
+
+ private:
+ bool IsStarted() { return platform_ != nullptr; }
+ static void ThreadCb(void* arg);
+
+ uv_thread_t thread_;
+ uv_loop_t tracing_loop_;
+ v8::Platform* platform_ = nullptr;
+ TracingController* tracing_controller_;
+};
+
+} // namespace tracing
+} // namespace node
+
+#endif // SRC_TRACING_AGENT_H_
diff --git a/src/tracing/node_trace_buffer.cc b/src/tracing/node_trace_buffer.cc
new file mode 100644
index 0000000000..4773e08325
--- /dev/null
+++ b/src/tracing/node_trace_buffer.cc
@@ -0,0 +1,177 @@
+#include "tracing/node_trace_buffer.h"
+
+namespace node {
+namespace tracing {
+
+InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
+ NodeTraceWriter* trace_writer, NodeTraceBuffer* external_buffer)
+ : id_(id), flushing_(false), max_chunks_(max_chunks),
+ trace_writer_(trace_writer), external_buffer_(external_buffer) {
+ chunks_.resize(max_chunks);
+}
+
+TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
+ Mutex::ScopedLock scoped_lock(mutex_);
+ // Create new chunk if last chunk is full or there is no chunk.
+ if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
+ auto& chunk = chunks_[total_chunks_++];
+ if (chunk) {
+ chunk->Reset(current_chunk_seq_++);
+ } else {
+ chunk.reset(new TraceBufferChunk(current_chunk_seq_++));
+ }
+ }
+ auto& chunk = chunks_[total_chunks_ - 1];
+ size_t event_index;
+ TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
+ *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
+ return trace_object;
+}
+
+TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
+ Mutex::ScopedLock scoped_lock(mutex_);
+ if (handle == 0) {
+ // A handle value of zero never has a trace event associated with it.
+ return nullptr;
+ }
+ size_t chunk_index, event_index;
+ uint32_t buffer_id, chunk_seq;
+ ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
+ if (buffer_id != id_ || chunk_index >= total_chunks_) {
+ // Either the chunk belongs to the other buffer, or is outside the current
+ // range of chunks loaded in memory (the latter being true suggests that
+ // the chunk has already been flushed and is no longer in memory.)
+ return nullptr;
+ }
+ auto& chunk = chunks_[chunk_index];
+ if (chunk->seq() != chunk_seq) {
+ // Chunk is no longer in memory.
+ return nullptr;
+ }
+ return chunk->GetEventAt(event_index);
+}
+
+void InternalTraceBuffer::Flush(bool blocking) {
+ {
+ Mutex::ScopedLock scoped_lock(mutex_);
+ if (total_chunks_ > 0) {
+ flushing_ = true;
+ for (size_t i = 0; i < total_chunks_; ++i) {
+ auto& chunk = chunks_[i];
+ for (size_t j = 0; j < chunk->size(); ++j) {
+ trace_writer_->AppendTraceEvent(chunk->GetEventAt(j));
+ }
+ }
+ total_chunks_ = 0;
+ flushing_ = false;
+ }
+ }
+ trace_writer_->Flush(blocking);
+}
+
+uint64_t InternalTraceBuffer::MakeHandle(
+ size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
+ return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
+ chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
+}
+
+void InternalTraceBuffer::ExtractHandle(
+ uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
+ uint32_t* chunk_seq, size_t* event_index) const {
+ *buffer_id = static_cast<uint32_t>(handle & 0x1);
+ handle >>= 1;
+ *chunk_seq = static_cast<uint32_t>(handle / Capacity());
+ size_t indices = handle % Capacity();
+ *chunk_index = indices / TraceBufferChunk::kChunkSize;
+ *event_index = indices % TraceBufferChunk::kChunkSize;
+}
+
+NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
+ NodeTraceWriter* trace_writer, uv_loop_t* tracing_loop)
+ : tracing_loop_(tracing_loop), trace_writer_(trace_writer),
+ buffer1_(max_chunks, 0, trace_writer, this),
+ buffer2_(max_chunks, 1, trace_writer, this) {
+ current_buf_.store(&buffer1_);
+
+ flush_signal_.data = this;
+ int err = uv_async_init(tracing_loop_, &flush_signal_, NonBlockingFlushSignalCb);
+ CHECK_EQ(err, 0);
+
+ exit_signal_.data = this;
+ err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
+ CHECK_EQ(err, 0);
+}
+
+NodeTraceBuffer::~NodeTraceBuffer() {
+ uv_async_send(&exit_signal_);
+ Mutex::ScopedLock scoped_lock(exit_mutex_);
+ while (!exited_) {
+ exit_cond_.Wait(scoped_lock);
+ }
+}
+
+TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
+ // If the buffer is full, attempt to perform a flush.
+ if (!TryLoadAvailableBuffer()) {
+ // Assign a value of zero as the trace event handle.
+ // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
+ // and will cause GetEventByHandle to return NULL if passed as an argument.
+ *handle = 0;
+ return nullptr;
+ }
+ return current_buf_.load()->AddTraceEvent(handle);
+}
+
+TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
+ return current_buf_.load()->GetEventByHandle(handle);
+}
+
+bool NodeTraceBuffer::Flush() {
+ buffer1_.Flush(true);
+ buffer2_.Flush(true);
+ return true;
+}
+
+// Attempts to set current_buf_ such that it references a buffer that can
+// write at least one trace event. If both buffers are unavailable, this
+// method returns false; otherwise it returns true.
+bool NodeTraceBuffer::TryLoadAvailableBuffer() {
+ InternalTraceBuffer* prev_buf = current_buf_.load();
+ if (prev_buf->IsFull()) {
+ uv_async_send(&flush_signal_); // trigger flush on a separate thread
+ InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
+ &buffer2_ : &buffer1_;
+ if (!other_buf->IsFull()) {
+ current_buf_.store(other_buf);
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+// static
+void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
+ NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
+ if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
+ buffer->buffer1_.Flush(false);
+ }
+ if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
+ buffer->buffer2_.Flush(false);
+ }
+}
+
+// static
+void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
+ NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
+ uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_), nullptr);
+ uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_), [](uv_handle_t* signal) {
+ NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
+ Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
+ buffer->exited_ = true;
+ buffer->exit_cond_.Signal(scoped_lock);
+ });
+}
+
+} // namespace tracing
+} // namespace node
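
`MakeHandle()` and `ExtractHandle()` pack four values into a single
`uint64_t`: the low bit carries the buffer id (0 or 1), and the remaining bits
encode `(chunk_seq, chunk_index, event_index)` in mixed radix. A round-trip
sketch; `kChunkSize = 64` is an assumed stand-in for
`TraceBufferChunk::kChunkSize`:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr size_t kChunkSize = 64;    // assumed TraceBufferChunk::kChunkSize
constexpr size_t kMaxChunks = 1024;  // NodeTraceBuffer::kBufferChunks
constexpr size_t kCapacity = kMaxChunks * kChunkSize;

uint64_t MakeHandle(uint32_t id, uint32_t chunk_seq, size_t chunk_index,
                    size_t event_index) {
  return ((static_cast<uint64_t>(chunk_seq) * kCapacity +
           chunk_index * kChunkSize + event_index) << 1) + id;
}

void ExtractHandle(uint64_t handle, uint32_t* id, uint32_t* chunk_seq,
                   size_t* chunk_index, size_t* event_index) {
  *id = static_cast<uint32_t>(handle & 0x1);  // low bit: which buffer
  handle >>= 1;
  *chunk_seq = static_cast<uint32_t>(handle / kCapacity);
  size_t indices = handle % kCapacity;
  *chunk_index = indices / kChunkSize;
  *event_index = indices % kChunkSize;
}

int main() {
  uint64_t handle = MakeHandle(1, 42, 7, 13);
  uint32_t id, seq;
  size_t chunk, event;
  ExtractHandle(handle, &id, &seq, &chunk, &event);
  assert(id == 1 && seq == 42 && chunk == 7 && event == 13);
  printf("round-trip ok\n");
  return 0;
}
```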
diff --git a/src/tracing/node_trace_buffer.h b/src/tracing/node_trace_buffer.h
new file mode 100644
index 0000000000..619799fdb2
--- /dev/null
+++ b/src/tracing/node_trace_buffer.h
@@ -0,0 +1,89 @@
+#ifndef SRC_NODE_TRACE_BUFFER_H_
+#define SRC_NODE_TRACE_BUFFER_H_
+
+#include "node_mutex.h"
+#include "tracing/node_trace_writer.h"
+#include "libplatform/v8-tracing.h"
+
+#include <atomic>
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceBuffer;
+using v8::platform::tracing::TraceBufferChunk;
+using v8::platform::tracing::TraceObject;
+
+// forward declaration
+class NodeTraceBuffer;
+
+class InternalTraceBuffer {
+ public:
+ InternalTraceBuffer(size_t max_chunks, uint32_t id,
+ NodeTraceWriter* trace_writer,
+ NodeTraceBuffer* external_buffer);
+
+ TraceObject* AddTraceEvent(uint64_t* handle);
+ TraceObject* GetEventByHandle(uint64_t handle);
+ void Flush(bool blocking);
+ bool IsFull() const {
+ return total_chunks_ == max_chunks_ && chunks_[total_chunks_ - 1]->IsFull();
+ }
+ bool IsFlushing() const {
+ return flushing_;
+ }
+
+ private:
+ uint64_t MakeHandle(size_t chunk_index, uint32_t chunk_seq,
+ size_t event_index) const;
+ void ExtractHandle(uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
+ uint32_t* chunk_seq, size_t* event_index) const;
+ size_t Capacity() const { return max_chunks_ * TraceBufferChunk::kChunkSize; }
+
+ Mutex mutex_;
+ bool flushing_;
+ size_t max_chunks_;
+ NodeTraceWriter* trace_writer_;
+ NodeTraceBuffer* external_buffer_;
+ std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
+ size_t total_chunks_ = 0;
+ uint32_t current_chunk_seq_ = 1;
+ uint32_t id_;
+};
+
+class NodeTraceBuffer : public TraceBuffer {
+ public:
+ NodeTraceBuffer(size_t max_chunks, NodeTraceWriter* trace_writer,
+ uv_loop_t* tracing_loop);
+ ~NodeTraceBuffer();
+
+ TraceObject* AddTraceEvent(uint64_t* handle) override;
+ TraceObject* GetEventByHandle(uint64_t handle) override;
+ bool Flush() override;
+
+ static const size_t kBufferChunks = 1024;
+
+ private:
+ bool TryLoadAvailableBuffer();
+ static void NonBlockingFlushSignalCb(uv_async_t* signal);
+ static void ExitSignalCb(uv_async_t* signal);
+
+ uv_loop_t* tracing_loop_;
+ uv_async_t flush_signal_;
+ uv_async_t exit_signal_;
+ bool exited_ = false;
+ // Used exclusively for exit logic.
+ Mutex exit_mutex_;
+ // Used to wait until async handles have been closed.
+ ConditionVariable exit_cond_;
+ std::unique_ptr<NodeTraceWriter> trace_writer_;
+ // TODO: Change std::atomic to something less contentious.
+ std::atomic<InternalTraceBuffer*> current_buf_;
+ InternalTraceBuffer buffer1_;
+ InternalTraceBuffer buffer2_;
+};
+
+} // namespace tracing
+} // namespace node
+
+#endif // SRC_NODE_TRACE_BUFFER_H_
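
`current_buf_`, `buffer1_`, and `buffer2_` form a double buffer: when the
active buffer fills, `TryLoadAvailableBuffer()` flips the atomic pointer to
the other buffer while the full one is flushed on the tracing thread. A
reduced sketch of just the swap (the `uv_async_send()` flush trigger is
elided):

```cpp
#include <atomic>
#include <cassert>

struct Buffer { bool full = false; };

static Buffer buffer1, buffer2;
static std::atomic<Buffer*> current_buf{&buffer1};

// Mirrors TryLoadAvailableBuffer(): returns false only when both
// buffers are full, in which case the trace event is dropped.
bool TryLoadAvailableBuffer() {
  Buffer* prev_buf = current_buf.load();
  if (prev_buf->full) {
    // Real code: uv_async_send() here wakes the tracing thread to flush.
    Buffer* other_buf = prev_buf == &buffer1 ? &buffer2 : &buffer1;
    if (!other_buf->full) {
      current_buf.store(other_buf);
    } else {
      return false;
    }
  }
  return true;
}

int main() {
  buffer1.full = true;  // simulate the active buffer filling up
  assert(TryLoadAvailableBuffer());
  assert(current_buf.load() == &buffer2);  // writers flipped to buffer2
  return 0;
}
```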
diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc
new file mode 100644
index 0000000000..41753c5e65
--- /dev/null
+++ b/src/tracing/node_trace_writer.cc
@@ -0,0 +1,185 @@
+#include "tracing/node_trace_writer.h"
+
+#include <string.h>
+#include <fcntl.h>
+
+#include "util.h"
+
+namespace node {
+namespace tracing {
+
+NodeTraceWriter::NodeTraceWriter(uv_loop_t* tracing_loop)
+ : tracing_loop_(tracing_loop) {
+ flush_signal_.data = this;
+ int err = uv_async_init(tracing_loop_, &flush_signal_, FlushSignalCb);
+ CHECK_EQ(err, 0);
+
+ exit_signal_.data = this;
+ err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
+ CHECK_EQ(err, 0);
+}
+
+void NodeTraceWriter::WriteSuffix() {
+ // If our final log file has traces, then end the file appropriately.
+ // This means that if no trace events are recorded, then no trace file is
+ // produced.
+ bool should_flush = false;
+ {
+ Mutex::ScopedLock scoped_lock(stream_mutex_);
+ if (total_traces_ > 0) {
+ total_traces_ = 0; // so we don't write it again in FlushPrivate
+ // Appends "]}" to stream_.
+ delete json_trace_writer_;
+ should_flush = true;
+ }
+ }
+ if (should_flush) {
+ Flush(true);
+ }
+}
+
+NodeTraceWriter::~NodeTraceWriter() {
+ WriteSuffix();
+ uv_fs_t req;
+ int err;
+ if (fd_ != -1) {
+ err = uv_fs_close(tracing_loop_, &req, fd_, nullptr);
+ CHECK_EQ(err, 0);
+ uv_fs_req_cleanup(&req);
+ }
+ uv_async_send(&exit_signal_);
+ Mutex::ScopedLock scoped_lock(request_mutex_);
+ while (!exited_) {
+ exit_cond_.Wait(scoped_lock);
+ }
+}
+
+void NodeTraceWriter::OpenNewFileForStreaming() {
+ ++file_num_;
+ uv_fs_t req;
+ std::ostringstream log_file;
+ log_file << "node_trace." << file_num_ << ".log";
+ fd_ = uv_fs_open(tracing_loop_, &req, log_file.str().c_str(),
+ O_CREAT | O_WRONLY | O_TRUNC, 0644, nullptr);
+ CHECK_NE(fd_, -1);
+ uv_fs_req_cleanup(&req);
+}
+
+void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
+ Mutex::ScopedLock scoped_lock(stream_mutex_);
+ // If this is the first trace event, open a new file for streaming.
+ if (total_traces_ == 0) {
+ OpenNewFileForStreaming();
+ // Constructing a new JSONTraceWriter object appends "{\"traceEvents\":["
+ // to stream_.
+ // In other words, the constructor initializes the serialization stream
+ // to a state where we can start writing trace events to it.
+ // Repeatedly constructing and destroying json_trace_writer_ allows
+ // us to use V8's JSON writer instead of implementing our own.
+ json_trace_writer_ = TraceWriter::CreateJSONTraceWriter(stream_);
+ }
+ ++total_traces_;
+ json_trace_writer_->AppendTraceEvent(trace_event);
+}
+
+void NodeTraceWriter::FlushPrivate() {
+ std::string str;
+ int highest_request_id;
+ {
+ Mutex::ScopedLock stream_scoped_lock(stream_mutex_);
+ if (total_traces_ >= kTracesPerFile) {
+ total_traces_ = 0;
+ // Destroying the member JSONTraceWriter object appends "]}" to
+ // stream_ - in other words, ending a JSON file.
+ delete json_trace_writer_;
+ }
+ // str() makes a copy of the contents of the stream.
+ str = stream_.str();
+ stream_.str("");
+ stream_.clear();
+ }
+ {
+ Mutex::ScopedLock request_scoped_lock(request_mutex_);
+ highest_request_id = num_write_requests_;
+ }
+ WriteToFile(std::move(str), highest_request_id);
+}
+
+void NodeTraceWriter::FlushSignalCb(uv_async_t* signal) {
+ NodeTraceWriter* trace_writer = static_cast<NodeTraceWriter*>(signal->data);
+ trace_writer->FlushPrivate();
+}
+
+// TODO: Remove? Because of WriteSuffix it no longer matters whether
+// blocking is true or false, so changing the API may not be necessary.
+void NodeTraceWriter::Flush() {
+ Flush(true);
+}
+
+void NodeTraceWriter::Flush(bool blocking) {
+ Mutex::ScopedLock scoped_lock(request_mutex_);
+ if (!json_trace_writer_) {
+ return;
+ }
+ int request_id = ++num_write_requests_;
+ int err = uv_async_send(&flush_signal_);
+ CHECK_EQ(err, 0);
+ if (blocking) {
+ // Wait until data associated with this request id has been written to disk.
+ // This guarantees that data from all earlier requests have also been
+ // written.
+ while (request_id > highest_request_id_completed_) {
+ request_cond_.Wait(scoped_lock);
+ }
+ }
+}
+
+void NodeTraceWriter::WriteToFile(std::string&& str, int highest_request_id) {
+ WriteRequest* write_req = new WriteRequest();
+ write_req->str = std::move(str);
+ write_req->writer = this;
+ write_req->highest_request_id = highest_request_id;
+ uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(write_req->str.c_str()),
+ write_req->str.length());
+ request_mutex_.Lock();
+ // Manage a queue of WriteRequest objects because the behavior of uv_write
+ // is undefined if the same WriteRequest object is used more than once
+ // between WriteCb calls. In addition, this allows us to keep track of the
+ // id of the latest write request that has actually been completed.
+ write_req_queue_.push(write_req);
+ request_mutex_.Unlock();
+ int err = uv_fs_write(tracing_loop_, reinterpret_cast<uv_fs_t*>(write_req),
+ fd_, &uv_buf, 1, -1, WriteCb);
+ CHECK_EQ(err, 0);
+}
+
+void NodeTraceWriter::WriteCb(uv_fs_t* req) {
+ WriteRequest* write_req = reinterpret_cast<WriteRequest*>(req);
+ CHECK_GE(write_req->req.result, 0);
+
+ NodeTraceWriter* writer = write_req->writer;
+ int highest_request_id = write_req->highest_request_id;
+ {
+ Mutex::ScopedLock scoped_lock(writer->request_mutex_);
+ CHECK_EQ(write_req, writer->write_req_queue_.front());
+ writer->write_req_queue_.pop();
+ writer->highest_request_id_completed_ = highest_request_id;
+ writer->request_cond_.Broadcast(scoped_lock);
+ }
+ delete write_req;
+}
+
+// static
+void NodeTraceWriter::ExitSignalCb(uv_async_t* signal) {
+ NodeTraceWriter* trace_writer = static_cast<NodeTraceWriter*>(signal->data);
+ uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->flush_signal_), nullptr);
+ uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->exit_signal_), [](uv_handle_t* signal) {
+ NodeTraceWriter* trace_writer = static_cast<NodeTraceWriter*>(signal->data);
+ Mutex::ScopedLock scoped_lock(trace_writer->request_mutex_);
+ trace_writer->exited_ = true;
+ trace_writer->exit_cond_.Signal(scoped_lock);
+ });
+}
+
+} // namespace tracing
+} // namespace node
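
The writer's threading model, a dedicated `uv_loop_t` run on its own thread
and woken by a `uv_async_t` to perform `uv_fs_*` writes, can be reproduced
standalone against plain libuv. A minimal sketch assuming only libuv; the file
name and payload are placeholders:

```cpp
#include <uv.h>
#include <fcntl.h>
#include <cstring>
#include <cstdio>

static uv_loop_t loop;
static uv_async_t flush_signal;
static uv_buf_t buf;
static int fd;

static void FlushCb(uv_async_t*) {
  // Runs on the tracing thread. A nullptr callback makes the uv_fs_*
  // call synchronous, which is fine on this dedicated thread.
  uv_fs_t write_req;
  uv_fs_write(&loop, &write_req, fd, &buf, 1, -1, nullptr);
  uv_fs_req_cleanup(&write_req);
  // Closing the only handle lets uv_run() return and the thread exit.
  uv_close(reinterpret_cast<uv_handle_t*>(&flush_signal), nullptr);
}

static void ThreadCb(void*) { uv_run(&loop, UV_RUN_DEFAULT); }

int main() {
  uv_loop_init(&loop);
  uv_fs_t open_req;
  fd = uv_fs_open(&loop, &open_req, "demo_trace.log",
                  O_CREAT | O_WRONLY | O_TRUNC, 0644, nullptr);
  uv_fs_req_cleanup(&open_req);

  static char data[] = "written from the tracing thread\n";
  buf = uv_buf_init(data, sizeof(data) - 1);

  uv_async_init(&loop, &flush_signal, FlushCb);
  uv_thread_t thread;
  uv_thread_create(&thread, ThreadCb, nullptr);
  uv_async_send(&flush_signal);  // analogous to NodeTraceWriter::Flush()
  uv_thread_join(&thread);

  uv_fs_t close_req;
  uv_fs_close(&loop, &close_req, fd, nullptr);
  uv_fs_req_cleanup(&close_req);
  uv_loop_close(&loop);
  return 0;
}
```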
diff --git a/src/tracing/node_trace_writer.h b/src/tracing/node_trace_writer.h
new file mode 100644
index 0000000000..5813db0ab1
--- /dev/null
+++ b/src/tracing/node_trace_writer.h
@@ -0,0 +1,74 @@
+#ifndef SRC_NODE_TRACE_WRITER_H_
+#define SRC_NODE_TRACE_WRITER_H_
+
+#include <sstream>
+#include <queue>
+
+#include "node_mutex.h"
+#include "libplatform/v8-tracing.h"
+#include "uv.h"
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceObject;
+using v8::platform::tracing::TraceWriter;
+using v8::platform::tracing::TracingController;
+
+class NodeTraceWriter : public TraceWriter {
+ public:
+ explicit NodeTraceWriter(uv_loop_t* tracing_loop);
+ ~NodeTraceWriter();
+
+ void AppendTraceEvent(TraceObject* trace_event) override;
+ void Flush() override;
+ void Flush(bool blocking);
+
+ static const int kTracesPerFile = 1 << 19;
+
+ private:
+ struct WriteRequest {
+ uv_fs_t req;
+ NodeTraceWriter* writer;
+ std::string str;
+ int highest_request_id;
+ };
+
+ static void WriteCb(uv_fs_t* req);
+ void OpenNewFileForStreaming();
+ void WriteToFile(std::string&& str, int highest_request_id);
+ void WriteSuffix();
+ static void FlushSignalCb(uv_async_t* signal);
+ void FlushPrivate();
+ static void ExitSignalCb(uv_async_t* signal);
+
+ uv_loop_t* tracing_loop_;
+ // Triggers callback to initiate writing the contents of stream_ to disk.
+ uv_async_t flush_signal_;
+ // Triggers callback to close async objects, ending the tracing thread.
+ uv_async_t exit_signal_;
+ // Prevents concurrent R/W on state related to serialized trace data
+ // before it's written to disk, namely stream_ and total_traces_.
+ Mutex stream_mutex_;
+ // Prevents concurrent R/W on state related to write requests.
+ Mutex request_mutex_;
+ // Allows blocking calls to Flush() to wait on a condition for
+ // trace events to be written to disk.
+ ConditionVariable request_cond_;
+ // Used to wait until async handles have been closed.
+ ConditionVariable exit_cond_;
+ int fd_ = -1;
+ std::queue<WriteRequest*> write_req_queue_;
+ int num_write_requests_ = 0;
+ int highest_request_id_completed_ = 0;
+ int total_traces_ = 0;
+ int file_num_ = 0;
+ std::ostringstream stream_;
+ TraceWriter* json_trace_writer_ = nullptr;
+ bool exited_ = false;
+};
+
+} // namespace tracing
+} // namespace node
+
+#endif // SRC_NODE_TRACE_WRITER_H_
diff --git a/src/tracing/trace_event.cc b/src/tracing/trace_event.cc
new file mode 100644
index 0000000000..b83cae6e05
--- /dev/null
+++ b/src/tracing/trace_event.cc
@@ -0,0 +1,17 @@
+#include "tracing/trace_event.h"
+
+namespace node {
+namespace tracing {
+
+v8::Platform* platform_ = nullptr;
+
+void TraceEventHelper::SetCurrentPlatform(v8::Platform* platform) {
+ platform_ = platform;
+}
+
+v8::Platform* TraceEventHelper::GetCurrentPlatform() {
+ return platform_;
+}
+
+} // namespace tracing
+} // namespace node
diff --git a/src/tracing/trace_event.h b/src/tracing/trace_event.h
new file mode 100644
index 0000000000..a035fd5150
--- /dev/null
+++ b/src/tracing/trace_event.h
@@ -0,0 +1,1710 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_TRACING_TRACE_EVENT_H_
+#define SRC_TRACING_TRACE_EVENT_H_
+
+#include <stddef.h>
+
+#include "v8-platform.h"
+
+// This header file defines implementation details of how the trace macros in
+// trace_event_common.h collect and store trace events. Anything not
+// implementation-specific should go in trace_macros_common.h instead of here.
+
+// From v8/base/trace_event/common/trace_event_common.h
+
+// This header file defines the set of trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// events to some other universe, you can copy-and-paste this file as well as
+// trace_event.h, modifying the macros contained there as necessary for the
+// target platform. The end result is that multiple libraries can funnel events
+// through to a shared trace event collector.
+
+// IMPORTANT: To avoid conflicts, if you need to modify this file for a library,
+// land your change in base/ first, and then copy-and-paste it.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Begin and end of function calls
+// Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
+// TRACE_EVENT_SCOPE_THREAD)
+//
+// It is often the case that one trace may belong in multiple categories at the
+// same time. The first argument to the trace can be a comma-separated list of
+// categories, forming a category group, like:
+//
+// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
+//
+// We can enable/disable tracing of OnMouseOver by enabling/disabling either
+// category.
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+// doSomethingCostly()
+// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+// "howMuch", howMuch);
+// ...
+// }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+// [single threaded sender code]
+// static int send_count = 0;
+// ++send_count;
+// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+// Send(new MyMessage(send_count));
+// [receive code]
+// void OnMyMessage(send_count) {
+// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+// }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+// class MyTracedClass {
+// public:
+// MyTracedClass() {
+// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// }
+// ~MyTracedClass() {
+// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+// }
+// }
+//
+// Trace event also supports counters, which is a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category_group, name, and arg_names. Thus, the following code will
+// cause problems:
+// char* str = strdup("importantName");
+// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
+// free(str); // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+// The |arg_values|, when used, are always deep copied with the _COPY
+// macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+// TRACE_EVENT1("category", "name",
+// "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+// TRACE_EVENT1("category", "name",
+// "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+// TRACE_EVENT1("category", "name",
+// "arg1", std::string("string will be copied"));
+//
+//
+// Convertable notes:
+// Converting a large data type to a string can be costly. To help with this,
+// the trace framework provides an interface ConvertableToTraceFormat. If you
+// inherit from it and implement the AppendAsTraceFormat method the trace
+// framework will call back to your object to convert a trace output time. This
+// means, if the category for the event is disabled, the conversion will not
+// happen.
+//
+// class MyData : public base::trace_event::ConvertableToTraceFormat {
+// public:
+// MyData() {}
+// void AppendAsTraceFormat(std::string* out) const override {
+// out->append("{\"foo\":1}");
+// }
+// private:
+// ~MyData() override {}
+// DISALLOW_COPY_AND_ASSIGN(MyData);
+// };
+//
+// TRACE_EVENT1("foo", "bar", "data",
+// std::unique_ptr<ConvertableToTraceFormat>(new MyData()));
+//
+// The trace framework will take ownership of the passed pointer and it will
+// be free'd when the trace buffer is flushed.
+//
+// Note, we only do the conversion when the buffer is flushed, so the provided
+// data object should not be modified after it's passed to the trace framework.
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_group_enabled flag is checked. This is an unsigned char,
+// and not intended to be multithread safe. It optimizes access to
+// AddTraceEvent
+// not intended to be multithread safe. It optimizes access to AddTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling AddTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because AddTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
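The fast path described in the thread-safety notes above can be shown in
miniature: a function-local static caches the pointer to a category's enabled
byte, so a disabled trace point costs one load and one branch. A
self-contained sketch with illustrative names, not V8's actual API:

```cpp
#include <cstdint>
#include <cstdio>

static uint8_t g_node_category_enabled = 0;  // flipped by the controller

static const uint8_t* GetCategoryGroupEnabled(const char* /*category*/) {
  return &g_node_category_enabled;  // real impl: lookup under a lock
}

static void TracePoint() {
  // Initialized once; thread-safe since C++11 ("magic statics").
  static const uint8_t* enabled = GetCategoryGroupEnabled("node");
  if (*enabled)  // cheap byte test; the real AddTraceEvent re-checks under lock
    std::printf("event recorded\n");
}

int main() {
  TracePoint();                  // tracing off: no output
  g_node_category_enabled = 1;   // controller enables the category
  TracePoint();                  // now the event is recorded
  return 0;
}
```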
+#if defined(TRACE_EVENT0)
+#error "Another copy of this file has already been included."
+#endif
+
+// This will mark the trace event as disabled by default. The user will need
+// to explicitly enable the event.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
+// included in official builds.
+
+#if OFFICIAL_BUILD
+#undef TRACING_IS_OFFICIAL_BUILD
+#define TRACING_IS_OFFICIAL_BUILD 1
+#elif !defined(TRACING_IS_OFFICIAL_BUILD)
+#define TRACING_IS_OFFICIAL_BUILD 0
+#endif
+
+#if TRACING_IS_OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
+ TRACE_EVENT0(category_group, name)
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ TRACE_EVENT_INSTANT0(category_group, name, scope)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#endif
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_NONE | scope)
+
+// Syntactic sugar for sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() \
+ TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
+ TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_COPY_MARK(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+
+#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_COPY)
+
+// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+
+// Similar to TRACE_COUNTERx, but with a custom |timestamp| provided.
+#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "value", static_cast<int>(value))
+
+#define TRACE_COUNTER_WITH_TIMESTAMP2(category_group, name, timestamp, \
+ value1_name, value1_val, value2_name, \
+ value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE, value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
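+// Illustrative usage (hypothetical; |this| keeps per-instance counters with
+// the same name distinct):
+//   TRACE_COUNTER_ID1("node", "queue_depth", this, depth);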
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
+ value1_val, value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// TRACE_EVENT_SAMPLE_* events are injected by the sampling profiler.
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP0(category_group, name, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1( \
+ category_group, name, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP2(category_group, name, \
+ thread_id, timestamp, \
+ arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// ASYNC_STEP_* APIs should only be used by legacy code. New code should
+// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
+// event.
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+//
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
+// annotate the block following the call. The ASYNC_STEP_PAST macro will
+// annotate the block prior to the call. Note that any particular event must use
+// only STEP_INTO or STEP_PAST macros; they cannot mix and match. When the
+// operation completes, call ASYNC_END.
+//
+// An ASYNC trace typically occurs on a single thread (if not, it will only be
+// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
+// operation must use the same |name| and |id|. Each step can have its own
+// args.
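+//
+// Illustrative lifecycle (hypothetical call sites; note that |name| and |id|
+// stay the same across all three calls):
+//   TRACE_EVENT_ASYNC_BEGIN0("node", "Request", request_id);
+//   TRACE_EVENT_ASYNC_STEP_INTO0("node", "Request", request_id, "parse");
+//   TRACE_EVENT_ASYNC_END0("node", "Request", request_id);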
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |timestamp|
+// provided.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP1( \
+ category_group, name, id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_PAST events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Similar to TRACE_EVENT_ASYNC_STEP_INTOx but with a custom |timestamp|
+// provided.
+#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \
+ step, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ "step", step)
+
+// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_INTO events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |timestamp| provided.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP1(category_group, name, id, \
+ timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
+// considered as a match if their category_group, name and id all match.
+// - |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// - |id| is used to match a child NESTABLE_ASYNC event with its parent
+// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
+// be logged using the same id and category_group.
+//
+// An unmatched NESTABLE_ASYNC_END event will be parsed as an event that
+// starts at the first NESTABLE_ASYNC event of that id, and an unmatched
+// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the
+// last NESTABLE_ASYNC event of that id. Corresponding warning messages for
+// unmatched events will be shown in the analysis view.
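+//
+// Illustrative nesting (hypothetical; inner and outer events share the same
+// |id| and category so the UI can reconstruct the tree):
+//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("node", "Request", id);
+//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("node", "DnsLookup", id);
+//   TRACE_EVENT_NESTABLE_ASYNC_END0("node", "DnsLookup", id);
+//   TRACE_EVENT_NESTABLE_ASYNC_END0("node", "Request", id);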
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+                                   category_group, name, id, \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \
+                                        arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+                                   category_group, name, id, \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with one associated argument. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. A FLOW operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
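+//
+// Illustrative flow (hypothetical; FLOW_BEGIN where the task is posted,
+// FLOW_END where it later runs, matched by |id|):
+//   TRACE_EVENT_FLOW_BEGIN0("node", "PostTask", task_id);  // producer side
+//   TRACE_EVENT_FLOW_END0("node", "PostTask", task_id);    // consumer side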
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, \
+ TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+
+// Special trace event macro to trace task execution with the location where it
+// was posted from.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+ INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
+
+// TRACE_EVENT_METADATA* events are information related to other
+// injected events, not events in their own right.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
+// Records a clock sync event.
+#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id) \
+ INTERNAL_TRACE_EVENT_ADD( \
+ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
+ TRACE_EVENT_FLAG_NONE, "sync_id", sync_id)
+#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
+ issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \
+ "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue())
+
+// Macros to track the life time and value of arbitrary client objects.
+// See also TraceTrackableObject.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+ snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
+ category_group, name, id, timestamp, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+// Records entering and leaving trace event contexts. |category_group| and
+// |name| specify the context category and type. |context| is a
+// snapshotted context object id.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
+ TRACE_ID_DONT_MANGLE(context))
+
+// Macro to specify that two trace IDs are identical. For example,
+// TRACE_BIND_IDS(
+// "category", "name",
+// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
+// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
+// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
+// the current process have the same ID as events with ID
+// ("blink::ResourceFetcher::FetchRequest", 0x2000).
+#define TRACE_BIND_IDS(category_group, name, id, bind_id) \
+ INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id);
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
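+// Illustrative usage (hypothetical; skips computing expensive arguments when
+// the category group is disabled):
+//   bool enabled;
+//   TRACE_EVENT_CATEGORY_GROUP_ENABLED("node", &enabled);
+//   if (enabled) {
+//     // ... build expensive args and emit trace events ...
+//   }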
+
+// Macro to explicitly warm up a given category group. This could be useful in
+// cases where we want to initialize a category group before any trace events
+// for that category group are reported. For example, to have a category group
+// always show up in the "record categories" list for manually selecting
+// settings in about://tracing.
+#define TRACE_EVENT_WARMUP_CATEGORY(category_group) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+ do { \
+ static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
+ int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
+ if (num_traces_recorded != -1 && \
+ num_traces_recorded != \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = num_traces_recorded; \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
+#define TRACE_EVENT_PHASE_MARK ('R')
+#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
+#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
+#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
+#define TRACE_EVENT_PHASE_BIND_IDS ('=')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK \
+ (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+ TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
+
+
+// The pointer returned from GetCategoryGroupEnabled() points to a
+// value with zero or more of the following bits. Used in this header only.
+// The TRACE_EVENT macros should only use the value as a bool.
+// These values must be in sync with macro values in trace_log.h in
+// Chromium.
+enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
+ // Category group enabled by SetEventCallbackEnabled().
+ kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
+ // Category group enabled to export events to ETW.
+ kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3,
+};
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) node::tracing::TraceStringWithCopy(str)
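+// Illustrative usage (hypothetical; the std::string is a temporary, so its
+// characters must be copied rather than retained):
+//   std::string url = BuildUrl();  // BuildUrl() is a hypothetical helper.
+//   TRACE_EVENT_ASYNC_BEGIN1("node", "Request", id,
+//                            "url", TRACE_STR_COPY(url.c_str()));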
+
+// By default, uint64 ID argument values are not mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) node::tracing::TraceID::ForceMangle(id)
+
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) node::tracing::TraceID::DontMangle(id)
+
+// By default, trace IDs are eventually converted to a single 64-bit number. Use
+// this macro to add a scope string.
+#define TRACE_ID_WITH_SCOPE(scope, id) \
+ node::tracing::TraceID::WithScope(scope, id)
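+// Illustrative usage (hypothetical scope string and id):
+//   TRACE_EVENT_ASYNC_BEGIN0("node", "Request",
+//                            TRACE_ID_WITH_SCOPE("node::Request", 0x1000));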
+
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |bucket_number| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
+ name) \
+ node::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \
+ category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+ node::tracing::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// { // The sampling state is set within this scope.
+// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
+// ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
+ name) \
+ node::tracing::TraceEventSamplingStateScope<bucket_number> \
+ traceEventSamplingScope(category "\0" name);
+
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (kEnabledForRecording_CategoryGroupEnabledFlags | \
+ kEnabledForEventCallback_CategoryGroupEnabledFlags)
+
+// The following macro has no implementation, but it needs to exist since
+// it gets called from scoped trace events. It cannot call UNIMPLEMENTED()
+// since an empty implementation is a valid one.
+#define INTERNAL_TRACE_MEMORY(category, name)
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const uint8_t*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ node::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->GetCategoryGroupEnabled
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED UNIMPLEMENTED()
+
+// Add a trace event to the platform tracing system.
+// uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const uint8_t* category_group_enabled,
+// const char* name,
+// const char* scope,
+// uint64_t id,
+// uint64_t bind_id,
+// int num_args,
+// const char** arg_names,
+// const uint8_t* arg_types,
+// const uint64_t* arg_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ node::tracing::TraceEventHelper::GetCurrentPlatform()->AddTraceEvent
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ node::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->UpdateTraceEventDuration
+
+// Defines atomic operations used internally by the tracing system.
+#define TRACE_EVENT_API_ATOMIC_WORD intptr_t
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) (var)
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) (var) = (value)
+
+// The thread buckets for the sampling profiler.
+extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
+ g_trace_state[thread_bucket]
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
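+// For example, INTERNAL_TRACE_EVENT_UID(atomic) on line 42 expands to
+// trace_event_unique_atomic42.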
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+// TODO(fmeawad): This implementation contradicts that we can have a different
+// configuration for each isolate,
+// https://code.google.com/p/v8/issues/detail?id=4563
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \
+ if (!category_group_enabled) { \
+ category_group_enabled = \
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ TRACE_EVENT_API_ATOMIC_STORE( \
+ atomic, reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+ category_group_enabled)); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+ const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ node::tracing::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ node::tracing::kGlobalScope, node::tracing::kNoId, \
+ node::tracing::kNoId, flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ node::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ uint64_t h = node::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ node::tracing::kGlobalScope, node::tracing::kNoId, \
+ node::tracing::kNoId, TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
+ }
+
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
+ bind_id, flow_flags, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ node::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flow_flags; \
+ node::tracing::TraceID trace_event_bind_id(bind_id, \
+ &trace_event_flags); \
+ uint64_t h = node::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ node::tracing::kGlobalScope, node::tracing::kNoId, \
+ trace_event_bind_id.raw_id(), trace_event_flags, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
+ }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ node::tracing::TraceID trace_event_trace_id(id, \
+ &trace_event_flags); \
+ node::tracing::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ node::tracing::kNoId, trace_event_flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Adds a trace event with a given timestamp. Not Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
+ timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Adds a trace event with a given id and timestamp. Not Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
+ phase, category_group, name, id, timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Adds a trace event with a given id, thread_id, and timestamp. Not
+// Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category_group, name, id, thread_id, timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Enter and leave a context based on the current scope.
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+ struct INTERNAL_TRACE_EVENT_UID(ScopedContext) { \
+ public: \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) { \
+ TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_); \
+ } \
+ ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() { \
+ TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_); \
+ } \
+ \
+ private: \
+ /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext) \
+ (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \
+ void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \
+ uint64_t cid_; \
+ }; \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext) \
+ INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+
+namespace node {
+namespace tracing {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const decltype(nullptr) kGlobalScope = nullptr;
+const uint64_t kNoId = 0;
+
+class TraceEventHelper {
+ public:
+ static void SetCurrentPlatform(v8::Platform* platform);
+ static v8::Platform* GetCurrentPlatform();
+};
+
+// TraceID encapsulates an ID that can be either an integer or a pointer.
+// Pointers are by default mangled with the Process ID so that they are
+// unlikely to collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+ class WithScope {
+ public:
+ WithScope(const char* scope, uint64_t raw_id)
+ : scope_(scope), raw_id_(raw_id) {}
+ uint64_t raw_id() const { return raw_id_; }
+ const char* scope() const { return scope_; }
+
+ private:
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
+ };
+
+ class DontMangle {
+ public:
+ explicit DontMangle(const void* raw_id)
+ : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {}
+ explicit DontMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(int64_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(int raw_id) : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(int16_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(signed char raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(WithScope scoped_id)
+ : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+ const char* scope() const { return scope_; }
+ uint64_t raw_id() const { return raw_id_; }
+
+ private:
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
+ };
+
+ class ForceMangle {
+ public:
+ explicit ForceMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(int64_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(int raw_id) : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(int16_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(signed char raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ uint64_t raw_id() const { return raw_id_; }
+
+ private:
+ uint64_t raw_id_;
+ };
+
+ TraceID(const void* raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(DontMangle maybe_scoped_id, unsigned int* flags)
+ : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
+ TraceID(uint64_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(uint16_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(int64_t raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(int raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(int16_t raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(signed char raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(WithScope scoped_id, unsigned int* flags)
+ : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+
+ uint64_t raw_id() const { return raw_id_; }
+ const char* scope() const { return scope_; }
+
+ private:
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+ bool as_bool;
+ uint64_t as_uint;
+ int64_t as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : str_(str) {}
+ operator const char*() const { return str_; }
+
+ private:
+ const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, \
+ value_type_id) \
+ static inline void SetTraceValue(actual_type arg, unsigned char* type, \
+ uint64_t* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \
+ static inline void SetTraceValue(actual_type arg, unsigned char* type, \
+ uint64_t* value) { \
+ *type = value_type_id; \
+ *value = static_cast<uint64_t>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
+ TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
+ TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+ TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
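+// Illustrative call (hypothetical values; the double's bit pattern is stored
+// in |value| via the TraceValueUnion):
+//   unsigned char type;
+//   uint64_t value;
+//   SetTraceValue(3.14, &type, &value);  // type == TRACE_VALUE_TYPE_DOUBLE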
+
+// These AddTraceEvent template functions are defined here instead of in the
+// macro, because the arg_values could be temporary objects, such as
+// std::string. In order to store pointers to the internal c_str and pass
+// through to the tracing API, the arg_values must live throughout these
+// procedures.
+
+static inline uint64_t AddTraceEvent(char phase,
+ const uint8_t* category_group_enabled,
+ const char* name, const char* scope,
+ uint64_t id, uint64_t bind_id,
+ unsigned int flags) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+ scope, id, bind_id, kZeroNumArgs, NULL,
+ NULL, NULL, flags);
+}
+
+template <class ARG1_TYPE>
+static inline uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_group_enabled, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+ const char* arg1_name, const ARG1_TYPE& arg1_val) {
+ const int num_args = 1;
+ uint8_t arg_types[1];
+ uint64_t arg_values[1];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, scope, id, bind_id, num_args,
+ &arg1_name, arg_types, arg_values, flags);
+}
+
+template <class ARG1_TYPE, class ARG2_TYPE>
+static inline uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_group_enabled, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+ const char* arg1_name, const ARG1_TYPE& arg1_val, const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = {arg1_name, arg2_name};
+ unsigned char arg_types[2];
+ uint64_t arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, scope, id, bind_id, num_args,
+ arg_names, arg_types, arg_values, flags);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class ScopedTracer {
+ public:
+ // Note: members of data_ intentionally left uninitialized. See Initialize.
+ ScopedTracer() : p_data_(NULL) {}
+
+ ~ScopedTracer() {
+ if (p_data_ && *data_.category_group_enabled)
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+ data_.category_group_enabled, data_.name, data_.event_handle);
+ }
+
+ void Initialize(const uint8_t* category_group_enabled, const char* name,
+ uint64_t event_handle) {
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ data_.event_handle = event_handle;
+ p_data_ = &data_;
+ }
+
+ private:
+ // This Data struct workaround is to avoid initializing all the members
+ // in Data during construction of this object, since this object is always
+ // constructed, even when tracing is disabled. If the members of Data were
+ // members of this class instead, compiler warnings occur about potential
+ // uninitialized accesses.
+ struct Data {
+ const uint8_t* category_group_enabled;
+ const char* name;
+ uint64_t event_handle;
+ };
+ Data* p_data_;
+ Data data_;
+};
+
+// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
+class ScopedTraceBinaryEfficient {
+ public:
+ ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+ ~ScopedTraceBinaryEfficient();
+
+ private:
+ const uint8_t* category_group_enabled_;
+ const char* name_;
+ uint64_t event_handle_;
+};
+
+// TraceEventSamplingStateScope records the current sampling state
+// and sets a new sampling state. When the scope exits, it restores
+// the previously recorded sampling state.
+template <size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+ explicit TraceEventSamplingStateScope(const char* category_and_name) {
+ previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+ TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+ }
+
+ ~TraceEventSamplingStateScope() {
+ TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+ }
+
+ static inline const char* Current() {
+ return reinterpret_cast<const char*>(
+ TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber]));
+ }
+
+ static inline void Set(const char* category_and_name) {
+ TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber],
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+ const_cast<char*>(category_and_name)));
+ }
+
+ private:
+ const char* previous_state_;
+};
+
+} // namespace tracing
+} // namespace node
+
+#endif // SRC_TRACING_TRACE_EVENT_H_
diff --git a/test/parallel/test-trace-event.js b/test/parallel/test-trace-event.js
new file mode 100644
index 0000000000..1873470583
--- /dev/null
+++ b/test/parallel/test-trace-event.js
@@ -0,0 +1,35 @@
+'use strict';
+const common = require('../common');
+const assert = require('assert');
+const cp = require('child_process');
+const fs = require('fs');
+
+const CODE = 'for (var i = 0; i < 100000; i++) { "test" + i }';
+const FILE_NAME = 'node_trace.1.log';
+
+common.refreshTmpDir();
+process.chdir(common.tmpDir);
+
+const proc_no_categories = cp.spawn(process.execPath,
+ [ '--trace-events-enabled', '--trace-event-categories', '""', '-e', CODE ]);
+
+proc_no_categories.once('exit', common.mustCall(() => {
+ assert(!common.fileExists(FILE_NAME));
+
+ const proc = cp.spawn(process.execPath,
+ [ '--trace-events-enabled', '-e', CODE ]);
+
+ proc.once('exit', common.mustCall(() => {
+ assert(common.fileExists(FILE_NAME));
+    fs.readFile(FILE_NAME, (err, data) => {
+      assert.ifError(err);
+      const traces = JSON.parse(data.toString()).traceEvents;
+ assert(traces.length > 0);
+ // Values that should be present on all runs to approximate correctness.
+ assert(traces.some((trace) => { return trace.pid === proc.pid; }));
+ assert(traces.some((trace) => { return trace.cat === 'v8'; }));
+ assert(traces.some((trace) => {
+ return trace.name === 'V8.ScriptCompiler';
+ }));
+ });
+ }));
+}));