#include "tracing/node_trace_buffer.h"

#include <memory>
#include "util-inl.h"

namespace node {
namespace tracing {

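// InternalTraceBuffer is one half of NodeTraceBuffer's double-buffering
// scheme: while one buffer accepts new trace events, the other can be
// flushed to the agent on the tracing thread. The low bit of every event
// handle records which of the two buffers (id 0 or 1) the event lives in.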
InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
                                         Agent* agent)
    : flushing_(false), max_chunks_(max_chunks),
      agent_(agent), id_(id) {
  chunks_.resize(max_chunks);
}

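// Reserves a slot for a new trace event and writes a handle encoding the
// slot's location to *handle. Chunks left over from a previous flush are
// reused in place; Reset() advances the chunk's sequence number, which
// invalidates any stale handles still pointing into it.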
TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  // Create a new chunk if there is no chunk yet or the last chunk is full.
  if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
    auto& chunk = chunks_[total_chunks_++];
    if (chunk) {
      chunk->Reset(current_chunk_seq_++);
    } else {
      chunk = std::make_unique<TraceBufferChunk>(current_chunk_seq_++);
    }
  }
  auto& chunk = chunks_[total_chunks_ - 1];
  size_t event_index;
  TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
  *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
  return trace_object;
}

TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  if (handle == 0) {
    // A handle value of zero never has a trace event associated with it.
    return nullptr;
  }
  size_t chunk_index, event_index;
  uint32_t buffer_id, chunk_seq;
  ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
  if (buffer_id != id_ || chunk_index >= total_chunks_) {
    // Either the chunk belongs to the other buffer, or it lies outside the
    // current range of chunks loaded in memory (the latter suggests that the
    // chunk has already been flushed and is no longer in memory).
    return nullptr;
  }
  auto& chunk = chunks_[chunk_index];
  if (chunk->seq() != chunk_seq) {
    // Chunk is no longer in memory.
    return nullptr;
  }
  return chunk->GetEventAt(event_index);
}

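// Drains all chunks into the agent. The flushing_ flag is observed by
// NonBlockingFlushSignalCb so that the tracing thread does not start a
// second flush while one is already in progress. Events whose name() is
// still null were reserved by AddTraceEvent but not yet filled in by their
// writer, so they are skipped rather than forwarded half-initialized.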
void InternalTraceBuffer::Flush(bool blocking) {
  {
    Mutex::ScopedLock scoped_lock(mutex_);
    if (total_chunks_ > 0) {
      flushing_ = true;
      for (size_t i = 0; i < total_chunks_; ++i) {
        auto& chunk = chunks_[i];
        for (size_t j = 0; j < chunk->size(); ++j) {
          TraceObject* trace_event = chunk->GetEventAt(j);
          // Another thread may have added a trace that is yet to be
          // initialized. Skip such traces.
          // https://github.com/nodejs/node/issues/21038.
          if (trace_event->name()) {
            agent_->AppendTraceEvent(trace_event);
          }
        }
      }
      total_chunks_ = 0;
      flushing_ = false;
    }
  }
  agent_->Flush(blocking);
}

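// Handle layout: bit 0 stores the buffer id (0 or 1); the remaining bits
// store chunk_seq * Capacity() + chunk_index * kChunkSize + event_index,
// which ExtractHandle below unpacks with one division and one modulo.
// As a worked example, assuming Capacity() (defined in the header) is
// max_chunks_ * kChunkSize, with kChunkSize == 64 and max_chunks == 2
// (so Capacity() == 128): MakeHandle(1, 3, 5) on the buffer with id 1
// yields ((3 * 128 + 1 * 64 + 5) << 1) + 1 == 907, and ExtractHandle(907)
// recovers chunk_seq 3, chunk_index 1, event_index 5.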
uint64_t InternalTraceBuffer::MakeHandle(
    size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
  return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
          chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
}

void InternalTraceBuffer::ExtractHandle(
    uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
    uint32_t* chunk_seq, size_t* event_index) const {
  *buffer_id = static_cast<uint32_t>(handle & 0x1);
  handle >>= 1;
  *chunk_seq = static_cast<uint32_t>(handle / Capacity());
  size_t indices = handle % Capacity();
  *chunk_index = indices / TraceBufferChunk::kChunkSize;
  *event_index = indices % TraceBufferChunk::kChunkSize;
}

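// Both async handles are bound to the tracing loop, so their callbacks
// (NonBlockingFlushSignalCb and ExitSignalCb) always run on the tracing
// thread rather than on the threads that produce trace events.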
NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
    Agent* agent, uv_loop_t* tracing_loop)
    : tracing_loop_(tracing_loop),
      buffer1_(max_chunks, 0, agent),
      buffer2_(max_chunks, 1, agent) {
  current_buf_.store(&buffer1_);

  flush_signal_.data = this;
  int err = uv_async_init(tracing_loop_, &flush_signal_,
                          NonBlockingFlushSignalCb);
  CHECK_EQ(err, 0);

  exit_signal_.data = this;
  err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
  CHECK_EQ(err, 0);
}

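// libuv handles must be closed on the thread that runs their loop, so the
// destructor cannot call uv_close directly. It instead wakes the tracing
// thread via exit_signal_ and blocks until ExitSignalCb has closed both
// handles and signalled exit_cond_.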
NodeTraceBuffer::~NodeTraceBuffer() {
  uv_async_send(&exit_signal_);
  Mutex::ScopedLock scoped_lock(exit_mutex_);
  while (!exited_) {
    exit_cond_.Wait(scoped_lock);
  }
}

TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
  // If the buffer is full, attempt to perform a flush.
  if (!TryLoadAvailableBuffer()) {
    // Assign a value of zero as the trace event handle.
    // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
    // and will cause GetEventByHandle to return nullptr if the handle is
    // later passed back to it.
    *handle = 0;
    return nullptr;
  }
  return current_buf_.load()->AddTraceEvent(handle);
}

TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
  return current_buf_.load()->GetEventByHandle(handle);
}

bool NodeTraceBuffer::Flush() {
  buffer1_.Flush(true);
  buffer2_.Flush(true);
  return true;
}

// Attempts to point current_buf_ at a buffer that can accept at least one
// more trace event. Returns false if both buffers are full; returns true
// otherwise.
bool NodeTraceBuffer::TryLoadAvailableBuffer() {
  InternalTraceBuffer* prev_buf = current_buf_.load();
  if (prev_buf->IsFull()) {
    uv_async_send(&flush_signal_);  // Trigger a flush on the tracing thread.
    InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
      &buffer2_ : &buffer1_;
    if (!other_buf->IsFull()) {
      current_buf_.store(other_buf);
    } else {
      return false;
    }
  }
  return true;
}

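// Runs on the tracing thread in response to flush_signal_. Flushes
// whichever internal buffer has filled up, skipping a buffer whose flush
// is already in progress on another thread.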
// static
void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer = static_cast<NodeTraceBuffer*>(signal->data);
  if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
    buffer->buffer1_.Flush(false);
  }
  if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
    buffer->buffer2_.Flush(false);
  }
}

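// Runs on the tracing thread during destruction. uv_close completes
// asynchronously, so the second close is issued from the first close's
// callback, and the destructor is only released (via exited_ and
// exit_cond_) once both handles have finished closing.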
// static
void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer =
      ContainerOf(&NodeTraceBuffer::exit_signal_, signal);

  // Close both flush_signal_ and exit_signal_.
  uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_),
           [](uv_handle_t* signal) {
    NodeTraceBuffer* buffer =
        ContainerOf(&NodeTraceBuffer::flush_signal_,
                    reinterpret_cast<uv_async_t*>(signal));

    uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_),
             [](uv_handle_t* signal) {
      NodeTraceBuffer* buffer =
          ContainerOf(&NodeTraceBuffer::exit_signal_,
                      reinterpret_cast<uv_async_t*>(signal));
      Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
      buffer->exited_ = true;
      buffer->exit_cond_.Signal(scoped_lock);
    });
  });
}

}  // namespace tracing
}  // namespace node