trace/simple.c (qemu-kvm.git)
/*
 * Simple trace backend
 *
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#ifndef _WIN32
#include <signal.h>
#include <pthread.h>
#endif
#include "qemu-timer.h"
#include "trace.h"
#include "trace/control.h"

/** Trace file header event ID */
#define HEADER_EVENT_ID (~(uint64_t)0) /* avoids conflicting with TraceEventIDs */

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 0

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

/** Trace buffer entry */
typedef struct {
    uint64_t event;
    uint64_t timestamp_ns;
    uint64_t x1;
    uint64_t x2;
    uint64_t x3;
    uint64_t x4;
    uint64_t x5;
    uint64_t x6;
} TraceRecord;
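
/*
 * Each record carries up to six 64-bit arguments.  The most significant
 * bit of the event field doubles as TRACE_RECORD_VALID, which is how a
 * finished record is published to the writeout thread without locking.
 */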

enum {
    TRACE_BUF_LEN = 4096,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};
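
/*
 * trace() kicks the writeout thread after every TRACE_BUF_FLUSH_THRESHOLD
 * records, i.e. each time another quarter of the ring buffer fills up.
 * If producers still overrun the buffer, the backlog is skipped and
 * accounted for with a DROPPED_EVENT_ID record (see writeout_thread()).
 */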

/*
 * Trace records are written out by a dedicated thread. The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static GStaticMutex trace_lock = G_STATIC_MUTEX_INIT;
static GCond *trace_available_cond;
static GCond *trace_empty_cond;
static bool trace_available;
static bool trace_writeout_enabled;
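
/*
 * trace_buf is the shared ring buffer and trace_idx the free-running
 * producer index; users of the buffer reduce indices modulo TRACE_BUF_LEN.
 */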
static TraceRecord trace_buf[TRACE_BUF_LEN];
static unsigned int trace_idx;
static FILE *trace_fp;
static char *trace_file_name = NULL;

/**
 * Read a trace record from the trace buffer
 *
 * @idx         Trace buffer index
 * @record      Trace record to fill
 *
 * Returns false if the record is not valid.
 */
static bool get_trace_record(unsigned int idx, TraceRecord *record)
{
    if (!(trace_buf[idx].event & TRACE_RECORD_VALID)) {
        return false;
    }

    __sync_synchronize(); /* read memory barrier before accessing record */

    *record = trace_buf[idx];
    record->event &= ~TRACE_RECORD_VALID;
    return true;
}
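
/*
 * Note: the read barrier in get_trace_record() pairs with the write
 * barrier in trace(); a record's payload is only copied after its valid
 * bit has been observed as set.
 */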

/**
 * Kick writeout thread
 *
 * @wait        Whether to wait for writeout thread to complete
 */
static void flush_trace_file(bool wait)
{
    g_static_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(trace_available_cond);

    if (wait) {
        g_cond_wait(trace_empty_cond, g_static_mutex_get_mutex(&trace_lock));
    }

    g_static_mutex_unlock(&trace_lock);
}
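
/*
 * Callers pass wait=true when writeout must be fully drained, e.g. from
 * st_flush_trace_buffer() at exit or before switching trace files; the
 * periodic kicks from trace() pass wait=false.
 */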

static void wait_for_trace_records_available(void)
{
    g_static_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(trace_empty_cond);
        g_cond_wait(trace_available_cond,
                    g_static_mutex_get_mutex(&trace_lock));
    }
    trace_available = false;
    g_static_mutex_unlock(&trace_lock);
}

static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord record;
    unsigned int writeout_idx = 0;
    unsigned int num_available, idx;
    size_t unused __attribute__ ((unused));

    for (;;) {
        wait_for_trace_records_available();

        num_available = trace_idx - writeout_idx;
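        /*
         * If the producers have lapped the ring buffer, skip the backlog
         * and emit a single DROPPED_EVENT_ID record carrying the number
         * of records that could not be written out.
         */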
        if (num_available > TRACE_BUF_LEN) {
            record = (TraceRecord){
                .event = DROPPED_EVENT_ID,
                .x1 = num_available,
            };
            unused = fwrite(&record, sizeof(record), 1, trace_fp);
            writeout_idx += num_available;
        }

        idx = writeout_idx % TRACE_BUF_LEN;
        while (get_trace_record(idx, &record)) {
            trace_buf[idx].event = 0; /* clear valid bit */
            unused = fwrite(&record, sizeof(record), 1, trace_fp);
            idx = ++writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}

static void trace(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3,
                  uint64_t x4, uint64_t x5, uint64_t x6)
{
    unsigned int idx;
    uint64_t timestamp;

    if (!trace_list[event].state) {
        return;
    }

    timestamp = get_clock();
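    /*
     * Reserve a buffer slot with an atomic fetch-and-add so that
     * concurrent callers never obtain the same index.
     */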
#if GLIB_CHECK_VERSION(2, 30, 0)
    idx = g_atomic_int_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN;
#else
    idx = g_atomic_int_exchange_and_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN;
#endif
    trace_buf[idx] = (TraceRecord){
        .event = event,
        .timestamp_ns = timestamp,
        .x1 = x1,
        .x2 = x2,
        .x3 = x3,
        .x4 = x4,
        .x5 = x5,
        .x6 = x6,
    };
    __sync_synchronize(); /* write barrier before marking as valid */
    trace_buf[idx].event |= TRACE_RECORD_VALID;

    if ((idx + 1) % TRACE_BUF_FLUSH_THRESHOLD == 0) {
        flush_trace_file(false);
    }
}
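
/*
 * Fixed-arity wrappers around trace(); unused argument slots are passed
 * as zero.
 */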
void trace0(TraceEventID event)
{
    trace(event, 0, 0, 0, 0, 0, 0);
}

void trace1(TraceEventID event, uint64_t x1)
{
    trace(event, x1, 0, 0, 0, 0, 0);
}

void trace2(TraceEventID event, uint64_t x1, uint64_t x2)
{
    trace(event, x1, x2, 0, 0, 0, 0);
}

void trace3(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3)
{
    trace(event, x1, x2, x3, 0, 0, 0);
}

void trace4(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4)
{
    trace(event, x1, x2, x3, x4, 0, 0);
}

void trace5(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5)
{
    trace(event, x1, x2, x3, x4, x5, 0);
}

void trace6(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5, uint64_t x6)
{
    trace(event, x1, x2, x3, x4, x5, x6);
}

void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceRecord header = {
            .event = HEADER_EVENT_ID,
            .timestamp_ns = HEADER_MAGIC,
            .x1 = HEADER_VERSION,
        };
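
        /*
         * The header is written as an ordinary TraceRecord: the magic
         * number goes into the timestamp field and the format version
         * into x1, which lets readers identify and validate the file.
         */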
        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}

/**
 * Set the name of a trace file
 *
 * @file        The trace file name or NULL for the default name-<pid> set at
 *              config time
 */
bool st_set_trace_file(const char *file)
{
    st_set_trace_file_enabled(false);

    free(trace_file_name);

    if (!file) {
        if (asprintf(&trace_file_name, CONFIG_TRACE_FILE, getpid()) < 0) {
            trace_file_name = NULL;
            return false;
        }
    } else {
        if (asprintf(&trace_file_name, "%s", file) < 0) {
            trace_file_name = NULL;
            return false;
        }
    }

    st_set_trace_file_enabled(true);
    return true;
}

void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    stream_printf(stream, "Trace file \"%s\" %s.\n",
                  trace_file_name, trace_fp ? "on" : "off");
}

void st_print_trace(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < TRACE_BUF_LEN; i++) {
        TraceRecord record;

        if (!get_trace_record(i, &record)) {
            continue;
        }
        stream_printf(stream, "Event %" PRIu64 " : %" PRIx64 " %" PRIx64
                      " %" PRIx64 " %" PRIx64 " %" PRIx64 " %" PRIx64 "\n",
                      record.event, record.x1, record.x2,
                      record.x3, record.x4, record.x5,
                      record.x6);
    }
}

void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}

void trace_print_events(FILE *stream, fprintf_function stream_printf)
{
    unsigned int i;

    for (i = 0; i < NR_TRACE_EVENTS; i++) {
        stream_printf(stream, "%s [Event ID %u] : state %u\n",
                      trace_list[i].tp_name, i, trace_list[i].state);
    }
}
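
/*
 * A name ending in '*' is treated as a prefix wildcard and may toggle
 * several events; otherwise only an exact match is changed.
 */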
bool trace_event_set_state(const char *name, bool state)
{
    unsigned int i;
    unsigned int len;
    bool wildcard = false;
    bool matched = false;

    len = strlen(name);
    if (len > 0 && name[len - 1] == '*') {
        wildcard = true;
        len -= 1;
    }
    for (i = 0; i < NR_TRACE_EVENTS; i++) {
        if (wildcard) {
            if (!strncmp(trace_list[i].tp_name, name, len)) {
                trace_list[i].state = state;
                matched = true;
            }
            continue;
        }
        if (!strcmp(trace_list[i].tp_name, name)) {
            trace_list[i].state = state;
            return true;
        }
    }
    return matched;
}

/* Helper function to create a thread with signals blocked. Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer. Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif
    thread = g_thread_create(fn, NULL, FALSE, NULL);
#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}

bool trace_backend_init(const char *events, const char *file)
{
    GThread *thread;
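
    /*
     * With glib older than 2.31, threading must be initialized explicitly;
     * newer glib enables it automatically, so this branch is not expected
     * to be taken there.
     */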
    if (!g_thread_supported()) {
#if !GLIB_CHECK_VERSION(2, 31, 0)
        g_thread_init(NULL);
#else
        fprintf(stderr, "glib threading failed to initialize.\n");
        exit(1);
#endif
    }

    trace_available_cond = g_cond_new();
    trace_empty_cond = g_cond_new();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        fprintf(stderr, "warning: unable to initialize simple trace backend\n");
        return false;
    }

    atexit(st_flush_trace_buffer);
    trace_backend_init_events(events);
    st_set_trace_file(file);
    return true;
}