trace/simple.c

/*
 * Simple trace backend
 *
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#ifndef _WIN32
#include <signal.h>
#include <pthread.h>
#endif
#include "qemu-timer.h"
#include "trace.h"
#include "trace/control.h"

/** Trace file header event ID */
#define HEADER_EVENT_ID (~(uint64_t)0) /* avoids conflicting with TraceEventIDs */

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 0

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

/** Trace buffer entry */
typedef struct {
    uint64_t event;
    uint64_t timestamp_ns;
    uint64_t x1;
    uint64_t x2;
    uint64_t x3;
    uint64_t x4;
    uint64_t x5;
    uint64_t x6;
} TraceRecord;

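/*
 * On-disk layout sketch (descriptive, derived from the code in this file,
 * not an official spec): the trace file begins with one TraceRecord whose
 * event is HEADER_EVENT_ID, whose timestamp_ns holds HEADER_MAGIC and whose
 * x1 holds HEADER_VERSION, followed by raw TraceRecord structs in host byte
 * order.  A minimal reader, assuming the same endianness and struct layout
 * as the writer ("trace-12345" is a made-up file name), might look like:
 *
 *     FILE *fp = fopen("trace-12345", "rb");
 *     TraceRecord rec;
 *     if (fread(&rec, sizeof(rec), 1, fp) != 1 ||
 *         rec.event != HEADER_EVENT_ID ||
 *         rec.timestamp_ns != HEADER_MAGIC ||
 *         rec.x1 != HEADER_VERSION) {
 *         ... not a trace file this reader understands ...
 *     }
 *     while (fread(&rec, sizeof(rec), 1, fp) == 1) {
 *         ... rec.event is a TraceEventID, or DROPPED_EVENT_ID with the
 *             number of records skipped by the writeout thread in rec.x1 ...
 *     }
 */
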
enum {
    TRACE_BUF_LEN = 4096,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

/*
 * Trace records are written out by a dedicated thread.  The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static GStaticMutex trace_lock = G_STATIC_MUTEX_INIT;
static GCond *trace_available_cond;
static GCond *trace_empty_cond;
static bool trace_available;
static bool trace_writeout_enabled;

static TraceRecord trace_buf[TRACE_BUF_LEN];
static unsigned int trace_idx;
static FILE *trace_fp;
static char *trace_file_name = NULL;

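/*
 * Concurrency note (descriptive, inferred from trace() and writeout_thread()
 * below): producers claim a slot with an atomic fetch-and-add on trace_idx,
 * fill it, and only then set TRACE_RECORD_VALID behind a write barrier; the
 * writeout thread copies valid records out and clears the bit.  trace_lock
 * only guards the condition variables used to wake the writeout thread, not
 * the trace buffer itself.
 */
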
/**
 * Read a trace record from the trace buffer
 *
 * @idx         Trace buffer index
 * @record      Trace record to fill
 *
 * Returns false if the record is not valid.
 */
static bool get_trace_record(unsigned int idx, TraceRecord *record)
{
    if (!(trace_buf[idx].event & TRACE_RECORD_VALID)) {
        return false;
    }

    __sync_synchronize(); /* read memory barrier before accessing record */

    *record = trace_buf[idx];
    record->event &= ~TRACE_RECORD_VALID;
    return true;
}

/**
 * Kick writeout thread
 *
 * @wait        Whether to wait for writeout thread to complete
 */
static void flush_trace_file(bool wait)
{
    g_static_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(trace_available_cond);

    if (wait) {
        g_cond_wait(trace_empty_cond, g_static_mutex_get_mutex(&trace_lock));
    }

    g_static_mutex_unlock(&trace_lock);
}

static void wait_for_trace_records_available(void)
{
    g_static_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(trace_empty_cond);
        g_cond_wait(trace_available_cond,
                    g_static_mutex_get_mutex(&trace_lock));
    }
    trace_available = false;
    g_static_mutex_unlock(&trace_lock);
}

static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord record;
    unsigned int writeout_idx = 0;
    unsigned int num_available, idx;
    size_t unused __attribute__ ((unused));

    for (;;) {
        wait_for_trace_records_available();

        num_available = trace_idx - writeout_idx;
        if (num_available > TRACE_BUF_LEN) {
            record = (TraceRecord){
                .event = DROPPED_EVENT_ID,
                .x1 = num_available,
            };
            unused = fwrite(&record, sizeof(record), 1, trace_fp);
            writeout_idx += num_available;
        }

        idx = writeout_idx % TRACE_BUF_LEN;
        while (get_trace_record(idx, &record)) {
            trace_buf[idx].event = 0; /* clear valid bit */
            unused = fwrite(&record, sizeof(record), 1, trace_fp);
            idx = ++writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}

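/*
 * Note on writeout_thread() above (descriptive): writeout_idx is advanced
 * only by that thread while trace_idx is advanced by producers, so if
 * producers get more than TRACE_BUF_LEN records ahead the oldest entries
 * have already been overwritten in the ring.  In that case a single
 * DROPPED_EVENT_ID record carrying num_available in x1 is written and
 * writeout resumes from the current producer position.
 */
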
static void trace(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3,
                  uint64_t x4, uint64_t x5, uint64_t x6)
{
    unsigned int idx;
    uint64_t timestamp;

    if (!trace_list[event].state) {
        return;
    }

    timestamp = get_clock();

    idx = g_atomic_int_exchange_and_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN;
    trace_buf[idx] = (TraceRecord){
        .event = event,
        .timestamp_ns = timestamp,
        .x1 = x1,
        .x2 = x2,
        .x3 = x3,
        .x4 = x4,
        .x5 = x5,
        .x6 = x6,
    };
    __sync_synchronize(); /* write barrier before marking as valid */
    trace_buf[idx].event |= TRACE_RECORD_VALID;

    if ((idx + 1) % TRACE_BUF_FLUSH_THRESHOLD == 0) {
        flush_trace_file(false);
    }
}

void trace0(TraceEventID event)
{
    trace(event, 0, 0, 0, 0, 0, 0);
}

void trace1(TraceEventID event, uint64_t x1)
{
    trace(event, x1, 0, 0, 0, 0, 0);
}

void trace2(TraceEventID event, uint64_t x1, uint64_t x2)
{
    trace(event, x1, x2, 0, 0, 0, 0);
}

void trace3(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3)
{
    trace(event, x1, x2, x3, 0, 0, 0);
}

void trace4(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4)
{
    trace(event, x1, x2, x3, x4, 0, 0);
}

void trace5(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5)
{
    trace(event, x1, x2, x3, x4, x5, 0);
}

void trace6(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5, uint64_t x6)
{
    trace(event, x1, x2, x3, x4, x5, x6);
}

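/*
 * These fixed-arity helpers are the entry points used by the generated
 * per-event trace functions, which cast each argument to uint64_t.  A rough,
 * illustrative sketch only (the real wrapper and the TRACE_MY_EVENT constant
 * are produced by the trace-events build machinery, and their exact shape
 * may differ):
 *
 *     static inline void trace_my_event(void *obj, size_t len)
 *     {
 *         trace2(TRACE_MY_EVENT, (uint64_t)(uintptr_t)obj, len);
 *     }
 */
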
void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceRecord header = {
            .event = HEADER_EVENT_ID,
            .timestamp_ns = HEADER_MAGIC,
            .x1 = HEADER_VERSION,
        };

        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}

/**
 * Set the name of a trace file
 *
 * @file        The trace file name or NULL for the default name-<pid> set at
 *              config time
 */
bool st_set_trace_file(const char *file)
{
    st_set_trace_file_enabled(false);

    free(trace_file_name);

    if (!file) {
        if (asprintf(&trace_file_name, CONFIG_TRACE_FILE, getpid()) < 0) {
            trace_file_name = NULL;
            return false;
        }
    } else {
        if (asprintf(&trace_file_name, "%s", file) < 0) {
            trace_file_name = NULL;
            return false;
        }
    }

    st_set_trace_file_enabled(true);
    return true;
}

void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    stream_printf(stream, "Trace file \"%s\" %s.\n",
                  trace_file_name, trace_fp ? "on" : "off");
}

void st_print_trace(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < TRACE_BUF_LEN; i++) {
        TraceRecord record;

        if (!get_trace_record(i, &record)) {
            continue;
        }
        stream_printf(stream, "Event %" PRIu64 " : %" PRIx64 " %" PRIx64
                      " %" PRIx64 " %" PRIx64 " %" PRIx64 " %" PRIx64 "\n",
                      record.event, record.x1, record.x2,
                      record.x3, record.x4, record.x5,
                      record.x6);
    }
}

void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}

void trace_print_events(FILE *stream, fprintf_function stream_printf)
{
    unsigned int i;

    for (i = 0; i < NR_TRACE_EVENTS; i++) {
        stream_printf(stream, "%s [Event ID %u] : state %u\n",
                      trace_list[i].tp_name, i, trace_list[i].state);
    }
}

bool trace_event_set_state(const char *name, bool state)
{
    unsigned int i;
    unsigned int len;
    bool wildcard = false;
    bool matched = false;

    len = strlen(name);
    if (len > 0 && name[len - 1] == '*') {
        wildcard = true;
        len -= 1;
    }
    for (i = 0; i < NR_TRACE_EVENTS; i++) {
        if (wildcard) {
            if (!strncmp(trace_list[i].tp_name, name, len)) {
                trace_list[i].state = state;
                matched = true;
            }
            continue;
        }
        if (!strcmp(trace_list[i].tp_name, name)) {
            trace_list[i].state = state;
            return true;
        }
    }
    return matched;
}

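/*
 * Example of the wildcard handling above (event names are illustrative):
 * trace_event_set_state("foo", true) enables only the event named "foo",
 * while trace_event_set_state("foo_*", true) enables every event whose name
 * starts with "foo_" via the trailing-'*' prefix match.
 */
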
/* Helper function to create a thread with signals blocked.  Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer.  Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif
    thread = g_thread_create(fn, NULL, FALSE, NULL);
#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}

bool trace_backend_init(const char *events, const char *file)
{
    GThread *thread;

    if (!g_thread_supported()) {
        g_thread_init(NULL);
    }

    trace_available_cond = g_cond_new();
    trace_empty_cond = g_cond_new();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        fprintf(stderr, "warning: unable to initialize simple trace backend\n");
        return false;
    }

    atexit(st_flush_trace_buffer);
    trace_backend_init_events(events);
    st_set_trace_file(file);
    return true;
}