/*
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#ifndef _WIN32
#include <signal.h>
#include <pthread.h>
#endif
#include "qemu/timer.h"
#include "trace/control.h"
#include "trace/simple.h"
/** Trace file header event ID */
#define HEADER_EVENT_ID (~(uint64_t)0) /* avoids conflicting with TraceEventIDs */

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 3

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid (top bit of the record's event field) */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)
40 * Trace records are written out by a dedicated thread. The thread waits for
41 * records to become available, writes them out, and then waits again.
43 #if GLIB_CHECK_VERSION(2, 32, 0)
44 static GMutex trace_lock
;
45 #define lock_trace_lock() g_mutex_lock(&trace_lock)
46 #define unlock_trace_lock() g_mutex_unlock(&trace_lock)
47 #define get_trace_lock_mutex() (&trace_lock)
49 static GStaticMutex trace_lock
= G_STATIC_MUTEX_INIT
;
50 #define lock_trace_lock() g_static_mutex_lock(&trace_lock)
51 #define unlock_trace_lock() g_static_mutex_unlock(&trace_lock)
52 #define get_trace_lock_mutex() g_static_mutex_get_mutex(&trace_lock)
55 /* g_cond_new() was deprecated in glib 2.31 but we still need to support it */
56 #if GLIB_CHECK_VERSION(2, 31, 0)
57 static GCond the_trace_available_cond
;
58 static GCond the_trace_empty_cond
;
59 static GCond
*trace_available_cond
= &the_trace_available_cond
;
60 static GCond
*trace_empty_cond
= &the_trace_empty_cond
;
62 static GCond
*trace_available_cond
;
63 static GCond
*trace_empty_cond
;
/* Set true by producers to wake the writeout thread; reset once consumed */
static bool trace_available;
/* Writeout only proceeds while a trace file is open (see st_set_trace_file_enabled) */
static bool trace_writeout_enabled;
enum {
    TRACE_BUF_LEN = 4096 * 64,
    /* Kick the writeout thread once this much of the buffer is pending */
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

uint8_t trace_buf[TRACE_BUF_LEN];
75 static volatile gint trace_idx
;
76 static unsigned int writeout_idx
;
77 static volatile gint dropped_events
;
78 static uint32_t trace_pid
;
79 static FILE *trace_fp
;
80 static char *trace_file_name
;
/** Trace buffer entry */
typedef struct {
    uint64_t event;        /* TraceEventID, with TRACE_RECORD_VALID in the MSB */
    uint64_t timestamp_ns;
    uint32_t length;       /* in bytes, including this header */
    uint32_t pid;
    uint64_t arguments[];  /* event arguments follow the header */
} TraceRecord;

/** On-disk file header, written once when the trace file is opened */
typedef struct {
    uint64_t header_event_id; /* HEADER_EVENT_ID */
    uint64_t header_magic;    /* HEADER_MAGIC */
    uint64_t header_version;  /* HEADER_VERSION */
} TraceLogHeader;
/* Forward declarations for the circular-buffer accessors defined below */
static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);
101 static void clear_buffer_range(unsigned int idx
, size_t len
)
105 if (idx
>= TRACE_BUF_LEN
) {
106 idx
= idx
% TRACE_BUF_LEN
;
108 trace_buf
[idx
++] = 0;
113 * Read a trace record from the trace buffer
115 * @idx Trace buffer index
116 * @record Trace record to fill
118 * Returns false if the record is not valid.
120 static bool get_trace_record(unsigned int idx
, TraceRecord
**recordptr
)
122 uint64_t event_flag
= 0;
124 /* read the event flag to see if its a valid record */
125 read_from_buffer(idx
, &record
, sizeof(event_flag
));
127 if (!(record
.event
& TRACE_RECORD_VALID
)) {
131 smp_rmb(); /* read memory barrier before accessing record */
132 /* read the record header to know record length */
133 read_from_buffer(idx
, &record
, sizeof(TraceRecord
));
134 *recordptr
= malloc(record
.length
); /* dont use g_malloc, can deadlock when traced */
135 /* make a copy of record to avoid being overwritten */
136 read_from_buffer(idx
, *recordptr
, record
.length
);
137 smp_rmb(); /* memory barrier before clearing valid flag */
138 (*recordptr
)->event
&= ~TRACE_RECORD_VALID
;
139 /* clear the trace buffer range for consumed record otherwise any byte
140 * with its MSB set may be considered as a valid event id when the writer
141 * thread crosses this range of buffer again.
143 clear_buffer_range(idx
, record
.length
);
148 * Kick writeout thread
150 * @wait Whether to wait for writeout thread to complete
152 static void flush_trace_file(bool wait
)
155 trace_available
= true;
156 g_cond_signal(trace_available_cond
);
159 g_cond_wait(trace_empty_cond
, get_trace_lock_mutex());
165 static void wait_for_trace_records_available(void)
168 while (!(trace_available
&& trace_writeout_enabled
)) {
169 g_cond_signal(trace_empty_cond
);
170 g_cond_wait(trace_available_cond
, get_trace_lock_mutex());
172 trace_available
= false;
176 static gpointer
writeout_thread(gpointer opaque
)
178 TraceRecord
*recordptr
;
181 uint8_t bytes
[sizeof(TraceRecord
) + sizeof(uint64_t)];
183 unsigned int idx
= 0;
185 size_t unused
__attribute__ ((unused
));
188 wait_for_trace_records_available();
190 if (g_atomic_int_get(&dropped_events
)) {
191 dropped
.rec
.event
= DROPPED_EVENT_ID
,
192 dropped
.rec
.timestamp_ns
= get_clock();
193 dropped
.rec
.length
= sizeof(TraceRecord
) + sizeof(uint64_t),
194 dropped
.rec
.pid
= trace_pid
;
196 dropped_count
= g_atomic_int_get(&dropped_events
);
197 } while (!g_atomic_int_compare_and_exchange(&dropped_events
,
199 dropped
.rec
.arguments
[0] = dropped_count
;
200 unused
= fwrite(&dropped
.rec
, dropped
.rec
.length
, 1, trace_fp
);
203 while (get_trace_record(idx
, &recordptr
)) {
204 unused
= fwrite(recordptr
, recordptr
->length
, 1, trace_fp
);
205 writeout_idx
+= recordptr
->length
;
206 free(recordptr
); /* dont use g_free, can deadlock when traced */
207 idx
= writeout_idx
% TRACE_BUF_LEN
;
215 void trace_record_write_u64(TraceBufferRecord
*rec
, uint64_t val
)
217 rec
->rec_off
= write_to_buffer(rec
->rec_off
, &val
, sizeof(uint64_t));
220 void trace_record_write_str(TraceBufferRecord
*rec
, const char *s
, uint32_t slen
)
222 /* Write string length first */
223 rec
->rec_off
= write_to_buffer(rec
->rec_off
, &slen
, sizeof(slen
));
224 /* Write actual string now */
225 rec
->rec_off
= write_to_buffer(rec
->rec_off
, (void*)s
, slen
);
228 int trace_record_start(TraceBufferRecord
*rec
, TraceEventID event
, size_t datasize
)
230 unsigned int idx
, rec_off
, old_idx
, new_idx
;
231 uint32_t rec_len
= sizeof(TraceRecord
) + datasize
;
232 uint64_t event_u64
= event
;
233 uint64_t timestamp_ns
= get_clock();
236 old_idx
= g_atomic_int_get(&trace_idx
);
238 new_idx
= old_idx
+ rec_len
;
240 if (new_idx
- writeout_idx
> TRACE_BUF_LEN
) {
241 /* Trace Buffer Full, Event dropped ! */
242 g_atomic_int_inc(&dropped_events
);
245 } while (!g_atomic_int_compare_and_exchange(&trace_idx
, old_idx
, new_idx
));
247 idx
= old_idx
% TRACE_BUF_LEN
;
250 rec_off
= write_to_buffer(rec_off
, &event_u64
, sizeof(event_u64
));
251 rec_off
= write_to_buffer(rec_off
, ×tamp_ns
, sizeof(timestamp_ns
));
252 rec_off
= write_to_buffer(rec_off
, &rec_len
, sizeof(rec_len
));
253 rec_off
= write_to_buffer(rec_off
, &trace_pid
, sizeof(trace_pid
));
256 rec
->rec_off
= (idx
+ sizeof(TraceRecord
)) % TRACE_BUF_LEN
;
260 static void read_from_buffer(unsigned int idx
, void *dataptr
, size_t size
)
262 uint8_t *data_ptr
= dataptr
;
265 if (idx
>= TRACE_BUF_LEN
) {
266 idx
= idx
% TRACE_BUF_LEN
;
268 data_ptr
[x
++] = trace_buf
[idx
++];
272 static unsigned int write_to_buffer(unsigned int idx
, void *dataptr
, size_t size
)
274 uint8_t *data_ptr
= dataptr
;
277 if (idx
>= TRACE_BUF_LEN
) {
278 idx
= idx
% TRACE_BUF_LEN
;
280 trace_buf
[idx
++] = data_ptr
[x
++];
282 return idx
; /* most callers wants to know where to write next */
285 void trace_record_finish(TraceBufferRecord
*rec
)
288 read_from_buffer(rec
->tbuf_idx
, &record
, sizeof(TraceRecord
));
289 smp_wmb(); /* write barrier before marking as valid */
290 record
.event
|= TRACE_RECORD_VALID
;
291 write_to_buffer(rec
->tbuf_idx
, &record
, sizeof(TraceRecord
));
293 if (((unsigned int)g_atomic_int_get(&trace_idx
) - writeout_idx
)
294 > TRACE_BUF_FLUSH_THRESHOLD
) {
295 flush_trace_file(false);
299 void st_set_trace_file_enabled(bool enable
)
301 if (enable
== !!trace_fp
) {
302 return; /* no change */
305 /* Halt trace writeout */
306 flush_trace_file(true);
307 trace_writeout_enabled
= false;
308 flush_trace_file(true);
311 static const TraceLogHeader header
= {
312 .header_event_id
= HEADER_EVENT_ID
,
313 .header_magic
= HEADER_MAGIC
,
314 /* Older log readers will check for version at next location */
315 .header_version
= HEADER_VERSION
,
318 trace_fp
= fopen(trace_file_name
, "wb");
323 if (fwrite(&header
, sizeof header
, 1, trace_fp
) != 1) {
329 /* Resume trace writeout */
330 trace_writeout_enabled
= true;
331 flush_trace_file(false);
339 * Set the name of a trace file
341 * @file The trace file name or NULL for the default name-<pid> set at
344 bool st_set_trace_file(const char *file
)
346 st_set_trace_file_enabled(false);
348 g_free(trace_file_name
);
351 trace_file_name
= g_strdup_printf(CONFIG_TRACE_FILE
, getpid());
353 trace_file_name
= g_strdup_printf("%s", file
);
356 st_set_trace_file_enabled(true);
360 void st_print_trace_file_status(FILE *stream
, int (*stream_printf
)(FILE *stream
, const char *fmt
, ...))
362 stream_printf(stream
, "Trace file \"%s\" %s.\n",
363 trace_file_name
, trace_fp
? "on" : "off");
366 void st_flush_trace_buffer(void)
368 flush_trace_file(true);
371 /* Helper function to create a thread with signals blocked. Use glib's
372 * portable threads since QEMU abstractions cannot be used due to reentrancy in
373 * the tracer. Also note the signal masking on POSIX hosts so that the thread
374 * does not steal signals when the rest of the program wants them blocked.
376 static GThread
*trace_thread_create(GThreadFunc fn
)
380 sigset_t set
, oldset
;
383 pthread_sigmask(SIG_SETMASK
, &set
, &oldset
);
386 #if GLIB_CHECK_VERSION(2, 31, 0)
387 thread
= g_thread_new("trace-thread", fn
, NULL
);
389 thread
= g_thread_create(fn
, NULL
, FALSE
, NULL
);
393 pthread_sigmask(SIG_SETMASK
, &oldset
, NULL
);
399 bool st_init(const char *file
)
403 trace_pid
= getpid();
405 #if !GLIB_CHECK_VERSION(2, 31, 0)
406 trace_available_cond
= g_cond_new();
407 trace_empty_cond
= g_cond_new();
410 thread
= trace_thread_create(writeout_thread
);
412 fprintf(stderr
, "warning: unable to initialize simple trace backend\n");
416 atexit(st_flush_trace_buffer
);
417 st_set_trace_file(file
);