2 * Copyright (c) 2009, 2010 Aggelos Economopoulos. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42 #include <sys/param.h>
43 #include <sys/queue.h>
55 printd_set_flags(const char *str
, unsigned int *flags
)
58 * This is suboptimal as we don't detect
67 err(2, "invalid debug flag %c\n", *str
);
68 *flags
|= 1 << (*str
- 'a');
74 MAX_EVHDR_SIZE
= PATH_MAX
+ 200,
75 /* string namespaces */
80 NR_BUCKETS
= 1021, /* prime */
81 PARSE_ERR_BUFSIZE
= 256,
83 REC_BOUNDARY
= 1 << 14,
85 EVTRF_WR
= 0x1, /* open for writing */
89 typedef uint16_t fileid_t
;
90 typedef uint16_t funcid_t
;
91 typedef uint16_t fmtid_t
;
93 struct trace_event_header
{
95 uint64_t ts
; /* XXX: this should only be part of probe */
96 } __attribute__((packed
));
98 struct probe_event_header
{
99 struct trace_event_header eh
;
101 * For these fields, 0 implies "not available"
110 uint8_t cpu
; /* -1 if n/a */
111 } __attribute__((packed
));
113 struct string_event_header
{
114 struct trace_event_header eh
;
118 } __attribute__((packed
));
120 struct fmt_event_header
{
121 struct trace_event_header eh
;
125 } __attribute__((packed
));
127 struct cpuinfo_event_header
{
130 } __attribute__((packed
));
135 struct hashentry
*next
;
139 struct hashentry
*buckets
[NR_BUCKETS
];
140 uintptr_t (*hashfunc
)(uintptr_t);
141 uintptr_t (*cmpfunc
)(uintptr_t, uintptr_t);
153 struct event_filter_unresolved
{
154 TAILQ_ENTRY(event_filter_unresolved
) link
;
159 RB_ENTRY(id_map
) rb_node
;
164 RB_HEAD(id_tree
, id_map
);
173 RB_HEAD(thread_tree
, evtr_thread
);
176 struct thread_tree root
;
179 struct event_callback
{
180 void (*cb
)(evtr_event_t
, void *data
);
181 void *data
; /* this field must be malloc()ed */
185 struct evtr_thread
*td
; /* currently executing thread */
197 * When writing, we keep track of the strings we've
198 * already dumped so we only dump them once.
199 * Paths, function names etc belong to different
202 struct hashtab_str
*strings
[EVTR_NS_MAX
- 1];
204 * When reading, we build a map from id to string.
205 * Every id must be defined at the point of use.
207 struct string_map maps
[EVTR_NS_MAX
- 1];
210 /* same as above, but for subsys+fmt pairs */
211 struct fmt_map fmtmap
;
212 struct hashtab_str
*fmts
;
214 struct thread_map threads
;
228 struct symtab
*symtab
;
230 struct event_callback
**cbs
;
232 * Filters that have a format specified and we
233 * need to resolve that to an fmtid
235 TAILQ_HEAD(, event_filter_unresolved
) unresolved_filtq
;
238 char parse_err_buf
[PARSE_ERR_BUFSIZE
];
240 struct evtr_event pending_event
;
244 evtr_set_debug(const char *str
)
246 printd_set_flags(str
, &evtr_debug
);
249 static int id_map_cmp(struct id_map
*, struct id_map
*);
250 RB_PROTOTYPE2(id_tree
, id_map
, rb_node
, id_map_cmp
, int);
251 RB_GENERATE2(id_tree
, id_map
, rb_node
, id_map_cmp
, int, id
);
253 static int thread_cmp(struct evtr_thread
*, struct evtr_thread
*);
254 RB_PROTOTYPE2(thread_tree
, evtr_thread
, rb_node
, thread_cmp
, void *);
255 RB_GENERATE2(thread_tree
, evtr_thread
, rb_node
, thread_cmp
, void *, id
);
259 validate_string(const char *str
)
261 if (!(evtr_debug
& MISC
))
264 assert(isprint(*str
));
269 id_tree_free(struct id_tree
*root
)
271 struct id_map
*v
, *n
;
273 for (v
= RB_MIN(id_tree
, root
); v
; v
= n
) {
274 n
= RB_NEXT(id_tree
, root
, v
);
275 RB_REMOVE(id_tree
, root
, v
);
281 evtr_register_callback(evtr_query_t q
, void (*fn
)(evtr_event_t
, void *), void *d
)
283 struct event_callback
*cb
;
286 if (!(cb
= malloc(sizeof(*cb
)))) {
292 if (!(cbs
= realloc(q
->cbs
, (++q
->ncbs
) * sizeof(cb
)))) {
299 q
->cbs
[q
->ncbs
- 1] = cb
;
305 evtr_deregister_callbacks(evtr_query_t q
)
309 for (i
= 0; i
< q
->ncbs
; ++i
) {
318 evtr_run_callbacks(evtr_event_t ev
, evtr_query_t q
)
320 struct event_callback
*cb
;
323 for (i
= 0; i
< q
->ncbs
; ++i
) {
325 cb
->cb(ev
, cb
->data
);
331 evtr_cpu(evtr_t evtr
, int c
)
333 if ((c
< 0) || (c
>= evtr
->ncpus
))
335 return &evtr
->cpus
[c
];
338 static int parse_format_data(evtr_event_t ev
, const char *fmt
, ...)
339 __printflike(2, 3) __scanflike(2, 3);
343 parse_format_data(evtr_event_t ev
, const char *fmt
, ...)
348 if (strcmp(fmt
, ev
->fmt
))
350 vsnprintf(buf
, sizeof(buf
), fmt
, __DECONST(void *, ev
->fmtdata
));
351 printd(MISC
, "string is: %s\n", buf
);
353 return vsscanf(buf
, fmt
, ap
);
358 evtr_deregister_filters(evtr_query_t q
, evtr_filter_t filt
, int nfilt
)
360 struct event_filter_unresolved
*u
, *tmp
;
362 TAILQ_FOREACH_MUTABLE(u
, &q
->unresolved_filtq
, link
, tmp
) {
363 for (i
= 0; i
< nfilt
; ++i
) {
364 if (u
->filt
== &filt
[i
]) {
365 TAILQ_REMOVE(&q
->unresolved_filtq
, u
, link
);
373 evtr_filter_register(evtr_query_t q
, evtr_filter_t filt
)
375 struct event_filter_unresolved
*res
;
377 if (!(res
= malloc(sizeof(*res
)))) {
382 TAILQ_INSERT_TAIL(&q
->unresolved_filtq
, res
, link
);
388 evtr_query_needs_parsing(evtr_query_t q
)
392 for (i
= 0; i
< q
->nfilt
; ++i
)
393 if (q
->filt
[i
].ev_type
== EVTR_TYPE_STMT
)
399 evtr_event_data(evtr_event_t ev
, char *buf
, size_t len
)
402 * XXX: we implicitly trust the format string.
405 if (ev
->fmtdatalen
) {
406 vsnprintf(buf
, len
, ev
->fmt
, __DECONST(void *, ev
->fmtdata
));
408 strlcpy(buf
, ev
->fmt
, len
);
413 evtr_error(evtr_t evtr
)
415 return evtr
->err
|| (evtr
->errmsg
!= NULL
);
419 evtr_errmsg(evtr_t evtr
)
421 return evtr
->errmsg
? evtr
->errmsg
: strerror(evtr
->err
);
425 evtr_query_error(evtr_query_t q
)
427 return q
->err
|| (q
->errmsg
!= NULL
) || evtr_error(q
->evtr
);
431 evtr_query_errmsg(evtr_query_t q
)
433 return q
->errmsg
? q
->errmsg
:
434 (q
->err
? strerror(q
->err
) :
435 (evtr_errmsg(q
->evtr
)));
440 id_map_cmp(struct id_map
*a
, struct id_map
*b
)
442 return a
->id
- b
->id
;
447 thread_cmp(struct evtr_thread
*a
, struct evtr_thread
*b
)
458 #define DEFINE_MAP_FIND(prefix, type) \
461 prefix ## _map_find(struct id_tree *tree, int id)\
463 struct id_map *sid; \
465 sid = id_tree_RB_LOOKUP(tree, id); \
466 return sid ? sid->data : NULL; \
469 DEFINE_MAP_FIND(string
, const char *)
470 DEFINE_MAP_FIND(fmt
, const struct event_fmt
*)
474 thread_map_find(struct thread_map
*map
, void *id
)
476 return thread_tree_RB_LOOKUP(&map
->root
, id
);
479 #define DEFINE_MAP_INSERT(prefix, type, _cmp, _dup) \
482 prefix ## _map_insert(struct id_tree *tree, type data, int id) \
484 struct id_map *sid, *osid; \
486 sid = malloc(sizeof(*sid)); \
492 if ((osid = id_tree_RB_INSERT(tree, sid))) { \
494 if (_cmp((type)osid->data, data)) { \
497 printd(DS, "mapping already exists, skipping\n"); \
498 /* we're OK with redefinitions of an id to the same string */ \
501 /* only do the strdup if we're inserting a new string */ \
502 sid->data = _dup(data); /* XXX: oom */ \
508 thread_map_insert(struct thread_map
*map
, struct evtr_thread
*td
)
510 struct evtr_thread
*otd
;
512 if ((otd
= thread_tree_RB_INSERT(&map
->root
, td
))) {
514 * Thread addresses might be reused, we're
516 * DANGER, Will Robinson: this means the user
517 * of the API needs to copy event->td if they
518 * want it to remain stable.
520 free((void *)otd
->comm
);
521 otd
->comm
= td
->comm
;
528 event_fmt_cmp(const struct event_fmt
*a
, const struct event_fmt
*b
)
534 ret
= strcmp(a
->subsys
, b
->subsys
);
536 ret
= strcmp(a
->subsys
, "");
538 } else if (b
->subsys
) {
539 ret
= strcmp("", b
->subsys
);
543 return strcmp(a
->fmt
, b
->fmt
);
548 event_fmt_dup(const struct event_fmt
*o
)
552 if (!(n
= malloc(sizeof(*n
)))) {
555 memcpy(n
, o
, sizeof(*n
));
559 DEFINE_MAP_INSERT(string
, const char *, strcmp
, strdup
)
560 DEFINE_MAP_INSERT(fmt
, const struct event_fmt
*, event_fmt_cmp
, event_fmt_dup
)
563 hash_find(const struct hashtab
*tab
, uintptr_t key
, uintptr_t *val
)
565 struct hashentry
*ent
;
567 for(ent
= tab
->buckets
[tab
->hashfunc(key
)];
568 ent
&& tab
->cmpfunc(ent
->key
, key
);
578 hash_insert(struct hashtab
*tab
, uintptr_t key
, uintptr_t val
)
580 struct hashentry
*ent
;
583 if (!(ent
= malloc(sizeof(*ent
)))) {
584 fprintf(stderr
, "out of memory\n");
587 hsh
= tab
->hashfunc(key
);
588 ent
->next
= tab
->buckets
[hsh
];
591 tab
->buckets
[hsh
] = ent
;
597 cmpfunc_pointer(uintptr_t a
, uintptr_t b
)
604 hashfunc_pointer(uintptr_t p
)
606 return p
% NR_BUCKETS
;
613 if (!(tab
= calloc(sizeof(struct hashtab
), 1)))
615 tab
->hashfunc
= &hashfunc_pointer
;
616 tab
->cmpfunc
= &cmpfunc_pointer
;
620 struct hashtab_str
{ /* string -> id map */
627 hashfunc_string(uintptr_t p
)
629 const char *str
= (char *)p
;
630 unsigned long hash
= 5381;
634 hash
= ((hash
<< 5) + hash
) + c
; /* hash * 33 + c */
635 return hash
% NR_BUCKETS
;
/*
 * Comparison callback for string-keyed tables: zero means the two keys
 * are equal (strcmp() result widened to uintptr_t).
 */
static uintptr_t
cmpfunc_string(uintptr_t ka, uintptr_t kb)
{
	return strcmp((char *)ka, (char *)kb);
}
650 struct hashtab_str
*strtab
;
651 if (!(strtab
= calloc(sizeof(struct hashtab_str
), 1)))
653 strtab
->tab
.hashfunc
= &hashfunc_string
;
654 strtab
->tab
.cmpfunc
= &cmpfunc_string
;
660 strhash_destroy(struct hashtab_str
*strtab
)
667 strhash_find(struct hashtab_str
*strtab
, const char *str
, uint16_t *id
)
671 if (hash_find(&strtab
->tab
, (uintptr_t)str
, &val
))
679 strhash_insert(struct hashtab_str
*strtab
, const char *str
, uint16_t *id
)
684 if (strtab
->id
== 0) {
685 fprintf(stderr
, "too many strings\n");
690 fprintf(stderr
, "out of memory\n");
694 hash_insert(&strtab
->tab
, (uintptr_t)str
, (uintptr_t)val
);
702 struct symtab
*symtab
;
703 if (!(symtab
= calloc(sizeof(struct symtab
), 1)))
705 symtab
->tab
.hashfunc
= &hashfunc_string
;
706 symtab
->tab
.cmpfunc
= &cmpfunc_string
;
711 symtab_destroy(struct symtab
*symtab
)
716 struct evtr_variable
*
717 symtab_find(const struct symtab
*symtab
, const char *str
)
721 if (hash_find(&symtab
->tab
, (uintptr_t)str
, &val
))
723 return (struct evtr_variable
*)val
;
727 symtab_insert(struct symtab
*symtab
, const char *name
,
728 struct evtr_variable
*var
)
732 fprintf(stderr
, "out of memory\n");
735 hash_insert(&symtab
->tab
, (uintptr_t)name
, (uintptr_t)var
);
741 evtr_filter_match(evtr_query_t q
, evtr_filter_t f
, evtr_event_t ev
)
743 if ((f
->cpu
!= -1) && (f
->cpu
!= ev
->cpu
))
746 assert(!(f
->flags
& FILTF_ID
));
747 if (ev
->type
!= f
->ev_type
)
749 if (ev
->type
== EVTR_TYPE_PROBE
) {
750 if (f
->fmt
&& strcmp(ev
->fmt
, f
->fmt
))
752 } else if (ev
->type
== EVTR_TYPE_STMT
) {
753 struct evtr_variable
*var
;
755 /* XXX: no need to do that *every* time */
756 parse_var(f
->var
, q
->symtab
, &var
, &q
->parse_err_buf
[0],
759 * Ignore errors, they're expected since the
760 * variable might not be instantiated yet
762 if (var
!= ev
->stmt
.var
)
770 evtr_match_filters(struct evtr_query
*q
, evtr_event_t ev
)
774 /* no filters means we're interested in all events */
778 for (i
= 0; i
< q
->nfilt
; ++i
) {
779 if (evtr_filter_match(q
, &q
->filt
[i
], ev
)) {
789 parse_callback(evtr_event_t ev
, void *d
)
791 evtr_query_t q
= (evtr_query_t
)d
;
792 if (ev
->type
!= EVTR_TYPE_PROBE
)
794 if (!ev
->fmt
|| (ev
->fmt
[0] != '#'))
797 * Copy the event to ->pending_event, then call
798 * the parser to convert it into a synthesized
799 * EVTR_TYPE_STMT event.
801 memcpy(&q
->pending_event
, ev
, sizeof(*ev
));
802 parse_string(&q
->pending_event
, q
->symtab
, &ev
->fmt
[1],
803 &q
->parse_err_buf
[0], PARSE_ERR_BUFSIZE
);
804 if (q
->parse_err_buf
[0]) { /* parse error */
805 q
->errmsg
= &q
->parse_err_buf
[0];
808 if (!evtr_match_filters(q
, &q
->pending_event
))
811 * This will cause us to return ->pending_event next time
814 q
->flags
|= EVTRQF_PENDING
;
819 thread_creation_callback(evtr_event_t ev
, void *d
)
821 evtr_query_t q
= (evtr_query_t
)d
;
822 evtr_t evtr
= q
->evtr
;
823 struct evtr_thread
*td
;
827 if (parse_format_data(ev
, "new_td %p %s", &ktd
, buf
) != 2) {
832 if (!(td
= malloc(sizeof(*td
)))) {
838 if (!(td
->comm
= strdup(buf
))) {
843 printd(DS
, "inserting new thread %p: %s\n", td
->id
, td
->comm
);
844 thread_map_insert(&evtr
->threads
, td
);
849 thread_switch_callback(evtr_event_t ev
, void *d
)
851 evtr_t evtr
= ((evtr_query_t
)d
)->evtr
;
852 struct evtr_thread
*tdp
, *tdn
;
855 static struct evtr_event tdcr
;
856 static char *fmt
= "new_td %p %s";
860 cpu
= evtr_cpu(evtr
, ev
->cpu
);
862 printw("invalid cpu %d\n", ev
->cpu
);
865 if (parse_format_data(ev
, "sw %p > %p", &ktdp
, &ktdn
) != 2) {
868 tdp
= thread_map_find(&evtr
->threads
, ktdp
);
870 printd(DS
, "switching from unknown thread %p\n", ktdp
);
872 tdn
= thread_map_find(&evtr
->threads
, ktdn
);
875 * Fake a thread creation event for threads we
876 * haven't seen before.
878 tdcr
.type
= EVTR_TYPE_PROBE
;
884 tdcr
.fmtdata
= &fmtdata
;
885 tdcr
.fmtdatalen
= sizeof(fmtdata
);
888 snprintf(tidstr
, sizeof(tidstr
), "%p", ktdn
);
891 thread_creation_callback(&tdcr
, d
);
893 tdn
= thread_map_find(&evtr
->threads
, ktdn
);
895 printd(DS
, "switching to unknown thread %p\n", ktdn
);
899 printd(DS
, "cpu %d: switching to thread %p\n", ev
->cpu
, ktdn
);
905 assert_foff_in_sync(evtr_t evtr
)
910 * We keep our own offset because we
911 * might want to support mmap()
913 off
= ftello(evtr
->f
);
914 if (evtr
->bytes
!= off
) {
915 fprintf(stderr
, "bytes %jd, off %jd\n", evtr
->bytes
, off
);
922 evtr_write(evtr_t evtr
, const void *buf
, size_t bytes
)
924 assert_foff_in_sync(evtr
);
925 if (fwrite(buf
, bytes
, 1, evtr
->f
) != 1) {
927 evtr
->errmsg
= strerror(errno
);
930 evtr
->bytes
+= bytes
;
931 assert_foff_in_sync(evtr
);
936 * Called after dumping a record to make sure the next
937 * record is REC_ALIGN aligned. This does not make much sense,
938 * as we shouldn't be using packed structs anyway.
942 evtr_dump_pad(evtr_t evtr
)
945 static char buf
[REC_ALIGN
];
947 pad
= REC_ALIGN
- (evtr
->bytes
% REC_ALIGN
);
949 return evtr_write(evtr
, buf
, pad
);
955 * We make sure that there is a new record every REC_BOUNDARY
956 * bytes, this costs next to nothing in space and allows for
961 evtr_dump_avoid_boundary(evtr_t evtr
, size_t bytes
)
964 static char buf
[256];
966 pad
= REC_BOUNDARY
- (evtr
->bytes
% REC_BOUNDARY
);
967 /* if adding @bytes would cause us to cross a boundary... */
969 /* then pad to the boundary */
970 for (i
= 0; i
< (pad
/ sizeof(buf
)); ++i
) {
971 if (evtr_write(evtr
, buf
, sizeof(buf
))) {
975 i
= pad
% sizeof(buf
);
977 if (evtr_write(evtr
, buf
, i
)) {
987 evtr_dump_fmt(evtr_t evtr
, uint64_t ts
, const evtr_event_t ev
)
989 struct fmt_event_header fmt
;
992 char *subsys
= "", buf
[1024];
994 if (strlcpy(buf
, subsys
, sizeof(buf
)) >= sizeof(buf
)) {
995 evtr
->errmsg
= "name of subsystem is too large";
999 if (strlcat(buf
, ev
->fmt
, sizeof(buf
)) >= sizeof(buf
)) {
1000 evtr
->errmsg
= "fmt + name of subsystem is too large";
1005 if (!strhash_find(evtr
->fmts
, buf
, &id
)) {
1008 if ((err
= strhash_insert(evtr
->fmts
, buf
, &id
))) {
1013 fmt
.eh
.type
= EVTR_TYPE_FMT
;
1015 fmt
.subsys_len
= strlen(subsys
);
1016 fmt
.fmt_len
= strlen(ev
->fmt
);
1018 if (evtr_dump_avoid_boundary(evtr
, sizeof(fmt
) + fmt
.subsys_len
+
1021 if (evtr_write(evtr
, &fmt
, sizeof(fmt
)))
1023 if (evtr_write(evtr
, subsys
, fmt
.subsys_len
))
1025 if (evtr_write(evtr
, ev
->fmt
, fmt
.fmt_len
))
1027 if (evtr_dump_pad(evtr
))
1033 * Replace string pointers or string ids in fmtdata
1037 mangle_string_ptrs(const char *fmt
, uint8_t *fmtdata
,
1038 const char *(*replace
)(void *, const char *), void *ctx
)
1041 size_t skipsize
, intsz
;
1044 for (f
= fmt
; f
[0] != '\0'; ++f
) {
1049 for (p
= f
; p
[0]; ++p
) {
1052 * Eat flags. Notice this will accept duplicate
1068 /* Eat minimum field width, if any */
1069 for (; isdigit(p
[0]); ++p
)
1073 /* Eat precision, if any */
1074 for (; isdigit(p
[0]); ++p
)
1081 intsz
= sizeof(long long);
1083 intsz
= sizeof(long);
1087 intsz
= sizeof(intmax_t);
1090 intsz
= sizeof(ptrdiff_t);
1093 intsz
= sizeof(size_t);
1101 intsz
= sizeof(int);
1114 skipsize
= sizeof(void *);
1118 skipsize
= sizeof(double);
1120 skipsize
= sizeof(float);
1123 ((const char **)fmtdata
)[0] =
1124 replace(ctx
, ((char **)fmtdata
)[0]);
1125 skipsize
= sizeof(char *);
1129 fprintf(stderr
, "Unknown conversion specifier %c "
1130 "in fmt starting with %s", p
[0], f
- 1);
1133 fmtdata
+= skipsize
;
1138 /* XXX: do we really want the timestamp? */
1141 evtr_dump_string(evtr_t evtr
, uint64_t ts
, const char *str
, int ns
)
1143 struct string_event_header s
;
1147 assert((0 <= ns
) && (ns
< EVTR_NS_MAX
));
1148 if (!strhash_find(evtr
->strings
[ns
], str
, &id
)) {
1151 if ((err
= strhash_insert(evtr
->strings
[ns
], str
, &id
))) {
1156 printd(DS
, "hash_insert %s ns %d id %d\n", str
, ns
, id
);
1157 s
.eh
.type
= EVTR_TYPE_STR
;
1161 s
.len
= strnlen(str
, PATH_MAX
);
1163 if (evtr_dump_avoid_boundary(evtr
, sizeof(s
) + s
.len
))
1165 if (evtr_write(evtr
, &s
, sizeof(s
)))
1167 if (evtr_write(evtr
, str
, s
.len
))
1169 if (evtr_dump_pad(evtr
))
1174 struct replace_ctx
{
1181 replace_strptr(void *_ctx
, const char *s
)
1183 struct replace_ctx
*ctx
= _ctx
;
1184 return (const char *)(uintptr_t)evtr_dump_string(ctx
->evtr
, ctx
->ts
, s
,
1190 replace_strid(void *_ctx
, const char *s
)
1192 struct replace_ctx
*ctx
= _ctx
;
1195 ret
= string_map_find(&ctx
->evtr
->maps
[EVTR_NS_DSTR
- 1].root
,
1198 fprintf(stderr
, "Unknown id for data string\n");
1199 ctx
->evtr
->errmsg
= "unknown id for data string";
1200 ctx
->evtr
->err
= !0;
1202 validate_string(ret
);
1203 printd(DS
, "replacing strid %d (ns %d) with string '%s' (or int %#x)\n",
1204 (int)(uintptr_t)s
, EVTR_NS_DSTR
, ret
? ret
: "NULL", (int)(uintptr_t)ret
);
1210 evtr_dump_probe(evtr_t evtr
, evtr_event_t ev
)
1212 struct probe_event_header kev
;
1215 memset(&kev
, '\0', sizeof(kev
));
1216 kev
.eh
.type
= ev
->type
;
1218 kev
.line
= ev
->line
;
1221 kev
.file
= evtr_dump_string(evtr
, kev
.eh
.ts
, ev
->file
,
1225 kev
.func
= evtr_dump_string(evtr
, kev
.eh
.ts
, ev
->func
,
1229 kev
.fmt
= evtr_dump_fmt(evtr
, kev
.eh
.ts
, ev
);
1232 struct replace_ctx replctx
= {
1236 assert(ev
->fmtdatalen
<= (int)sizeof(buf
));
1237 kev
.datalen
= ev
->fmtdatalen
;
1239 * Replace all string pointers with string ids before dumping
1242 memcpy(buf
, ev
->fmtdata
, ev
->fmtdatalen
);
1243 if (mangle_string_ptrs(ev
->fmt
, buf
,
1244 replace_strptr
, &replctx
) < 0)
1249 if (evtr_dump_avoid_boundary(evtr
, sizeof(kev
) + ev
->fmtdatalen
))
1251 if (evtr_write(evtr
, &kev
, sizeof(kev
)))
1253 if (evtr_write(evtr
, buf
, ev
->fmtdatalen
))
1255 if (evtr_dump_pad(evtr
))
1262 evtr_dump_sysinfo(evtr_t evtr
, evtr_event_t ev
)
1264 uint8_t type
= EVTR_TYPE_SYSINFO
;
1265 uint16_t ncpus
= ev
->ncpus
;
1268 evtr
->errmsg
= "invalid number of cpus";
1271 if (evtr_dump_avoid_boundary(evtr
, sizeof(type
) + sizeof(ncpus
)))
1273 if (evtr_write(evtr
, &type
, sizeof(type
))) {
1276 if (evtr_write(evtr
, &ncpus
, sizeof(ncpus
))) {
1279 if (evtr_dump_pad(evtr
))
1285 evtr_dump_cpuinfo(evtr_t evtr
, evtr_event_t ev
)
1287 struct cpuinfo_event_header ci
;
1290 if (evtr_dump_avoid_boundary(evtr
, sizeof(type
) + sizeof(ci
)))
1292 type
= EVTR_TYPE_CPUINFO
;
1293 if (evtr_write(evtr
, &type
, sizeof(type
))) {
1297 ci
.freq
= ev
->cpuinfo
.freq
;
1298 if (evtr_dump_avoid_boundary(evtr
, sizeof(ci
)))
1300 if (evtr_write(evtr
, &ci
, sizeof(ci
))) {
1303 if (evtr_dump_pad(evtr
))
1309 evtr_rewind(evtr_t evtr
)
1311 assert((evtr
->flags
& EVTRF_WR
) == 0);
1313 if (fseek(evtr
->f
, 0, SEEK_SET
)) {
1321 evtr_dump_event(evtr_t evtr
, evtr_event_t ev
)
1324 case EVTR_TYPE_PROBE
:
1325 return evtr_dump_probe(evtr
, ev
);
1326 case EVTR_TYPE_SYSINFO
:
1327 return evtr_dump_sysinfo(evtr
, ev
);
1328 case EVTR_TYPE_CPUINFO
:
1329 return evtr_dump_cpuinfo(evtr
, ev
);
1331 evtr
->errmsg
= "unknown event type";
1340 if (!(evtr
= malloc(sizeof(*evtr
)))) {
1346 evtr
->errmsg
= NULL
;
1351 static int evtr_next_event(evtr_t
, evtr_event_t
);
1354 evtr_open_read(FILE *f
)
1357 struct evtr_event ev
;
1360 if (!(evtr
= evtr_alloc(f
))) {
1364 for (i
= 0; i
< (EVTR_NS_MAX
- 1); ++i
) {
1365 RB_INIT(&evtr
->maps
[i
].root
);
1367 RB_INIT(&evtr
->fmtmap
.root
);
1368 RB_INIT(&evtr
->threads
.root
);
1372 * Load the first event so we can pick up any
1375 if (evtr_next_event(evtr
, &ev
)) {
1378 if (evtr_rewind(evtr
))
1387 evtr_open_write(FILE *f
)
1392 if (!(evtr
= evtr_alloc(f
))) {
1396 evtr
->flags
= EVTRF_WR
;
1397 if (!(evtr
->fmts
= strhash_new()))
1399 for (i
= 0; i
< EVTR_NS_MAX
; ++i
) {
1400 evtr
->strings
[i
] = strhash_new();
1401 if (!evtr
->strings
[i
]) {
1402 for (j
= 0; j
< i
; ++j
) {
1403 strhash_destroy(evtr
->strings
[j
]);
1411 strhash_destroy(evtr
->fmts
);
1419 hashtab_destroy(struct hashtab
*h
)
1421 struct hashentry
*ent
, *next
;
1423 for (i
= 0; i
< NR_BUCKETS
; ++i
) {
1424 for (ent
= h
->buckets
[i
]; ent
; ent
= next
) {
1433 evtr_close(evtr_t evtr
)
1437 if (evtr
->flags
& EVTRF_WR
) {
1438 hashtab_destroy(&evtr
->fmts
->tab
);
1439 for (i
= 0; i
< EVTR_NS_MAX
- 1; ++i
)
1440 hashtab_destroy(&evtr
->strings
[i
]->tab
);
1442 id_tree_free(&evtr
->fmtmap
.root
);
1443 for (i
= 0; i
< EVTR_NS_MAX
- 1; ++i
) {
1444 id_tree_free(&evtr
->maps
[i
].root
);
1452 evtr_read(evtr_t evtr
, void *buf
, size_t size
)
1455 assert_foff_in_sync(evtr
);
1456 printd(IO
, "evtr_read at %#jx, %zu bytes\n", evtr
->bytes
, size
);
1457 if (fread(buf
, size
, 1, evtr
->f
) != 1) {
1458 if (feof(evtr
->f
)) {
1459 evtr
->errmsg
= "incomplete record";
1461 evtr
->errmsg
= strerror(errno
);
1465 evtr
->bytes
+= size
;
1466 assert_foff_in_sync(evtr
);
1472 evtr_load_fmt(evtr_query_t q
, char *buf
)
1474 evtr_t evtr
= q
->evtr
;
1475 struct fmt_event_header
*evh
= (struct fmt_event_header
*)buf
;
1476 struct event_fmt
*fmt
;
1477 char *subsys
= NULL
, *fmtstr
;
1479 if (!(fmt
= malloc(sizeof(*fmt
)))) {
1483 if (evtr_read(evtr
, buf
+ sizeof(struct trace_event_header
),
1484 sizeof(*evh
) - sizeof(evh
->eh
))) {
1487 assert(!evh
->subsys_len
);
1488 if (evh
->subsys_len
) {
1489 if (!(subsys
= malloc(evh
->subsys_len
))) {
1493 if (evtr_read(evtr
, subsys
, evh
->subsys_len
)) {
1496 fmt
->subsys
= subsys
;
1500 if (!(fmtstr
= malloc(evh
->fmt_len
+ 1))) {
1504 if (evtr_read(evtr
, fmtstr
, evh
->fmt_len
)) {
1507 fmtstr
[evh
->fmt_len
] = '\0';
1510 printd(DS
, "fmt_map_insert (%d, %s)\n", evh
->id
, fmt
->fmt
);
1511 evtr
->err
= fmt_map_insert(&evtr
->fmtmap
.root
, fmt
, evh
->id
);
1512 switch (evtr
->err
) {
1514 evtr
->errmsg
= "out of memory";
1517 evtr
->errmsg
= "redefinition of an id to a "
1518 "different format (corrupt input)";
1537 evtr_load_string(evtr_t evtr
, char *buf
)
1539 char sbuf
[PATH_MAX
+ 1];
1540 struct string_event_header
*evh
= (struct string_event_header
*)buf
;
1542 if (evtr_read(evtr
, buf
+ sizeof(struct trace_event_header
),
1543 sizeof(*evh
) - sizeof(evh
->eh
))) {
1546 if (evh
->len
> PATH_MAX
) {
1547 evtr
->errmsg
= "string too large (corrupt input)";
1550 if (evh
->len
&& evtr_read(evtr
, sbuf
, evh
->len
)) {
1554 if (evh
->ns
>= EVTR_NS_MAX
) {
1555 evtr
->errmsg
= "invalid namespace (corrupt input)";
1558 validate_string(sbuf
);
1559 printd(DS
, "evtr_load_string:ns %d id %d : \"%s\"\n", evh
->ns
, evh
->id
,
1561 evtr
->err
= string_map_insert(&evtr
->maps
[evh
->ns
- 1].root
, sbuf
, evh
->id
);
1562 switch (evtr
->err
) {
1564 evtr
->errmsg
= "out of memory";
1567 evtr
->errmsg
= "redefinition of an id to a "
1568 "different string (corrupt input)";
1578 evtr_skip(evtr_t evtr
, off_t bytes
)
1580 if (fseek(evtr
->f
, bytes
, SEEK_CUR
)) {
1582 evtr
->errmsg
= strerror(errno
);
1585 evtr
->bytes
+= bytes
;
1590 * Make sure q->buf is at least len bytes
1594 evtr_query_reserve_buf(struct evtr_query
*q
, int len
)
1598 if (q
->bufsize
>= len
)
1600 if (!(tmp
= realloc(q
->buf
, len
)))
1609 evtr_load_probe(evtr_t evtr
, evtr_event_t ev
, char *buf
, struct evtr_query
*q
)
1611 struct probe_event_header
*evh
= (struct probe_event_header
*)buf
;
1614 if (evtr_read(evtr
, buf
+ sizeof(struct trace_event_header
),
1615 sizeof(*evh
) - sizeof(evh
->eh
)))
1617 memset(ev
, '\0', sizeof(*ev
));
1618 ev
->ts
= evh
->eh
.ts
;
1619 ev
->type
= EVTR_TYPE_PROBE
;
1620 ev
->line
= evh
->line
;
1622 if ((cpu
= evtr_cpu(evtr
, evh
->cpu
))) {
1628 ev
->file
= string_map_find(
1629 &evtr
->maps
[EVTR_NS_PATH
- 1].root
,
1632 evtr
->errmsg
= "unknown id for file path";
1634 ev
->file
= "<unknown>";
1636 validate_string(ev
->file
);
1639 ev
->file
= "<unknown>";
1642 const struct event_fmt
*fmt
;
1643 if (!(fmt
= fmt_map_find(&evtr
->fmtmap
.root
, evh
->fmt
))) {
1644 evtr
->errmsg
= "unknown id for event fmt";
1649 validate_string(fmt
->fmt
);
1653 if (evtr_query_reserve_buf(q
, evh
->datalen
+ 1)) {
1655 } else if (!evtr_read(evtr
, q
->buf
, evh
->datalen
)) {
1656 struct replace_ctx replctx
= {
1662 ev
->fmtdata
= q
->buf
;
1664 * If the format specifies any string pointers, there
1665 * is a string id stored in the fmtdata. Look it up
1666 * and replace it with a string pointer before
1667 * returning it to the user.
1669 if (mangle_string_ptrs(ev
->fmt
, __DECONST(uint8_t *,
1671 replace_strid
, &replctx
) < 0)
1675 ((char *)ev
->fmtdata
)[evh
->datalen
] = '\0';
1676 ev
->fmtdatalen
= evh
->datalen
;
1679 evtr_run_callbacks(ev
, q
);
1685 evtr_skip_to_record(evtr_t evtr
)
1689 skip
= REC_ALIGN
- (evtr
->bytes
% REC_ALIGN
);
1691 if (fseek(evtr
->f
, skip
, SEEK_CUR
)) {
1693 evtr
->errmsg
= strerror(errno
);
1696 evtr
->bytes
+= skip
;
1703 evtr_load_sysinfo(evtr_t evtr
)
1708 if (evtr_read(evtr
, &ncpus
, sizeof(ncpus
))) {
1713 evtr
->cpus
= malloc(ncpus
* sizeof(struct cpu
));
1718 evtr
->ncpus
= ncpus
;
1719 for (i
= 0; i
< ncpus
; ++i
) {
1720 evtr
->cpus
[i
].td
= NULL
;
1721 evtr
->cpus
[i
].freq
= -1.0;
1728 evtr_load_cpuinfo(evtr_t evtr
)
1730 struct cpuinfo_event_header cih
;
1733 if (evtr_read(evtr
, &cih
, sizeof(cih
))) {
1736 if (cih
.freq
< 0.0) {
1737 evtr
->errmsg
= "cpu freq is negative";
1742 * Notice that freq is merely a multiplier with
1743 * which we convert a timestamp to seconds; if
1744 * ts is not in cycles, freq is not the frequency.
1746 if (!(cpu
= evtr_cpu(evtr
, cih
.cpu
))) {
1747 evtr
->errmsg
= "freq for invalid cpu";
1751 cpu
->freq
= cih
.freq
;
1757 _evtr_next_event(evtr_t evtr
, evtr_event_t ev
, struct evtr_query
*q
)
1759 char buf
[MAX_EVHDR_SIZE
];
1761 struct trace_event_header
*evhdr
= (struct trace_event_header
*)buf
;
1763 for (ret
= 0; !ret
;) {
1764 if (q
->flags
& EVTRQF_PENDING
) {
1765 q
->off
= evtr
->bytes
;
1766 memcpy(ev
, &q
->pending_event
, sizeof(*ev
));
1767 q
->flags
&= ~EVTRQF_PENDING
;
1770 if (evtr_read(evtr
, &evhdr
->type
, 1)) {
1771 if (feof(evtr
->f
)) {
1772 evtr
->errmsg
= NULL
;
1779 * skip pad records -- this will only happen if there's a
1780 * variable sized record close to the boundary
1782 if (evhdr
->type
== EVTR_TYPE_PAD
) {
1783 evtr_skip_to_record(evtr
);
1786 if (evhdr
->type
== EVTR_TYPE_SYSINFO
) {
1787 evtr_load_sysinfo(evtr
);
1789 } else if (evhdr
->type
== EVTR_TYPE_CPUINFO
) {
1790 evtr_load_cpuinfo(evtr
);
1793 if (evtr_read(evtr
, buf
+ 1, sizeof(*evhdr
) - 1))
1794 return feof(evtr
->f
) ? -1 : !0;
1795 switch (evhdr
->type
) {
1796 case EVTR_TYPE_PROBE
:
1797 if ((err
= evtr_load_probe(evtr
, ev
, buf
, q
))) {
1809 if (evtr_load_string(evtr
, buf
)) {
1814 if (evtr_load_fmt(q
, buf
)) {
1820 evtr
->errmsg
= "unknown event type (corrupt input?)";
1823 evtr_skip_to_record(evtr
);
1825 if (!evtr_match_filters(q
, ev
)) {
1829 q
->off
= evtr
->bytes
;
1833 /* can't get here */
1839 evtr_next_event(evtr_t evtr
, evtr_event_t ev
)
1841 struct evtr_query
*q
;
1844 if (!(q
= evtr_query_init(evtr
, NULL
, 0))) {
1848 ret
= _evtr_next_event(evtr
, ev
, q
);
1849 evtr_query_destroy(q
);
1854 evtr_last_event(evtr_t evtr
, evtr_event_t ev
)
1858 off_t last_boundary
;
1860 if (evtr_error(evtr
))
1863 fd
= fileno(evtr
->f
);
1867 * This skips pseudo records, so we can't provide
1868 * an event with all fields filled in this way.
1869 * It's doable, just needs some care. TBD.
1871 if (0 && (st
.st_mode
& S_IFREG
)) {
1873 * Skip to last boundary, that's the closest to the EOF
1874 * location that we are sure contains a header so we can
1875 * pick up the stream.
1877 last_boundary
= rounddown(st
.st_size
, REC_BOUNDARY
);
1878 /* XXX: ->bytes should be in query */
1879 assert(evtr
->bytes
== 0);
1880 evtr_skip(evtr
, last_boundary
);
1885 * If we can't seek, we need to go through the whole file.
1886 * Since you can't seek back, this is pretty useless unless
1887 * you really are interested only in the last event.
1889 while (!evtr_next_event(evtr
, ev
))
1891 if (evtr_error(evtr
))
1898 evtr_query_init(evtr_t evtr
, evtr_filter_t filt
, int nfilt
)
1900 struct evtr_query
*q
;
1903 if (!(q
= malloc(sizeof(*q
)))) {
1907 if (!(q
->buf
= malloc(q
->bufsize
))) {
1910 if (!(q
->symtab
= symtab_new()))
1916 TAILQ_INIT(&q
->unresolved_filtq
);
1921 memset(&q
->pending_event
, '\0', sizeof(q
->pending_event
));
1922 if (evtr_register_callback(q
, &thread_creation_callback
, q
)) {
1925 if (evtr_register_callback(q
, &thread_switch_callback
, q
)) {
1928 if (evtr_query_needs_parsing(q
) &&
1929 evtr_register_callback(q
, &parse_callback
, q
)) {
1933 for (i
= 0; i
< nfilt
; ++i
) {
1935 if (filt
[i
].fmt
== NULL
)
1937 if (evtr_filter_register(q
, &filt
[i
])) {
1938 evtr_deregister_filters(q
, filt
, i
);
1945 evtr_deregister_callbacks(q
);
1947 symtab_destroy(q
->symtab
);
1956 evtr_query_destroy(struct evtr_query
*q
)
1958 evtr_deregister_filters(q
, q
->filt
, q
->nfilt
);
1965 evtr_query_next(struct evtr_query
*q
, evtr_event_t ev
)
1967 if (evtr_query_error(q
))
1969 /* we may support that in the future */
1970 if (q
->off
!= q
->evtr
->bytes
) {
1971 q
->errmsg
= "evtr/query offset mismatch";
1974 return _evtr_next_event(q
->evtr
, ev
, q
);
1978 evtr_ncpus(evtr_t evtr
)
1984 evtr_cpufreqs(evtr_t evtr
, double *freqs
)
1990 for (i
= 0; i
< evtr
->ncpus
; ++i
) {
1991 freqs
[i
] = evtr
->cpus
[i
].freq
;