/*
 * tools/perf/util/parse-events.c
 *
 * Snapshot as of the "perf: Enable more compiler warnings" commit.
 */

#include "../perf.h"
#include "util.h"
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
#include "string.h"
#include "cache.h"

extern char *strcasestr(const char *haystack, const char *needle);

int nr_counters;

struct perf_counter_attr attrs[MAX_COUNTERS];

struct event_symbol {
	u8		type;
	u64		config;
	const char	*symbol;
	const char	*alias;
};

char debugfs_path[MAXPATHLEN];
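
/*
 * Shorthand initializers for the symbol table below: CHW(x) expands to a
 * hardware event (PERF_TYPE_HARDWARE) and CSW(x) to a software event
 * (PERF_TYPE_SOFTWARE), each paired with the matching PERF_COUNT_* config.
 */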
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x

static struct event_symbol event_symbols[] = {
	{ CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
	{ CHW(INSTRUCTIONS), "instructions", "" },
	{ CHW(CACHE_REFERENCES), "cache-references", "" },
	{ CHW(CACHE_MISSES), "cache-misses", "" },
	{ CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
	{ CHW(BRANCH_MISSES), "branch-misses", "" },
	{ CHW(BUS_CYCLES), "bus-cycles", "" },

	{ CSW(CPU_CLOCK), "cpu-clock", "" },
	{ CSW(TASK_CLOCK), "task-clock", "" },
	{ CSW(PAGE_FAULTS), "page-faults", "faults" },
	{ CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
	{ CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
	{ CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
	{ CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
};

#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)
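
/*
 * These name tables are indexed directly by attr.config in __event_name(),
 * so their order must match the PERF_COUNT_HW_* and PERF_COUNT_SW_*
 * enumerations.
 */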
static const char *hw_event_names[] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
};

static const char *sw_event_names[] = {
	"cpu-clock-msecs",
	"task-clock-msecs",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
};
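
/*
 * Alias tables for the generic hardware cache events: the first column of
 * each row is the canonical name, the remaining entries are accepted
 * spellings. parse_generic_hw_event() matches against these via
 * parse_aliases(), and the row index doubles as the config field value.
 */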
#define MAX_ALIASES 8

static const char *hw_cache[][MAX_ALIASES] = {
	{ "L1-dcache", "l1-d", "l1d", "L1-data", },
	{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
	{ "LLC", "L2" },
	{ "dTLB", "d-tlb", "Data-TLB", },
	{ "iTLB", "i-tlb", "Instruction-TLB", },
	{ "branch", "branches", "bpu", "btb", "bpc", },
};

static const char *hw_cache_op[][MAX_ALIASES] = {
	{ "load", "loads", "read", },
	{ "store", "stores", "write", },
	{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

static const char *hw_cache_result[][MAX_ALIASES] = {
	{ "refs", "Reference", "ops", "access", },
	{ "misses", "miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long hw_cache_stat[C(MAX)] = {
 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)] = (CACHE_READ),
};
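
/*
 * Iterate over every subsystem directory under debugfs_path (the mounted
 * tracing/events tree), skipping "." and "..". 'file' is filled with the
 * subsystem path and 'st' with its stat result; only directories are visited.
 */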
#define for_each_subsystem(sys_dir, sys_dirent, sys_next, file, st)	\
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	\
	if (snprintf(file, MAXPATHLEN, "%s/%s", debugfs_path,		\
		     sys_dirent.d_name) &&				\
	   (!stat(file, &st)) && (S_ISDIR(st.st_mode)) &&		\
	   (strcmp(sys_dirent.d_name, ".")) &&				\
	   (strcmp(sys_dirent.d_name, "..")))

static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
{
	char evt_path[MAXPATHLEN];
	int fd;

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
		 sys_dir->d_name, evt_dir->d_name);
	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return -EINVAL;
	close(fd);

	return 0;
}
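
/*
 * Iterate over every event directory under one subsystem, again skipping "."
 * and "..", and only visiting events that expose an "id" file (checked by
 * tp_event_has_id() above).
 */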
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st) \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)	\
	if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path,	\
		     sys_dirent.d_name, evt_dirent.d_name) &&		\
	   (!stat(file, &st)) && (S_ISDIR(st.st_mode)) &&		\
	   (strcmp(evt_dirent.d_name, ".")) &&				\
	   (strcmp(evt_dirent.d_name, "..")) &&				\
	   (!tp_event_has_id(&sys_dirent, &evt_dirent)))

#define MAX_EVENT_LENGTH 30
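
/*
 * Sanity-check that 'debugfs' really lives on a mounted debugfs instance by
 * comparing the filesystem magic returned by statfs().
 */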
int valid_debugfs_mount(const char *debugfs)
{
	struct statfs st_fs;

	if (statfs(debugfs, &st_fs) < 0)
		return -ENOENT;
	else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
		return -ENOENT;
	return 0;
}
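
/*
 * Map a tracepoint config value back to a "subsystem:event" name by scanning
 * every <sys>/<event>/id file under debugfs until the stored id matches.
 * Returns a pointer to a static buffer, so the result is only valid until
 * the next call.
 */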
static const char *tracepoint_id_to_name(u64 config)
{
	static char tracepoint_name[2 * MAX_EVENT_LENGTH];
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	struct stat st;
	char id_buf[4];
	int fd;
	u64 id;
	char evt_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return "unknown";

	sys_dir = opendir(debugfs_path);
	if (!sys_dir)
		return "unknown";	/* don't closedir(NULL) via cleanup */

	for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
		evt_dir = opendir(evt_path);
		if (!evt_dir)
			goto cleanup;
		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
			       evt_path, st) {
			snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id",
				 debugfs_path, sys_dirent.d_name,
				 evt_dirent.d_name);
			fd = open(evt_path, O_RDONLY);
			if (fd < 0)
				continue;
			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
				close(fd);
				continue;
			}
			close(fd);
			id = atoll(id_buf);
			if (id == config) {
				closedir(evt_dir);
				closedir(sys_dir);
				snprintf(tracepoint_name, 2 * MAX_EVENT_LENGTH,
					 "%s:%s", sys_dirent.d_name,
					 evt_dirent.d_name);
				return tracepoint_name;
			}
		}
		closedir(evt_dir);
	}

cleanup:
	closedir(sys_dir);
	return "unknown";
}

static int is_cache_op_valid(u8 cache_type, u8 cache_op)
{
	if (hw_cache_stat[cache_type] & COP(cache_op))
		return 1;	/* valid */
	else
		return 0;	/* invalid */
}
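
/*
 * Build the canonical "<cache>-<op>-<result>" display name. When cache_result
 * is 0 (plain accesses) the shorter plural form of the op is used instead,
 * e.g. "L1-dcache-loads". Returns a static buffer.
 */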
static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
{
	static char name[50];

	if (cache_result) {
		sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][0],
			hw_cache_result[cache_result][0]);
	} else {
		sprintf(name, "%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][1]);
	}

	return name;
}

const char *event_name(int counter)
{
	u64 config = attrs[counter].config;
	int type = attrs[counter].type;

	return __event_name(type, config);
}

const char *__event_name(int type, u64 config)
{
	static char buf[32];

	if (type == PERF_TYPE_RAW) {
		sprintf(buf, "raw 0x%llx", config);
		return buf;
	}

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config < PERF_COUNT_HW_MAX)
			return hw_event_names[config];
		return "unknown-hardware";

	case PERF_TYPE_HW_CACHE: {
		u8 cache_type, cache_op, cache_result;

		/* *_MAX is one past the last valid table index, hence >= */
		cache_type = (config >>  0) & 0xff;
		if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
			return "unknown-ext-hardware-cache-type";

		cache_op = (config >>  8) & 0xff;
		if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
			return "unknown-ext-hardware-cache-op";

		cache_result = (config >> 16) & 0xff;
		if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
			return "unknown-ext-hardware-cache-result";

		if (!is_cache_op_valid(cache_type, cache_op))
			return "invalid-cache";

		return event_cache_name(cache_type, cache_op, cache_result);
	}

	case PERF_TYPE_SOFTWARE:
		if (config < PERF_COUNT_SW_MAX)
			return sw_event_names[config];
		return "unknown-software";

	case PERF_TYPE_TRACEPOINT:
		return tracepoint_id_to_name(config);

	default:
		break;
	}

	return "unknown";
}
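
/*
 * Match *str against the rows of an alias table: for the first row that has a
 * matching alias, advance *str past the longest case-insensitive match and
 * return the row index; -1 means nothing matched.
 */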
static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(*str, names[i][j], n))
				longest = n;
		}
		if (longest > 0) {
			*str += longest;
			return i;
		}
	}

	return -1;
}

static int
parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
{
	const char *s = *str;
	int cache_type = -1, cache_op = -1, cache_result = -1;

	cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	if (cache_type == -1)
		return 0;

	while ((cache_op == -1 || cache_result == -1) && *s == '-') {
		++s;

		if (cache_op == -1) {
			cache_op = parse_aliases(&s, hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!is_cache_op_valid(cache_type, cache_op))
					return 0;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(&s, hw_cache_result,
						PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}

		/*
		 * Can't parse this as a cache op or result, so back up
		 * to the '-'.
		 */
		--s;
		break;
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr->type = PERF_TYPE_HW_CACHE;

	*str = s;
	return 1;
}
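
/*
 * Parse a "subsystem:event[:record]" tracepoint spec. The numeric event id is
 * read from the debugfs "id" file and stored in attr->config; the optional
 * ":record" suffix requests raw sample data (PERF_SAMPLE_RAW).
 */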
static int parse_tracepoint_event(const char **strp,
				  struct perf_counter_attr *attr)
{
	const char *evt_name;
	char *flags;
	char sys_name[MAX_EVENT_LENGTH];
	char id_buf[4];
	int fd;
	unsigned int sys_length, evt_length;
	u64 id;
	char evt_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return 0;

	evt_name = strchr(*strp, ':');
	if (!evt_name)
		return 0;

	sys_length = evt_name - *strp;
	if (sys_length >= MAX_EVENT_LENGTH)
		return 0;

	strncpy(sys_name, *strp, sys_length);
	sys_name[sys_length] = '\0';
	evt_name = evt_name + 1;

	flags = strchr(evt_name, ':');
	if (flags) {
		*flags = '\0';
		flags++;
		if (!strncmp(flags, "record", strlen(flags)))
			attr->sample_type |= PERF_SAMPLE_RAW;
	}

	evt_length = strlen(evt_name);
	if (evt_length >= MAX_EVENT_LENGTH)
		return 0;

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
		 sys_name, evt_name);
	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return 0;

	if (read(fd, id_buf, sizeof(id_buf)) < 0) {
		close(fd);
		return 0;
	}
	close(fd);
	id = atoll(id_buf);
	attr->config = id;
	attr->type = PERF_TYPE_TRACEPOINT;
	*strp = evt_name + evt_length;
	return 1;
}

static int check_events(const char *str, unsigned int i)
{
	int n;

	n = strlen(event_symbols[i].symbol);
	if (!strncmp(str, event_symbols[i].symbol, n))
		return n;

	n = strlen(event_symbols[i].alias);
	if (n)
		if (!strncmp(str, event_symbols[i].alias, n))
			return n;
	return 0;
}

static int
parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	unsigned int i;
	int n;

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
		n = check_events(str, i);
		if (n > 0) {
			attr->type = event_symbols[i].type;
			attr->config = event_symbols[i].config;
			*strp = str + n;
			return 1;
		}
	}
	return 0;
}
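
/*
 * Parse a raw hardware event given as "rNNN", where NNN is the raw config in
 * hexadecimal (e.g. "r1a8").
 */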
static int parse_raw_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	u64 config;
	int n;

	if (*str != 'r')
		return 0;
	n = hex2u64(str + 1, &config);
	if (n > 0) {
		*strp = str + n + 1;
		attr->type = PERF_TYPE_RAW;
		attr->config = config;
		return 1;
	}
	return 0;
}
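
/*
 * Parse an explicit "<type>:<config>" pair of numbers, e.g. "1:1" for the
 * software task-clock event (PERF_TYPE_SOFTWARE:PERF_COUNT_SW_TASK_CLOCK).
 */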
static int
parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	char *endp;
	unsigned long type;
	u64 config;

	type = strtoul(str, &endp, 0);
	if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
		str = endp + 1;
		config = strtoul(str, &endp, 0);
		if (endp > str) {
			attr->type = type;
			attr->config = config;
			*strp = endp;
			return 1;
		}
	}
	return 0;
}
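
/*
 * Parse an optional ":u"/":k"/":h" modifier suffix that restricts counting to
 * user, kernel or hypervisor context: the exclude_* bits default to set and
 * each listed letter clears the corresponding exclusion.
 */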
static int
parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	int eu = 1, ek = 1, eh = 1;

	if (*str++ != ':')
		return 0;
	while (*str) {
		if (*str == 'u')
			eu = 0;
		else if (*str == 'k')
			ek = 0;
		else if (*str == 'h')
			eh = 0;
		else
			break;
		++str;
	}
	if (str >= *strp + 2) {
		*strp = str;
		attr->exclude_user = eu;
		attr->exclude_kernel = ek;
		attr->exclude_hv = eh;
		return 1;
	}
	return 0;
}

/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 */
static int parse_event_symbols(const char **str, struct perf_counter_attr *attr)
{
	if (!(parse_tracepoint_event(str, attr) ||
	      parse_raw_event(str, attr) ||
	      parse_numeric_event(str, attr) ||
	      parse_symbolic_event(str, attr) ||
	      parse_generic_hw_event(str, attr)))
		return 0;

	parse_event_modifier(str, attr);

	return 1;
}
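
/*
 * parse-options callback for -e: parses a comma/whitespace separated list of
 * event specs into attrs[]. Examples accepted by the parsers above:
 *
 *	cycles:u		symbolic hardware event, user space only
 *	L1-dcache-load-misses	generic hardware cache event
 *	r1a8			raw hardware event
 *	1:1			explicit <type>:<config> pair
 *	sched:sched_switch	tracepoint event
 */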
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
	struct perf_counter_attr attr;

	for (;;) {
		if (nr_counters == MAX_COUNTERS)
			return -1;

		memset(&attr, 0, sizeof(attr));
		if (!parse_event_symbols(&str, &attr))
			return -1;

		if (!(*str == 0 || *str == ',' || isspace(*str)))
			return -1;

		attrs[nr_counters] = attr;
		nr_counters++;

		if (*str == 0)
			break;
		if (*str == ',')
			++str;
		while (isspace(*str))
			++str;
	}

	return 0;
}

static const char * const event_type_descriptors[] = {
	"",			/* unused placeholder: indexed as attr.type + 1 below */
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
};

/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */
static void print_tracepoint_events(void)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	struct stat st;
	char evt_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return;

	sys_dir = opendir(debugfs_path);
	if (!sys_dir)
		return;	/* don't closedir(NULL) via cleanup */

	for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
		evt_dir = opendir(evt_path);
		if (!evt_dir)
			goto cleanup;
		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
			       evt_path, st) {
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			fprintf(stderr, " %-40s [%s]\n", evt_path,
				event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
		}
		closedir(evt_dir);
	}

cleanup:
	closedir(sys_dir);
}

/*
 * Print the help text for the event symbols:
 */
void print_events(void)
{
	struct event_symbol *syms = event_symbols;
	unsigned int i, type, op, prev_type = -1;
	char name[40];

	fprintf(stderr, "\n");
	fprintf(stderr, "List of pre-defined events (to be used in -e):\n");

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
		type = syms->type + 1;
		if (type >= ARRAY_SIZE(event_type_descriptors))
			type = 0;

		if (type != prev_type)
			fprintf(stderr, "\n");

		if (strlen(syms->alias))
			sprintf(name, "%s OR %s", syms->symbol, syms->alias);
		else
			strcpy(name, syms->symbol);
		fprintf(stderr, " %-40s [%s]\n", name,
			event_type_descriptors[type]);

		prev_type = type;
	}

	fprintf(stderr, "\n");
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				fprintf(stderr, " %-40s [%s]\n",
					event_cache_name(type, op, i),
					event_type_descriptors[4]);
			}
		}
	}

	fprintf(stderr, "\n");
	fprintf(stderr, " %-40s [raw hardware event descriptor]\n",
		"rNNN");
	fprintf(stderr, "\n");

	print_tracepoint_events();

	exit(129);
}